Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/Kbuild | 2
-rw-r--r--  include/asm-x86/a.out-core.h | 6
-rw-r--r--  include/asm-x86/a.out.h | 6
-rw-r--r--  include/asm-x86/acpi.h | 13
-rw-r--r--  include/asm-x86/agp.h | 6
-rw-r--r--  include/asm-x86/alternative.h | 8
-rw-r--r--  include/asm-x86/amd_iommu.h | 35
-rw-r--r--  include/asm-x86/amd_iommu_types.h | 404
-rw-r--r--  include/asm-x86/apic.h | 116
-rw-r--r--  include/asm-x86/apicdef.h | 9
-rw-r--r--  include/asm-x86/arch_hooks.h | 9
-rw-r--r--  include/asm-x86/asm.h | 66
-rw-r--r--  include/asm-x86/atomic_32.h | 6
-rw-r--r--  include/asm-x86/atomic_64.h | 46
-rw-r--r--  include/asm-x86/auxvec.h | 6
-rw-r--r--  include/asm-x86/bigsmp/apic.h (renamed from include/asm-x86/mach-bigsmp/mach_apic.h) | 10
-rw-r--r--  include/asm-x86/bigsmp/apicdef.h (renamed from include/asm-x86/mach-es7000/mach_apicdef.h) | 6
-rw-r--r--  include/asm-x86/bigsmp/ipi.h (renamed from include/asm-x86/mach-bigsmp/mach_ipi.h) | 0
-rw-r--r--  include/asm-x86/bios_ebda.h | 25
-rw-r--r--  include/asm-x86/bitops.h | 86
-rw-r--r--  include/asm-x86/boot.h | 8
-rw-r--r--  include/asm-x86/bootparam.h | 8
-rw-r--r--  include/asm-x86/bug.h | 6
-rw-r--r--  include/asm-x86/bugs.h | 11
-rw-r--r--  include/asm-x86/byteorder.h | 6
-rw-r--r--  include/asm-x86/cache.h | 6
-rw-r--r--  include/asm-x86/cacheflush.h | 13
-rw-r--r--  include/asm-x86/calgary.h | 6
-rw-r--r--  include/asm-x86/calling.h | 6
-rw-r--r--  include/asm-x86/checksum_32.h | 6
-rw-r--r--  include/asm-x86/checksum_64.h | 6
-rw-r--r--  include/asm-x86/cmpxchg_32.h | 6
-rw-r--r--  include/asm-x86/cmpxchg_64.h | 43
-rw-r--r--  include/asm-x86/compat.h | 6
-rw-r--r--  include/asm-x86/cpu.h | 6
-rw-r--r--  include/asm-x86/cpufeature.h | 130
-rw-r--r--  include/asm-x86/current.h | 42
-rw-r--r--  include/asm-x86/current_32.h | 17
-rw-r--r--  include/asm-x86/current_64.h | 27
-rw-r--r--  include/asm-x86/debugreg.h | 6
-rw-r--r--  include/asm-x86/delay.h | 6
-rw-r--r--  include/asm-x86/desc.h | 71
-rw-r--r--  include/asm-x86/desc_defs.h | 10
-rw-r--r--  include/asm-x86/device.h | 9
-rw-r--r--  include/asm-x86/div64.h | 6
-rw-r--r--  include/asm-x86/dma-mapping.h | 201
-rw-r--r--  include/asm-x86/dma.h | 6
-rw-r--r--  include/asm-x86/dmi.h | 14
-rw-r--r--  include/asm-x86/ds.h | 266
-rw-r--r--  include/asm-x86/dwarf2.h | 62
-rw-r--r--  include/asm-x86/dwarf2_32.h | 61
-rw-r--r--  include/asm-x86/dwarf2_64.h | 56
-rw-r--r--  include/asm-x86/e820.h | 125
-rw-r--r--  include/asm-x86/e820_32.h | 50
-rw-r--r--  include/asm-x86/e820_64.h | 56
-rw-r--r--  include/asm-x86/edac.h | 6
-rw-r--r--  include/asm-x86/efi.h | 10
-rw-r--r--  include/asm-x86/elf.h | 13
-rw-r--r--  include/asm-x86/emergency-restart.h | 6
-rw-r--r--  include/asm-x86/es7000/apic.h (renamed from include/asm-x86/mach-es7000/mach_apic.h) | 36
-rw-r--r--  include/asm-x86/es7000/apicdef.h | 13
-rw-r--r--  include/asm-x86/es7000/ipi.h (renamed from include/asm-x86/mach-es7000/mach_ipi.h) | 6
-rw-r--r--  include/asm-x86/es7000/mpparse.h (renamed from include/asm-x86/mach-es7000/mach_mpparse.h) | 4
-rw-r--r--  include/asm-x86/es7000/wakecpu.h (renamed from include/asm-x86/mach-es7000/mach_wakecpu.h) | 6
-rw-r--r--  include/asm-x86/fb.h | 6
-rw-r--r--  include/asm-x86/fixmap.h | 61
-rw-r--r--  include/asm-x86/fixmap_32.h | 62
-rw-r--r--  include/asm-x86/fixmap_64.h | 65
-rw-r--r--  include/asm-x86/floppy.h | 6
-rw-r--r--  include/asm-x86/ftrace.h | 14
-rw-r--r--  include/asm-x86/futex.h | 12
-rw-r--r--  include/asm-x86/gart.h | 87
-rw-r--r--  include/asm-x86/genapic_32.h | 7
-rw-r--r--  include/asm-x86/genapic_64.h | 17
-rw-r--r--  include/asm-x86/geode.h | 13
-rw-r--r--  include/asm-x86/gpio.h | 54
-rw-r--r--  include/asm-x86/hardirq.h | 6
-rw-r--r--  include/asm-x86/hardirq_32.h | 6
-rw-r--r--  include/asm-x86/hardirq_64.h | 6
-rw-r--r--  include/asm-x86/highmem.h | 9
-rw-r--r--  include/asm-x86/hpet.h | 8
-rw-r--r--  include/asm-x86/hugetlb.h | 16
-rw-r--r--  include/asm-x86/hw_irq.h | 135
-rw-r--r--  include/asm-x86/hw_irq_32.h | 66
-rw-r--r--  include/asm-x86/hw_irq_64.h | 173
-rw-r--r--  include/asm-x86/hypertransport.h | 6
-rw-r--r--  include/asm-x86/i387.h | 173
-rw-r--r--  include/asm-x86/i8253.h | 6
-rw-r--r--  include/asm-x86/i8259.h | 11
-rw-r--r--  include/asm-x86/ia32.h | 6
-rw-r--r--  include/asm-x86/ia32_unistd.h | 6
-rw-r--r--  include/asm-x86/ide.h | 65
-rw-r--r--  include/asm-x86/idle.h | 8
-rw-r--r--  include/asm-x86/intel_arch_perfmon.h | 6
-rw-r--r--  include/asm-x86/io.h | 91
-rw-r--r--  include/asm-x86/io_32.h | 69
-rw-r--r--  include/asm-x86/io_64.h | 80
-rw-r--r--  include/asm-x86/io_apic.h | 65
-rw-r--r--  include/asm-x86/ioctls.h | 6
-rw-r--r--  include/asm-x86/iommu.h | 45
-rw-r--r--  include/asm-x86/ipcbuf.h | 6
-rw-r--r--  include/asm-x86/ipi.h | 25
-rw-r--r--  include/asm-x86/irq.h | 51
-rw-r--r--  include/asm-x86/irq_32.h | 51
-rw-r--r--  include/asm-x86/irq_64.h | 51
-rw-r--r--  include/asm-x86/irq_regs_32.h | 6
-rw-r--r--  include/asm-x86/irq_remapping.h | 8
-rw-r--r--  include/asm-x86/irq_vectors.h | 182
-rw-r--r--  include/asm-x86/irqflags.h | 65
-rw-r--r--  include/asm-x86/ist.h | 6
-rw-r--r--  include/asm-x86/k8.h | 6
-rw-r--r--  include/asm-x86/kdebug.h | 6
-rw-r--r--  include/asm-x86/kexec.h | 32
-rw-r--r--  include/asm-x86/kgdb.h | 30
-rw-r--r--  include/asm-x86/kmap_types.h | 6
-rw-r--r--  include/asm-x86/kprobes.h | 6
-rw-r--r--  include/asm-x86/kvm.h | 7
-rw-r--r--  include/asm-x86/kvm_host.h | 88
-rw-r--r--  include/asm-x86/kvm_para.h | 39
-rw-r--r--  include/asm-x86/kvm_x86_emulate.h | 17
-rw-r--r--  include/asm-x86/ldt.h | 6
-rw-r--r--  include/asm-x86/lguest.h | 6
-rw-r--r--  include/asm-x86/lguest_hcall.h | 6
-rw-r--r--  include/asm-x86/linkage.h | 6
-rw-r--r--  include/asm-x86/local.h | 6
-rw-r--r--  include/asm-x86/mach-bigsmp/mach_apicdef.h | 13
-rw-r--r--  include/asm-x86/mach-bigsmp/mach_mpspec.h | 8
-rw-r--r--  include/asm-x86/mach-default/apm.h | 6
-rw-r--r--  include/asm-x86/mach-default/entry_arch.h | 1
-rw-r--r--  include/asm-x86/mach-default/irq_vectors.h | 96
-rw-r--r--  include/asm-x86/mach-default/irq_vectors_limits.h | 16
-rw-r--r--  include/asm-x86/mach-default/mach_apic.h | 18
-rw-r--r--  include/asm-x86/mach-default/mach_apicdef.h | 12
-rw-r--r--  include/asm-x86/mach-default/mach_ipi.h | 6
-rw-r--r--  include/asm-x86/mach-default/mach_mpparse.h | 6
-rw-r--r--  include/asm-x86/mach-default/mach_mpspec.h | 6
-rw-r--r--  include/asm-x86/mach-default/mach_timer.h | 6
-rw-r--r--  include/asm-x86/mach-default/mach_traps.h | 6
-rw-r--r--  include/asm-x86/mach-default/mach_wakecpu.h | 6
-rw-r--r--  include/asm-x86/mach-default/setup_arch.h | 4
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 16
-rw-r--r--  include/asm-x86/mach-es7000/mach_mpspec.h | 8
-rw-r--r--  include/asm-x86/mach-generic/gpio.h | 6
-rw-r--r--  include/asm-x86/mach-generic/irq_vectors_limits.h | 6
-rw-r--r--  include/asm-x86/mach-generic/mach_apic.h | 6
-rw-r--r--  include/asm-x86/mach-generic/mach_apicdef.h | 6
-rw-r--r--  include/asm-x86/mach-generic/mach_ipi.h | 6
-rw-r--r--  include/asm-x86/mach-generic/mach_mpparse.h | 13
-rw-r--r--  include/asm-x86/mach-generic/mach_mpspec.h | 8
-rw-r--r--  include/asm-x86/mach-numaq/mach_mpparse.h | 14
-rw-r--r--  include/asm-x86/mach-numaq/mach_mpspec.h | 8
-rw-r--r--  include/asm-x86/mach-rdc321x/gpio.h | 9
-rw-r--r--  include/asm-x86/mach-summit/mach_apicdef.h | 13
-rw-r--r--  include/asm-x86/mach-summit/mach_mpspec.h | 9
-rw-r--r--  include/asm-x86/mach-visws/entry_arch.h | 23
-rw-r--r--  include/asm-x86/mach-visws/irq_vectors.h | 62
-rw-r--r--  include/asm-x86/mach-visws/mach_apic.h | 103
-rw-r--r--  include/asm-x86/mach-visws/mach_apicdef.h | 12
-rw-r--r--  include/asm-x86/mach-visws/setup_arch.h | 8
-rw-r--r--  include/asm-x86/mach-visws/smpboot_hooks.h | 28
-rw-r--r--  include/asm-x86/mach-voyager/entry_arch.h | 2
-rw-r--r--  include/asm-x86/mach-voyager/irq_vectors.h | 79
-rw-r--r--  include/asm-x86/math_emu.h | 6
-rw-r--r--  include/asm-x86/mc146818rtc.h | 6
-rw-r--r--  include/asm-x86/mca.h | 6
-rw-r--r--  include/asm-x86/mca_dma.h | 6
-rw-r--r--  include/asm-x86/mce.h | 7
-rw-r--r--  include/asm-x86/microcode.h | 47
-rw-r--r--  include/asm-x86/mman.h | 7
-rw-r--r--  include/asm-x86/mmconfig.h | 12
-rw-r--r--  include/asm-x86/mmu.h | 11
-rw-r--r--  include/asm-x86/mmu_context.h | 32
-rw-r--r--  include/asm-x86/mmu_context_32.h | 34
-rw-r--r--  include/asm-x86/mmu_context_64.h | 24
-rw-r--r--  include/asm-x86/mmx.h | 6
-rw-r--r--  include/asm-x86/mmzone_32.h | 38
-rw-r--r--  include/asm-x86/mmzone_64.h | 6
-rw-r--r--  include/asm-x86/module.h | 6
-rw-r--r--  include/asm-x86/mpspec.h | 45
-rw-r--r--  include/asm-x86/mpspec_def.h | 15
-rw-r--r--  include/asm-x86/msgbuf.h | 6
-rw-r--r--  include/asm-x86/msidef.h | 10
-rw-r--r--  include/asm-x86/msr-index.h | 26
-rw-r--r--  include/asm-x86/msr.h | 55
-rw-r--r--  include/asm-x86/mtrr.h | 6
-rw-r--r--  include/asm-x86/mutex_32.h | 6
-rw-r--r--  include/asm-x86/mutex_64.h | 6
-rw-r--r--  include/asm-x86/namei.h | 11
-rw-r--r--  include/asm-x86/nmi.h | 54
-rw-r--r--  include/asm-x86/nops.h | 6
-rw-r--r--  include/asm-x86/numa_32.h | 14
-rw-r--r--  include/asm-x86/numa_64.h | 26
-rw-r--r--  include/asm-x86/numaq.h | 14
-rw-r--r--  include/asm-x86/numaq/apic.h (renamed from include/asm-x86/mach-numaq/mach_apic.h) | 45
-rw-r--r--  include/asm-x86/numaq/apicdef.h (renamed from include/asm-x86/mach-numaq/mach_apicdef.h) | 4
-rw-r--r--  include/asm-x86/numaq/ipi.h (renamed from include/asm-x86/mach-numaq/mach_ipi.h) | 6
-rw-r--r--  include/asm-x86/numaq/mpparse.h | 7
-rw-r--r--  include/asm-x86/numaq/wakecpu.h (renamed from include/asm-x86/mach-numaq/mach_wakecpu.h) | 6
-rw-r--r--  include/asm-x86/olpc.h | 6
-rw-r--r--  include/asm-x86/page.h | 34
-rw-r--r--  include/asm-x86/page_32.h | 26
-rw-r--r--  include/asm-x86/page_64.h | 25
-rw-r--r--  include/asm-x86/param.h | 6
-rw-r--r--  include/asm-x86/paravirt.h | 362
-rw-r--r--  include/asm-x86/parport.h | 6
-rw-r--r--  include/asm-x86/pat.h | 12
-rw-r--r--  include/asm-x86/pci-direct.h | 10
-rw-r--r--  include/asm-x86/pci.h | 8
-rw-r--r--  include/asm-x86/pci_32.h | 20
-rw-r--r--  include/asm-x86/pci_64.h | 6
-rw-r--r--  include/asm-x86/pda.h | 11
-rw-r--r--  include/asm-x86/percpu.h | 78
-rw-r--r--  include/asm-x86/pgalloc.h | 10
-rw-r--r--  include/asm-x86/pgtable-2level-defs.h | 6
-rw-r--r--  include/asm-x86/pgtable-2level.h | 8
-rw-r--r--  include/asm-x86/pgtable-3level-defs.h | 6
-rw-r--r--  include/asm-x86/pgtable-3level.h | 19
-rw-r--r--  include/asm-x86/pgtable.h | 198
-rw-r--r--  include/asm-x86/pgtable_32.h | 51
-rw-r--r--  include/asm-x86/pgtable_64.h | 30
-rw-r--r--  include/asm-x86/posix_types_32.h | 6
-rw-r--r--  include/asm-x86/posix_types_64.h | 6
-rw-r--r--  include/asm-x86/prctl.h | 6
-rw-r--r--  include/asm-x86/processor-cyrix.h | 8
-rw-r--r--  include/asm-x86/processor-flags.h | 15
-rw-r--r--  include/asm-x86/processor.h | 122
-rw-r--r--  include/asm-x86/proto.h | 8
-rw-r--r--  include/asm-x86/ptrace-abi.h | 26
-rw-r--r--  include/asm-x86/ptrace.h | 66
-rw-r--r--  include/asm-x86/pvclock-abi.h | 42
-rw-r--r--  include/asm-x86/pvclock.h | 13
-rw-r--r--  include/asm-x86/reboot.h | 8
-rw-r--r--  include/asm-x86/reboot_fixups.h | 6
-rw-r--r--  include/asm-x86/required-features.h | 22
-rw-r--r--  include/asm-x86/resume-trace.h | 10
-rw-r--r--  include/asm-x86/rio.h | 6
-rw-r--r--  include/asm-x86/rwlock.h | 6
-rw-r--r--  include/asm-x86/rwsem.h | 6
-rw-r--r--  include/asm-x86/scatterlist.h | 6
-rw-r--r--  include/asm-x86/seccomp_32.h | 5
-rw-r--r--  include/asm-x86/seccomp_64.h | 5
-rw-r--r--  include/asm-x86/segment.h | 38
-rw-r--r--  include/asm-x86/semaphore.h | 1
-rw-r--r--  include/asm-x86/sembuf.h | 6
-rw-r--r--  include/asm-x86/serial.h | 6
-rw-r--r--  include/asm-x86/setup.h | 61
-rw-r--r--  include/asm-x86/shmbuf.h | 6
-rw-r--r--  include/asm-x86/shmparam.h | 6
-rw-r--r--  include/asm-x86/sigcontext.h | 93
-rw-r--r--  include/asm-x86/sigcontext32.h | 12
-rw-r--r--  include/asm-x86/siginfo.h | 6
-rw-r--r--  include/asm-x86/signal.h | 13
-rw-r--r--  include/asm-x86/smp.h | 109
-rw-r--r--  include/asm-x86/socket.h | 6
-rw-r--r--  include/asm-x86/sockios.h | 6
-rw-r--r--  include/asm-x86/sparsemem.h | 6
-rw-r--r--  include/asm-x86/spinlock.h | 187
-rw-r--r--  include/asm-x86/spinlock_types.h | 8
-rw-r--r--  include/asm-x86/srat.h | 18
-rw-r--r--  include/asm-x86/stacktrace.h | 6
-rw-r--r--  include/asm-x86/stat.h | 6
-rw-r--r--  include/asm-x86/statfs.h | 6
-rw-r--r--  include/asm-x86/string_32.h | 329
-rw-r--r--  include/asm-x86/string_64.h | 6
-rw-r--r--  include/asm-x86/summit/apic.h (renamed from include/asm-x86/mach-summit/mach_apic.h) | 26
-rw-r--r--  include/asm-x86/summit/apicdef.h | 13
-rw-r--r--  include/asm-x86/summit/ipi.h (renamed from include/asm-x86/mach-summit/mach_ipi.h) | 6
-rw-r--r--  include/asm-x86/summit/irq_vectors_limits.h (renamed from include/asm-x86/mach-summit/irq_vectors_limits.h) | 0
-rw-r--r--  include/asm-x86/summit/mpparse.h (renamed from include/asm-x86/mach-summit/mach_mpparse.h) | 13
-rw-r--r--  include/asm-x86/suspend_32.h | 5
-rw-r--r--  include/asm-x86/suspend_64.h | 6
-rw-r--r--  include/asm-x86/swiotlb.h | 14
-rw-r--r--  include/asm-x86/sync_bitops.h | 6
-rw-r--r--  include/asm-x86/syscall.h | 211
-rw-r--r--  include/asm-x86/syscalls.h | 93
-rw-r--r--  include/asm-x86/system.h | 16
-rw-r--r--  include/asm-x86/system_64.h | 6
-rw-r--r--  include/asm-x86/tce.h | 6
-rw-r--r--  include/asm-x86/termbits.h | 6
-rw-r--r--  include/asm-x86/termios.h | 6
-rw-r--r--  include/asm-x86/therm_throt.h | 6
-rw-r--r--  include/asm-x86/thread_info.h | 258
-rw-r--r--  include/asm-x86/thread_info_32.h | 205
-rw-r--r--  include/asm-x86/thread_info_64.h | 195
-rw-r--r--  include/asm-x86/time.h | 10
-rw-r--r--  include/asm-x86/timer.h | 15
-rw-r--r--  include/asm-x86/timex.h | 6
-rw-r--r--  include/asm-x86/tlb.h | 6
-rw-r--r--  include/asm-x86/tlbflush.h | 16
-rw-r--r--  include/asm-x86/topology.h | 161
-rw-r--r--  include/asm-x86/trampoline.h | 6
-rw-r--r--  include/asm-x86/traps.h | 82
-rw-r--r--  include/asm-x86/tsc.h | 8
-rw-r--r--  include/asm-x86/types.h | 6
-rw-r--r--  include/asm-x86/uaccess.h | 449
-rw-r--r--  include/asm-x86/uaccess_32.h | 428
-rw-r--r--  include/asm-x86/uaccess_64.h | 270
-rw-r--r--  include/asm-x86/ucontext.h | 12
-rw-r--r--  include/asm-x86/unaligned.h | 6
-rw-r--r--  include/asm-x86/unistd_32.h | 12
-rw-r--r--  include/asm-x86/unistd_64.h | 22
-rw-r--r--  include/asm-x86/unwind.h | 6
-rw-r--r--  include/asm-x86/user32.h | 6
-rw-r--r--  include/asm-x86/user_32.h | 6
-rw-r--r--  include/asm-x86/user_64.h | 6
-rw-r--r--  include/asm-x86/uv/bios.h | 68
-rw-r--r--  include/asm-x86/uv/uv_bau.h | 332
-rw-r--r--  include/asm-x86/uv/uv_hub.h | 196
-rw-r--r--  include/asm-x86/uv/uv_mmrs.h | 960
-rw-r--r--  include/asm-x86/vdso.h | 14
-rw-r--r--  include/asm-x86/vga.h | 6
-rw-r--r--  include/asm-x86/vgtod.h | 6
-rw-r--r--  include/asm-x86/visws/cobalt.h (renamed from include/asm-x86/mach-visws/cobalt.h) | 6
-rw-r--r--  include/asm-x86/visws/lithium.h (renamed from include/asm-x86/mach-visws/lithium.h) | 6
-rw-r--r--  include/asm-x86/visws/piix4.h (renamed from include/asm-x86/mach-visws/piix4.h) | 6
-rw-r--r--  include/asm-x86/visws/sgivw.h | 5
-rw-r--r--  include/asm-x86/vm86.h | 17
-rw-r--r--  include/asm-x86/vmi_time.h | 8
-rw-r--r--  include/asm-x86/vsyscall.h | 9
-rw-r--r--  include/asm-x86/xcr.h | 49
-rw-r--r--  include/asm-x86/xen/events.h | 8
-rw-r--r--  include/asm-x86/xen/grant_table.h | 6
-rw-r--r--  include/asm-x86/xen/hypercall.h | 280
-rw-r--r--  include/asm-x86/xen/hypervisor.h | 21
-rw-r--r--  include/asm-x86/xen/interface.h | 139
-rw-r--r--  include/asm-x86/xen/interface_32.h | 97
-rw-r--r--  include/asm-x86/xen/interface_64.h | 159
-rw-r--r--  include/asm-x86/xen/page.h | 43
-rw-r--r--  include/asm-x86/xor_32.h | 5
-rw-r--r--  include/asm-x86/xor_64.h | 5
-rw-r--r--  include/asm-x86/xsave.h | 118
331 files changed, 8352 insertions, 4926 deletions
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 1e3554596f72..4a8e80cdcfa5 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm
 header-y += boot.h
 header-y += bootparam.h
 header-y += debugreg.h
-header-y += kvm.h
 header-y += ldt.h
 header-y += msr-index.h
 header-y += prctl.h
@@ -19,7 +18,6 @@ unifdef-y += msr.h
 unifdef-y += mtrr.h
 unifdef-y += posix_types_32.h
 unifdef-y += posix_types_64.h
-unifdef-y += ptrace.h
 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
 unifdef-y += vm86.h
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h
index 714207a1c387..f5705761a37b 100644
--- a/include/asm-x86/a.out-core.h
+++ b/include/asm-x86/a.out-core.h
@@ -9,8 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
+#ifndef ASM_X86__A_OUT_CORE_H
+#define ASM_X86__A_OUT_CORE_H
 
 #ifdef __KERNEL__
 #ifdef CONFIG_X86_32
@@ -70,4 +70,4 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 
 #endif /* CONFIG_X86_32 */
 #endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
+#endif /* ASM_X86__A_OUT_CORE_H */
diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h
index 4684f97a5bbd..0948748bc69c 100644
--- a/include/asm-x86/a.out.h
+++ b/include/asm-x86/a.out.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_A_OUT_H
-#define _ASM_X86_A_OUT_H
+#ifndef ASM_X86__A_OUT_H
+#define ASM_X86__A_OUT_H
 
 struct exec
 {
@@ -17,4 +17,4 @@ struct exec
 #define N_DRSIZE(a) ((a).a_drsize)
 #define N_SYMSIZE(a) ((a).a_syms)
 
-#endif /* _ASM_X86_A_OUT_H */
+#endif /* ASM_X86__A_OUT_H */
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 14411c9de46f..392e17336be1 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_ACPI_H
-#define _ASM_X86_ACPI_H
+#ifndef ASM_X86__ACPI_H
+#define ASM_X86__ACPI_H
 
 /*
  * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
@@ -28,6 +28,7 @@
 #include <asm/numa.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
+#include <asm/mpspec.h>
 
 #define COMPILER_DEPENDENT_INT64 long long
 #define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -139,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
+	else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+		return 1;
 	else
 		return max_cstate;
 }
@@ -160,9 +163,7 @@ struct bootnode;
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#ifdef CONFIG_X86_64
-# define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-#endif
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
 			    int num_nodes);
 #else
@@ -174,4 +175,4 @@ static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
 
 #define acpi_unlazy_tlb(x) leave_mm(x)
 
-#endif /*__X86_ASM_ACPI_H*/
+#endif /* ASM_X86__ACPI_H */
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index e4004a9f6a9a..3617fd4fcdf9 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_AGP_H
-#define _ASM_X86_AGP_H
+#ifndef ASM_X86__AGP_H
+#define ASM_X86__AGP_H
 
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -32,4 +32,4 @@
 #define free_gatt_pages(table, order) \
 	free_pages((unsigned long)(table), (order))
 
-#endif
+#endif /* ASM_X86__AGP_H */
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index 1f6a9ca10126..22d3c9862bf3 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_ALTERNATIVE_H
-#define _ASM_X86_ALTERNATIVE_H
+#ifndef ASM_X86__ALTERNATIVE_H
+#define ASM_X86__ALTERNATIVE_H
 
 #include <linux/types.h>
 #include <linux/stddef.h>
@@ -72,6 +72,8 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
 #endif /* CONFIG_SMP */
 
+const unsigned char *const *find_nop_table(void);
+
 /*
  * Alternative instructions for different CPU types or capabilities.
  *
@@ -178,4 +180,4 @@ extern void add_nops(void *insns, unsigned int len);
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_early(void *addr, const void *opcode, size_t len);
 
-#endif /* _ASM_X86_ALTERNATIVE_H */
+#endif /* ASM_X86__ALTERNATIVE_H */
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
new file mode 100644
index 000000000000..041d0db7da27
--- /dev/null
+++ b/include/asm-x86/amd_iommu.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *         Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef ASM_X86__AMD_IOMMU_H
+#define ASM_X86__AMD_IOMMU_H
+
+#include <linux/irqreturn.h>
+
+#ifdef CONFIG_AMD_IOMMU
+extern int amd_iommu_init(void);
+extern int amd_iommu_init_dma_ops(void);
+extern void amd_iommu_detect(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+#else
+static inline int amd_iommu_init(void) { return -ENODEV; }
+static inline void amd_iommu_detect(void) { }
+#endif
+
+#endif /* ASM_X86__AMD_IOMMU_H */
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
new file mode 100644
index 000000000000..b3085869a17b
--- /dev/null
+++ b/include/asm-x86/amd_iommu_types.h
@@ -0,0 +1,404 @@
+/*
+ * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *         Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef ASM_X86__AMD_IOMMU_TYPES_H
+#define ASM_X86__AMD_IOMMU_TYPES_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * some size calculation constants
+ */
+#define DEV_TABLE_ENTRY_SIZE 32
+#define ALIAS_TABLE_ENTRY_SIZE 2
+#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
+
+/* Length of the MMIO region for the AMD IOMMU */
+#define MMIO_REGION_LENGTH 0x4000
+
+/* Capability offsets used by the driver */
+#define MMIO_CAP_HDR_OFFSET 0x00
+#define MMIO_RANGE_OFFSET 0x0c
+#define MMIO_MISC_OFFSET 0x10
+
+/* Masks, shifts and macros to parse the device range capability */
+#define MMIO_RANGE_LD_MASK 0xff000000
+#define MMIO_RANGE_FD_MASK 0x00ff0000
+#define MMIO_RANGE_BUS_MASK 0x0000ff00
+#define MMIO_RANGE_LD_SHIFT 24
+#define MMIO_RANGE_FD_SHIFT 16
+#define MMIO_RANGE_BUS_SHIFT 8
+#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
+#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
+#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x) ((x) & 0x1f)
+
+/* Flag masks for the AMD IOMMU exclusion range */
+#define MMIO_EXCL_ENABLE_MASK 0x01ULL
+#define MMIO_EXCL_ALLOW_MASK 0x02ULL
+
+/* Used offsets into the MMIO space */
+#define MMIO_DEV_TABLE_OFFSET 0x0000
+#define MMIO_CMD_BUF_OFFSET 0x0008
+#define MMIO_EVT_BUF_OFFSET 0x0010
+#define MMIO_CONTROL_OFFSET 0x0018
+#define MMIO_EXCL_BASE_OFFSET 0x0020
+#define MMIO_EXCL_LIMIT_OFFSET 0x0028
+#define MMIO_CMD_HEAD_OFFSET 0x2000
+#define MMIO_CMD_TAIL_OFFSET 0x2008
+#define MMIO_EVT_HEAD_OFFSET 0x2010
+#define MMIO_EVT_TAIL_OFFSET 0x2018
+#define MMIO_STATUS_OFFSET 0x2020
+
+/* MMIO status bits */
+#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+
+/* event logging constants */
+#define EVENT_ENTRY_SIZE 0x10
+#define EVENT_TYPE_SHIFT 28
+#define EVENT_TYPE_MASK 0xf
+#define EVENT_TYPE_ILL_DEV 0x1
+#define EVENT_TYPE_IO_FAULT 0x2
+#define EVENT_TYPE_DEV_TAB_ERR 0x3
+#define EVENT_TYPE_PAGE_TAB_ERR 0x4
+#define EVENT_TYPE_ILL_CMD 0x5
+#define EVENT_TYPE_CMD_HARD_ERR 0x6
+#define EVENT_TYPE_IOTLB_INV_TO 0x7
+#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_DEVID_MASK 0xffff
+#define EVENT_DEVID_SHIFT 0
+#define EVENT_DOMID_MASK 0xffff
+#define EVENT_DOMID_SHIFT 0
+#define EVENT_FLAGS_MASK 0xfff
+#define EVENT_FLAGS_SHIFT 0x10
+
+/* feature control bits */
+#define CONTROL_IOMMU_EN 0x00ULL
+#define CONTROL_HT_TUN_EN 0x01ULL
+#define CONTROL_EVT_LOG_EN 0x02ULL
+#define CONTROL_EVT_INT_EN 0x03ULL
+#define CONTROL_COMWAIT_EN 0x04ULL
+#define CONTROL_PASSPW_EN 0x08ULL
+#define CONTROL_RESPASSPW_EN 0x09ULL
+#define CONTROL_COHERENT_EN 0x0aULL
+#define CONTROL_ISOC_EN 0x0bULL
+#define CONTROL_CMDBUF_EN 0x0cULL
+#define CONTROL_PPFLOG_EN 0x0dULL
+#define CONTROL_PPFINT_EN 0x0eULL
+
+/* command specific defines */
+#define CMD_COMPL_WAIT 0x01
+#define CMD_INV_DEV_ENTRY 0x02
+#define CMD_INV_IOMMU_PAGES 0x03
+
+#define CMD_COMPL_WAIT_STORE_MASK 0x01
+#define CMD_COMPL_WAIT_INT_MASK 0x02
+#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
+#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
+
+#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
+
+/* macros and definitions for device table entries */
+#define DEV_ENTRY_VALID 0x00
+#define DEV_ENTRY_TRANSLATION 0x01
+#define DEV_ENTRY_IR 0x3d
+#define DEV_ENTRY_IW 0x3e
+#define DEV_ENTRY_NO_PAGE_FAULT 0x62
+#define DEV_ENTRY_EX 0x67
+#define DEV_ENTRY_SYSMGT1 0x68
+#define DEV_ENTRY_SYSMGT2 0x69
+#define DEV_ENTRY_INIT_PASS 0xb8
+#define DEV_ENTRY_EINT_PASS 0xb9
+#define DEV_ENTRY_NMI_PASS 0xba
+#define DEV_ENTRY_LINT0_PASS 0xbe
+#define DEV_ENTRY_LINT1_PASS 0xbf
+#define DEV_ENTRY_MODE_MASK 0x07
+#define DEV_ENTRY_MODE_SHIFT 0x09
+
+/* constants to configure the command buffer */
+#define CMD_BUFFER_SIZE 8192
+#define CMD_BUFFER_ENTRIES 512
+#define MMIO_CMD_SIZE_SHIFT 56
+#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
+
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE 8192 /* 512 entries */
+#define EVT_LEN_MASK (0x9ULL << 56)
+
+#define PAGE_MODE_1_LEVEL 0x01
+#define PAGE_MODE_2_LEVEL 0x02
+#define PAGE_MODE_3_LEVEL 0x03
+
+#define IOMMU_PDE_NL_0 0x000ULL
+#define IOMMU_PDE_NL_1 0x200ULL
+#define IOMMU_PDE_NL_2 0x400ULL
+#define IOMMU_PDE_NL_3 0x600ULL
+
+#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
+#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
+#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
+
+#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
+#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
+#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
+
+#define IOMMU_PTE_P (1ULL << 0)
+#define IOMMU_PTE_TV (1ULL << 1)
+#define IOMMU_PTE_U (1ULL << 59)
+#define IOMMU_PTE_FC (1ULL << 60)
+#define IOMMU_PTE_IR (1ULL << 61)
+#define IOMMU_PTE_IW (1ULL << 62)
+
+#define IOMMU_L1_PDE(address) \
+	((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define IOMMU_L2_PDE(address) \
+	((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+
+#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
+#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
+#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
+#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
+
+#define IOMMU_PROT_MASK 0x03
+#define IOMMU_PROT_IR 0x01
+#define IOMMU_PROT_IW 0x02
+
+/* IOMMU capabilities */
+#define IOMMU_CAP_IOTLB 24
+#define IOMMU_CAP_NPCACHE 26
+
+#define MAX_DOMAIN_ID 65536
+
+/* FIXME: move this macro to <linux/pci.h> */
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+/*
+ * This structure contains generic data for IOMMU protection domains
+ * independent of their use.
+ */
+struct protection_domain {
+	spinlock_t lock; /* mostly used to lock the page table*/
+	u16 id; /* the domain id written to the device table */
+	int mode; /* paging mode (0-6 levels) */
+	u64 *pt_root; /* page table root pointer */
+	void *priv; /* private data */
+};
+
+/*
+ * Data container for a dma_ops specific protection domain
+ */
+struct dma_ops_domain {
+	struct list_head list;
+
+	/* generic protection domain information */
+	struct protection_domain domain;
+
+	/* size of the aperture for the mappings */
+	unsigned long aperture_size;
+
+	/* address we start to search for free addresses */
+	unsigned long next_bit;
+
+	/* address allocation bitmap */
+	unsigned long *bitmap;
+
+	/*
+	 * Array of PTE pages for the aperture. In this array we save all the
+	 * leaf pages of the domain page table used for the aperture. This way
+	 * we don't need to walk the page table to find a specific PTE. We can
+	 * just calculate its address in constant time.
+	 */
+	u64 **pte_pages;
+
+	/* This will be set to true when TLB needs to be flushed */
+	bool need_flush;
+
+	/*
+	 * if this is a preallocated domain, keep the device for which it was
+	 * preallocated in this variable
+	 */
+	u16 target_dev;
+};
+
+/*
+ * Structure where we save information about one hardware AMD IOMMU in the
+ * system.
+ */
+struct amd_iommu {
+	struct list_head list;
+
+	/* locks the accesses to the hardware */
+	spinlock_t lock;
+
+	/* Pointer to PCI device of this IOMMU */
+	struct pci_dev *dev;
+
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
+
+	/* physical address of MMIO space */
+	u64 mmio_phys;
+	/* virtual address of MMIO space */
+	u8 *mmio_base;
+
+	/* capabilities of that IOMMU read from ACPI */
+	u32 cap;
+
+	/* pci domain of this IOMMU */
+	u16 pci_seg;
+
+	/* first device this IOMMU handles. read from PCI */
+	u16 first_device;
+	/* last device this IOMMU handles. read from PCI */
+	u16 last_device;
+
+	/* start of exclusion range of that IOMMU */
+	u64 exclusion_start;
+	/* length of exclusion range of that IOMMU */
+	u64 exclusion_length;
+
+	/* command buffer virtual address */
+	u8 *cmd_buf;
+	/* size of command buffer */
+	u32 cmd_buf_size;
+
+	/* event buffer virtual address */
+	u8 *evt_buf;
+	/* size of event buffer */
+	u32 evt_buf_size;
+	/* MSI number for event interrupt */
+	u16 evt_msi_num;
+
+	/* if one, we need to send a completion wait command */
+	int need_sync;
+
+	/* true if interrupts for this IOMMU are already enabled */
+	bool int_enabled;
+
+	/* default dma_ops domain for that IOMMU */
+	struct dma_ops_domain *default_dom;
+};
+
+/*
+ * List with all IOMMUs in the system. This list is not locked because it is
+ * only written and read at driver initialization or suspend time
+ */
+extern struct list_head amd_iommu_list;
+
+/*
+ * Structure defining one entry in the device table
+ */
+struct dev_table_entry {
+	u32 data[8];
+};
+
+/*
+ * One entry for unity mappings parsed out of the ACPI table.
+ */
+struct unity_map_entry {
+	struct list_head list;
+
+	/* starting device id this entry is used for (including) */
+	u16 devid_start;
+	/* end device id this entry is used for (including) */
+	u16 devid_end;
+
+	/* start address to unity map (including) */
+	u64 address_start;
+	/* end address to unity map (including) */
+	u64 address_end;
+
+	/* required protection */
+	int prot;
+};
+
+/*
+ * List of all unity mappings. It is not locked because as runtime it is only
+ * read. It is created at ACPI table parsing time.
+ */
+extern struct list_head amd_iommu_unity_map;
+
+/*
+ * Data structures for device handling
+ */
+
+/*
+ * Device table used by hardware. Read and write accesses by software are
+ * locked with the amd_iommu_pd_table lock.
+ */
+extern struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * Alias table to find requestor ids to device ids. Not locked because only
+ * read on runtime.
+ */
+extern u16 *amd_iommu_alias_table;
+
+/*
+ * Reverse lookup table to find the IOMMU which translates a specific device.
+ */
+extern struct amd_iommu **amd_iommu_rlookup_table;
+
+/* size of the dma_ops aperture as power of 2 */
+extern unsigned amd_iommu_aperture_order;
+
+/* largest PCI device id we expect translation requests for */
+extern u16 amd_iommu_last_bdf;
+
+/* data structures for protection domain handling */
+extern struct protection_domain **amd_iommu_pd_table;
+
+/* allocation bitmap for domain ids */
+extern unsigned long *amd_iommu_pd_alloc_bitmap;
+
+/* will be 1 if device isolation is enabled */
+extern int amd_iommu_isolate;
+
+/*
+ * If true, the addresses will be flushed on unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
+/* takes a PCI device id and prints it out in a readable form */
+static inline void print_devid(u16 devid, int nl)
+{
+	int bus = devid >> 8;
+	int dev = devid >> 3 & 0x1f;
+	int fn = devid & 0x07;
+
+	printk("%02x:%02x.%x", bus, dev, fn);
+	if (nl)
+		printk("\n");
+}
+
+/* takes bus and device/function and returns the device id
+ * FIXME: should that be in generic PCI code? */
+static inline u16 calc_devid(u8 bus, u8 devfn)
+{
+	return (((u16)bus) << 8) | devfn;
+}
+
+#endif /* ASM_X86__AMD_IOMMU_TYPES_H */
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index be9639a9a186..d76a0839abe9 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -1,17 +1,19 @@
-#ifndef _ASM_X86_APIC_H
-#define _ASM_X86_APIC_H
+#ifndef ASM_X86__APIC_H
+#define ASM_X86__APIC_H
 
 #include <linux/pm.h>
 #include <linux/delay.h>
+
+#include <asm/alternative.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/processor.h>
 #include <asm/system.h>
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3 1
 
-#define Dprintk(x...)
-
 /*
  * Debugging macros
  */
@@ -35,71 +37,109 @@ extern void generic_apic_probe(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern int apic_verbosity;
-extern int timer_over_8254;
+extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
-extern int local_apic_timer_disabled;
 
-extern int apic_runs_main_timer;
 extern int ioapic_force;
-extern int disable_apic;
-extern int disable_apic_timer;
 
+extern int disable_apic;
 /*
  * Basic functions accessing APICs.
 */
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define apic_write native_apic_write
-#define apic_write_atomic native_apic_write_atomic
-#define apic_read native_apic_read
 #define setup_boot_clock setup_boot_APIC_clock
 #define setup_secondary_clock setup_secondary_APIC_clock
 #endif
 
 extern int is_vsmp_box(void);
+extern void xapic_wait_icr_idle(void);
+extern u32 safe_xapic_wait_icr_idle(void);
+extern u64 xapic_icr_read(void);
+extern void xapic_icr_write(u32, u32);
+extern int setup_profiling_timer(unsigned int);
 
-static inline void native_apic_write(unsigned long reg, u32 v)
+static inline void native_apic_mem_write(u32 reg, u32 v)
 {
-	*((volatile u32 *)(APIC_BASE + reg)) = v;
+	volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
+
+	alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
+		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
+		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 }
 
-static inline void native_apic_write_atomic(unsigned long reg, u32 v)
+static inline u32 native_apic_mem_read(u32 reg)
 {
-	(void)xchg((u32 *)(APIC_BASE + reg), v);
+	return *((volatile u32 *)(APIC_BASE + reg));
 }
 
-static inline u32 native_apic_read(unsigned long reg)
+static inline void native_apic_msr_write(u32 reg, u32 v)
 {
-	return *((volatile u32 *)(APIC_BASE + reg));
+	if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+	    reg == APIC_LVR)
+		return;
+
+	wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
 }
 
-extern void apic_wait_icr_idle(void);
-extern u32 safe_apic_wait_icr_idle(void);
+static inline u32 native_apic_msr_read(u32 reg)
+{
+	u32 low, high;
+
+	if (reg == APIC_DFR)
+		return -1;
+
+	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
+	return low;
+}
+
+#ifndef CONFIG_X86_32
+extern int x2apic, x2apic_preenabled;
+extern void check_x2apic(void);
+extern void enable_x2apic(void);
+extern void enable_IR_x2apic(void);
+extern void x2apic_icr_write(u32 low, u32 id);
+#endif
+
+struct apic_ops {
+	u32 (*read)(u32 reg);
+	void (*write)(u32 reg, u32 v);
+	u64 (*icr_read)(void);
+	void (*icr_write)(u32 low, u32 high);
+	void (*wait_icr_idle)(void);
+	u32 (*safe_wait_icr_idle)(void);
+};
+
+extern struct apic_ops *apic_ops;
+
+#define apic_read (apic_ops->read)
+#define apic_write (apic_ops->write)
+#define apic_icr_read (apic_ops->icr_read)
+#define apic_icr_write (apic_ops->icr_write)
+#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
+#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
+
 extern int get_physical_broadcast(void);
 
-#ifdef CONFIG_X86_GOOD_APIC
-# define FORCE_READ_AROUND_WRITE 0
-# define apic_read_around(x)
-# define apic_write_around(x, y) apic_write((x), (y))
-#else
-# define FORCE_READ_AROUND_WRITE 1
-# define apic_read_around(x) apic_read(x)
-# define apic_write_around(x, y) apic_write_atomic((x), (y))
+#ifdef CONFIG_X86_64
+static inline void ack_x2APIC_irq(void)
+{
+	/* Docs say use 0 for future compatibility */
+	native_apic_msr_write(APIC_EOI, 0);
+}
 #endif
 
+
 static inline void ack_APIC_irq(void)
 {
 	/*
-	 * ack_APIC_irq() actually gets compiled as a single instruction:
-	 * - a single rmw on Pentium/82489DX
-	 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+	 * ack_APIC_irq() actually gets compiled as a single instruction
	 * ... yummie.
 	 */
 
 	/* Docs say use 0 for future compatibility */
-	apic_write_around(APIC_EOI, 0);
+	apic_write(APIC_EOI, 0);
 }
 
 extern int lapic_get_maxlvt(void);
@@ -125,17 +165,23 @@ extern void enable_NMI_through_LVT0(void);
  */
 #ifdef CONFIG_X86_64
 extern void early_init_lapic_mapping(void);
+extern int apic_is_clustered_box(void);
+#else
+static inline int apic_is_clustered_box(void)
+{
+	return 0;
+}
 #endif
 
 extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
 extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
 
-extern int apic_is_clustered_box(void);
+
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok 1
+static inline void init_apic_mappings(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
-#endif /* __ASM_APIC_H */
+#endif /* ASM_X86__APIC_H */
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index 6b9008c78731..b922c85ac91d 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_APICDEF_H
-#define _ASM_X86_APICDEF_H
+#ifndef ASM_X86__APICDEF_H
+#define ASM_X86__APICDEF_H
 
 /*
  * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
@@ -105,6 +105,7 @@
 #define APIC_TMICT 0x380
 #define APIC_TMCCT 0x390
 #define APIC_TDCR 0x3E0
+#define APIC_SELF_IPI 0x3F0
 #define APIC_TDR_DIV_TMBASE (1 << 2)
 #define APIC_TDR_DIV_1 0xB
 #define APIC_TDR_DIV_2 0x0
@@ -128,6 +129,8 @@
 #define APIC_EILVT3 0x530
 
 #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+#define APIC_BASE_MSR 0x800
+#define X2APIC_ENABLE (1UL << 10)
 
 #ifdef CONFIG_X86_32
 # define MAX_IO_APICS 64
@@ -411,4 +414,4 @@ struct local_apic {
 #else
 	#define BAD_APICID 0xFFFFu
 #endif
-#endif
+#endif /* ASM_X86__APICDEF_H */
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h
index 768aee8a04ef..de4596b24c23 100644
--- a/include/asm-x86/arch_hooks.h
+++ b/include/asm-x86/arch_hooks.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_ARCH_HOOKS_H
-#define _ASM_ARCH_HOOKS_H
+#ifndef ASM_X86__ARCH_HOOKS_H
+#define ASM_X86__ARCH_HOOKS_H
 
 #include <linux/interrupt.h>
 
@@ -12,8 +12,6 @@
 /* these aren't arch hooks, they are generic routines
  * that can be used by the hooks */
 extern void init_ISA_irqs(void);
-extern void apic_intr_init(void);
-extern void smp_intr_init(void);
 extern irqreturn_t timer_interrupt(int irq, void *dev_id);
 
 /* these are the defined hooks */
@@ -21,7 +19,8 @@ extern void intr_init_hook(void);
 extern void pre_intr_init_hook(void);
 extern void pre_setup_arch_hook(void);
 extern void trap_init_hook(void);
+extern void pre_time_init_hook(void);
 extern void time_init_hook(void);
 extern void mca_nmi_hook(void);
 
-#endif
+#endif /* ASM_X86__ARCH_HOOKS_H */
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 90dec0c23646..e1355f44d7c3 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -1,39 +1,47 @@
-#ifndef _ASM_X86_ASM_H
-#define _ASM_X86_ASM_H
-
-#ifdef CONFIG_X86_32
-/* 32 bits */
-
-# define _ASM_PTR " .long "
-# define _ASM_ALIGN " .balign 4 "
-# define _ASM_MOV_UL " movl "
-
-# define _ASM_INC " incl "
-# define _ASM_DEC " decl "
-# define _ASM_ADD " addl "
-# define _ASM_SUB " subl "
-# define _ASM_XADD " xaddl "
+#ifndef ASM_X86__ASM_H
+#define ASM_X86__ASM_H
 
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x) x
+# define __ASM_EX_SEC .section __ex_table
 #else
-/* 64 bits */
+# define __ASM_FORM(x) " " #x " "
+# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
+#endif
 
-# define _ASM_PTR " .quad "
-# define _ASM_ALIGN " .balign 8 "
-# define _ASM_MOV_UL " movq "
-
-# define _ASM_INC " incq "
-# define _ASM_DEC " decq "
-# define _ASM_ADD " addq "
-# define _ASM_SUB " subq "
-# define _ASM_XADD " xaddq "
-
-#endif /* CONFIG_X86_32 */
+#ifdef CONFIG_X86_32
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+#else
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+#endif
+
+#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
+#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
+
+#define _ASM_PTR __ASM_SEL(.long, .quad)
+#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
+
+#define _ASM_MOV __ASM_SIZE(mov)
+#define _ASM_INC __ASM_SIZE(inc)
+#define _ASM_DEC __ASM_SIZE(dec)
+#define _ASM_ADD __ASM_SIZE(add)
+#define _ASM_SUB __ASM_SIZE(sub)
+#define _ASM_XADD __ASM_SIZE(xadd)
+
+#define _ASM_AX __ASM_REG(ax)
+#define _ASM_BX __ASM_REG(bx)
+#define _ASM_CX __ASM_REG(cx)
+#define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
-	" .section __ex_table,\"a\"\n" \
+	__ASM_EX_SEC \
 	_ASM_ALIGN "\n" \
 	_ASM_PTR #from "," #to "\n" \
 	" .previous\n"
 
-#endif /* _ASM_X86_ASM_H */
+#endif /* ASM_X86__ASM_H */
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
index 21a4825148c0..14d3f0beb889 100644
--- a/include/asm-x86/atomic_32.h
+++ b/include/asm-x86/atomic_32.h
@@ -1,5 +1,5 @@
-#ifndef __ARCH_I386_ATOMIC__
-#define __ARCH_I386_ATOMIC__
+#ifndef ASM_X86__ATOMIC_32_H
+#define ASM_X86__ATOMIC_32_H
 
 #include <linux/compiler.h>
 #include <asm/processor.h>
@@ -256,4 +256,4 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__after_atomic_inc() barrier()
 
 #include <asm-generic/atomic.h>
-#endif
+#endif /* ASM_X86__ATOMIC_32_H */
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 3e0cd7d38335..2cb218c4a356 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -1,5 +1,5 @@
-#ifndef __ARCH_X86_64_ATOMIC__
-#define __ARCH_X86_64_ATOMIC__
+#ifndef ASM_X86__ATOMIC_64_H
+#define ASM_X86__ATOMIC_64_H
 
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
@@ -11,12 +11,6 @@
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
@@ -234,7 +228,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "er" (i), "m" (v->counter));
 }
 
 /**
@@ -248,7 +242,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "er" (i), "m" (v->counter));
 }
 
 /**
@@ -266,7 +260,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 
 	asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
 		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "er" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -347,7 +341,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
 
 	asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
 		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "er" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -431,6 +425,32 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 	return c != (u);
 }
 
+/**
+ * atomic_inc_short - increment of a short integer
+ * @v: pointer to type int
+ *
+ * Atomically adds 1 to @v
+ * Returns the new value of @u
+ */
+static inline short int atomic_inc_short(short int *v)
+{
+	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
+	return *v;
+}
+
+/**
+ * atomic_or_long - OR of two long integers
+ * @v1: pointer to type unsigned long
+ * @v2: pointer to type unsigned long
+ *
+ * Atomically ORs @v1 and @v2
+ * Returns the result of the OR
+ */
+static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+{
+	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */
@@ -450,4 +470,4 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define smp_mb__after_atomic_inc() barrier()
 
 #include <asm-generic/atomic.h>
-#endif
+#endif /* ASM_X86__ATOMIC_64_H */
diff --git a/include/asm-x86/auxvec.h b/include/asm-x86/auxvec.h
index 87f5e6d5a020..12c7cac74202 100644
--- a/include/asm-x86/auxvec.h
+++ b/include/asm-x86/auxvec.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_AUXVEC_H
-#define _ASM_X86_AUXVEC_H
+#ifndef ASM_X86__AUXVEC_H
+#define ASM_X86__AUXVEC_H
 /*
  * Architecture-neutral AT_ values in 0-17, leave some room
  * for more of them, start the x86-specific ones at 32.
@@ -9,4 +9,4 @@
 #endif
 #define AT_SYSINFO_EHDR 33
 
-#endif
+#endif /* ASM_X86__AUXVEC_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/bigsmp/apic.h
index 8327907c79bf..0a9cd7c5ca0c 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/bigsmp/apic.h
@@ -11,7 +11,7 @@ static inline int apic_id_registered(void)
 
 /* Round robin the irqs amoung the online cpus */
 static inline cpumask_t target_cpus(void)
 {
 	static unsigned long cpu = NR_CPUS;
 	do {
 		if (cpu >= NR_CPUS)
@@ -23,7 +23,7 @@ static inline cpumask_t target_cpus(void)
 }
 
 #undef APIC_DEST_LOGICAL
 #define APIC_DEST_LOGICAL 0
 #define TARGET_CPUS (target_cpus())
 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
 #define INT_DELIVERY_MODE (dest_Fixed)
@@ -63,9 +63,9 @@ static inline void init_apic_ldr(void)
 	unsigned long val;
 	int cpu = smp_processor_id();
 
-	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	apic_write(APIC_DFR, APIC_DFR_VALUE);
 	val = calculate_ldr(cpu);
-	apic_write_around(APIC_LDR, val);
+	apic_write(APIC_LDR, val);
 }
 
 static inline void setup_apic_routing(void)
@@ -81,7 +81,7 @@ static inline int multi_timer_check(int apic, int irq)
 
 static inline int apicid_to_node(int logical_apicid)
 {
-	return (0);
+	return apicid_2_node[hard_smp_processor_id()];
 }
 
 static inline int cpu_present_to_apicid(int mps_cpu)
diff --git a/include/asm-x86/mach-es7000/mach_apicdef.h b/include/asm-x86/bigsmp/apicdef.h
index a58ab5a75c8c..392c3f5ef2fe 100644
--- a/include/asm-x86/mach-es7000/mach_apicdef.h
+++ b/include/asm-x86/bigsmp/apicdef.h
@@ -3,10 +3,10 @@
 
 #define APIC_ID_MASK (0xFF<<24)
 
 static inline unsigned get_apic_id(unsigned long x)
 {
 	return (((x)>>24)&0xFF);
 }
 
 #define GET_APIC_ID(x) get_apic_id(x)
 
diff --git a/include/asm-x86/mach-bigsmp/mach_ipi.h b/include/asm-x86/bigsmp/ipi.h
index 9404c535b7ec..9404c535b7ec 100644
--- a/include/asm-x86/mach-bigsmp/mach_ipi.h
+++ b/include/asm-x86/bigsmp/ipi.h
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index b4a46b7be794..79b4b88505d7 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -1,5 +1,5 @@
-#ifndef _MACH_BIOS_EBDA_H
-#define _MACH_BIOS_EBDA_H
+#ifndef ASM_X86__BIOS_EBDA_H
+#define ASM_X86__BIOS_EBDA_H
 
 #include <asm/io.h>
 
@@ -14,4 +14,23 @@ static inline unsigned int get_bios_ebda(void)
 	return address; /* 0 means none */
 }
 
-#endif /* _MACH_BIOS_EBDA_H */
+void reserve_ebda_region(void);
+
+#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
+/*
+ * This is obviously not a great place for this, but we want to be
+ * able to scatter it around anywhere in the kernel.
+ */
+void check_for_bios_corruption(void);
+void start_periodic_check_for_corruption(void);
+#else
+static inline void check_for_bios_corruption(void)
+{
+}
+
+static inline void start_periodic_check_for_corruption(void)
+{
+}
+#endif
+
+#endif /* ASM_X86__BIOS_EBDA_H */
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index ee4b3ead6a43..451a74762bd4 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_BITOPS_H
-#define _ASM_X86_BITOPS_H
+#ifndef ASM_X86__BITOPS_H
+#define ASM_X86__BITOPS_H
 
 /*
  * Copyright 1992, Linus Torvalds.
@@ -23,11 +23,21 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
-#define ADDR "=m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
 #else
-#define ADDR "+m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
 #endif
 
+#define ADDR BITOP_ADDR(addr)
+
+/*
+ * We do the locked ops that don't return the old value as
+ * a mask operation on a byte.
+ */
+#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK(nr) (1 << ((nr) & 7))
+
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -43,9 +53,17 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
 */
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "orb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)CONST_MASK(nr))
+			: "memory");
+	} else {
+		asm volatile(LOCK_PREFIX "bts %1,%0"
+			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+	}
 }
 
 /**
@@ -57,7 +75,7 @@ static inline void set_bit(int nr, volatile void *addr)
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -72,9 +90,17 @@ static inline void __set_bit(int nr, volatile void *addr)
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "andb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)~CONST_MASK(nr)));
+	} else {
+		asm volatile(LOCK_PREFIX "btr %1,%0"
+			: BITOP_ADDR(addr)
+			: "Ir" (nr));
+	}
 }
 
 /*
@@ -85,13 +111,13 @@ static inline void clear_bit(int nr, volatile void *addr)
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
-static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
 	barrier();
 	clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -108,7 +134,7 @@ static inline void __clear_bit(int nr, volatile void *addr)
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
110 */ 136 */
111static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) 137static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
112{ 138{
113 barrier(); 139 barrier();
114 __clear_bit(nr, addr); 140 __clear_bit(nr, addr);
@@ -126,7 +152,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
126 * If it's called on the same region of memory simultaneously, the effect 152 * If it's called on the same region of memory simultaneously, the effect
127 * may be that only one operation succeeds. 153 * may be that only one operation succeeds.
128 */ 154 */
129static inline void __change_bit(int nr, volatile void *addr) 155static inline void __change_bit(int nr, volatile unsigned long *addr)
130{ 156{
131 asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); 157 asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
132} 158}
@@ -140,7 +166,7 @@ static inline void __change_bit(int nr, volatile void *addr)
140 * Note that @nr may be almost arbitrarily large; this function is not 166 * Note that @nr may be almost arbitrarily large; this function is not
141 * restricted to acting on a single-word quantity. 167 * restricted to acting on a single-word quantity.
142 */ 168 */
143static inline void change_bit(int nr, volatile void *addr) 169static inline void change_bit(int nr, volatile unsigned long *addr)
144{ 170{
145 asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr)); 171 asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
146} 172}
@@ -153,7 +179,7 @@ static inline void change_bit(int nr, volatile void *addr)
153 * This operation is atomic and cannot be reordered. 179 * This operation is atomic and cannot be reordered.
154 * It also implies a memory barrier. 180 * It also implies a memory barrier.
155 */ 181 */
156static inline int test_and_set_bit(int nr, volatile void *addr) 182static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
157{ 183{
158 int oldbit; 184 int oldbit;
159 185
@@ -170,7 +196,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
170 * 196 *
171 * This is the same as test_and_set_bit on x86. 197 * This is the same as test_and_set_bit on x86.
172 */ 198 */
173static inline int test_and_set_bit_lock(int nr, volatile void *addr) 199static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
174{ 200{
175 return test_and_set_bit(nr, addr); 201 return test_and_set_bit(nr, addr);
176} 202}
@@ -184,7 +210,7 @@ static inline int test_and_set_bit_lock(int nr, volatile void *addr)
184 * If two examples of this operation race, one can appear to succeed 210 * If two examples of this operation race, one can appear to succeed
185 * but actually fail. You must protect multiple accesses with a lock. 211 * but actually fail. You must protect multiple accesses with a lock.
186 */ 212 */
187static inline int __test_and_set_bit(int nr, volatile void *addr) 213static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
188{ 214{
189 int oldbit; 215 int oldbit;
190 216
@@ -203,7 +229,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
203 * This operation is atomic and cannot be reordered. 229 * This operation is atomic and cannot be reordered.
204 * It also implies a memory barrier. 230 * It also implies a memory barrier.
205 */ 231 */
206static inline int test_and_clear_bit(int nr, volatile void *addr) 232static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
207{ 233{
208 int oldbit; 234 int oldbit;
209 235
@@ -223,7 +249,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
223 * If two examples of this operation race, one can appear to succeed 249 * If two examples of this operation race, one can appear to succeed
224 * but actually fail. You must protect multiple accesses with a lock. 250 * but actually fail. You must protect multiple accesses with a lock.
225 */ 251 */
226static inline int __test_and_clear_bit(int nr, volatile void *addr) 252static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
227{ 253{
228 int oldbit; 254 int oldbit;
229 255
@@ -235,7 +261,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
235} 261}
236 262
237/* WARNING: non atomic and it can be reordered! */ 263/* WARNING: non atomic and it can be reordered! */
238static inline int __test_and_change_bit(int nr, volatile void *addr) 264static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
239{ 265{
240 int oldbit; 266 int oldbit;
241 267
@@ -255,7 +281,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
255 * This operation is atomic and cannot be reordered. 281 * This operation is atomic and cannot be reordered.
256 * It also implies a memory barrier. 282 * It also implies a memory barrier.
257 */ 283 */
258static inline int test_and_change_bit(int nr, volatile void *addr) 284static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
259{ 285{
260 int oldbit; 286 int oldbit;
261 287
@@ -266,13 +292,13 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
266 return oldbit; 292 return oldbit;
267} 293}
268 294
269static inline int constant_test_bit(int nr, const volatile void *addr) 295static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
270{ 296{
271 return ((1UL << (nr % BITS_PER_LONG)) & 297 return ((1UL << (nr % BITS_PER_LONG)) &
272 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; 298 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
273} 299}
274 300
275static inline int variable_test_bit(int nr, volatile const void *addr) 301static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
276{ 302{
277 int oldbit; 303 int oldbit;
278 304
@@ -330,7 +356,7 @@ static inline unsigned long ffz(unsigned long word)
330 * __fls: find last set bit in word 356 * __fls: find last set bit in word
331 * @word: The word to search 357 * @word: The word to search
332 * 358 *
333 * Undefined if no zero exists, so code should check against ~0UL first. 359 * Undefined if no set bit exists, so code should check against 0 first.
334 */ 360 */
335static inline unsigned long __fls(unsigned long word) 361static inline unsigned long __fls(unsigned long word)
336{ 362{
@@ -398,16 +424,6 @@ static inline int fls(int x)
398 424
399#undef ADDR 425#undef ADDR
400 426
401static inline void set_bit_string(unsigned long *bitmap,
402 unsigned long i, int len)
403{
404 unsigned long end = i + len;
405 while (i < end) {
406 __set_bit(i, bitmap);
407 i++;
408 }
409}
410
411#ifdef __KERNEL__ 427#ifdef __KERNEL__
412 428
413#include <asm-generic/bitops/sched.h> 429#include <asm-generic/bitops/sched.h>
@@ -432,4 +448,4 @@ static inline void set_bit_string(unsigned long *bitmap,
432#include <asm-generic/bitops/minix.h> 448#include <asm-generic/bitops/minix.h>
433 449
434#endif /* __KERNEL__ */ 450#endif /* __KERNEL__ */
435#endif /* _ASM_X86_BITOPS_H */ 451#endif /* ASM_X86__BITOPS_H */
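
Editor's note: the interesting change above is the IS_IMMEDIATE()/CONST_MASK() path. When the bit number is a compile-time constant, set_bit()/clear_bit() address the byte containing the bit and use a one-byte immediate orb/andb instead of bts/btr with a register operand. A user-space sketch of the same byte/mask arithmetic in plain C (no locked asm; little-endian layout assumed, as on x86):

/* CONST_MASK math from the patch above, as portable C: bit nr lives
 * in byte nr>>3, at position nr&7 within that byte. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define CONST_MASK_BYTE(nr)	((nr) >> 3)        /* byte holding bit nr */
#define CONST_MASK(nr)		(1u << ((nr) & 7)) /* bit within that byte */

static void set_bit_byte(unsigned int nr, void *addr)
{
	((uint8_t *)addr)[CONST_MASK_BYTE(nr)] |= (uint8_t)CONST_MASK(nr);
}

int main(void)
{
	uint64_t bitmap[2];

	memset(bitmap, 0, sizeof(bitmap));
	set_bit_byte(70, bitmap);		/* byte 8, bit 6 */
	assert(bitmap[1] == (1ull << 6));	/* same bit BTS would set */
	return 0;
}

With a constant nr the compiler folds the whole call into a single byte-wide OR with an immediate, which is the code-size win this patch is after.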
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h
index 2faed7ecb092..1d63bd5d5946 100644
--- a/include/asm-x86/boot.h
+++ b/include/asm-x86/boot.h
@@ -1,10 +1,8 @@
1#ifndef _ASM_BOOT_H 1#ifndef ASM_X86__BOOT_H
2#define _ASM_BOOT_H 2#define ASM_X86__BOOT_H
3 3
4/* Don't touch these, unless you really know what you're doing. */ 4/* Don't touch these, unless you really know what you're doing. */
5#define DEF_INITSEG 0x9000
6#define DEF_SYSSEG 0x1000 5#define DEF_SYSSEG 0x1000
7#define DEF_SETUPSEG 0x9020
8#define DEF_SYSSIZE 0x7F00 6#define DEF_SYSSIZE 0x7F00
9 7
10/* Internal svga startup constants */ 8/* Internal svga startup constants */
@@ -25,4 +23,4 @@
25#define BOOT_STACK_SIZE 0x1000 23#define BOOT_STACK_SIZE 0x1000
26#endif 24#endif
27 25
28#endif /* _ASM_BOOT_H */ 26#endif /* ASM_X86__BOOT_H */
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index f62f4733606b..ccf027e2d97d 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_BOOTPARAM_H 1#ifndef ASM_X86__BOOTPARAM_H
2#define _ASM_BOOTPARAM_H 2#define ASM_X86__BOOTPARAM_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/screen_info.h> 5#include <linux/screen_info.h>
@@ -11,6 +11,7 @@
11 11
12/* setup data types */ 12/* setup data types */
13#define SETUP_NONE 0 13#define SETUP_NONE 0
14#define SETUP_E820_EXT 1
14 15
15/* extensible setup data list node */ 16/* extensible setup data list node */
16struct setup_data { 17struct setup_data {
@@ -40,6 +41,7 @@ struct setup_header {
40 __u8 type_of_loader; 41 __u8 type_of_loader;
41 __u8 loadflags; 42 __u8 loadflags;
42#define LOADED_HIGH (1<<0) 43#define LOADED_HIGH (1<<0)
44#define QUIET_FLAG (1<<5)
43#define KEEP_SEGMENTS (1<<6) 45#define KEEP_SEGMENTS (1<<6)
44#define CAN_USE_HEAP (1<<7) 46#define CAN_USE_HEAP (1<<7)
45 __u16 setup_move_size; 47 __u16 setup_move_size;
@@ -106,4 +108,4 @@ struct boot_params {
106 __u8 _pad9[276]; /* 0xeec */ 108 __u8 _pad9[276]; /* 0xeec */
107} __attribute__((packed)); 109} __attribute__((packed));
108 110
109#endif /* _ASM_BOOTPARAM_H */ 111#endif /* ASM_X86__BOOTPARAM_H */
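
Editor's note: QUIET_FLAG joins the existing loadflags bits; a boot loader ORs these into setup_header.loadflags and the kernel tests them individually. A small sketch of decoding that byte, using the bit values from the hunk above (the sample value is made up):

/* Decode setup_header.loadflags bits as defined in the patch above. */
#include <stdio.h>

#define LOADED_HIGH	(1 << 0)
#define QUIET_FLAG	(1 << 5)
#define KEEP_SEGMENTS	(1 << 6)
#define CAN_USE_HEAP	(1 << 7)

int main(void)
{
	unsigned char loadflags = LOADED_HIGH | CAN_USE_HEAP; /* example */

	printf("loaded high:   %s\n", (loadflags & LOADED_HIGH)   ? "yes" : "no");
	printf("quiet boot:    %s\n", (loadflags & QUIET_FLAG)    ? "yes" : "no");
	printf("keep segments: %s\n", (loadflags & KEEP_SEGMENTS) ? "yes" : "no");
	printf("heap usable:   %s\n", (loadflags & CAN_USE_HEAP)  ? "yes" : "no");
	return 0;
}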
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
index b69aa64b82a4..91ad43a54c47 100644
--- a/include/asm-x86/bug.h
+++ b/include/asm-x86/bug.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BUG_H 1#ifndef ASM_X86__BUG_H
2#define _ASM_X86_BUG_H 2#define ASM_X86__BUG_H
3 3
4#ifdef CONFIG_BUG 4#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG 5#define HAVE_ARCH_BUG
@@ -36,4 +36,4 @@ do { \
36#endif /* !CONFIG_BUG */ 36#endif /* !CONFIG_BUG */
37 37
38#include <asm-generic/bug.h> 38#include <asm-generic/bug.h>
39#endif 39#endif /* ASM_X86__BUG_H */
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index 021cbdd5f258..dc604985f2ad 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -1,7 +1,12 @@
1#ifndef _ASM_X86_BUGS_H 1#ifndef ASM_X86__BUGS_H
2#define _ASM_X86_BUGS_H 2#define ASM_X86__BUGS_H
3 3
4extern void check_bugs(void); 4extern void check_bugs(void);
5
6#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
5int ppro_with_ram_bug(void); 7int ppro_with_ram_bug(void);
8#else
9static inline int ppro_with_ram_bug(void) { return 0; }
10#endif
6 11
7#endif /* _ASM_X86_BUGS_H */ 12#endif /* ASM_X86__BUGS_H */
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index e02ae2d89acf..722f27d68105 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BYTEORDER_H 1#ifndef ASM_X86__BYTEORDER_H
2#define _ASM_X86_BYTEORDER_H 2#define ASM_X86__BYTEORDER_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -78,4 +78,4 @@ static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
78 78
79#include <linux/byteorder/little_endian.h> 79#include <linux/byteorder/little_endian.h>
80 80
81#endif /* _ASM_X86_BYTEORDER_H */ 81#endif /* ASM_X86__BYTEORDER_H */
diff --git a/include/asm-x86/cache.h b/include/asm-x86/cache.h
index 1e0bac86f38f..ea3f1cc06a97 100644
--- a/include/asm-x86/cache.h
+++ b/include/asm-x86/cache.h
@@ -1,5 +1,5 @@
1#ifndef _ARCH_X86_CACHE_H 1#ifndef ASM_X86__CACHE_H
2#define _ARCH_X86_CACHE_H 2#define ASM_X86__CACHE_H
3 3
4/* L1 cache line size */ 4/* L1 cache line size */
5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) 5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
@@ -17,4 +17,4 @@
17#endif 17#endif
18#endif 18#endif
19 19
20#endif 20#endif /* ASM_X86__CACHE_H */
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index f4c0ab50d2c2..68840ef1b35a 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_CACHEFLUSH_H 1#ifndef ASM_X86__CACHEFLUSH_H
2#define _ASM_X86_CACHEFLUSH_H 2#define ASM_X86__CACHEFLUSH_H
3 3
4/* Keep includes the same across arches. */ 4/* Keep includes the same across arches. */
5#include <linux/mm.h> 5#include <linux/mm.h>
@@ -24,6 +24,8 @@
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy((dst), (src), (len)) 25 memcpy((dst), (src), (len))
26 26
27#define PG_non_WB PG_arch_1
28PAGEFLAG(NonWB, non_WB)
27 29
28/* 30/*
29 * The set_memory_* API can be used to change various attributes of a virtual 31 * The set_memory_* API can be used to change various attributes of a virtual
@@ -66,6 +68,9 @@ int set_memory_rw(unsigned long addr, int numpages);
66int set_memory_np(unsigned long addr, int numpages); 68int set_memory_np(unsigned long addr, int numpages);
67int set_memory_4k(unsigned long addr, int numpages); 69int set_memory_4k(unsigned long addr, int numpages);
68 70
71int set_memory_array_uc(unsigned long *addr, int addrinarray);
72int set_memory_array_wb(unsigned long *addr, int addrinarray);
73
69/* 74/*
70 * For legacy compatibility with the old APIs, a few functions 75 * For legacy compatibility with the old APIs, a few functions
71 * are provided that work on a "struct page". 76 * are provided that work on a "struct page".
@@ -96,8 +101,6 @@ int set_pages_rw(struct page *page, int numpages);
96 101
97void clflush_cache_range(void *addr, unsigned int size); 102void clflush_cache_range(void *addr, unsigned int size);
98 103
99void cpa_init(void);
100
101#ifdef CONFIG_DEBUG_RODATA 104#ifdef CONFIG_DEBUG_RODATA
102void mark_rodata_ro(void); 105void mark_rodata_ro(void);
103extern const int rodata_test_data; 106extern const int rodata_test_data;
@@ -112,4 +115,4 @@ static inline int rodata_test(void)
112} 115}
113#endif 116#endif
114 117
115#endif 118#endif /* ASM_X86__CACHEFLUSH_H */
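
Editor's note: PAGEFLAG(NonWB, non_WB) above stamps out the usual PageNonWB/SetPageNonWB/ClearPageNonWB accessors for the PG_arch_1 bit. A simplified stand-alone sketch of the macro-generation idiom (non-atomic, and the struct and bit number are illustrative; the kernel's version builds on test_bit/set_bit):

/* Simplified PAGEFLAG() idiom: one macro generates test/set/clear
 * helpers for a page-flag bit. */
#include <assert.h>

struct page { unsigned long flags; };

#define PAGEFLAG(uname, bit)						\
static inline int Page##uname(struct page *p)				\
	{ return !!(p->flags & (1ul << (bit))); }			\
static inline void SetPage##uname(struct page *p)			\
	{ p->flags |= 1ul << (bit); }					\
static inline void ClearPage##uname(struct page *p)			\
	{ p->flags &= ~(1ul << (bit)); }

PAGEFLAG(NonWB, 9)	/* bit 9 stands in for PG_arch_1 here */

int main(void)
{
	struct page pg = { 0 };

	SetPageNonWB(&pg);
	assert(PageNonWB(&pg));
	ClearPageNonWB(&pg);
	assert(!PageNonWB(&pg));
	return 0;
}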
diff --git a/include/asm-x86/calgary.h b/include/asm-x86/calgary.h
index 67f60406e2d8..933fd272f826 100644
--- a/include/asm-x86/calgary.h
+++ b/include/asm-x86/calgary.h
@@ -21,8 +21,8 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 22 */
23 23
24#ifndef _ASM_X86_64_CALGARY_H 24#ifndef ASM_X86__CALGARY_H
25#define _ASM_X86_64_CALGARY_H 25#define ASM_X86__CALGARY_H
26 26
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/device.h> 28#include <linux/device.h>
@@ -69,4 +69,4 @@ static inline int calgary_iommu_init(void) { return 1; }
69static inline void detect_calgary(void) { return; } 69static inline void detect_calgary(void) { return; }
70#endif 70#endif
71 71
72#endif /* _ASM_X86_64_CALGARY_H */ 72#endif /* ASM_X86__CALGARY_H */
diff --git a/include/asm-x86/calling.h b/include/asm-x86/calling.h
index f13e62e2cb3e..2bc162e0ec6e 100644
--- a/include/asm-x86/calling.h
+++ b/include/asm-x86/calling.h
@@ -104,7 +104,7 @@
104 .endif 104 .endif
105 .endm 105 .endm
106 106
107 .macro LOAD_ARGS offset 107 .macro LOAD_ARGS offset, skiprax=0
108 movq \offset(%rsp), %r11 108 movq \offset(%rsp), %r11
109 movq \offset+8(%rsp), %r10 109 movq \offset+8(%rsp), %r10
110 movq \offset+16(%rsp), %r9 110 movq \offset+16(%rsp), %r9
@@ -113,7 +113,10 @@
113 movq \offset+48(%rsp), %rdx 113 movq \offset+48(%rsp), %rdx
114 movq \offset+56(%rsp), %rsi 114 movq \offset+56(%rsp), %rsi
115 movq \offset+64(%rsp), %rdi 115 movq \offset+64(%rsp), %rdi
116 .if \skiprax
117 .else
116 movq \offset+72(%rsp), %rax 118 movq \offset+72(%rsp), %rax
119 .endif
117 .endm 120 .endm
118 121
119#define REST_SKIP 6*8 122#define REST_SKIP 6*8
@@ -165,4 +168,3 @@
165 .macro icebp 168 .macro icebp
166 .byte 0xf1 169 .byte 0xf1
167 .endm 170 .endm
168
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
index 52bbb0d8c4c1..d041e8cda227 100644
--- a/include/asm-x86/checksum_32.h
+++ b/include/asm-x86/checksum_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_CHECKSUM_H 1#ifndef ASM_X86__CHECKSUM_32_H
2#define _I386_CHECKSUM_H 2#define ASM_X86__CHECKSUM_32_H
3 3
4#include <linux/in6.h> 4#include <linux/in6.h>
5 5
@@ -186,4 +186,4 @@ static inline __wsum csum_and_copy_to_user(const void *src,
186 return (__force __wsum)-1; /* invalid checksum */ 186 return (__force __wsum)-1; /* invalid checksum */
187} 187}
188 188
189#endif 189#endif /* ASM_X86__CHECKSUM_32_H */
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
index 8bd861cc5267..110f403beb89 100644
--- a/include/asm-x86/checksum_64.h
+++ b/include/asm-x86/checksum_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_CHECKSUM_H 1#ifndef ASM_X86__CHECKSUM_64_H
2#define _X86_64_CHECKSUM_H 2#define ASM_X86__CHECKSUM_64_H
3 3
4/* 4/*
5 * Checksums for x86-64 5 * Checksums for x86-64
@@ -188,4 +188,4 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
188 return a; 188 return a;
189} 189}
190 190
191#endif 191#endif /* ASM_X86__CHECKSUM_64_H */
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index bf5a69d1329e..0622e45cdf7c 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_CMPXCHG_H 1#ifndef ASM_X86__CMPXCHG_32_H
2#define __ASM_CMPXCHG_H 2#define ASM_X86__CMPXCHG_32_H
3 3
4#include <linux/bitops.h> /* for LOCK_PREFIX */ 4#include <linux/bitops.h> /* for LOCK_PREFIX */
5 5
@@ -341,4 +341,4 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
341 341
342#endif 342#endif
343 343
344#endif 344#endif /* ASM_X86__CMPXCHG_32_H */
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index d9b26b9a28cf..63c1a5e61b99 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_CMPXCHG_H 1#ifndef ASM_X86__CMPXCHG_64_H
2#define __ASM_CMPXCHG_H 2#define ASM_X86__CMPXCHG_64_H
3 3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */ 4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5 5
@@ -93,6 +93,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
93 return old; 93 return old;
94} 94}
95 95
96/*
97 * Always use locked operations when touching memory shared with a
98 * hypervisor, since the system may be SMP even if the guest kernel
99 * isn't.
100 */
101static inline unsigned long __sync_cmpxchg(volatile void *ptr,
102 unsigned long old,
103 unsigned long new, int size)
104{
105 unsigned long prev;
106 switch (size) {
107 case 1:
108 asm volatile("lock; cmpxchgb %b1,%2"
109 : "=a"(prev)
110 : "q"(new), "m"(*__xg(ptr)), "0"(old)
111 : "memory");
112 return prev;
113 case 2:
114 asm volatile("lock; cmpxchgw %w1,%2"
115 : "=a"(prev)
116 : "r"(new), "m"(*__xg(ptr)), "0"(old)
117 : "memory");
118 return prev;
119 case 4:
120 asm volatile("lock; cmpxchgl %1,%2"
121 : "=a"(prev)
122 : "r"(new), "m"(*__xg(ptr)), "0"(old)
123 : "memory");
124 return prev;
125 }
126 return old;
127}
128
96static inline unsigned long __cmpxchg_local(volatile void *ptr, 129static inline unsigned long __cmpxchg_local(volatile void *ptr,
97 unsigned long old, 130 unsigned long old,
98 unsigned long new, int size) 131 unsigned long new, int size)
@@ -139,10 +172,14 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
139 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ 172 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
140 (unsigned long)(n), \ 173 (unsigned long)(n), \
141 sizeof(*(ptr)))) 174 sizeof(*(ptr))))
175#define sync_cmpxchg(ptr, o, n) \
176 ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
177 (unsigned long)(n), \
178 sizeof(*(ptr))))
142#define cmpxchg64_local(ptr, o, n) \ 179#define cmpxchg64_local(ptr, o, n) \
143({ \ 180({ \
144 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 181 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
145 cmpxchg_local((ptr), (o), (n)); \ 182 cmpxchg_local((ptr), (o), (n)); \
146}) 183})
147 184
148#endif 185#endif /* ASM_X86__CMPXCHG_64_H */
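
Editor's note: the point of __sync_cmpxchg() above is the hard-coded "lock;" prefix. A paravirtualized guest may be built UP while the host machine is SMP, so memory shared with the hypervisor must always be updated with locked operations. A user-space sketch of the 4-byte case, using the same instruction and constraints (x86 with GCC/Clang only):

/* Explicitly locked 32-bit compare-and-exchange, mirroring the
 * size==4 arm of __sync_cmpxchg() above. */
#include <assert.h>

static inline unsigned int
sync_cmpxchg32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned int prev;

	asm volatile("lock; cmpxchgl %1,%2"
		     : "=a" (prev)
		     : "r" (new), "m" (*ptr), "0" (old)
		     : "memory");
	return prev;	/* old value on success, current value on failure */
}

int main(void)
{
	volatile unsigned int v = 5;

	assert(sync_cmpxchg32(&v, 5, 9) == 5 && v == 9); /* swap happened */
	assert(sync_cmpxchg32(&v, 5, 7) == 9 && v == 9); /* stale old: no-op */
	return 0;
}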
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index 1793ac317a30..6732b150949e 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_COMPAT_H 1#ifndef ASM_X86__COMPAT_H
2#define _ASM_X86_64_COMPAT_H 2#define ASM_X86__COMPAT_H
3 3
4/* 4/*
5 * Architecture specific compatibility types 5 * Architecture specific compatibility types
@@ -215,4 +215,4 @@ static inline int is_compat_task(void)
215 return current_thread_info()->status & TS_COMPAT; 215 return current_thread_info()->status & TS_COMPAT;
216} 216}
217 217
218#endif /* _ASM_X86_64_COMPAT_H */ 218#endif /* ASM_X86__COMPAT_H */
diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h
index 73f2ea84fd74..83a115083f0d 100644
--- a/include/asm-x86/cpu.h
+++ b/include/asm-x86/cpu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_I386_CPU_H_ 1#ifndef ASM_X86__CPU_H
2#define _ASM_I386_CPU_H_ 2#define ASM_X86__CPU_H
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/cpu.h> 5#include <linux/cpu.h>
@@ -17,4 +17,4 @@ extern void arch_unregister_cpu(int);
17#endif 17#endif
18 18
19DECLARE_PER_CPU(int, cpu_state); 19DECLARE_PER_CPU(int, cpu_state);
20#endif /* _ASM_I386_CPU_H_ */ 20#endif /* ASM_X86__CPU_H */
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 0d609c837a41..adfeae6586e1 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -1,12 +1,18 @@
1/* 1/*
2 * Defines x86 CPU feature bits 2 * Defines x86 CPU feature bits
3 */ 3 */
4#ifndef _ASM_X86_CPUFEATURE_H 4#ifndef ASM_X86__CPUFEATURE_H
5#define _ASM_X86_CPUFEATURE_H 5#define ASM_X86__CPUFEATURE_H
6 6
7#include <asm/required-features.h> 7#include <asm/required-features.h>
8 8
9#define NCAPINTS 8 /* N 32-bit words worth of info */ 9#define NCAPINTS 9 /* N 32-bit words worth of info */
10
11/*
12 * Note: If the comment begins with a quoted string, that string is used
13 * in /proc/cpuinfo instead of the macro name. If the string is "",
14 * this feature bit is not displayed in /proc/cpuinfo at all.
15 */
10 16
11/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ 17/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
12#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ 18#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
@@ -14,7 +20,7 @@
14#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ 20#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
15#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ 21#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
16#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ 22#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
17#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ 23#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
18#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ 24#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
19#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ 25#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
20#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ 26#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
@@ -23,22 +29,23 @@
23#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ 29#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
24#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ 30#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
25#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ 31#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
26#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ 32#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */
33 /* (plus FCMOVcc, FCOMI with FPU) */
27#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ 34#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
28#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ 35#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
29#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ 36#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
30#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ 37#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */
31#define X86_FEATURE_DS (0*32+21) /* Debug Store */ 38#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
32#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ 39#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
33#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ 40#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
34#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ 41#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
35 /* of FPU context), and CR4.OSFXSR available */ 42#define X86_FEATURE_XMM (0*32+25) /* "sse" */
36#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ 43#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */
37#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ 44#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */
38#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
39#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ 45#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
40#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ 46#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */
41#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ 47#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
48#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
42 49
43/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ 50/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
44/* Don't duplicate feature flags which are redundant with Intel! */ 51/* Don't duplicate feature flags which are redundant with Intel! */
@@ -46,7 +53,8 @@
46#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ 53#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
47#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ 54#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
48#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ 55#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
49#define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */ 56#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */
57#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */
50#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ 58#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
51#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ 59#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
52#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ 60#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
@@ -64,48 +72,79 @@
64#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 72#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
65#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ 73#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
66/* cpu types for specific tunings: */ 74/* cpu types for specific tunings: */
67#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ 75#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */
68#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ 76#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */
69#define X86_FEATURE_P3 (3*32+ 6) /* P3 */ 77#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */
70#define X86_FEATURE_P4 (3*32+ 7) /* P4 */ 78#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */
71#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ 79#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
72#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ 80#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
73#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ 81#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
74#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ 82#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
75#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ 83#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
76#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ 84#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
77/* 14 free */ 85#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
78/* 15 free */ 86#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
79#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ 87#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */
80#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ 88#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */
81#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ 89#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
90#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
91#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
92#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
93#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
94#define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */
82 95
83/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 96/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
84#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ 97#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
85#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ 98#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */
86#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ 99#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
100#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */
101#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
102#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */
103#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */
87#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ 104#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
88#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ 105#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
106#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
89#define X86_FEATURE_CID (4*32+10) /* Context ID */ 107#define X86_FEATURE_CID (4*32+10) /* Context ID */
108#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */
90#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ 109#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
91#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ 110#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
111#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
92#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ 112#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
113#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
114#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
115#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
116#define X86_FEATURE_AES (4*32+25) /* AES instructions */
117#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
118#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
119#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
93 120
94/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 121/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
95#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ 122#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
96#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ 123#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */
97#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ 124#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
98#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ 125#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */
99#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ 126#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
100#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ 127#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
101#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ 128#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */
102#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ 129#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */
103#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ 130#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */
104#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ 131#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */
105 132
106/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ 133/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
107#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ 134#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
108#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ 135#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
136#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */
137#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */
138#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */
139#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */
140#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */
141#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
142#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
143#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
144#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
145#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
146#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
147#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
109 148
110/* 149/*
111 * Auxiliary flags: Linux defined - For features scattered in various 150 * Auxiliary flags: Linux defined - For features scattered in various
@@ -113,6 +152,13 @@
113 */ 152 */
114#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ 153#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
115 154
155/* Virtualization flags: Linux defined */
156#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
157#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
158#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
159#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
160#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
161
116#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 162#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
117 163
118#include <linux/bitops.h> 164#include <linux/bitops.h>
@@ -142,11 +188,11 @@ extern const char * const x86_power_flags[32];
142#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) 188#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
143#define setup_clear_cpu_cap(bit) do { \ 189#define setup_clear_cpu_cap(bit) do { \
144 clear_cpu_cap(&boot_cpu_data, bit); \ 190 clear_cpu_cap(&boot_cpu_data, bit); \
145 set_bit(bit, cleared_cpu_caps); \ 191 set_bit(bit, (unsigned long *)cleared_cpu_caps); \
146} while (0) 192} while (0)
147#define setup_force_cpu_cap(bit) do { \ 193#define setup_force_cpu_cap(bit) do { \
148 set_cpu_cap(&boot_cpu_data, bit); \ 194 set_cpu_cap(&boot_cpu_data, bit); \
149 clear_bit(bit, cleared_cpu_caps); \ 195 clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
150} while (0) 196} while (0)
151 197
152#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) 198#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
@@ -187,6 +233,10 @@ extern const char * const x86_power_flags[32];
187#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) 233#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
188#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) 234#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
189#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) 235#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
236#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
237#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
238#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
239#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
190 240
191#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) 241#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
192# define cpu_has_invlpg 1 242# define cpu_has_invlpg 1
@@ -218,4 +268,4 @@ extern const char * const x86_power_flags[32];
218 268
219#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 269#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
220 270
221#endif /* _ASM_X86_CPUFEATURE_H */ 271#endif /* ASM_X86__CPUFEATURE_H */
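
Editor's note: each 32-bit capability word above maps onto one CPUID output register; word 4 is CPUID leaf 1 ECX, so X86_FEATURE_XMM4_1 at (4*32+19) is simply ECX bit 19, shown as "sse4_1" in /proc/cpuinfo under the new quoted-string comment convention. A user-space sketch reading the same bits with GCC's <cpuid.h>:

/* Word 4 (CPUID.1:ECX) bits, matching the X86_FEATURE_* values in the
 * patch above. x86-only. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("pni    (ecx bit  0): %d\n", !!(ecx & (1u << 0)));
	printf("ssse3  (ecx bit  9): %d\n", !!(ecx & (1u << 9)));
	printf("sse4_1 (ecx bit 19): %d\n", !!(ecx & (1u << 19)));
	printf("sse4_2 (ecx bit 20): %d\n", !!(ecx & (1u << 20)));
	return 0;
}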
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
index d2526d3f7346..a863ead856f3 100644
--- a/include/asm-x86/current.h
+++ b/include/asm-x86/current.h
@@ -1,5 +1,39 @@
1#ifndef ASM_X86__CURRENT_H
2#define ASM_X86__CURRENT_H
3
1#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
2# include "current_32.h" 5#include <linux/compiler.h>
3#else 6#include <asm/percpu.h>
4# include "current_64.h" 7
5#endif 8struct task_struct;
9
10DECLARE_PER_CPU(struct task_struct *, current_task);
11static __always_inline struct task_struct *get_current(void)
12{
13 return x86_read_percpu(current_task);
14}
15
16#else /* X86_32 */
17
18#ifndef __ASSEMBLY__
19#include <asm/pda.h>
20
21struct task_struct;
22
23static __always_inline struct task_struct *get_current(void)
24{
25 return read_pda(pcurrent);
26}
27
28#else /* __ASSEMBLY__ */
29
30#include <asm/asm-offsets.h>
31#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
32
33#endif /* __ASSEMBLY__ */
34
35#endif /* X86_32 */
36
37#define current get_current()
38
39#endif /* ASM_X86__CURRENT_H */
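
Editor's note: after the merge above, both halves resolve `current` through per-CPU storage behind a get_current() accessor: a per-cpu variable on 32-bit, a PDA field on 64-bit. A rough user-space analogue, using thread-local storage in place of per-CPU data (build with -pthread; purely illustrative):

/* TLS stand-in for the per-CPU current_task pointer: each thread
 * ("CPU") sees its own task through the same `current` macro. */
#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct task_struct { int pid; };

static __thread struct task_struct *current_task;

static inline struct task_struct *get_current(void)
{
	return current_task;
}
#define current get_current()

static void *worker(void *arg)
{
	current_task = arg;	/* "context switch" installs the task */
	assert(current->pid == ((struct task_struct *)arg)->pid);
	return NULL;
}

int main(void)
{
	struct task_struct a = { .pid = 1 }, b = { .pid = 2 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, &a);
	pthread_create(&t2, NULL, worker, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}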
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
deleted file mode 100644
index 5af9bdb97a16..000000000000
--- a/include/asm-x86/current_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _I386_CURRENT_H
2#define _I386_CURRENT_H
3
4#include <linux/compiler.h>
5#include <asm/percpu.h>
6
7struct task_struct;
8
9DECLARE_PER_CPU(struct task_struct *, current_task);
10static __always_inline struct task_struct *get_current(void)
11{
12 return x86_read_percpu(current_task);
13}
14
15#define current get_current()
16
17#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
deleted file mode 100644
index 2d368ede2fc1..000000000000
--- a/include/asm-x86/current_64.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef _X86_64_CURRENT_H
2#define _X86_64_CURRENT_H
3
4#if !defined(__ASSEMBLY__)
5struct task_struct;
6
7#include <asm/pda.h>
8
9static inline struct task_struct *get_current(void)
10{
11 struct task_struct *t = read_pda(pcurrent);
12 return t;
13}
14
15#define current get_current()
16
17#else
18
19#ifndef ASM_OFFSET_H
20#include <asm/asm-offsets.h>
21#endif
22
23#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
24
25#endif
26
27#endif /* !(_X86_64_CURRENT_H) */
diff --git a/include/asm-x86/debugreg.h b/include/asm-x86/debugreg.h
index c6344d572b03..ecb6907c3ea4 100644
--- a/include/asm-x86/debugreg.h
+++ b/include/asm-x86/debugreg.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DEBUGREG_H 1#ifndef ASM_X86__DEBUGREG_H
2#define _ASM_X86_DEBUGREG_H 2#define ASM_X86__DEBUGREG_H
3 3
4 4
5/* Indicate the register numbers for a number of the specific 5/* Indicate the register numbers for a number of the specific
@@ -67,4 +67,4 @@
67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ 67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ 68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
69 69
70#endif 70#endif /* ASM_X86__DEBUGREG_H */
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index 409a649204aa..8a0da95b4fc5 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DELAY_H 1#ifndef ASM_X86__DELAY_H
2#define _ASM_X86_DELAY_H 2#define ASM_X86__DELAY_H
3 3
4/* 4/*
5 * Copyright (C) 1993 Linus Torvalds 5 * Copyright (C) 1993 Linus Torvalds
@@ -28,4 +28,4 @@ extern void __delay(unsigned long loops);
28 28
29void use_tsc_delay(void); 29void use_tsc_delay(void);
30 30
31#endif /* _ASM_X86_DELAY_H */ 31#endif /* ASM_X86__DELAY_H */
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index 268a012bcd79..ebc307817e98 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_DESC_H_ 1#ifndef ASM_X86__DESC_H
2#define _ASM_DESC_H_ 2#define ASM_X86__DESC_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <asm/desc_defs.h> 5#include <asm/desc_defs.h>
@@ -24,16 +24,27 @@ static inline void fill_ldt(struct desc_struct *desc,
24 desc->d = info->seg_32bit; 24 desc->d = info->seg_32bit;
25 desc->g = info->limit_in_pages; 25 desc->g = info->limit_in_pages;
26 desc->base2 = (info->base_addr & 0xff000000) >> 24; 26 desc->base2 = (info->base_addr & 0xff000000) >> 24;
27 /*
28 * Don't allow setting of the lm bit. It is useless anyway
29 * because 64bit system calls require __USER_CS:
30 */
31 desc->l = 0;
27} 32}
28 33
29extern struct desc_ptr idt_descr; 34extern struct desc_ptr idt_descr;
30extern gate_desc idt_table[]; 35extern gate_desc idt_table[];
31 36
37struct gdt_page {
38 struct desc_struct gdt[GDT_ENTRIES];
39} __attribute__((aligned(PAGE_SIZE)));
40DECLARE_PER_CPU(struct gdt_page, gdt_page);
41
42static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
43{
44 return per_cpu(gdt_page, cpu).gdt;
45}
46
32#ifdef CONFIG_X86_64 47#ifdef CONFIG_X86_64
33extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
34extern struct desc_ptr cpu_gdt_descr[];
35/* the cpu gdt accessor */
36#define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[x].address)
37 48
38static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func, 49static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
39 unsigned dpl, unsigned ist, unsigned seg) 50 unsigned dpl, unsigned ist, unsigned seg)
@@ -51,16 +62,6 @@ static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
51} 62}
52 63
53#else 64#else
54struct gdt_page {
55 struct desc_struct gdt[GDT_ENTRIES];
56} __attribute__((aligned(PAGE_SIZE)));
57DECLARE_PER_CPU(struct gdt_page, gdt_page);
58
59static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
60{
61 return per_cpu(gdt_page, cpu).gdt;
62}
63
64static inline void pack_gate(gate_desc *gate, unsigned char type, 65static inline void pack_gate(gate_desc *gate, unsigned char type,
65 unsigned long base, unsigned dpl, unsigned flags, 66 unsigned long base, unsigned dpl, unsigned flags,
66 unsigned short seg) 67 unsigned short seg)
@@ -101,7 +102,15 @@ static inline int desc_empty(const void *ptr)
101 native_write_gdt_entry(dt, entry, desc, type) 102 native_write_gdt_entry(dt, entry, desc, type)
102#define write_idt_entry(dt, entry, g) \ 103#define write_idt_entry(dt, entry, g) \
103 native_write_idt_entry(dt, entry, g) 104 native_write_idt_entry(dt, entry, g)
104#endif 105
106static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
107{
108}
109
110static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
111{
112}
113#endif /* CONFIG_PARAVIRT */
105 114
106static inline void native_write_idt_entry(gate_desc *idt, int entry, 115static inline void native_write_idt_entry(gate_desc *idt, int entry,
107 const gate_desc *gate) 116 const gate_desc *gate)
@@ -192,8 +201,8 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
192 unsigned cpu = smp_processor_id(); 201 unsigned cpu = smp_processor_id();
193 ldt_desc ldt; 202 ldt_desc ldt;
194 203
195 set_tssldt_descriptor(&ldt, (unsigned long)addr, 204 set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
196 DESC_LDT, entries * sizeof(ldt) - 1); 205 entries * LDT_ENTRY_SIZE - 1);
197 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, 206 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
198 &ldt, DESC_LDT); 207 &ldt, DESC_LDT);
199 asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); 208 asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
@@ -311,6 +320,28 @@ static inline void set_intr_gate(unsigned int n, void *addr)
311 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); 320 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
312} 321}
313 322
323#define SYS_VECTOR_FREE 0
324#define SYS_VECTOR_ALLOCED 1
325
326extern int first_system_vector;
327extern char system_vectors[];
328
329static inline void alloc_system_vector(int vector)
330{
331 if (system_vectors[vector] == SYS_VECTOR_FREE) {
332 system_vectors[vector] = SYS_VECTOR_ALLOCED;
333 if (first_system_vector > vector)
334 first_system_vector = vector;
335 } else
336 BUG();
337}
338
339static inline void alloc_intr_gate(unsigned int n, void *addr)
340{
341 alloc_system_vector(n);
342 set_intr_gate(n, addr);
343}
344
314/* 345/*
315 * This routine sets up an interrupt gate at descriptor privilege level 3. 346 * This routine sets up an interrupt gate at descriptor privilege level 3.
316 */ 347 */
@@ -379,4 +410,4 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist)
379 410
380#endif /* __ASSEMBLY__ */ 411#endif /* __ASSEMBLY__ */
381 412
382#endif 413#endif /* ASM_X86__DESC_H */
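
Editor's note: alloc_system_vector() above enforces two invariants: a vector may be handed out only once (BUG() otherwise), and first_system_vector always tracks the lowest allocated vector, so the external-interrupt range can end just below it. A toy model of the same bookkeeping (table size and vector numbers are illustrative):

/* Model of the alloc_system_vector() bookkeeping above; assert()
 * stands in for BUG(). */
#include <assert.h>

#define NR_VECTORS		256
#define SYS_VECTOR_FREE		0
#define SYS_VECTOR_ALLOCED	1

static int first_system_vector = NR_VECTORS;
static char system_vectors[NR_VECTORS];

static void alloc_system_vector(int vector)
{
	assert(system_vectors[vector] == SYS_VECTOR_FREE); /* double alloc */
	system_vectors[vector] = SYS_VECTOR_ALLOCED;
	if (first_system_vector > vector)
		first_system_vector = vector;
}

int main(void)
{
	alloc_system_vector(0xf2);
	alloc_system_vector(0xef);
	assert(first_system_vector == 0xef);	/* lowest allocated wins */
	return 0;
}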
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
index eccb4ea1f918..b881db664b46 100644
--- a/include/asm-x86/desc_defs.h
+++ b/include/asm-x86/desc_defs.h
@@ -1,6 +1,6 @@
1/* Written 2000 by Andi Kleen */ 1/* Written 2000 by Andi Kleen */
2#ifndef __ARCH_DESC_DEFS_H 2#ifndef ASM_X86__DESC_DEFS_H
3#define __ARCH_DESC_DEFS_H 3#define ASM_X86__DESC_DEFS_H
4 4
5/* 5/*
6 * Segment descriptor structure definitions, usable from both x86_64 and i386 6 * Segment descriptor structure definitions, usable from both x86_64 and i386
@@ -75,10 +75,14 @@ struct ldttss_desc64 {
75typedef struct gate_struct64 gate_desc; 75typedef struct gate_struct64 gate_desc;
76typedef struct ldttss_desc64 ldt_desc; 76typedef struct ldttss_desc64 ldt_desc;
77typedef struct ldttss_desc64 tss_desc; 77typedef struct ldttss_desc64 tss_desc;
78#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
79#define gate_segment(g) ((g).segment)
78#else 80#else
79typedef struct desc_struct gate_desc; 81typedef struct desc_struct gate_desc;
80typedef struct desc_struct ldt_desc; 82typedef struct desc_struct ldt_desc;
81typedef struct desc_struct tss_desc; 83typedef struct desc_struct tss_desc;
84#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
85#define gate_segment(g) ((g).a >> 16)
82#endif 86#endif
83 87
84struct desc_ptr { 88struct desc_ptr {
@@ -88,4 +92,4 @@ struct desc_ptr {
88 92
89#endif /* !__ASSEMBLY__ */ 93#endif /* !__ASSEMBLY__ */
90 94
91#endif 95#endif /* ASM_X86__DESC_DEFS_H */
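
Editor's note: the new gate_offset() macros reassemble a handler address that the descriptor format scatters across fields: 16 low bits, 16 middle bits, and (on 64-bit) 32 high bits. A round-trip sketch of the 64-bit variant (only the offset fields of the real gate are modeled):

/* Split a 64-bit handler address into gate fields and reassemble it
 * with the gate_offset() arithmetic from the patch above. */
#include <assert.h>
#include <stdint.h>

struct gate64 {			/* offset fields only, for brevity */
	uint16_t offset_low;
	uint16_t offset_middle;
	uint32_t offset_high;
};

#define gate_offset(g) ((g).offset_low				| \
			((uint64_t)(g).offset_middle << 16)	| \
			((uint64_t)(g).offset_high   << 32))

int main(void)
{
	uint64_t addr = 0xffffffff81234567ull;	/* made-up handler */
	struct gate64 g = {
		.offset_low    = addr & 0xffff,
		.offset_middle = (addr >> 16) & 0xffff,
		.offset_high   = addr >> 32,
	};

	assert(gate_offset(g) == addr);		/* exact round trip */
	return 0;
}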
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..1bece04c7d9d 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -1,13 +1,16 @@
1#ifndef _ASM_X86_DEVICE_H 1#ifndef ASM_X86__DEVICE_H
2#define _ASM_X86_DEVICE_H 2#define ASM_X86__DEVICE_H
3 3
4struct dev_archdata { 4struct dev_archdata {
5#ifdef CONFIG_ACPI 5#ifdef CONFIG_ACPI
6 void *acpi_handle; 6 void *acpi_handle;
7#endif 7#endif
8#ifdef CONFIG_X86_64
9	struct dma_mapping_ops *dma_ops;
10#endif
8#ifdef CONFIG_DMAR 11#ifdef CONFIG_DMAR
9 void *iommu; /* hook for IOMMU specific extension */ 12 void *iommu; /* hook for IOMMU specific extension */
10#endif 13#endif
11}; 14};
12 15
13#endif /* _ASM_X86_DEVICE_H */ 16#endif /* ASM_X86__DEVICE_H */
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index 9a2d644c08ef..f9530f23f1d6 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DIV64_H 1#ifndef ASM_X86__DIV64_H
2#define _ASM_X86_DIV64_H 2#define ASM_X86__DIV64_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
@@ -57,4 +57,4 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
57# include <asm-generic/div64.h> 57# include <asm-generic/div64.h>
58#endif /* CONFIG_X86_32 */ 58#endif /* CONFIG_X86_32 */
59 59
60#endif /* _ASM_X86_DIV64_H */ 60#endif /* ASM_X86__DIV64_H */
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index a1a4dc7fe6ec..219c33d6361c 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_DMA_MAPPING_H_ 1#ifndef ASM_X86__DMA_MAPPING_H
2#define _ASM_DMA_MAPPING_H_ 2#define ASM_X86__DMA_MAPPING_H
3 3
4/* 4/*
5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for 5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
@@ -9,25 +9,22 @@
9#include <linux/scatterlist.h> 9#include <linux/scatterlist.h>
10#include <asm/io.h> 10#include <asm/io.h>
11#include <asm/swiotlb.h> 11#include <asm/swiotlb.h>
12#include <asm-generic/dma-coherent.h>
12 13
13extern dma_addr_t bad_dma_address; 14extern dma_addr_t bad_dma_address;
14extern int iommu_merge; 15extern int iommu_merge;
15extern struct device fallback_dev; 16extern struct device x86_dma_fallback_dev;
16extern int panic_on_overflow; 17extern int panic_on_overflow;
17extern int forbid_dac;
18extern int force_iommu;
19 18
20struct dma_mapping_ops { 19struct dma_mapping_ops {
21 int (*mapping_error)(dma_addr_t dma_addr); 20 int (*mapping_error)(struct device *dev,
21 dma_addr_t dma_addr);
22 void* (*alloc_coherent)(struct device *dev, size_t size, 22 void* (*alloc_coherent)(struct device *dev, size_t size,
23 dma_addr_t *dma_handle, gfp_t gfp); 23 dma_addr_t *dma_handle, gfp_t gfp);
24 void (*free_coherent)(struct device *dev, size_t size, 24 void (*free_coherent)(struct device *dev, size_t size,
25 void *vaddr, dma_addr_t dma_handle); 25 void *vaddr, dma_addr_t dma_handle);
26 dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr, 26 dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
27 size_t size, int direction); 27 size_t size, int direction);
28 /* like map_single, but doesn't check the device mask */
29 dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
30 size_t size, int direction);
31 void (*unmap_single)(struct device *dev, dma_addr_t addr, 28 void (*unmap_single)(struct device *dev, dma_addr_t addr,
32 size_t size, int direction); 29 size_t size, int direction);
33 void (*sync_single_for_cpu)(struct device *hwdev, 30 void (*sync_single_for_cpu)(struct device *hwdev,
@@ -57,71 +54,95 @@ struct dma_mapping_ops {
57 int is_phys; 54 int is_phys;
58}; 55};
59 56
60extern const struct dma_mapping_ops *dma_ops; 57extern struct dma_mapping_ops *dma_ops;
61 58
62static inline int dma_mapping_error(dma_addr_t dma_addr) 59static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
63{ 60{
64 if (dma_ops->mapping_error) 61#ifdef CONFIG_X86_32
65 return dma_ops->mapping_error(dma_addr); 62 return dma_ops;
63#else
64 if (unlikely(!dev) || !dev->archdata.dma_ops)
65 return dma_ops;
66 else
67 return dev->archdata.dma_ops;
68#endif /* CONFIG_X86_32 */
69}
70
71/* Make sure we keep the same behaviour */
72static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
73{
74#ifdef CONFIG_X86_32
75 return 0;
76#else
77 struct dma_mapping_ops *ops = get_dma_ops(dev);
78 if (ops->mapping_error)
79 return ops->mapping_error(dev, dma_addr);
66 80
67 return (dma_addr == bad_dma_address); 81 return (dma_addr == bad_dma_address);
82#endif
68} 83}
69 84
70#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 85#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
71#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 86#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
72 87#define dma_is_consistent(d, h) (1)
73void *dma_alloc_coherent(struct device *dev, size_t size,
74 dma_addr_t *dma_handle, gfp_t flag);
75
76void dma_free_coherent(struct device *dev, size_t size,
77 void *vaddr, dma_addr_t dma_handle);
78
79 88
80extern int dma_supported(struct device *hwdev, u64 mask); 89extern int dma_supported(struct device *hwdev, u64 mask);
81extern int dma_set_mask(struct device *dev, u64 mask); 90extern int dma_set_mask(struct device *dev, u64 mask);
82 91
92extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
93 dma_addr_t *dma_addr, gfp_t flag);
94
83static inline dma_addr_t 95static inline dma_addr_t
84dma_map_single(struct device *hwdev, void *ptr, size_t size, 96dma_map_single(struct device *hwdev, void *ptr, size_t size,
85 int direction) 97 int direction)
86{ 98{
99 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
100
87 BUG_ON(!valid_dma_direction(direction)); 101 BUG_ON(!valid_dma_direction(direction));
88 return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction); 102 return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
89} 103}
90 104
91static inline void 105static inline void
92dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 106dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
93 int direction) 107 int direction)
94{ 108{
109 struct dma_mapping_ops *ops = get_dma_ops(dev);
110
95 BUG_ON(!valid_dma_direction(direction)); 111 BUG_ON(!valid_dma_direction(direction));
96 if (dma_ops->unmap_single) 112 if (ops->unmap_single)
97 dma_ops->unmap_single(dev, addr, size, direction); 113 ops->unmap_single(dev, addr, size, direction);
98} 114}
99 115
100static inline int 116static inline int
101dma_map_sg(struct device *hwdev, struct scatterlist *sg, 117dma_map_sg(struct device *hwdev, struct scatterlist *sg,
102 int nents, int direction) 118 int nents, int direction)
103{ 119{
120 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
121
104 BUG_ON(!valid_dma_direction(direction)); 122 BUG_ON(!valid_dma_direction(direction));
105 return dma_ops->map_sg(hwdev, sg, nents, direction); 123 return ops->map_sg(hwdev, sg, nents, direction);
106} 124}
107 125
108static inline void 126static inline void
109dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, 127dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
110 int direction) 128 int direction)
111{ 129{
130 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
131
112 BUG_ON(!valid_dma_direction(direction)); 132 BUG_ON(!valid_dma_direction(direction));
113 if (dma_ops->unmap_sg) 133 if (ops->unmap_sg)
114 dma_ops->unmap_sg(hwdev, sg, nents, direction); 134 ops->unmap_sg(hwdev, sg, nents, direction);
115} 135}
116 136
117static inline void 137static inline void
118dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 138dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
119 size_t size, int direction) 139 size_t size, int direction)
120{ 140{
141 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
142
121 BUG_ON(!valid_dma_direction(direction)); 143 BUG_ON(!valid_dma_direction(direction));
122 if (dma_ops->sync_single_for_cpu) 144 if (ops->sync_single_for_cpu)
123 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, 145 ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
124 direction);
125 flush_write_buffers(); 146 flush_write_buffers();
126} 147}
127 148
@@ -129,10 +150,11 @@ static inline void
129dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, 150dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
130 size_t size, int direction) 151 size_t size, int direction)
131{ 152{
153 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
154
132 BUG_ON(!valid_dma_direction(direction)); 155 BUG_ON(!valid_dma_direction(direction));
133 if (dma_ops->sync_single_for_device) 156 if (ops->sync_single_for_device)
134 dma_ops->sync_single_for_device(hwdev, dma_handle, size, 157 ops->sync_single_for_device(hwdev, dma_handle, size, direction);
135 direction);
136 flush_write_buffers(); 158 flush_write_buffers();
137} 159}
138 160
@@ -140,11 +162,12 @@ static inline void
140dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 162dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
141 unsigned long offset, size_t size, int direction) 163 unsigned long offset, size_t size, int direction)
142{ 164{
143 BUG_ON(!valid_dma_direction(direction)); 165 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
144 if (dma_ops->sync_single_range_for_cpu)
145 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
146 size, direction);
147 166
167 BUG_ON(!valid_dma_direction(direction));
168 if (ops->sync_single_range_for_cpu)
169 ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
170 size, direction);
148 flush_write_buffers(); 171 flush_write_buffers();
149} 172}
150 173
@@ -153,11 +176,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
153 unsigned long offset, size_t size, 176 unsigned long offset, size_t size,
154 int direction) 177 int direction)
155{ 178{
156 BUG_ON(!valid_dma_direction(direction)); 179 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
157 if (dma_ops->sync_single_range_for_device)
158 dma_ops->sync_single_range_for_device(hwdev, dma_handle,
159 offset, size, direction);
160 180
181 BUG_ON(!valid_dma_direction(direction));
182 if (ops->sync_single_range_for_device)
183 ops->sync_single_range_for_device(hwdev, dma_handle,
184 offset, size, direction);
161 flush_write_buffers(); 185 flush_write_buffers();
162} 186}
163 187
@@ -165,9 +189,11 @@ static inline void
165dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 189dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
166 int nelems, int direction) 190 int nelems, int direction)
167{ 191{
192 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
193
168 BUG_ON(!valid_dma_direction(direction)); 194 BUG_ON(!valid_dma_direction(direction));
169 if (dma_ops->sync_sg_for_cpu) 195 if (ops->sync_sg_for_cpu)
170 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); 196 ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
171 flush_write_buffers(); 197 flush_write_buffers();
172} 198}
173 199
@@ -175,9 +201,11 @@ static inline void
175dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 201dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
176 int nelems, int direction) 202 int nelems, int direction)
177{ 203{
204 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
205
178 BUG_ON(!valid_dma_direction(direction)); 206 BUG_ON(!valid_dma_direction(direction));
179 if (dma_ops->sync_sg_for_device) 207 if (ops->sync_sg_for_device)
180 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); 208 ops->sync_sg_for_device(hwdev, sg, nelems, direction);
181 209
182 flush_write_buffers(); 210 flush_write_buffers();
183} 211}
@@ -186,9 +214,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
186 size_t offset, size_t size, 214 size_t offset, size_t size,
187 int direction) 215 int direction)
188{ 216{
217 struct dma_mapping_ops *ops = get_dma_ops(dev);
218
189 BUG_ON(!valid_dma_direction(direction)); 219 BUG_ON(!valid_dma_direction(direction));
190 return dma_ops->map_single(dev, page_to_phys(page)+offset, 220 return ops->map_single(dev, page_to_phys(page) + offset,
191 size, direction); 221 size, direction);
192} 222}
193 223
194static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, 224static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
@@ -211,27 +241,68 @@ static inline int dma_get_cache_alignment(void)
211 return boot_cpu_data.x86_clflush_size; 241 return boot_cpu_data.x86_clflush_size;
212} 242}
213 243
214#define dma_is_consistent(d, h) (1) 244static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
245 gfp_t gfp)
246{
247 unsigned long dma_mask = 0;
215 248
216#ifdef CONFIG_X86_32 249 dma_mask = dev->coherent_dma_mask;
217# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY 250 if (!dma_mask)
218struct dma_coherent_mem { 251 dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
219 void *virt_base; 252
220 u32 device_base; 253 return dma_mask;
221 int size; 254}
222 int flags; 255
223 unsigned long *bitmap; 256static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
224}; 257{
258#ifdef CONFIG_X86_64
259 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
260
261 if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
262 gfp |= GFP_DMA32;
263#endif
264 return gfp;
265}
266
267static inline void *
268dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
269 gfp_t gfp)
270{
271 struct dma_mapping_ops *ops = get_dma_ops(dev);
272 void *memory;
273
274 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
275
276 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
277 return memory;
278
279 if (!dev) {
280 dev = &x86_dma_fallback_dev;
281 gfp |= GFP_DMA;
282 }
283
284 if (!is_device_dma_capable(dev))
285 return NULL;
286
287 if (!ops->alloc_coherent)
288 return NULL;
225 289
226extern int 290 return ops->alloc_coherent(dev, size, dma_handle,
227dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 291 dma_alloc_coherent_gfp_flags(dev, gfp));
228 dma_addr_t device_addr, size_t size, int flags); 292}
293
294static inline void dma_free_coherent(struct device *dev, size_t size,
295 void *vaddr, dma_addr_t bus)
296{
297 struct dma_mapping_ops *ops = get_dma_ops(dev);
229 298
230extern void 299 WARN_ON(irqs_disabled()); /* for portability */
231dma_release_declared_memory(struct device *dev); 300
301 if (dma_release_from_coherent(dev, get_order(size), vaddr))
302 return;
303
304 if (ops->free_coherent)
305 ops->free_coherent(dev, size, vaddr, bus);
306}
232 307
233extern void *
234dma_mark_declared_memory_occupied(struct device *dev,
235 dma_addr_t device_addr, size_t size);
236#endif /* CONFIG_X86_32 */
237#endif 308#endif
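The net effect of the dma-mapping.h rework above: every inline helper now resolves its operations table per device through get_dma_ops() instead of the global dma_ops, and dma_mapping_error() takes the device as well. A minimal caller-side sketch against the reworked interface (not part of the patch; 'mydev', 'buf' and 'len' are hypothetical):

	#include <linux/errno.h>
	#include <linux/dma-mapping.h>

	static int start_dma(struct device *mydev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(mydev, handle))	/* new two-argument form */
			return -EIO;

		/* ... program the device with 'handle' and wait for it ... */

		dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
		return 0;
	}

On X86_32 get_dma_ops() still returns the global table, so existing callers behave exactly as before; only 64-bit consults dev->archdata.dma_ops.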
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
index ca1098a7e580..c9f7a4eec555 100644
--- a/include/asm-x86/dma.h
+++ b/include/asm-x86/dma.h
@@ -5,8 +5,8 @@
5 * and John Boyd, Nov. 1992. 5 * and John Boyd, Nov. 1992.
6 */ 6 */
7 7
8#ifndef _ASM_X86_DMA_H 8#ifndef ASM_X86__DMA_H
9#define _ASM_X86_DMA_H 9#define ASM_X86__DMA_H
10 10
11#include <linux/spinlock.h> /* And spinlocks */ 11#include <linux/spinlock.h> /* And spinlocks */
12#include <asm/io.h> /* need byte IO */ 12#include <asm/io.h> /* need byte IO */
@@ -315,4 +315,4 @@ extern int isa_dma_bridge_buggy;
315#define isa_dma_bridge_buggy (0) 315#define isa_dma_bridge_buggy (0)
316#endif 316#endif
317 317
318#endif /* _ASM_X86_DMA_H */ 318#endif /* ASM_X86__DMA_H */
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h
index 4edf7514a750..1cff6fe81fa5 100644
--- a/include/asm-x86/dmi.h
+++ b/include/asm-x86/dmi.h
@@ -1,14 +1,8 @@
1#ifndef _ASM_X86_DMI_H 1#ifndef ASM_X86__DMI_H
2#define _ASM_X86_DMI_H 2#define ASM_X86__DMI_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5 5
6#ifdef CONFIG_X86_32
7
8#define dmi_alloc alloc_bootmem
9
10#else /* CONFIG_X86_32 */
11
12#define DMI_MAX_DATA 2048 6#define DMI_MAX_DATA 2048
13 7
14extern int dmi_alloc_index; 8extern int dmi_alloc_index;
@@ -25,10 +19,8 @@ static inline void *dmi_alloc(unsigned len)
25 return dmi_alloc_data + idx; 19 return dmi_alloc_data + idx;
26} 20}
27 21
28#endif
29
30/* Use early IO mappings for DMI because it's initialized early */ 22/* Use early IO mappings for DMI because it's initialized early */
31#define dmi_ioremap early_ioremap 23#define dmi_ioremap early_ioremap
32#define dmi_iounmap early_iounmap 24#define dmi_iounmap early_iounmap
33 25
34#endif 26#endif /* ASM_X86__DMI_H */
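With the bootmem variant gone, dmi_alloc() is the same on both sub-architectures: it hands out chunks of the dmi_alloc_data[] scratch array. A hedged sketch of a caller (dmi_dup_string() is a hypothetical helper, not from the patch):

	#include <linux/string.h>
	#include <asm/dmi.h>

	static char *dmi_dup_string(const char *src, unsigned len)
	{
		char *p = dmi_alloc(len + 1);	/* carves bytes out of dmi_alloc_data[] */

		if (p)
			memcpy(p, src, len + 1);	/* copy the string plus its NUL */
		return p;
	}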
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h
index 7881368142fa..c3c953a45b21 100644
--- a/include/asm-x86/ds.h
+++ b/include/asm-x86/ds.h
@@ -2,71 +2,237 @@
2 * Debug Store (DS) support 2 * Debug Store (DS) support
3 * 3 *
4 * This provides a low-level interface to the hardware's Debug Store 4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and 5 * feature that is used for branch trace store (BTS) and
6 * precise-event based sampling (PEBS). 6 * precise-event based sampling (PEBS).
7 * 7 *
8 * Different architectures use a different DS layout/pointer size. 8 * It manages:
9 * The below functions therefore work on a void*. 9 * - per-thread and per-cpu allocation of BTS and PEBS
10 * - buffer memory allocation (optional)
11 * - buffer overflow handling
12 * - buffer access
10 * 13 *
14 * It assumes:
15 * - get_task_struct on all parameter tasks
16 * - current is allowed to trace parameter tasks
11 * 17 *
 12 * Since there is no user for PEBS yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 * 18 *
15 * 19 * Copyright (C) 2007-2008 Intel Corporation.
16 * Copyright (C) 2007 Intel Corporation. 20 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */ 21 */
19 22
20#ifndef _ASM_X86_DS_H 23#ifndef ASM_X86__DS_H
21#define _ASM_X86_DS_H 24#define ASM_X86__DS_H
25
26#ifdef CONFIG_X86_DS
22 27
23#include <linux/types.h> 28#include <linux/types.h>
24#include <linux/init.h> 29#include <linux/init.h>
25 30
26struct cpuinfo_x86;
27 31
32struct task_struct;
28 33
29/* a branch trace record entry 34/*
35 * Request BTS or PEBS
36 *
 37 * Due to alignment constraints, the actual buffer may be slightly
38 * smaller than the requested or provided buffer.
30 * 39 *
31 * In order to unify the interface between various processor versions, 40 * Returns 0 on success; -Eerrno otherwise
32 * we use the below data structure for all processors. 41 *
42 * task: the task to request recording for;
43 * NULL for per-cpu recording on the current cpu
44 * base: the base pointer for the (non-pageable) buffer;
45 * NULL if buffer allocation requested
46 * size: the size of the requested or provided buffer
47 * ovfl: pointer to a function to be called on buffer overflow;
48 * NULL if cyclic buffer requested
33 */ 49 */
34enum bts_qualifier { 50typedef void (*ds_ovfl_callback_t)(struct task_struct *);
35 BTS_INVALID = 0, 51extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
36 BTS_BRANCH, 52 ds_ovfl_callback_t ovfl);
37 BTS_TASK_ARRIVES, 53extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
38 BTS_TASK_DEPARTS 54 ds_ovfl_callback_t ovfl);
39};
40 55
41struct bts_struct { 56/*
42 u64 qualifier; 57 * Release BTS or PEBS resources
43 union { 58 *
44 /* BTS_BRANCH */ 59 * Frees buffers allocated on ds_request.
45 struct { 60 *
46 u64 from_ip; 61 * Returns 0 on success; -Eerrno otherwise
47 u64 to_ip; 62 *
48 } lbr; 63 * task: the task to release resources for;
49 /* BTS_TASK_ARRIVES or 64 * NULL to release resources for the current cpu
50 BTS_TASK_DEPARTS */ 65 */
51 u64 jiffies; 66extern int ds_release_bts(struct task_struct *task);
52 } variant; 67extern int ds_release_pebs(struct task_struct *task);
68
69/*
70 * Return the (array) index of the write pointer.
71 * (assuming an array of BTS/PEBS records)
72 *
73 * Returns -Eerrno on error
74 *
75 * task: the task to access;
76 * NULL to access the current cpu
77 * pos (out): if not NULL, will hold the result
78 */
79extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
80extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
81
82/*
83 * Return the (array) index one record beyond the end of the array.
84 * (assuming an array of BTS/PEBS records)
85 *
86 * Returns -Eerrno on error
87 *
88 * task: the task to access;
89 * NULL to access the current cpu
90 * pos (out): if not NULL, will hold the result
91 */
92extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
93extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
94
95/*
96 * Provide a pointer to the BTS/PEBS record at parameter index.
97 * (assuming an array of BTS/PEBS records)
98 *
99 * The pointer points directly into the buffer. The user is
100 * responsible for copying the record.
101 *
102 * Returns the size of a single record on success; -Eerrno on error
103 *
104 * task: the task to access;
105 * NULL to access the current cpu
106 * index: the index of the requested record
107 * record (out): pointer to the requested record
108 */
109extern int ds_access_bts(struct task_struct *task,
110 size_t index, const void **record);
111extern int ds_access_pebs(struct task_struct *task,
112 size_t index, const void **record);
113
114/*
115 * Write one or more BTS/PEBS records at the write pointer index and
116 * advance the write pointer.
117 *
118 * If size is not a multiple of the record size, trailing bytes are
119 * zeroed out.
120 *
121 * May result in one or more overflow notifications.
122 *
123 * If called during overflow handling, that is, with index >=
124 * interrupt threshold, the write will wrap around.
125 *
126 * An overflow notification is given if and when the interrupt
127 * threshold is reached during or after the write.
128 *
129 * Returns the number of bytes written or -Eerrno.
130 *
131 * task: the task to access;
132 * NULL to access the current cpu
133 * buffer: the buffer to write
134 * size: the size of the buffer
135 */
136extern int ds_write_bts(struct task_struct *task,
137 const void *buffer, size_t size);
138extern int ds_write_pebs(struct task_struct *task,
139 const void *buffer, size_t size);
140
141/*
142 * Same as ds_write_bts/pebs, but omit ownership checks.
143 *
144 * This is needed to have some other task than the owner of the
145 * BTS/PEBS buffer or the parameter task itself write into the
146 * respective buffer.
147 */
148extern int ds_unchecked_write_bts(struct task_struct *task,
149 const void *buffer, size_t size);
150extern int ds_unchecked_write_pebs(struct task_struct *task,
151 const void *buffer, size_t size);
152
153/*
154 * Reset the write pointer of the BTS/PEBS buffer.
155 *
156 * Returns 0 on success; -Eerrno on error
157 *
158 * task: the task to access;
159 * NULL to access the current cpu
160 */
161extern int ds_reset_bts(struct task_struct *task);
162extern int ds_reset_pebs(struct task_struct *task);
163
164/*
165 * Clear the BTS/PEBS buffer and reset the write pointer.
166 * The entire buffer will be zeroed out.
167 *
168 * Returns 0 on success; -Eerrno on error
169 *
170 * task: the task to access;
171 * NULL to access the current cpu
172 */
173extern int ds_clear_bts(struct task_struct *task);
174extern int ds_clear_pebs(struct task_struct *task);
175
176/*
177 * Provide the PEBS counter reset value.
178 *
179 * Returns 0 on success; -Eerrno on error
180 *
181 * task: the task to access;
182 * NULL to access the current cpu
183 * value (out): the counter reset value
184 */
185extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
186
187/*
188 * Set the PEBS counter reset value.
189 *
190 * Returns 0 on success; -Eerrno on error
191 *
192 * task: the task to access;
193 * NULL to access the current cpu
194 * value: the new counter reset value
195 */
196extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
197
198/*
199 * Initialization
200 */
201struct cpuinfo_x86;
202extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
203
204
205
206/*
207 * The DS context - part of struct thread_struct.
208 */
209struct ds_context {
210 /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
211 unsigned char *ds;
212 /* the owner of the BTS and PEBS configuration, respectively */
213 struct task_struct *owner[2];
214 /* buffer overflow notification function for BTS and PEBS */
215 ds_ovfl_callback_t callback[2];
216 /* the original buffer address */
217 void *buffer[2];
218 /* the number of allocated pages for on-request allocated buffers */
219 unsigned int pages[2];
220 /* use count */
221 unsigned long count;
222 /* a pointer to the context location inside the thread_struct
223 * or the per_cpu context array */
224 struct ds_context **this;
225 /* a pointer to the task owning this context, or NULL, if the
226 * context is owned by a cpu */
227 struct task_struct *task;
53}; 228};
54 229
55/* Overflow handling mechanisms */ 230/* called by exit_thread() to free leftover contexts */
56#define DS_O_SIGNAL 1 /* send overflow signal */ 231extern void ds_free(struct ds_context *context);
57#define DS_O_WRAP 2 /* wrap around */ 232
58 233#else /* CONFIG_X86_DS */
59extern int ds_allocate(void **, size_t); 234
60extern int ds_free(void **); 235#define ds_init_intel(config) do {} while (0)
61extern int ds_get_bts_size(void *); 236
62extern int ds_get_bts_end(void *); 237#endif /* CONFIG_X86_DS */
63extern int ds_get_bts_index(void *); 238#endif /* ASM_X86__DS_H */
64extern int ds_set_overflow(void *, int);
65extern int ds_get_overflow(void *);
66extern int ds_clear(void *);
67extern int ds_read_bts(void *, int, struct bts_struct *);
68extern int ds_write_bts(void *, const struct bts_struct *);
69extern unsigned long ds_debugctl_mask(void);
70extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c);
71
72#endif /* _ASM_X86_DS_H */
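Taken together, the new ds.h exports a request/access/release lifecycle. A hedged usage sketch (assumes CONFIG_X86_DS; the callback body, buffer size and helper name are made up, not from the patch):

	#include <asm/ds.h>

	static void my_bts_ovfl(struct task_struct *task)
	{
		/* called on buffer overflow; drain or reset the buffer here */
	}

	static int bts_snapshot(void)
	{
		const void *record;
		size_t index;
		int err;

		/* NULL task: per-cpu tracing; NULL base: have the buffer allocated */
		err = ds_request_bts(NULL, NULL, PAGE_SIZE, my_bts_ovfl);
		if (err < 0)
			return err;

		if (!ds_get_bts_index(NULL, &index) && index > 0)
			/* returns the record size on success */
			err = ds_access_bts(NULL, index - 1, &record);

		ds_release_bts(NULL);
		return err;
	}

Note that ds_access_bts() returns a pointer straight into the trace buffer, so the record must be copied out before release.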
diff --git a/include/asm-x86/dwarf2.h b/include/asm-x86/dwarf2.h
index b3cbb0ccae18..21d1bc32ad7c 100644
--- a/include/asm-x86/dwarf2.h
+++ b/include/asm-x86/dwarf2.h
@@ -1,5 +1,61 @@
1#ifdef CONFIG_X86_32 1#ifndef ASM_X86__DWARF2_H
2# include "dwarf2_32.h" 2#define ASM_X86__DWARF2_H
3
4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files"
6#endif
7
8/*
9 Macros for dwarf2 CFI unwind table entries.
10 See "as.info" for details on these pseudo ops. Unfortunately
11 they are only supported in very new binutils, so define them
12 away for older version.
13 */
14
15#ifdef CONFIG_AS_CFI
16
17#define CFI_STARTPROC .cfi_startproc
18#define CFI_ENDPROC .cfi_endproc
19#define CFI_DEF_CFA .cfi_def_cfa
20#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
21#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
22#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
23#define CFI_OFFSET .cfi_offset
24#define CFI_REL_OFFSET .cfi_rel_offset
25#define CFI_REGISTER .cfi_register
26#define CFI_RESTORE .cfi_restore
27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined
30
31#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
32#define CFI_SIGNAL_FRAME .cfi_signal_frame
33#else
34#define CFI_SIGNAL_FRAME
35#endif
36
3#else 37#else
4# include "dwarf2_64.h" 38
 39/* Due to the structure of pre-existing code, don't use assembler line
40 comment character # to ignore the arguments. Instead, use a dummy macro. */
41.macro cfi_ignore a=0, b=0, c=0, d=0
42.endm
43
44#define CFI_STARTPROC cfi_ignore
45#define CFI_ENDPROC cfi_ignore
46#define CFI_DEF_CFA cfi_ignore
47#define CFI_DEF_CFA_REGISTER cfi_ignore
48#define CFI_DEF_CFA_OFFSET cfi_ignore
49#define CFI_ADJUST_CFA_OFFSET cfi_ignore
50#define CFI_OFFSET cfi_ignore
51#define CFI_REL_OFFSET cfi_ignore
52#define CFI_REGISTER cfi_ignore
53#define CFI_RESTORE cfi_ignore
54#define CFI_REMEMBER_STATE cfi_ignore
55#define CFI_RESTORE_STATE cfi_ignore
56#define CFI_UNDEFINED cfi_ignore
57#define CFI_SIGNAL_FRAME cfi_ignore
58
5#endif 59#endif
60
61#endif /* ASM_X86__DWARF2_H */
diff --git a/include/asm-x86/dwarf2_32.h b/include/asm-x86/dwarf2_32.h
deleted file mode 100644
index 6d66398a307d..000000000000
--- a/include/asm-x86/dwarf2_32.h
+++ /dev/null
@@ -1,61 +0,0 @@
1#ifndef _DWARF2_H
2#define _DWARF2_H
3
4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files"
6#endif
7
8/*
9 Macros for dwarf2 CFI unwind table entries.
10 See "as.info" for details on these pseudo ops. Unfortunately
11 they are only supported in very new binutils, so define them
12 away for older versions.
13 */
14
15#ifdef CONFIG_UNWIND_INFO
16
17#define CFI_STARTPROC .cfi_startproc
18#define CFI_ENDPROC .cfi_endproc
19#define CFI_DEF_CFA .cfi_def_cfa
20#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
21#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
22#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
23#define CFI_OFFSET .cfi_offset
24#define CFI_REL_OFFSET .cfi_rel_offset
25#define CFI_REGISTER .cfi_register
26#define CFI_RESTORE .cfi_restore
27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined
30
31#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
32#define CFI_SIGNAL_FRAME .cfi_signal_frame
33#else
34#define CFI_SIGNAL_FRAME
35#endif
36
37#else
38
39/* Due to the structure of pre-existing code, don't use assembler line
40 comment character # to ignore the arguments. Instead, use a dummy macro. */
41.macro ignore a=0, b=0, c=0, d=0
42.endm
43
44#define CFI_STARTPROC ignore
45#define CFI_ENDPROC ignore
46#define CFI_DEF_CFA ignore
47#define CFI_DEF_CFA_REGISTER ignore
48#define CFI_DEF_CFA_OFFSET ignore
49#define CFI_ADJUST_CFA_OFFSET ignore
50#define CFI_OFFSET ignore
51#define CFI_REL_OFFSET ignore
52#define CFI_REGISTER ignore
53#define CFI_RESTORE ignore
54#define CFI_REMEMBER_STATE ignore
55#define CFI_RESTORE_STATE ignore
56#define CFI_UNDEFINED ignore
57#define CFI_SIGNAL_FRAME ignore
58
59#endif
60
61#endif
diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h
deleted file mode 100644
index c950519a264d..000000000000
--- a/include/asm-x86/dwarf2_64.h
+++ /dev/null
@@ -1,56 +0,0 @@
1#ifndef _DWARF2_H
2#define _DWARF2_H 1
3
4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files"
6#endif
7
8/*
9 Macros for dwarf2 CFI unwind table entries.
10 See "as.info" for details on these pseudo ops. Unfortunately
11 they are only supported in very new binutils, so define them
12 away for older versions.
13 */
14
15#ifdef CONFIG_AS_CFI
16
17#define CFI_STARTPROC .cfi_startproc
18#define CFI_ENDPROC .cfi_endproc
19#define CFI_DEF_CFA .cfi_def_cfa
20#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
21#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
22#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
23#define CFI_OFFSET .cfi_offset
24#define CFI_REL_OFFSET .cfi_rel_offset
25#define CFI_REGISTER .cfi_register
26#define CFI_RESTORE .cfi_restore
27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined
30#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
31#define CFI_SIGNAL_FRAME .cfi_signal_frame
32#else
33#define CFI_SIGNAL_FRAME
34#endif
35
36#else
37
38/* use assembler line comment character # to ignore the arguments. */
39#define CFI_STARTPROC #
40#define CFI_ENDPROC #
41#define CFI_DEF_CFA #
42#define CFI_DEF_CFA_REGISTER #
43#define CFI_DEF_CFA_OFFSET #
44#define CFI_ADJUST_CFA_OFFSET #
45#define CFI_OFFSET #
46#define CFI_REL_OFFSET #
47#define CFI_REGISTER #
48#define CFI_RESTORE #
49#define CFI_REMEMBER_STATE #
50#define CFI_RESTORE_STATE #
51#define CFI_UNDEFINED #
52#define CFI_SIGNAL_FRAME #
53
54#endif
55
56#endif
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 7004251fc66b..5abbdec06bd2 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -1,13 +1,52 @@
1#ifndef __ASM_E820_H 1#ifndef ASM_X86__E820_H
2#define __ASM_E820_H 2#define ASM_X86__E820_H
3#define E820MAP 0x2d0 /* our map */ 3#define E820MAP 0x2d0 /* our map */
4#define E820MAX 128 /* number of entries in E820MAP */ 4#define E820MAX 128 /* number of entries in E820MAP */
5
6/*
7 * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
8 * constrained space in the zeropage. If we have more nodes than
9 * that, and if we've booted off EFI firmware, then the EFI tables
10 * passed us from the EFI firmware can list more nodes. Size our
11 * internal memory map tables to have room for these additional
12 * nodes, based on up to three entries per node for which the
13 * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
14 * plus E820MAX, allowing space for the possible duplicate E820
15 * entries that might need room in the same arrays, prior to the
16 * call to sanitize_e820_map() to remove duplicates. The allowance
17 * of three memory map entries per node is "enough" entries for
18 * the initial hardware platform motivating this mechanism to make
19 * use of additional EFI map entries. Future platforms may want
20 * to allow more than three entries per node or otherwise refine
21 * this size.
22 */
23
24/*
25 * Odd: 'make headers_check' complains about numa.h if I try
26 * to collapse the next two #ifdef lines to a single line:
27 * #if defined(__KERNEL__) && defined(CONFIG_EFI)
28 */
29#ifdef __KERNEL__
30#ifdef CONFIG_EFI
31#include <linux/numa.h>
32#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
33#else /* ! CONFIG_EFI */
34#define E820_X_MAX E820MAX
35#endif
36#else /* ! __KERNEL__ */
37#define E820_X_MAX E820MAX
38#endif
39
5#define E820NR 0x1e8 /* # entries in E820MAP */ 40#define E820NR 0x1e8 /* # entries in E820MAP */
6 41
7#define E820_RAM 1 42#define E820_RAM 1
8#define E820_RESERVED 2 43#define E820_RESERVED 2
9#define E820_ACPI 3 44#define E820_ACPI 3
10#define E820_NVS 4 45#define E820_NVS 4
46#define E820_UNUSABLE 5
47
48/* reserved RAM used by kernel itself */
49#define E820_RESERVED_KERN 128
11 50
12#ifndef __ASSEMBLY__ 51#ifndef __ASSEMBLY__
13struct e820entry { 52struct e820entry {
@@ -18,22 +57,90 @@ struct e820entry {
18 57
19struct e820map { 58struct e820map {
20 __u32 nr_map; 59 __u32 nr_map;
21 struct e820entry map[E820MAX]; 60 struct e820entry map[E820_X_MAX];
22}; 61};
62
63#ifdef __KERNEL__
64/* see comment in arch/x86/kernel/e820.c */
65extern struct e820map e820;
66extern struct e820map e820_saved;
67
68extern unsigned long pci_mem_start;
69extern int e820_any_mapped(u64 start, u64 end, unsigned type);
70extern int e820_all_mapped(u64 start, u64 end, unsigned type);
71extern void e820_add_region(u64 start, u64 size, int type);
72extern void e820_print_map(char *who);
73extern int
74sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, int *pnr_map);
75extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
76 unsigned new_type);
77extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
78 int checktype);
79extern void update_e820(void);
80extern void e820_setup_gap(void);
81extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
82 unsigned long start_addr, unsigned long long end_addr);
83struct setup_data;
84extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
85
86#if defined(CONFIG_X86_64) || \
87 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
88extern void e820_mark_nosave_regions(unsigned long limit_pfn);
89#else
90static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
91{
92}
93#endif
94
95#ifdef CONFIG_MEMTEST
96extern void early_memtest(unsigned long start, unsigned long end);
97#else
98static inline void early_memtest(unsigned long start, unsigned long end)
99{
100}
101#endif
102
103extern unsigned long end_user_pfn;
104
105extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
106extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
107extern void reserve_early(u64 start, u64 end, char *name);
108extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
109extern void free_early(u64 start, u64 end);
110extern void early_res_to_bootmem(u64 start, u64 end);
111extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
112
113extern unsigned long e820_end_of_ram_pfn(void);
114extern unsigned long e820_end_of_low_ram_pfn(void);
115extern int e820_find_active_region(const struct e820entry *ei,
116 unsigned long start_pfn,
117 unsigned long last_pfn,
118 unsigned long *ei_startpfn,
119 unsigned long *ei_endpfn);
120extern void e820_register_active_regions(int nid, unsigned long start_pfn,
121 unsigned long end_pfn);
122extern u64 e820_hole_size(u64 start, u64 end);
123extern void finish_e820_parsing(void);
124extern void e820_reserve_resources(void);
125extern void e820_reserve_resources_late(void);
126extern void setup_memory_map(void);
127extern char *default_machine_specific_memory_setup(void);
128extern char *machine_specific_memory_setup(void);
129extern char *memory_setup(void);
130#endif /* __KERNEL__ */
23#endif /* __ASSEMBLY__ */ 131#endif /* __ASSEMBLY__ */
24 132
25#define ISA_START_ADDRESS 0xa0000 133#define ISA_START_ADDRESS 0xa0000
26#define ISA_END_ADDRESS 0x100000 134#define ISA_END_ADDRESS 0x100000
135#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
27 136
28#define BIOS_BEGIN 0x000a0000 137#define BIOS_BEGIN 0x000a0000
29#define BIOS_END 0x00100000 138#define BIOS_END 0x00100000
30 139
31#ifdef __KERNEL__ 140#ifdef __KERNEL__
32#ifdef CONFIG_X86_32 141#include <linux/ioport.h>
33# include "e820_32.h" 142
34#else 143#define HIGH_MEMORY (1024*1024)
35# include "e820_64.h"
36#endif
37#endif /* __KERNEL__ */ 144#endif /* __KERNEL__ */
38 145
39#endif /* __ASM_E820_H */ 146#endif /* ASM_X86__E820_H */
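A hedged sketch against the consolidated e820 interface (claim_firmware_range() and its range are hypothetical, not from the patch): mark a range reserved if nothing in the map already claims it, and keep the early allocators off it:

	#include <linux/init.h>
	#include <asm/e820.h>

	static void __init claim_firmware_range(u64 start, u64 size)
	{
		/* e820_any_mapped() is true if any part of the range has that type */
		if (!e820_any_mapped(start, start + size, E820_RESERVED))
			e820_add_region(start, size, E820_RESERVED);
		update_e820();	/* re-sanitize and republish the map */

		/* also fence it off from boot-time allocations */
		reserve_early(start, start + size, "firmware");
	}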
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
deleted file mode 100644
index a9f7c6ec32bf..000000000000
--- a/include/asm-x86/e820_32.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * structures and definitions for the int 15, ax=e820 memory map
3 * scheme.
4 *
5 * In a nutshell, arch/i386/boot/setup.S populates a scratch table
6 * in the empty_zero_block that contains a list of usable address/size
7 * duples. In arch/i386/kernel/setup.c, this information is
8 * transferred into the e820map, and in arch/i386/mm/init.c, that
9 * new information is used to mark pages reserved or not.
10 *
11 */
12#ifndef __E820_HEADER
13#define __E820_HEADER
14
15#include <linux/ioport.h>
16
17#define HIGH_MEMORY (1024*1024)
18
19#ifndef __ASSEMBLY__
20
21extern struct e820map e820;
22extern void update_e820(void);
23
24extern int e820_all_mapped(unsigned long start, unsigned long end,
25 unsigned type);
26extern int e820_any_mapped(u64 start, u64 end, unsigned type);
27extern void propagate_e820_map(void);
28extern void register_bootmem_low_pages(unsigned long max_low_pfn);
29extern void add_memory_region(unsigned long long start,
30 unsigned long long size, int type);
31extern void update_memory_range(u64 start, u64 size, unsigned old_type,
32 unsigned new_type);
33extern void e820_register_memory(void);
34extern void limit_regions(unsigned long long size);
35extern void print_memory_map(char *who);
36extern void init_iomem_resources(struct resource *code_resource,
37 struct resource *data_resource,
38 struct resource *bss_resource);
39
40#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
41extern void e820_mark_nosave_regions(void);
42#else
43static inline void e820_mark_nosave_regions(void)
44{
45}
46#endif
47
48
49#endif/*!__ASSEMBLY__*/
50#endif/*__E820_HEADER*/
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
deleted file mode 100644
index 71c4d685d30d..000000000000
--- a/include/asm-x86/e820_64.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * structures and definitions for the int 15, ax=e820 memory map
3 * scheme.
4 *
5 * In a nutshell, setup.S populates a scratch table in the
6 * empty_zero_block that contains a list of usable address/size
7 * duples. In setup.c, this information is transferred into the e820map,
8 * and in init.c/numa.c, that new information is used to mark pages
9 * reserved or not.
10 */
11#ifndef __E820_HEADER
12#define __E820_HEADER
13
14#include <linux/ioport.h>
15
16#ifndef __ASSEMBLY__
17extern unsigned long find_e820_area(unsigned long start, unsigned long end,
18 unsigned long size, unsigned long align);
19extern unsigned long find_e820_area_size(unsigned long start,
20 unsigned long *sizep,
21 unsigned long align);
22extern void add_memory_region(unsigned long start, unsigned long size,
23 int type);
24extern void update_memory_range(u64 start, u64 size, unsigned old_type,
25 unsigned new_type);
26extern void setup_memory_region(void);
27extern void contig_e820_setup(void);
28extern unsigned long e820_end_of_ram(void);
29extern void e820_reserve_resources(void);
30extern void e820_mark_nosave_regions(void);
31extern int e820_any_mapped(unsigned long start, unsigned long end,
32 unsigned type);
33extern int e820_all_mapped(unsigned long start, unsigned long end,
34 unsigned type);
35extern int e820_any_non_reserved(unsigned long start, unsigned long end);
36extern int is_memory_any_valid(unsigned long start, unsigned long end);
37extern int e820_all_non_reserved(unsigned long start, unsigned long end);
38extern int is_memory_all_valid(unsigned long start, unsigned long end);
39extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
40
41extern void e820_setup_gap(void);
42extern void e820_register_active_regions(int nid, unsigned long start_pfn,
43 unsigned long end_pfn);
44
45extern void finish_e820_parsing(void);
46
47extern struct e820map e820;
48extern void update_e820(void);
49
50extern void reserve_early(unsigned long start, unsigned long end, char *name);
51extern void free_early(unsigned long start, unsigned long end);
52extern void early_res_to_bootmem(unsigned long start, unsigned long end);
53
54#endif/*!__ASSEMBLY__*/
55
56#endif/*__E820_HEADER*/
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
index a8088f63a30e..9493c5b27bbd 100644
--- a/include/asm-x86/edac.h
+++ b/include/asm-x86/edac.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_EDAC_H 1#ifndef ASM_X86__EDAC_H
2#define _ASM_X86_EDAC_H 2#define ASM_X86__EDAC_H
3 3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */ 4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5 5
@@ -15,4 +15,4 @@ static inline void atomic_scrub(void *va, u32 size)
15 asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); 15 asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
16} 16}
17 17
18#endif 18#endif /* ASM_X86__EDAC_H */
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index d53004b855cc..ed2de22e8705 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_EFI_H 1#ifndef ASM_X86__EFI_H
2#define _ASM_X86_EFI_H 2#define ASM_X86__EFI_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
@@ -86,12 +86,12 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
86 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ 86 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
87 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) 87 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
88 88
89extern void *efi_ioremap(unsigned long addr, unsigned long size); 89extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
90 90
91#endif /* CONFIG_X86_32 */ 91#endif /* CONFIG_X86_32 */
92 92
93extern void efi_reserve_bootmem(void); 93extern void efi_reserve_early(void);
94extern void efi_call_phys_prelog(void); 94extern void efi_call_phys_prelog(void);
95extern void efi_call_phys_epilog(void); 95extern void efi_call_phys_epilog(void);
96 96
97#endif 97#endif /* ASM_X86__EFI_H */
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 8f232dc5b5fe..5c4745bec906 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ELF_H 1#ifndef ASM_X86__ELF_H
2#define _ASM_X86_ELF_H 2#define ASM_X86__ELF_H
3 3
4/* 4/*
5 * ELF register definitions.. 5 * ELF register definitions..
@@ -83,9 +83,9 @@ extern unsigned int vdso_enabled;
83 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) 83 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
84 84
85#include <asm/processor.h> 85#include <asm/processor.h>
86#include <asm/system.h>
86 87
87#ifdef CONFIG_X86_32 88#ifdef CONFIG_X86_32
88#include <asm/system.h> /* for savesegment */
89#include <asm/desc.h> 89#include <asm/desc.h>
90 90
91#define elf_check_arch(x) elf_check_arch_ia32(x) 91#define elf_check_arch(x) elf_check_arch_ia32(x)
@@ -148,8 +148,9 @@ do { \
148 148
149static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp) 149static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
150{ 150{
151 asm volatile("movl %0,%%fs" :: "r" (0)); 151 loadsegment(fs, 0);
152 asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS)); 152 loadsegment(ds, __USER32_DS);
153 loadsegment(es, __USER32_DS);
153 load_gs_index(0); 154 load_gs_index(0);
154 regs->ip = ip; 155 regs->ip = ip;
155 regs->sp = sp; 156 regs->sp = sp;
@@ -332,4 +333,4 @@ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
332extern unsigned long arch_randomize_brk(struct mm_struct *mm); 333extern unsigned long arch_randomize_brk(struct mm_struct *mm);
333#define arch_randomize_brk arch_randomize_brk 334#define arch_randomize_brk arch_randomize_brk
334 335
335#endif 336#endif /* ASM_X86__ELF_H */
diff --git a/include/asm-x86/emergency-restart.h b/include/asm-x86/emergency-restart.h
index 8e6aef19f8f0..190d0d8b71e3 100644
--- a/include/asm-x86/emergency-restart.h
+++ b/include/asm-x86/emergency-restart.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_EMERGENCY_RESTART_H 1#ifndef ASM_X86__EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H 2#define ASM_X86__EMERGENCY_RESTART_H
3 3
4enum reboot_type { 4enum reboot_type {
5 BOOT_TRIPLE = 't', 5 BOOT_TRIPLE = 't',
@@ -15,4 +15,4 @@ extern enum reboot_type reboot_type;
15 15
16extern void machine_emergency_restart(void); 16extern void machine_emergency_restart(void);
17 17
18#endif /* _ASM_EMERGENCY_RESTART_H */ 18#endif /* ASM_X86__EMERGENCY_RESTART_H */
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/es7000/apic.h
index fbc8ad256f5a..bd2c44d1f7ac 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/es7000/apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef __ASM_ES7000_APIC_H
2#define __ASM_MACH_APIC_H 2#define __ASM_ES7000_APIC_H
3 3
4#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) 4#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
5#define esr_disable (1) 5#define esr_disable (1)
@@ -10,7 +10,7 @@ static inline int apic_id_registered(void)
10} 10}
11 11
12static inline cpumask_t target_cpus(void) 12static inline cpumask_t target_cpus(void)
13{ 13{
14#if defined CONFIG_ES7000_CLUSTERED_APIC 14#if defined CONFIG_ES7000_CLUSTERED_APIC
15 return CPU_MASK_ALL; 15 return CPU_MASK_ALL;
16#else 16#else
@@ -23,24 +23,24 @@ static inline cpumask_t target_cpus(void)
23#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) 23#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
24#define INT_DELIVERY_MODE (dest_LowestPrio) 24#define INT_DELIVERY_MODE (dest_LowestPrio)
25#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ 25#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */
26#define NO_BALANCE_IRQ (1) 26#define NO_BALANCE_IRQ (1)
27#undef WAKE_SECONDARY_VIA_INIT 27#undef WAKE_SECONDARY_VIA_INIT
28#define WAKE_SECONDARY_VIA_MIP 28#define WAKE_SECONDARY_VIA_MIP
29#else 29#else
30#define APIC_DFR_VALUE (APIC_DFR_FLAT) 30#define APIC_DFR_VALUE (APIC_DFR_FLAT)
31#define INT_DELIVERY_MODE (dest_Fixed) 31#define INT_DELIVERY_MODE (dest_Fixed)
32#define INT_DEST_MODE (0) /* phys delivery to target procs */ 32#define INT_DEST_MODE (0) /* phys delivery to target procs */
33#define NO_BALANCE_IRQ (0) 33#define NO_BALANCE_IRQ (0)
34#undef APIC_DEST_LOGICAL 34#undef APIC_DEST_LOGICAL
35#define APIC_DEST_LOGICAL 0x0 35#define APIC_DEST_LOGICAL 0x0
36#define WAKE_SECONDARY_VIA_INIT 36#define WAKE_SECONDARY_VIA_INIT
37#endif 37#endif
38 38
39static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) 39static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
40{ 40{
41 return 0; 41 return 0;
42} 42}
43static inline unsigned long check_apicid_present(int bit) 43static inline unsigned long check_apicid_present(int bit)
44{ 44{
45 return physid_isset(bit, phys_cpu_present_map); 45 return physid_isset(bit, phys_cpu_present_map);
46} 46}
@@ -66,9 +66,9 @@ static inline void init_apic_ldr(void)
66 unsigned long val; 66 unsigned long val;
67 int cpu = smp_processor_id(); 67 int cpu = smp_processor_id();
68 68
69 apic_write_around(APIC_DFR, APIC_DFR_VALUE); 69 apic_write(APIC_DFR, APIC_DFR_VALUE);
70 val = calculate_ldr(cpu); 70 val = calculate_ldr(cpu);
71 apic_write_around(APIC_LDR, val); 71 apic_write(APIC_LDR, val);
72} 72}
73 73
74#ifndef CONFIG_X86_GENERICARCH 74#ifndef CONFIG_X86_GENERICARCH
@@ -80,7 +80,7 @@ static inline void setup_apic_routing(void)
80{ 80{
81 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); 81 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
82 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", 82 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
83 (apic_version[apic] == 0x14) ? 83 (apic_version[apic] == 0x14) ?
84 "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); 84 "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
85} 85}
86 86
@@ -141,7 +141,7 @@ static inline void setup_portio_remap(void)
141extern unsigned int boot_cpu_physical_apicid; 141extern unsigned int boot_cpu_physical_apicid;
142static inline int check_phys_apicid_present(int cpu_physical_apicid) 142static inline int check_phys_apicid_present(int cpu_physical_apicid)
143{ 143{
144 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); 144 boot_cpu_physical_apicid = read_apic_id();
145 return (1); 145 return (1);
146} 146}
147 147
@@ -150,7 +150,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
150 int num_bits_set; 150 int num_bits_set;
151 int cpus_found = 0; 151 int cpus_found = 0;
152 int cpu; 152 int cpu;
153 int apicid; 153 int apicid;
154 154
155 num_bits_set = cpus_weight(cpumask); 155 num_bits_set = cpus_weight(cpumask);
156 /* Return id to all */ 156 /* Return id to all */
@@ -160,16 +160,16 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
160#else 160#else
161 return cpu_to_logical_apicid(0); 161 return cpu_to_logical_apicid(0);
162#endif 162#endif
163 /* 163 /*
164	 * The cpus in the mask must all be on the apic cluster. If they are not 164	 * The cpus in the mask must all be on the apic cluster. If they are not
165	 * on the same apicid cluster, return the default value of TARGET_CPUS. 165	 * on the same apicid cluster, return the default value of TARGET_CPUS.
166 */ 166 */
167 cpu = first_cpu(cpumask); 167 cpu = first_cpu(cpumask);
168 apicid = cpu_to_logical_apicid(cpu); 168 apicid = cpu_to_logical_apicid(cpu);
169 while (cpus_found < num_bits_set) { 169 while (cpus_found < num_bits_set) {
170 if (cpu_isset(cpu, cpumask)) { 170 if (cpu_isset(cpu, cpumask)) {
171 int new_apicid = cpu_to_logical_apicid(cpu); 171 int new_apicid = cpu_to_logical_apicid(cpu);
172 if (apicid_cluster(apicid) != 172 if (apicid_cluster(apicid) !=
173 apicid_cluster(new_apicid)){ 173 apicid_cluster(new_apicid)){
174 printk ("%s: Not a valid mask!\n",__FUNCTION__); 174 printk ("%s: Not a valid mask!\n",__FUNCTION__);
175#if defined CONFIG_ES7000_CLUSTERED_APIC 175#if defined CONFIG_ES7000_CLUSTERED_APIC
@@ -191,4 +191,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
191 return cpuid_apic >> index_msb; 191 return cpuid_apic >> index_msb;
192} 192}
193 193
194#endif /* __ASM_MACH_APIC_H */ 194#endif /* __ASM_ES7000_APIC_H */
diff --git a/include/asm-x86/es7000/apicdef.h b/include/asm-x86/es7000/apicdef.h
new file mode 100644
index 000000000000..8b234a3cb851
--- /dev/null
+++ b/include/asm-x86/es7000/apicdef.h
@@ -0,0 +1,13 @@
1#ifndef __ASM_ES7000_APICDEF_H
2#define __ASM_ES7000_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-x86/mach-es7000/mach_ipi.h b/include/asm-x86/es7000/ipi.h
index 5e61bd220b06..632a955fcc0a 100644
--- a/include/asm-x86/mach-es7000/mach_ipi.h
+++ b/include/asm-x86/es7000/ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef __ASM_ES7000_IPI_H
2#define __ASM_MACH_IPI_H 2#define __ASM_ES7000_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -21,4 +21,4 @@ static inline void send_IPI_all(int vector)
21 send_IPI_mask(cpu_online_map, vector); 21 send_IPI_mask(cpu_online_map, vector);
22} 22}
23 23
24#endif /* __ASM_MACH_IPI_H */ 24#endif /* __ASM_ES7000_IPI_H */
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/es7000/mpparse.h
index ef26d3523625..7b5c889d8e7d 100644
--- a/include/asm-x86/mach-es7000/mach_mpparse.h
+++ b/include/asm-x86/es7000/mpparse.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef __ASM_ES7000_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define __ASM_ES7000_MPPARSE_H
3 3
4#include <linux/acpi.h> 4#include <linux/acpi.h>
5 5
diff --git a/include/asm-x86/mach-es7000/mach_wakecpu.h b/include/asm-x86/es7000/wakecpu.h
index 84ff58314501..3ffc5a7bf667 100644
--- a/include/asm-x86/mach-es7000/mach_wakecpu.h
+++ b/include/asm-x86/es7000/wakecpu.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef __ASM_ES7000_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define __ASM_ES7000_WAKECPU_H
3 3
4/* 4/*
5 * This file copes with machines that wake up secondary CPUs by the 5 * This file copes with machines that wake up secondary CPUs by the
6 * INIT, INIT, STARTUP sequence. 6 * INIT, INIT, STARTUP sequence.
7 */ 7 */
diff --git a/include/asm-x86/fb.h b/include/asm-x86/fb.h
index 53018464aea6..aca38dbd9a64 100644
--- a/include/asm-x86/fb.h
+++ b/include/asm-x86/fb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FB_H 1#ifndef ASM_X86__FB_H
2#define _ASM_X86_FB_H 2#define ASM_X86__FB_H
3 3
4#include <linux/fb.h> 4#include <linux/fb.h>
5#include <linux/fs.h> 5#include <linux/fs.h>
@@ -18,4 +18,4 @@ extern int fb_is_primary_device(struct fb_info *info);
18static inline int fb_is_primary_device(struct fb_info *info) { return 0; } 18static inline int fb_is_primary_device(struct fb_info *info) { return 0; }
19#endif 19#endif
20 20
21#endif /* _ASM_X86_FB_H */ 21#endif /* ASM_X86__FB_H */
diff --git a/include/asm-x86/fixmap.h b/include/asm-x86/fixmap.h
index 5bd206973dca..78e33a1bc591 100644
--- a/include/asm-x86/fixmap.h
+++ b/include/asm-x86/fixmap.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_FIXMAP_H 1#ifndef ASM_X86__FIXMAP_H
2#define _ASM_FIXMAP_H 2#define ASM_X86__FIXMAP_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# include "fixmap_32.h" 5# include "fixmap_32.h"
@@ -7,7 +7,62 @@
7# include "fixmap_64.h" 7# include "fixmap_64.h"
8#endif 8#endif
9 9
10extern int fixmaps_set;
11
12void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
13void native_set_fixmap(enum fixed_addresses idx,
14 unsigned long phys, pgprot_t flags);
15
16#ifndef CONFIG_PARAVIRT
17static inline void __set_fixmap(enum fixed_addresses idx,
18 unsigned long phys, pgprot_t flags)
19{
20 native_set_fixmap(idx, phys, flags);
21}
22#endif
23
24#define set_fixmap(idx, phys) \
25 __set_fixmap(idx, phys, PAGE_KERNEL)
26
27/*
28 * Some hardware wants to get fixmapped without caching.
29 */
30#define set_fixmap_nocache(idx, phys) \
31 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
32
10#define clear_fixmap(idx) \ 33#define clear_fixmap(idx) \
11 __set_fixmap(idx, 0, __pgprot(0)) 34 __set_fixmap(idx, 0, __pgprot(0))
12 35
13#endif 36#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
37#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
38
39extern void __this_fixmap_does_not_exist(void);
40
41/*
42 * 'index to address' translation. If anyone tries to use the idx
 43 * directly without translation, we catch the bug with a NULL-dereference
44 * kernel oops. Illegal ranges of incoming indices are caught too.
45 */
46static __always_inline unsigned long fix_to_virt(const unsigned int idx)
47{
48 /*
49 * this branch gets completely eliminated after inlining,
50 * except when someone tries to use fixaddr indices in an
51 * illegal way. (such as mixing up address types or using
52 * out-of-range indices).
53 *
54 * If it doesn't get removed, the linker will complain
55 * loudly with a reasonably clear error message..
56 */
57 if (idx >= __end_of_fixed_addresses)
58 __this_fixmap_does_not_exist();
59
60 return __fix_to_virt(idx);
61}
62
63static inline unsigned long virt_to_fix(const unsigned long vaddr)
64{
65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
66 return __virt_to_fix(vaddr);
67}
68#endif /* ASM_X86__FIXMAP_H */
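A hedged sketch of the now-shared fixmap helpers (assumes a configuration that provides FIX_APIC_BASE; map_lapic() is a hypothetical wrapper, not from the patch): install an uncached mapping and translate the slot back to its virtual address:

	#include <asm/fixmap.h>

	static void __iomem *map_lapic(unsigned long phys_addr)
	{
		/* the local APIC must be mapped uncached */
		set_fixmap_nocache(FIX_APIC_BASE, phys_addr);

		/* fix_to_virt() is checked at link time against __end_of_fixed_addresses */
		return (void __iomem *)fix_to_virt(FIX_APIC_BASE);
	}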
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 4b96148e90c1..784e3e759866 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -10,8 +10,8 @@
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */ 11 */
12 12
13#ifndef _ASM_FIXMAP_32_H 13#ifndef ASM_X86__FIXMAP_32_H
14#define _ASM_FIXMAP_32_H 14#define ASM_X86__FIXMAP_32_H
15 15
16 16
17/* used by vmalloc.c, vsyscall.lds.S. 17/* used by vmalloc.c, vsyscall.lds.S.
@@ -79,10 +79,6 @@ enum fixed_addresses {
79 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 79 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
80 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 80 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
81#endif 81#endif
82#ifdef CONFIG_ACPI
83 FIX_ACPI_BEGIN,
84 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
85#endif
86#ifdef CONFIG_PCI_MMCONFIG 82#ifdef CONFIG_PCI_MMCONFIG
87 FIX_PCIE_MCFG, 83 FIX_PCIE_MCFG,
88#endif 84#endif
@@ -94,32 +90,27 @@ enum fixed_addresses {
94 * 256 temporary boot-time mappings, used by early_ioremap(), 90 * 256 temporary boot-time mappings, used by early_ioremap(),
95 * before ioremap() is functional. 91 * before ioremap() is functional.
96 * 92 *
97 * We round it up to the next 512 pages boundary so that we 93 * We round it up to the next 256 pages boundary so that we
98 * can have a single pgd entry and a single pte table: 94 * can have a single pgd entry and a single pte table:
99 */ 95 */
100#define NR_FIX_BTMAPS 64 96#define NR_FIX_BTMAPS 64
101#define FIX_BTMAPS_NESTING 4 97#define FIX_BTMAPS_NESTING 4
102 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 - 98 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
103 (__end_of_permanent_fixed_addresses & 511), 99 (__end_of_permanent_fixed_addresses & 255),
104 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, 100 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
105 FIX_WP_TEST, 101 FIX_WP_TEST,
102#ifdef CONFIG_ACPI
103 FIX_ACPI_BEGIN,
104 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
105#endif
106#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 106#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
107 FIX_OHCI1394_BASE, 107 FIX_OHCI1394_BASE,
108#endif 108#endif
109 __end_of_fixed_addresses 109 __end_of_fixed_addresses
110}; 110};
111 111
112extern void __set_fixmap(enum fixed_addresses idx,
113 unsigned long phys, pgprot_t flags);
114extern void reserve_top_address(unsigned long reserve); 112extern void reserve_top_address(unsigned long reserve);
115 113
116#define set_fixmap(idx, phys) \
117 __set_fixmap(idx, phys, PAGE_KERNEL)
118/*
119 * Some hardware wants to get fixmapped without caching.
120 */
121#define set_fixmap_nocache(idx, phys) \
122 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
123 114
124#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) 115#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
125 116
@@ -128,38 +119,5 @@ extern void reserve_top_address(unsigned long reserve);
128#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) 119#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
129#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) 120#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
130 121
131#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
132#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
133
134extern void __this_fixmap_does_not_exist(void);
135
136/*
137 * 'index to address' translation. If anyone tries to use the idx
138 * directly without translation, we catch the bug with a NULL-dereference
139 * kernel oops. Illegal ranges of incoming indices are caught too.
140 */
141static __always_inline unsigned long fix_to_virt(const unsigned int idx)
142{
143 /*
144 * this branch gets completely eliminated after inlining,
145 * except when someone tries to use fixaddr indices in an
146 * illegal way. (such as mixing up address types or using
147 * out-of-range indices).
148 *
149 * If it doesn't get removed, the linker will complain
150 * loudly with a reasonably clear error message..
151 */
152 if (idx >= __end_of_fixed_addresses)
153 __this_fixmap_does_not_exist();
154
155 return __fix_to_virt(idx);
156}
157
158static inline unsigned long virt_to_fix(const unsigned long vaddr)
159{
160 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
161 return __virt_to_fix(vaddr);
162}
163
164#endif /* !__ASSEMBLY__ */ 122#endif /* !__ASSEMBLY__ */
165#endif 123#endif /* ASM_X86__FIXMAP_32_H */
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 355d26a75a82..dafb24bc0424 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -8,10 +8,11 @@
8 * Copyright (C) 1998 Ingo Molnar 8 * Copyright (C) 1998 Ingo Molnar
9 */ 9 */
10 10
11#ifndef _ASM_FIXMAP_64_H 11#ifndef ASM_X86__FIXMAP_64_H
12#define _ASM_FIXMAP_64_H 12#define ASM_X86__FIXMAP_64_H
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <asm/acpi.h>
15#include <asm/apicdef.h> 16#include <asm/apicdef.h>
16#include <asm/page.h> 17#include <asm/page.h>
17#include <asm/vsyscall.h> 18#include <asm/vsyscall.h>
@@ -39,30 +40,38 @@ enum fixed_addresses {
39 VSYSCALL_HPET, 40 VSYSCALL_HPET,
40 FIX_DBGP_BASE, 41 FIX_DBGP_BASE,
41 FIX_EARLYCON_MEM_BASE, 42 FIX_EARLYCON_MEM_BASE,
42 FIX_HPET_BASE,
 43 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */ 43 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
44 FIX_IO_APIC_BASE_0, 44 FIX_IO_APIC_BASE_0,
45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, 45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
46 FIX_EFI_IO_MAP_LAST_PAGE, 46 FIX_EFI_IO_MAP_LAST_PAGE,
47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE 47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
48 + MAX_EFI_IO_PAGES - 1, 48 + MAX_EFI_IO_PAGES - 1,
49#ifdef CONFIG_PARAVIRT
50 FIX_PARAVIRT_BOOTMAP,
51#endif
52#ifdef CONFIG_ACPI
53 FIX_ACPI_BEGIN,
54 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
55#endif
49#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 56#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
50 FIX_OHCI1394_BASE, 57 FIX_OHCI1394_BASE,
51#endif 58#endif
59 __end_of_permanent_fixed_addresses,
60 /*
61 * 256 temporary boot-time mappings, used by early_ioremap(),
62 * before ioremap() is functional.
63 *
 64 * We round it up to the next 512-page boundary so that we
65 * can have a single pgd entry and a single pte table:
66 */
67#define NR_FIX_BTMAPS 64
68#define FIX_BTMAPS_NESTING 4
69 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 -
70 (__end_of_permanent_fixed_addresses & 511),
71 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
52 __end_of_fixed_addresses 72 __end_of_fixed_addresses
53}; 73};
54 74
55extern void __set_fixmap(enum fixed_addresses idx,
56 unsigned long phys, pgprot_t flags);
57
58#define set_fixmap(idx, phys) \
59 __set_fixmap(idx, phys, PAGE_KERNEL)
60/*
61 * Some hardware wants to get fixmapped without caching.
62 */
63#define set_fixmap_nocache(idx, phys) \
64 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
65
66#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) 75#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
67#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 76#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
68#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 77#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
@@ -71,30 +80,4 @@ extern void __set_fixmap(enum fixed_addresses idx,
71#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) 80#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
72#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) 81#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
73 82
74#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 83#endif /* ASM_X86__FIXMAP_64_H */
75
76extern void __this_fixmap_does_not_exist(void);
77
78/*
79 * 'index to address' translation. If anyone tries to use the idx
 80 * directly without translation, we catch the bug with a NULL-dereference
81 * kernel oops. Illegal ranges of incoming indices are caught too.
82 */
83static __always_inline unsigned long fix_to_virt(const unsigned int idx)
84{
85 /*
86 * this branch gets completely eliminated after inlining,
87 * except when someone tries to use fixaddr indices in an
88 * illegal way. (such as mixing up address types or using
89 * out-of-range indices).
90 *
91 * If it doesn't get removed, the linker will complain
92 * loudly with a reasonably clear error message..
93 */
94 if (idx >= __end_of_fixed_addresses)
95 __this_fixmap_does_not_exist();
96
97 return __fix_to_virt(idx);
98}
99
100#endif
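
Fixmap indices translate to addresses downward from FIXADDR_TOP; the fix_to_virt()/virt_to_fix() pairs deleted here (now provided by the shared fixmap.h) are simple shifts. A user-space model of the round trip, using a hypothetical FIXADDR_TOP:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define FIXADDR_TOP	0xfffff000UL		/* illustrative, not the kernel's */

#define __fix_to_virt(x)  (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))
#define __virt_to_fix(x)  ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long va = __fix_to_virt(3);	/* index 3 = 3 pages below top */

	/* Prints "idx 3 -> va 0xffffc000 -> idx 3". */
	printf("idx 3 -> va %#lx -> idx %lu\n", va, __virt_to_fix(va));
	return 0;
}
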
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
index dbe82a5c5eac..7d83a3a83e37 100644
--- a/include/asm-x86/floppy.h
+++ b/include/asm-x86/floppy.h
@@ -7,8 +7,8 @@
7 * 7 *
8 * Copyright (C) 1995 8 * Copyright (C) 1995
9 */ 9 */
10#ifndef _ASM_X86_FLOPPY_H 10#ifndef ASM_X86__FLOPPY_H
11#define _ASM_X86_FLOPPY_H 11#define ASM_X86__FLOPPY_H
12 12
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14 14
@@ -278,4 +278,4 @@ static int FDC2 = -1;
278 278
279#define EXTRA_FLOPPY_PARAMS 279#define EXTRA_FLOPPY_PARAMS
280 280
281#endif /* _ASM_X86_FLOPPY_H */ 281#endif /* ASM_X86__FLOPPY_H */
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
new file mode 100644
index 000000000000..be0e004ad148
--- /dev/null
+++ b/include/asm-x86/ftrace.h
@@ -0,0 +1,14 @@
1#ifndef ASM_X86__FTRACE_H
2#define ASM_X86__FTRACE_H
3
4#ifdef CONFIG_FTRACE
5#define MCOUNT_ADDR ((long)(mcount))
6#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
7
8#ifndef __ASSEMBLY__
9extern void mcount(void);
10#endif
11
12#endif /* CONFIG_FTRACE */
13
14#endif /* ASM_X86__FTRACE_H */
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index e7a76b37b333..06b924ef6fa5 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FUTEX_H 1#ifndef ASM_X86__FUTEX_H
2#define _ASM_X86_FUTEX_H 2#define ASM_X86__FUTEX_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -25,7 +25,7 @@
25 asm volatile("1:\tmovl %2, %0\n" \ 25 asm volatile("1:\tmovl %2, %0\n" \
26 "\tmovl\t%0, %3\n" \ 26 "\tmovl\t%0, %3\n" \
27 "\t" insn "\n" \ 27 "\t" insn "\n" \
28 "2:\tlock; cmpxchgl %3, %2\n" \ 28 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
29 "\tjnz\t1b\n" \ 29 "\tjnz\t1b\n" \
30 "3:\t.section .fixup,\"ax\"\n" \ 30 "3:\t.section .fixup,\"ax\"\n" \
31 "4:\tmov\t%5, %1\n" \ 31 "4:\tmov\t%5, %1\n" \
@@ -64,7 +64,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
64 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); 64 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
65 break; 65 break;
66 case FUTEX_OP_ADD: 66 case FUTEX_OP_ADD:
67 __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval, 67 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
68 uaddr, oparg); 68 uaddr, oparg);
69 break; 69 break;
70 case FUTEX_OP_OR: 70 case FUTEX_OP_OR:
@@ -122,7 +122,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
123 return -EFAULT; 123 return -EFAULT;
124 124
125 asm volatile("1:\tlock; cmpxchgl %3, %1\n" 125 asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
126 "2:\t.section .fixup, \"ax\"\n" 126 "2:\t.section .fixup, \"ax\"\n"
127 "3:\tmov %2, %0\n" 127 "3:\tmov %2, %0\n"
128 "\tjmp 2b\n" 128 "\tjmp 2b\n"
@@ -137,4 +137,4 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
137} 137}
138 138
139#endif 139#endif
140#endif 140#endif /* ASM_X86__FUTEX_H */
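
The futex hunks replace literal "lock; " prefixes with LOCK_PREFIX, which expands to the bus-lock prefix on SMP builds and lets uniprocessor kernels patch it away at boot. A minimal user-space analogue of the pattern (the kernel macro additionally records each prefix location in a .smp_locks section; that machinery is omitted here):

#include <stdio.h>

#ifdef SMP
# define LOCK_PREFIX "lock; "
#else
# define LOCK_PREFIX ""			/* UP: no bus lock needed */
#endif

static int atomic_cmpxchg(int *ptr, int old, int new)
{
	int prev = old;			/* cmpxchg compares against %eax */

	asm volatile(LOCK_PREFIX "cmpxchgl %2, %1"
		     : "+a" (prev), "+m" (*ptr)
		     : "r" (new)
		     : "memory");
	return prev;			/* value *ptr held before the exchange */
}

int main(void)
{
	int v = 5;

	/* Prints "prev=5 now=7". */
	printf("prev=%d now=%d\n", atomic_cmpxchg(&v, 5, 7), v);
	return 0;
}
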
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 90958ed993fa..605edb39ef9e 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -1,34 +1,73 @@
1#ifndef _ASM_X8664_IOMMU_H 1#ifndef ASM_X86__GART_H
2#define _ASM_X8664_IOMMU_H 1 2#define ASM_X86__GART_H
3 3
4extern void pci_iommu_shutdown(void); 4#include <asm/e820.h>
5extern void no_iommu_init(void); 5
6extern int force_iommu, no_iommu; 6extern void set_up_gart_resume(u32, u32);
7extern int iommu_detected; 7
8#ifdef CONFIG_GART_IOMMU
9extern void gart_iommu_init(void);
10extern void gart_iommu_shutdown(void);
11extern void __init gart_parse_options(char *);
12extern void early_gart_iommu_check(void);
13extern void gart_iommu_hole_init(void);
14extern int fallback_aper_order; 8extern int fallback_aper_order;
15extern int fallback_aper_force; 9extern int fallback_aper_force;
16extern int gart_iommu_aperture;
17extern int gart_iommu_aperture_allowed;
18extern int gart_iommu_aperture_disabled;
19extern int fix_aperture; 10extern int fix_aperture;
20#else
21#define gart_iommu_aperture 0
22#define gart_iommu_aperture_allowed 0
23 11
24static inline void early_gart_iommu_check(void) 12/* PTE bits. */
13#define GPTE_VALID 1
14#define GPTE_COHERENT 2
15
16/* Aperture control register bits. */
17#define GARTEN (1<<0)
18#define DISGARTCPU (1<<4)
19#define DISGARTIO (1<<5)
20
21/* GART cache control register bits. */
22#define INVGART (1<<0)
23#define GARTPTEERR (1<<1)
24
25/* K8 On-cpu GART registers */
26#define AMD64_GARTAPERTURECTL 0x90
27#define AMD64_GARTAPERTUREBASE 0x94
28#define AMD64_GARTTABLEBASE 0x98
29#define AMD64_GARTCACHECTL 0x9c
30#define AMD64_GARTEN (1<<0)
31
32extern int agp_amd64_init(void);
33
34static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
25{ 35{
36 u32 tmp, ctl;
37
38 /* address of the mappings table */
39 addr >>= 12;
40 tmp = (u32) addr<<4;
41 tmp &= ~0xf;
42 pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
43
44 /* Enable GART translation for this hammer. */
45 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
46 ctl |= GARTEN;
47 ctl &= ~(DISGARTCPU | DISGARTIO);
48 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
26} 49}
27 50
28static inline void gart_iommu_shutdown(void) 51static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
29{ 52{
30} 53 if (!aper_base)
54 return 0;
55
56 if (aper_base + aper_size > 0x100000000ULL) {
57 printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
58 return 0;
59 }
60 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
61 printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
62 return 0;
63 }
64 if (aper_size < min_size) {
 65 printk(KERN_INFO "Aperture too small (%d MB); need at least %d MB\n",
66 aper_size>>20, min_size>>20);
67 return 0;
68 }
31 69
32#endif 70 return 1;
71}
33 72
34#endif 73#endif /* ASM_X86__GART_H */
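
aperture_valid() above is pure range checking; only the e820 lookup ties it to the kernel. A stand-alone model with that lookup stubbed out (the _stub names are inventions of this sketch):

#include <stdio.h>
#include <stdint.h>

static int e820_any_mapped_stub(uint64_t start, uint64_t end)
{
	(void)start; (void)end;
	return 0;			/* pretend the range overlaps no RAM */
}

static int aperture_valid(uint64_t base, uint32_t size, uint32_t min_size)
{
	if (!base)
		return 0;		/* aperture not configured */
	if (base + size > 0x100000000ULL)
		return 0;		/* must sit entirely below 4GB */
	if (e820_any_mapped_stub(base, base + size))
		return 0;		/* must not point at usable RAM */
	if (size < min_size)
		return 0;		/* too small to be useful */
	return 1;
}

int main(void)
{
	/* Prints "1 0": 64MB at 2GB is fine, anything crossing 4GB is not. */
	printf("%d %d\n",
	       aperture_valid(0x80000000ULL,  64U << 20, 32U << 20),
	       aperture_valid(0x1f0000000ULL, 64U << 20, 32U << 20));
	return 0;
}
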
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index b02ea6e17de8..34280f027664 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_GENAPIC_H 1#ifndef ASM_X86__GENAPIC_32_H
2#define _ASM_GENAPIC_H 1 2#define ASM_X86__GENAPIC_32_H
3 3
4#include <asm/mpspec.h> 4#include <asm/mpspec.h>
5 5
@@ -118,6 +118,7 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
118#define get_uv_system_type() UV_NONE 118#define get_uv_system_type() UV_NONE
119#define is_uv_system() 0 119#define is_uv_system() 0
120#define uv_wakeup_secondary(a, b) 1 120#define uv_wakeup_secondary(a, b) 1
121#define uv_system_init() do {} while (0)
121 122
122 123
123#endif 124#endif /* ASM_X86__GENAPIC_32_H */
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index 1de931b263ce..ed6a4886c082 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_GENAPIC_H 1#ifndef ASM_X86__GENAPIC_64_H
2#define _ASM_GENAPIC_H 1 2#define ASM_X86__GENAPIC_64_H
3 3
4/* 4/*
5 * Copyright 2004 James Cleverdon, IBM. 5 * Copyright 2004 James Cleverdon, IBM.
@@ -14,6 +14,7 @@
14 14
15struct genapic { 15struct genapic {
16 char *name; 16 char *name;
17 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
17 u32 int_delivery_mode; 18 u32 int_delivery_mode;
18 u32 int_dest_mode; 19 u32 int_dest_mode;
19 int (*apic_id_registered)(void); 20 int (*apic_id_registered)(void);
@@ -24,17 +25,24 @@ struct genapic {
24 void (*send_IPI_mask)(cpumask_t mask, int vector); 25 void (*send_IPI_mask)(cpumask_t mask, int vector);
25 void (*send_IPI_allbutself)(int vector); 26 void (*send_IPI_allbutself)(int vector);
26 void (*send_IPI_all)(int vector); 27 void (*send_IPI_all)(int vector);
28 void (*send_IPI_self)(int vector);
27 /* */ 29 /* */
28 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); 30 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
29 unsigned int (*phys_pkg_id)(int index_msb); 31 unsigned int (*phys_pkg_id)(int index_msb);
32 unsigned int (*get_apic_id)(unsigned long x);
33 unsigned long (*set_apic_id)(unsigned int id);
34 unsigned long apic_id_mask;
30}; 35};
31 36
32extern struct genapic *genapic; 37extern struct genapic *genapic;
33 38
34extern struct genapic apic_flat; 39extern struct genapic apic_flat;
35extern struct genapic apic_physflat; 40extern struct genapic apic_physflat;
41extern struct genapic apic_x2apic_cluster;
42extern struct genapic apic_x2apic_phys;
36extern int acpi_madt_oem_check(char *, char *); 43extern int acpi_madt_oem_check(char *, char *);
37 44
45extern void apic_send_IPI_self(int vector);
38enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; 46enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
39extern enum uv_system_type get_uv_system_type(void); 47extern enum uv_system_type get_uv_system_type(void);
40extern int is_uv_system(void); 48extern int is_uv_system(void);
@@ -42,6 +50,9 @@ extern int is_uv_system(void);
42extern struct genapic apic_x2apic_uv_x; 50extern struct genapic apic_x2apic_uv_x;
43DECLARE_PER_CPU(int, x2apic_extra_bits); 51DECLARE_PER_CPU(int, x2apic_extra_bits);
44extern void uv_cpu_init(void); 52extern void uv_cpu_init(void);
53extern void uv_system_init(void);
45extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); 54extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
46 55
47#endif 56extern void setup_apic_routing(void);
57
58#endif /* ASM_X86__GENAPIC_64_H */
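
struct genapic is an ops table: each APIC driver fills one in, and a single global pointer selects the active driver at boot. A toy version of the pattern (all names here are illustrative, not the kernel's):

#include <stdio.h>

struct toy_genapic {
	const char *name;
	void (*send_ipi_self)(int vector);
};

static void flat_send_ipi_self(int vector)
{
	printf("flat: IPI vector %#x to self\n", vector);
}

static struct toy_genapic apic_flat_toy = {
	.name		= "flat",
	.send_ipi_self	= flat_send_ipi_self,
};

/* Boot code would probe the hardware and repoint this as needed. */
static struct toy_genapic *genapic = &apic_flat_toy;

int main(void)
{
	genapic->send_ipi_self(0xf0);	/* callers never name a driver */
	return 0;
}
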
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 6e6458853a36..3f3444be2638 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -7,8 +7,8 @@
7 * as published by the Free Software Foundation. 7 * as published by the Free Software Foundation.
8 */ 8 */
9 9
10#ifndef _ASM_GEODE_H_ 10#ifndef ASM_X86__GEODE_H
11#define _ASM_GEODE_H_ 11#define ASM_X86__GEODE_H
12 12
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <linux/io.h> 14#include <linux/io.h>
@@ -50,6 +50,7 @@ extern int geode_get_dev_base(unsigned int dev);
50#define MSR_PIC_YSEL_HIGH 0x51400021 50#define MSR_PIC_YSEL_HIGH 0x51400021
51#define MSR_PIC_ZSEL_LOW 0x51400022 51#define MSR_PIC_ZSEL_LOW 0x51400022
52#define MSR_PIC_ZSEL_HIGH 0x51400023 52#define MSR_PIC_ZSEL_HIGH 0x51400023
53#define MSR_PIC_IRQM_LPC 0x51400025
53 54
54#define MSR_MFGPT_IRQ 0x51400028 55#define MSR_MFGPT_IRQ 0x51400028
55#define MSR_MFGPT_NR 0x51400029 56#define MSR_MFGPT_NR 0x51400029
@@ -112,8 +113,8 @@ extern int geode_get_dev_base(unsigned int dev);
112#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ 113#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
113#define VSA_VR_SIGNATURE 0x0003 114#define VSA_VR_SIGNATURE 0x0003
114#define VSA_VR_MEM_SIZE 0x0200 115#define VSA_VR_MEM_SIZE 0x0200
115#define VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ 116#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
116 117#define GSW_VSA_SIG 0x534d /* General Software signature */
117/* GPIO */ 118/* GPIO */
118 119
119#define GPIO_OUTPUT_VAL 0x00 120#define GPIO_OUTPUT_VAL 0x00
@@ -237,7 +238,7 @@ static inline u16 geode_mfgpt_read(int timer, u16 reg)
237} 238}
238 239
239extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); 240extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
240extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable); 241extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
241extern int geode_mfgpt_alloc_timer(int timer, int domain); 242extern int geode_mfgpt_alloc_timer(int timer, int domain);
242 243
243#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) 244#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
@@ -249,4 +250,4 @@ extern int __init mfgpt_timer_setup(void);
249static inline int mfgpt_timer_setup(void) { return 0; } 250static inline int mfgpt_timer_setup(void) { return 0; }
250#endif 251#endif
251 252
252#endif 253#endif /* ASM_X86__GEODE_H */
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index ff87fca0caf9..497fb980d962 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -1,6 +1,56 @@
1/*
2 * Generic GPIO API implementation for x86.
3 *
4 * Derived from the generic GPIO API for powerpc:
5 *
6 * Copyright (c) 2007-2008 MontaVista Software, Inc.
7 *
8 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
 1#ifndef _ASM_I386_GPIO_H 16#ifndef ASM_X86__GPIO_H
 2#define _ASM_I386_GPIO_H 17#define ASM_X86__GPIO_H
3 18
4#include <gpio.h> 19#include <asm-generic/gpio.h>
20
21#ifdef CONFIG_GPIOLIB
22
23/*
24 * Just call gpiolib.
25 */
26static inline int gpio_get_value(unsigned int gpio)
27{
28 return __gpio_get_value(gpio);
29}
30
31static inline void gpio_set_value(unsigned int gpio, int value)
32{
33 __gpio_set_value(gpio, value);
34}
35
36static inline int gpio_cansleep(unsigned int gpio)
37{
38 return __gpio_cansleep(gpio);
39}
40
41/*
 42 * Not implemented yet.
43 */
44static inline int gpio_to_irq(unsigned int gpio)
45{
46 return -ENOSYS;
47}
48
49static inline int irq_to_gpio(unsigned int irq)
50{
51 return -EINVAL;
52}
53
54#endif /* CONFIG_GPIOLIB */
5 55
6#endif /* _ASM_I386_GPIO_H */ 56#endif /* ASM_X86__GPIO_H */
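
The new gpio.h is thin wrappers: each helper either delegates to gpiolib or returns an errno for operations x86 does not map yet. A user-space sketch of the same shape (__gpio_get_value_stub stands in for gpiolib):

#include <stdio.h>
#include <errno.h>

static int __gpio_get_value_stub(unsigned int gpio)
{
	return gpio & 1;		/* fake hardware: odd lines read high */
}

static inline int gpio_get_value(unsigned int gpio)
{
	return __gpio_get_value_stub(gpio);	/* just delegate */
}

static inline int gpio_to_irq(unsigned int gpio)
{
	(void)gpio;
	return -ENOSYS;			/* no GPIO-to-IRQ mapping yet */
}

int main(void)
{
	/* Prints "gpio 5 = 1, irq = -38" (-ENOSYS on Linux). */
	printf("gpio 5 = %d, irq = %d\n", gpio_get_value(5), gpio_to_irq(5));
	return 0;
}
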
diff --git a/include/asm-x86/hardirq.h b/include/asm-x86/hardirq.h
index 314434d664e7..000787df66e6 100644
--- a/include/asm-x86/hardirq.h
+++ b/include/asm-x86/hardirq.h
@@ -3,3 +3,9 @@
3#else 3#else
4# include "hardirq_64.h" 4# include "hardirq_64.h"
5#endif 5#endif
6
7extern u64 arch_irq_stat_cpu(unsigned int cpu);
8#define arch_irq_stat_cpu arch_irq_stat_cpu
9
10extern u64 arch_irq_stat(void);
11#define arch_irq_stat arch_irq_stat
diff --git a/include/asm-x86/hardirq_32.h b/include/asm-x86/hardirq_32.h
index 4f85f0f4b563..700fe230d919 100644
--- a/include/asm-x86/hardirq_32.h
+++ b/include/asm-x86/hardirq_32.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_HARDIRQ_H 1#ifndef ASM_X86__HARDIRQ_32_H
2#define __ASM_HARDIRQ_H 2#define ASM_X86__HARDIRQ_32_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/irq.h> 5#include <linux/irq.h>
@@ -25,4 +25,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
25void ack_bad_irq(unsigned int irq); 25void ack_bad_irq(unsigned int irq);
26#include <linux/irq_cpustat.h> 26#include <linux/irq_cpustat.h>
27 27
28#endif /* __ASM_HARDIRQ_H */ 28#endif /* ASM_X86__HARDIRQ_32_H */
diff --git a/include/asm-x86/hardirq_64.h b/include/asm-x86/hardirq_64.h
index 95d5e090ed89..f8bd2919a8ce 100644
--- a/include/asm-x86/hardirq_64.h
+++ b/include/asm-x86/hardirq_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_HARDIRQ_H 1#ifndef ASM_X86__HARDIRQ_64_H
2#define __ASM_HARDIRQ_H 2#define ASM_X86__HARDIRQ_64_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/irq.h> 5#include <linux/irq.h>
@@ -20,4 +20,4 @@
20 20
21extern void ack_bad_irq(unsigned int irq); 21extern void ack_bad_irq(unsigned int irq);
22 22
23#endif /* __ASM_HARDIRQ_H */ 23#endif /* ASM_X86__HARDIRQ_64_H */
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index e153f3b44774..bc3f6a280316 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -15,8 +15,8 @@
15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
16 */ 16 */
17 17
18#ifndef _ASM_HIGHMEM_H 18#ifndef ASM_X86__HIGHMEM_H
19#define _ASM_HIGHMEM_H 19#define ASM_X86__HIGHMEM_H
20 20
21#ifdef __KERNEL__ 21#ifdef __KERNEL__
22 22
@@ -74,6 +74,9 @@ struct page *kmap_atomic_to_page(void *ptr);
74 74
75#define flush_cache_kmaps() do { } while (0) 75#define flush_cache_kmaps() do { } while (0)
76 76
77extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
78 unsigned long end_pfn);
79
77#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
78 81
79#endif /* _ASM_HIGHMEM_H */ 82#endif /* ASM_X86__HIGHMEM_H */
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h
index 6a9b4ac59bf7..cbbbb6d4dd32 100644
--- a/include/asm-x86/hpet.h
+++ b/include/asm-x86/hpet.h
@@ -1,5 +1,5 @@
1#ifndef ASM_X86_HPET_H 1#ifndef ASM_X86__HPET_H
2#define ASM_X86_HPET_H 2#define ASM_X86__HPET_H
3 3
4#ifdef CONFIG_HPET_TIMER 4#ifdef CONFIG_HPET_TIMER
5 5
@@ -86,8 +86,8 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
86#else /* CONFIG_HPET_TIMER */ 86#else /* CONFIG_HPET_TIMER */
87 87
88static inline int hpet_enable(void) { return 0; } 88static inline int hpet_enable(void) { return 0; }
89static inline unsigned long hpet_readl(unsigned long a) { return 0; }
90static inline int is_hpet_enabled(void) { return 0; } 89static inline int is_hpet_enabled(void) { return 0; }
90#define hpet_readl(a) 0
91 91
92#endif 92#endif
93#endif /* ASM_X86_HPET_H */ 93#endif /* ASM_X86__HPET_H */
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index 14171a4924f6..0b7ec5dc0884 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_HUGETLB_H 1#ifndef ASM_X86__HUGETLB_H
2#define _ASM_X86_HUGETLB_H 2#define ASM_X86__HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5
@@ -14,11 +14,13 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
14 * If the arch doesn't supply something else, assume that hugepage 14 * If the arch doesn't supply something else, assume that hugepage
15 * size aligned regions are ok without further preparation. 15 * size aligned regions are ok without further preparation.
16 */ 16 */
17static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) 17static inline int prepare_hugepage_range(struct file *file,
18 unsigned long addr, unsigned long len)
18{ 19{
19 if (len & ~HPAGE_MASK) 20 struct hstate *h = hstate_file(file);
21 if (len & ~huge_page_mask(h))
20 return -EINVAL; 22 return -EINVAL;
21 if (addr & ~HPAGE_MASK) 23 if (addr & ~huge_page_mask(h))
22 return -EINVAL; 24 return -EINVAL;
23 return 0; 25 return 0;
24} 26}
@@ -26,7 +28,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
26static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { 28static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
27} 29}
28 30
29static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb, 31static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
30 unsigned long addr, unsigned long end, 32 unsigned long addr, unsigned long end,
31 unsigned long floor, 33 unsigned long floor,
32 unsigned long ceiling) 34 unsigned long ceiling)
@@ -88,4 +90,4 @@ static inline void arch_release_hugepage(struct page *page)
88{ 90{
89} 91}
90 92
91#endif /* _ASM_X86_HUGETLB_H */ 93#endif /* ASM_X86__HUGETLB_H */
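
The reworked prepare_hugepage_range() is an alignment test: addr and len must both be multiples of the (now per-hstate) huge page size. A stand-alone model using a made-up 2MB page size:

#include <stdio.h>
#include <errno.h>

static int prepare_range(unsigned long addr, unsigned long len,
			 unsigned long hpage_size)
{
	unsigned long offset_mask = hpage_size - 1;	/* ~huge_page_mask(h) */

	if (len & offset_mask)
		return -EINVAL;
	if (addr & offset_mask)
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long two_mb = 2UL << 20;

	/* Prints "0 -22": the second addr is only 4KB-aligned. */
	printf("%d %d\n",
	       prepare_range(0x40000000UL, two_mb, two_mb),
	       prepare_range(0x40001000UL, two_mb, two_mb));
	return 0;
}
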
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index bf025399d939..50f6e0316b50 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -1,5 +1,136 @@
1#ifndef ASM_X86__HW_IRQ_H
2#define ASM_X86__HW_IRQ_H
3
4/*
5 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
6 *
7 * moved some of the old arch/i386/kernel/irq.h to here. VY
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 *
12 * hacked by Andi Kleen for x86-64.
13 * unified by tglx
14 */
15
16#include <asm/irq_vectors.h>
17
18#ifndef __ASSEMBLY__
19
20#include <linux/percpu.h>
21#include <linux/profile.h>
22#include <linux/smp.h>
23
24#include <asm/atomic.h>
25#include <asm/irq.h>
26#include <asm/sections.h>
27
28#define platform_legacy_irq(irq) ((irq) < 16)
29
30/* Interrupt handlers registered during init_IRQ */
31extern void apic_timer_interrupt(void);
32extern void error_interrupt(void);
33extern void spurious_interrupt(void);
34extern void thermal_interrupt(void);
35extern void reschedule_interrupt(void);
36
37extern void invalidate_interrupt(void);
38extern void invalidate_interrupt0(void);
39extern void invalidate_interrupt1(void);
40extern void invalidate_interrupt2(void);
41extern void invalidate_interrupt3(void);
42extern void invalidate_interrupt4(void);
43extern void invalidate_interrupt5(void);
44extern void invalidate_interrupt6(void);
45extern void invalidate_interrupt7(void);
46
47extern void irq_move_cleanup_interrupt(void);
48extern void threshold_interrupt(void);
49
50extern void call_function_interrupt(void);
51extern void call_function_single_interrupt(void);
52
53/* PIC specific functions */
54extern void disable_8259A_irq(unsigned int irq);
55extern void enable_8259A_irq(unsigned int irq);
56extern int i8259A_irq_pending(unsigned int irq);
57extern void make_8259A_irq(unsigned int irq);
58extern void init_8259A(int aeoi);
59
60/* IOAPIC */
61#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
62extern unsigned long io_apic_irqs;
63
64extern void init_VISWS_APIC_irqs(void);
65extern void setup_IO_APIC(void);
66extern void disable_IO_APIC(void);
67extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
68extern void setup_ioapic_dest(void);
69
70#ifdef CONFIG_X86_64
71extern void enable_IO_APIC(void);
72#endif
73
74/* IPI functions */
75#ifdef CONFIG_X86_32
76extern void send_IPI_self(int vector);
77#endif
78extern void send_IPI(int dest, int vector);
79
80/* Statistics */
81extern atomic_t irq_err_count;
82extern atomic_t irq_mis_count;
83
84/* EISA */
85extern void eisa_set_level_irq(unsigned int irq);
86
87/* Voyager functions */
88extern asmlinkage void vic_cpi_interrupt(void);
89extern asmlinkage void vic_sys_interrupt(void);
90extern asmlinkage void vic_cmn_interrupt(void);
91extern asmlinkage void qic_timer_interrupt(void);
92extern asmlinkage void qic_invalidate_interrupt(void);
93extern asmlinkage void qic_reschedule_interrupt(void);
94extern asmlinkage void qic_enable_irq_interrupt(void);
95extern asmlinkage void qic_call_function_interrupt(void);
96
97/* SMP */
98extern void smp_apic_timer_interrupt(struct pt_regs *);
99#ifdef CONFIG_X86_32
100extern void smp_spurious_interrupt(struct pt_regs *);
101extern void smp_error_interrupt(struct pt_regs *);
102#else
103extern asmlinkage void smp_spurious_interrupt(void);
104extern asmlinkage void smp_error_interrupt(void);
105#endif
106#ifdef CONFIG_X86_SMP
107extern void smp_reschedule_interrupt(struct pt_regs *);
108extern void smp_call_function_interrupt(struct pt_regs *);
109extern void smp_call_function_single_interrupt(struct pt_regs *);
1#ifdef CONFIG_X86_32 110#ifdef CONFIG_X86_32
2# include "hw_irq_32.h" 111extern void smp_invalidate_interrupt(struct pt_regs *);
112#else
113extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
114#endif
115#endif
116
117#ifdef CONFIG_X86_32
118extern void (*const interrupt[NR_IRQS])(void);
119#else
120typedef int vector_irq_t[NR_VECTORS];
121DECLARE_PER_CPU(vector_irq_t, vector_irq);
122#endif
123
124#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
125extern void lock_vector_lock(void);
126extern void unlock_vector_lock(void);
127extern void __setup_vector_irq(int cpu);
3#else 128#else
4# include "hw_irq_64.h" 129static inline void lock_vector_lock(void) {}
130static inline void unlock_vector_lock(void) {}
131static inline void __setup_vector_irq(int cpu) {}
5#endif 132#endif
133
 134#endif /* !__ASSEMBLY__ */
135
136#endif /* ASM_X86__HW_IRQ_H */
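
The unified hw_irq.h leans on the stub-inline idiom visible at its tail: when a config option is off, empty static inlines replace the real functions so call sites compile unchanged. A minimal demonstration (CONFIG_TOY stands in for the CONFIG_X86_IO_APIC && CONFIG_X86_64 test; build without it defined to run):

#include <stdio.h>

#ifdef CONFIG_TOY
void lock_vector_lock(void);		/* real implementation elsewhere */
void unlock_vector_lock(void);
#else
static inline void lock_vector_lock(void) { }	/* compiled away */
static inline void unlock_vector_lock(void) { }
#endif

int main(void)
{
	lock_vector_lock();		/* same call sites either way */
	printf("critical section\n");
	unlock_vector_lock();
	return 0;
}
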
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
deleted file mode 100644
index ea88054e03f3..000000000000
--- a/include/asm-x86/hw_irq_32.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * linux/include/asm/hw_irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * moved some of the old arch/i386/kernel/irq.h to here. VY
10 *
11 * IRQ/IPI changes taken from work by Thomas Radke
12 * <tomsoft@informatik.tu-chemnitz.de>
13 */
14
15#include <linux/profile.h>
16#include <asm/atomic.h>
17#include <asm/irq.h>
18#include <asm/sections.h>
19
20#define NMI_VECTOR 0x02
21
22/*
23 * Various low-level irq details needed by irq.c, process.c,
24 * time.c, io_apic.c and smp.c
25 *
26 * Interrupt entry/exit code at both C and assembly level
27 */
28
29extern void (*const interrupt[NR_IRQS])(void);
30
31#ifdef CONFIG_SMP
32void reschedule_interrupt(void);
33void invalidate_interrupt(void);
34void call_function_interrupt(void);
35#endif
36
37#ifdef CONFIG_X86_LOCAL_APIC
38void apic_timer_interrupt(void);
39void error_interrupt(void);
40void spurious_interrupt(void);
41void thermal_interrupt(void);
42#define platform_legacy_irq(irq) ((irq) < 16)
43#endif
44
45void disable_8259A_irq(unsigned int irq);
46void enable_8259A_irq(unsigned int irq);
47int i8259A_irq_pending(unsigned int irq);
48void make_8259A_irq(unsigned int irq);
49void init_8259A(int aeoi);
50void send_IPI_self(int vector);
51void init_VISWS_APIC_irqs(void);
52void setup_IO_APIC(void);
53void disable_IO_APIC(void);
54void print_IO_APIC(void);
55int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
56void send_IPI(int dest, int vector);
57void setup_ioapic_dest(void);
58
59extern unsigned long io_apic_irqs;
60
61extern atomic_t irq_err_count;
62extern atomic_t irq_mis_count;
63
64#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
65
66#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
deleted file mode 100644
index 0062ef390f67..000000000000
--- a/include/asm-x86/hw_irq_64.h
+++ /dev/null
@@ -1,173 +0,0 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * linux/include/asm/hw_irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * moved some of the old arch/i386/kernel/irq.h to here. VY
10 *
11 * IRQ/IPI changes taken from work by Thomas Radke
12 * <tomsoft@informatik.tu-chemnitz.de>
13 *
14 * hacked by Andi Kleen for x86-64.
15 */
16
17#ifndef __ASSEMBLY__
18#include <asm/atomic.h>
19#include <asm/irq.h>
20#include <linux/profile.h>
21#include <linux/smp.h>
22#include <linux/percpu.h>
23#endif
24
25#define NMI_VECTOR 0x02
26/*
27 * IDT vectors usable for external interrupt sources start
28 * at 0x20:
29 */
30#define FIRST_EXTERNAL_VECTOR 0x20
31
32#define IA32_SYSCALL_VECTOR 0x80
33
34
35/* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
36 * cleanup after irq migration.
37 */
38#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
39
40/*
41 * Vectors 0x30-0x3f are used for ISA interrupts.
42 */
43#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
44#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
45#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
46#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
47#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
48#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
49#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
50#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
51#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
52#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
53#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
54#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
55#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
56#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
57#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
58#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
59
60/*
61 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
62 *
63 * some of the following vectors are 'rare', they are merged
64 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
65 * TLB, reschedule and local APIC vectors are performance-critical.
66 */
67#define SPURIOUS_APIC_VECTOR 0xff
68#define ERROR_APIC_VECTOR 0xfe
69#define RESCHEDULE_VECTOR 0xfd
70#define CALL_FUNCTION_VECTOR 0xfc
 71/* fb free - please don't re-add KDB here because it's useless
 72 (hint - think what an NMI bit does to a vector) */
73#define THERMAL_APIC_VECTOR 0xfa
74#define THRESHOLD_APIC_VECTOR 0xf9
75/* f8 free */
76#define INVALIDATE_TLB_VECTOR_END 0xf7
77#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
78
79#define NUM_INVALIDATE_TLB_VECTORS 8
80
81/*
82 * Local APIC timer IRQ vector is on a different priority level,
83 * to work around the 'lost local interrupt if more than 2 IRQ
84 * sources per level' errata.
85 */
86#define LOCAL_TIMER_VECTOR 0xef
87
88/*
89 * First APIC vector available to drivers: (vectors 0x30-0xee)
90 * we start at 0x41 to spread out vectors evenly between priority
91 * levels. (0x80 is the syscall vector)
92 */
93#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
94#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
95
96
97#ifndef __ASSEMBLY__
98
99/* Interrupt handlers registered during init_IRQ */
100void apic_timer_interrupt(void);
101void spurious_interrupt(void);
102void error_interrupt(void);
103void reschedule_interrupt(void);
104void call_function_interrupt(void);
105void irq_move_cleanup_interrupt(void);
106void invalidate_interrupt0(void);
107void invalidate_interrupt1(void);
108void invalidate_interrupt2(void);
109void invalidate_interrupt3(void);
110void invalidate_interrupt4(void);
111void invalidate_interrupt5(void);
112void invalidate_interrupt6(void);
113void invalidate_interrupt7(void);
114void thermal_interrupt(void);
115void threshold_interrupt(void);
116void i8254_timer_resume(void);
117
118typedef int vector_irq_t[NR_VECTORS];
119DECLARE_PER_CPU(vector_irq_t, vector_irq);
120extern void __setup_vector_irq(int cpu);
121extern spinlock_t vector_lock;
122
123/*
124 * Various low-level irq details needed by irq.c, process.c,
125 * time.c, io_apic.c and smp.c
126 *
127 * Interrupt entry/exit code at both C and assembly level
128 */
129
130extern void disable_8259A_irq(unsigned int irq);
131extern void enable_8259A_irq(unsigned int irq);
132extern int i8259A_irq_pending(unsigned int irq);
133extern void make_8259A_irq(unsigned int irq);
134extern void init_8259A(int aeoi);
135extern void send_IPI_self(int vector);
136extern void init_VISWS_APIC_irqs(void);
137extern void setup_IO_APIC(void);
138extern void enable_IO_APIC(void);
139extern void disable_IO_APIC(void);
140extern void print_IO_APIC(void);
141extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
142extern void send_IPI(int dest, int vector);
143extern void setup_ioapic_dest(void);
144extern void native_init_IRQ(void);
145
146extern unsigned long io_apic_irqs;
147
148extern atomic_t irq_err_count;
149extern atomic_t irq_mis_count;
150
151#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
152
153#include <asm/ptrace.h>
154
155#define IRQ_NAME2(nr) nr##_interrupt(void)
156#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
157
158/*
159 * SMP has a few special interrupts for IPI messages
160 */
161
162#define BUILD_IRQ(nr) \
163 asmlinkage void IRQ_NAME(nr); \
164 asm("\n.p2align\n" \
165 "IRQ" #nr "_interrupt:\n\t" \
166 "push $~(" #nr ") ; " \
167 "jmp common_interrupt");
168
169#define platform_legacy_irq(irq) ((irq) < 16)
170
171#endif
172
173#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h
index d2bbd238b3e1..cc011a3bc1c2 100644
--- a/include/asm-x86/hypertransport.h
+++ b/include/asm-x86/hypertransport.h
@@ -1,5 +1,5 @@
1#ifndef ASM_HYPERTRANSPORT_H 1#ifndef ASM_X86__HYPERTRANSPORT_H
2#define ASM_HYPERTRANSPORT_H 2#define ASM_X86__HYPERTRANSPORT_H
3 3
4/* 4/*
5 * Constants for x86 Hypertransport Interrupts. 5 * Constants for x86 Hypertransport Interrupts.
@@ -42,4 +42,4 @@
42#define HT_IRQ_HIGH_DEST_ID(v) \ 42#define HT_IRQ_HIGH_DEST_ID(v) \
43 ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) 43 ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
44 44
45#endif /* ASM_HYPERTRANSPORT_H */ 45#endif /* ASM_X86__HYPERTRANSPORT_H */
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 37672f79dcc8..9ba862a4eac0 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -7,34 +7,44 @@
7 * x86-64 work by Andi Kleen 2002 7 * x86-64 work by Andi Kleen 2002
8 */ 8 */
9 9
10#ifndef _ASM_X86_I387_H 10#ifndef ASM_X86__I387_H
11#define _ASM_X86_I387_H 11#define ASM_X86__I387_H
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
15#include <linux/regset.h> 15#include <linux/regset.h>
16#include <linux/hardirq.h>
16#include <asm/asm.h> 17#include <asm/asm.h>
17#include <asm/processor.h> 18#include <asm/processor.h>
18#include <asm/sigcontext.h> 19#include <asm/sigcontext.h>
19#include <asm/user.h> 20#include <asm/user.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/xsave.h>
21 23
24extern unsigned int sig_xstate_size;
22extern void fpu_init(void); 25extern void fpu_init(void);
23extern void mxcsr_feature_mask_init(void); 26extern void mxcsr_feature_mask_init(void);
24extern int init_fpu(struct task_struct *child); 27extern int init_fpu(struct task_struct *child);
25extern asmlinkage void math_state_restore(void); 28extern asmlinkage void math_state_restore(void);
26extern void init_thread_xstate(void); 29extern void init_thread_xstate(void);
30extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
27 31
28extern user_regset_active_fn fpregs_active, xfpregs_active; 32extern user_regset_active_fn fpregs_active, xfpregs_active;
29extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; 33extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
30extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; 34extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;
31 35
36extern struct _fpx_sw_bytes fx_sw_reserved;
32#ifdef CONFIG_IA32_EMULATION 37#ifdef CONFIG_IA32_EMULATION
38extern unsigned int sig_xstate_ia32_size;
39extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
33struct _fpstate_ia32; 40struct _fpstate_ia32;
34extern int save_i387_ia32(struct _fpstate_ia32 __user *buf); 41struct _xstate_ia32;
35extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf); 42extern int save_i387_xstate_ia32(void __user *buf);
43extern int restore_i387_xstate_ia32(void __user *buf);
36#endif 44#endif
37 45
46#define X87_FSW_ES (1 << 7) /* Exception Summary */
47
38#ifdef CONFIG_X86_64 48#ifdef CONFIG_X86_64
39 49
40/* Ignore delayed exceptions from user space */ 50/* Ignore delayed exceptions from user space */
@@ -45,7 +55,7 @@ static inline void tolerant_fwait(void)
45 _ASM_EXTABLE(1b, 2b)); 55 _ASM_EXTABLE(1b, 2b));
46} 56}
47 57
48static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) 58static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
49{ 59{
50 int err; 60 int err;
51 61
@@ -62,20 +72,34 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
62#else 72#else
63 : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); 73 : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
64#endif 74#endif
65 if (unlikely(err))
66 init_fpu(current);
67 return err; 75 return err;
68} 76}
69 77
70#define X87_FSW_ES (1 << 7) /* Exception Summary */ 78static inline int restore_fpu_checking(struct task_struct *tsk)
79{
80 if (task_thread_info(tsk)->status & TS_XSAVE)
81 return xrstor_checking(&tsk->thread.xstate->xsave);
82 else
83 return fxrstor_checking(&tsk->thread.xstate->fxsave);
84}
71 85
72/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception 86/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
73 is pending. Clear the x87 state here by setting it to fixed 87 is pending. Clear the x87 state here by setting it to fixed
 74 values. The kernel data segment can sometimes be 0 and sometimes 88 values. The kernel data segment can sometimes be 0 and sometimes
 75 the new user value. Both should be ok. 89 the new user value. Both should be ok.
 76 Use the PDA as a safe address because it should already be in L1. */ 90 Use the PDA as a safe address because it should already be in L1. */
77static inline void clear_fpu_state(struct i387_fxsave_struct *fx) 91static inline void clear_fpu_state(struct task_struct *tsk)
78{ 92{
93 struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
94 struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
95
96 /*
97 * xsave header may indicate the init state of the FP.
98 */
99 if ((task_thread_info(tsk)->status & TS_XSAVE) &&
100 !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
101 return;
102
79 if (unlikely(fx->swd & X87_FSW_ES)) 103 if (unlikely(fx->swd & X87_FSW_ES))
80 asm volatile("fnclex"); 104 asm volatile("fnclex");
81 alternative_input(ASM_NOP8 ASM_NOP2, 105 alternative_input(ASM_NOP8 ASM_NOP2,
@@ -84,7 +108,7 @@ static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
84 X86_FEATURE_FXSAVE_LEAK); 108 X86_FEATURE_FXSAVE_LEAK);
85} 109}
86 110
87static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) 111static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
88{ 112{
89 int err; 113 int err;
90 114
@@ -108,7 +132,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
108 return err; 132 return err;
109} 133}
110 134
111static inline void __save_init_fpu(struct task_struct *tsk) 135static inline void fxsave(struct task_struct *tsk)
112{ 136{
113 /* Using "rex64; fxsave %0" is broken because, if the memory operand 137 /* Using "rex64; fxsave %0" is broken because, if the memory operand
114 uses any extended registers for addressing, a second REX prefix 138 uses any extended registers for addressing, a second REX prefix
@@ -133,62 +157,17 @@ static inline void __save_init_fpu(struct task_struct *tsk)
133 : "=m" (tsk->thread.xstate->fxsave) 157 : "=m" (tsk->thread.xstate->fxsave)
134 : "cdaSDb" (&tsk->thread.xstate->fxsave)); 158 : "cdaSDb" (&tsk->thread.xstate->fxsave));
135#endif 159#endif
136 clear_fpu_state(&tsk->thread.xstate->fxsave);
137 task_thread_info(tsk)->status &= ~TS_USEDFPU;
138}
139
140/*
141 * Signal frame handlers.
142 */
143
144static inline int save_i387(struct _fpstate __user *buf)
145{
146 struct task_struct *tsk = current;
147 int err = 0;
148
149 BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
150 sizeof(tsk->thread.xstate->fxsave));
151
152 if ((unsigned long)buf % 16)
153 printk("save_i387: bad fpstate %p\n", buf);
154
155 if (!used_math())
156 return 0;
157 clear_used_math(); /* trigger finit */
158 if (task_thread_info(tsk)->status & TS_USEDFPU) {
159 err = save_i387_checking((struct i387_fxsave_struct __user *)
160 buf);
161 if (err)
162 return err;
163 task_thread_info(tsk)->status &= ~TS_USEDFPU;
164 stts();
165 } else {
166 if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
167 sizeof(struct i387_fxsave_struct)))
168 return -1;
169 }
170 return 1;
171} 160}
172 161
173/* 162static inline void __save_init_fpu(struct task_struct *tsk)
174 * This restores directly out of user space. Exceptions are handled.
175 */
176static inline int restore_i387(struct _fpstate __user *buf)
177{ 163{
178 struct task_struct *tsk = current; 164 if (task_thread_info(tsk)->status & TS_XSAVE)
179 int err; 165 xsave(tsk);
180 166 else
181 if (!used_math()) { 167 fxsave(tsk);
182 err = init_fpu(tsk);
183 if (err)
184 return err;
185 }
186 168
187 if (!(task_thread_info(current)->status & TS_USEDFPU)) { 169 clear_fpu_state(tsk);
188 clts(); 170 task_thread_info(tsk)->status &= ~TS_USEDFPU;
189 task_thread_info(current)->status |= TS_USEDFPU;
190 }
191 return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
192} 171}
193 172
194#else /* CONFIG_X86_32 */ 173#else /* CONFIG_X86_32 */
@@ -202,6 +181,10 @@ static inline void tolerant_fwait(void)
202 181
203static inline void restore_fpu(struct task_struct *tsk) 182static inline void restore_fpu(struct task_struct *tsk)
204{ 183{
184 if (task_thread_info(tsk)->status & TS_XSAVE) {
185 xrstor_checking(&tsk->thread.xstate->xsave);
186 return;
187 }
205 /* 188 /*
206 * The "nop" is needed to make the instructions the same 189 * The "nop" is needed to make the instructions the same
207 * length. 190 * length.
@@ -227,6 +210,27 @@ static inline void restore_fpu(struct task_struct *tsk)
227 */ 210 */
228static inline void __save_init_fpu(struct task_struct *tsk) 211static inline void __save_init_fpu(struct task_struct *tsk)
229{ 212{
213 if (task_thread_info(tsk)->status & TS_XSAVE) {
214 struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
215 struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
216
217 xsave(tsk);
218
219 /*
220 * xsave header may indicate the init state of the FP.
221 */
222 if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
223 goto end;
224
225 if (unlikely(fx->swd & X87_FSW_ES))
226 asm volatile("fnclex");
227
228 /*
229 * we can do a simple return here or be paranoid :)
230 */
231 goto clear_state;
232 }
233
230 /* Use more nops than strictly needed in case the compiler 234 /* Use more nops than strictly needed in case the compiler
231 varies code */ 235 varies code */
232 alternative_input( 236 alternative_input(
@@ -236,6 +240,7 @@ static inline void __save_init_fpu(struct task_struct *tsk)
236 X86_FEATURE_FXSR, 240 X86_FEATURE_FXSR,
237 [fx] "m" (tsk->thread.xstate->fxsave), 241 [fx] "m" (tsk->thread.xstate->fxsave),
238 [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); 242 [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
243clear_state:
239 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception 244 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
240 is pending. Clear the x87 state here by setting it to fixed 245 is pending. Clear the x87 state here by setting it to fixed
241 values. safe_address is a random variable that should be in L1 */ 246 values. safe_address is a random variable that should be in L1 */
@@ -245,16 +250,17 @@ static inline void __save_init_fpu(struct task_struct *tsk)
245 "fildl %[addr]", /* set F?P to defined value */ 250 "fildl %[addr]", /* set F?P to defined value */
246 X86_FEATURE_FXSAVE_LEAK, 251 X86_FEATURE_FXSAVE_LEAK,
247 [addr] "m" (safe_address)); 252 [addr] "m" (safe_address));
253end:
248 task_thread_info(tsk)->status &= ~TS_USEDFPU; 254 task_thread_info(tsk)->status &= ~TS_USEDFPU;
249} 255}
250 256
257#endif /* CONFIG_X86_64 */
258
251/* 259/*
252 * Signal frame handlers... 260 * Signal frame handlers...
253 */ 261 */
254extern int save_i387(struct _fpstate __user *buf); 262extern int save_i387_xstate(void __user *buf);
255extern int restore_i387(struct _fpstate __user *buf); 263extern int restore_i387_xstate(void __user *buf);
256
257#endif /* CONFIG_X86_64 */
258 264
259static inline void __unlazy_fpu(struct task_struct *tsk) 265static inline void __unlazy_fpu(struct task_struct *tsk)
260{ 266{
@@ -290,6 +296,37 @@ static inline void kernel_fpu_end(void)
290 preempt_enable(); 296 preempt_enable();
291} 297}
292 298
299/*
 300 * Some instructions, like VIA's padlock instructions, generate a spurious
 301 * DNA fault but don't modify SSE registers. These instructions also
 302 * get used from interrupt context. To prevent these kernel instructions
 303 * in interrupt context from interacting wrongly with other user/kernel
 304 * FPU usage, they should only be used inside irq_ts_save()/irq_ts_restore().
305 */
306static inline int irq_ts_save(void)
307{
308 /*
309 * If we are in process context, we are ok to take a spurious DNA fault.
 310 * Otherwise, doing clts() in process context requires pre-emption to
311 * be disabled or some heavy lifting like kernel_fpu_begin()
312 */
313 if (!in_interrupt())
314 return 0;
315
316 if (read_cr0() & X86_CR0_TS) {
317 clts();
318 return 1;
319 }
320
321 return 0;
322}
323
324static inline void irq_ts_restore(int TS_state)
325{
326 if (TS_state)
327 stts();
328}
329
293#ifdef CONFIG_X86_64 330#ifdef CONFIG_X86_64
294 331
295static inline void save_init_fpu(struct task_struct *tsk) 332static inline void save_init_fpu(struct task_struct *tsk)
@@ -360,4 +397,4 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
360 } 397 }
361} 398}
362 399
363#endif /* _ASM_X86_I387_H */ 400#endif /* ASM_X86__I387_H */
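
irq_ts_save()/irq_ts_restore() form a save/restore pair around FPU-touching instructions in interrupt context. A user-space model of the pairing, with CR0.TS, clts()/stts() and in_interrupt() reduced to stubs:

#include <stdio.h>

static int cr0_ts = 1;				/* pretend TS is set */
static int in_interrupt_stub(void) { return 1; }

static int irq_ts_save(void)
{
	if (!in_interrupt_stub())
		return 0;		/* process context: just take the fault */
	if (cr0_ts) {
		cr0_ts = 0;		/* clts() */
		return 1;		/* caller must undo this */
	}
	return 0;
}

static void irq_ts_restore(int ts_state)
{
	if (ts_state)
		cr0_ts = 1;		/* stts() */
}

int main(void)
{
	int ts = irq_ts_save();
	/* ...FPU/SSE-touching work would go here... */
	irq_ts_restore(ts);
	printf("TS restored: %d\n", cr0_ts);	/* prints 1 */
	return 0;
}
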
diff --git a/include/asm-x86/i8253.h b/include/asm-x86/i8253.h
index b51c0487fc41..15a5b530044e 100644
--- a/include/asm-x86/i8253.h
+++ b/include/asm-x86/i8253.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I8253_H__ 1#ifndef ASM_X86__I8253_H
2#define __ASM_I8253_H__ 2#define ASM_X86__I8253_H
3 3
4/* i8253A PIT registers */ 4/* i8253A PIT registers */
5#define PIT_MODE 0x43 5#define PIT_MODE 0x43
@@ -15,4 +15,4 @@ extern void setup_pit_timer(void);
15#define inb_pit inb_p 15#define inb_pit inb_p
16#define outb_pit outb_p 16#define outb_pit outb_p
17 17
18#endif /* __ASM_I8253_H__ */ 18#endif /* ASM_X86__I8253_H */
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 45d4df3e51e6..23c1b3baaecd 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I8259_H__ 1#ifndef ASM_X86__I8259_H
2#define __ASM_I8259_H__ 2#define ASM_X86__I8259_H
3 3
4#include <linux/delay.h> 4#include <linux/delay.h>
5 5
@@ -55,4 +55,9 @@ static inline void outb_pic(unsigned char value, unsigned int port)
55 udelay(2); 55 udelay(2);
56} 56}
57 57
58#endif /* __ASM_I8259_H__ */ 58extern struct irq_chip i8259A_chip;
59
60extern void mask_8259A(void);
61extern void unmask_8259A(void);
62
63#endif /* ASM_X86__I8259_H */
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index 55d3abe5276f..f932f7ad51dd 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IA32_H 1#ifndef ASM_X86__IA32_H
2#define _ASM_X86_64_IA32_H 2#define ASM_X86__IA32_H
3 3
4 4
5#ifdef CONFIG_IA32_EMULATION 5#ifdef CONFIG_IA32_EMULATION
@@ -167,4 +167,4 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
167 167
168#endif /* !CONFIG_IA32_SUPPORT */ 168#endif /* !CONFIG_IA32_SUPPORT */
169 169
170#endif 170#endif /* ASM_X86__IA32_H */
diff --git a/include/asm-x86/ia32_unistd.h b/include/asm-x86/ia32_unistd.h
index 61cea9e7c5c1..dbd887d8a5a5 100644
--- a/include/asm-x86/ia32_unistd.h
+++ b/include/asm-x86/ia32_unistd.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IA32_UNISTD_H_ 1#ifndef ASM_X86__IA32_UNISTD_H
2#define _ASM_X86_64_IA32_UNISTD_H_ 2#define ASM_X86__IA32_UNISTD_H
3 3
4/* 4/*
5 * This file contains the system call numbers of the ia32 port, 5 * This file contains the system call numbers of the ia32 port,
@@ -15,4 +15,4 @@
15#define __NR_ia32_sigreturn 119 15#define __NR_ia32_sigreturn 119
16#define __NR_ia32_rt_sigreturn 173 16#define __NR_ia32_rt_sigreturn 173
17 17
18#endif /* _ASM_X86_64_IA32_UNISTD_H_ */ 18#endif /* ASM_X86__IA32_UNISTD_H */
diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h
deleted file mode 100644
index cf9c98e5bdb5..000000000000
--- a/include/asm-x86/ide.h
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * Copyright (C) 1994-1996 Linus Torvalds & authors
3 */
4
5/*
6 * This file contains the i386 architecture specific IDE code.
7 */
8
9#ifndef __ASMi386_IDE_H
10#define __ASMi386_IDE_H
11
12#ifdef __KERNEL__
13
14
15#ifndef MAX_HWIFS
16# ifdef CONFIG_BLK_DEV_IDEPCI
17#define MAX_HWIFS 10
18# else
19#define MAX_HWIFS 6
20# endif
21#endif
22
23static __inline__ int ide_default_irq(unsigned long base)
24{
25 switch (base) {
26 case 0x1f0: return 14;
27 case 0x170: return 15;
28 case 0x1e8: return 11;
29 case 0x168: return 10;
30 case 0x1e0: return 8;
31 case 0x160: return 12;
32 default:
33 return 0;
34 }
35}
36
37static __inline__ unsigned long ide_default_io_base(int index)
38{
39 /*
40 * If PCI is present then it is not safe to poke around
41 * the other legacy IDE ports. Only 0x1f0 and 0x170 are
42 * defined compatibility mode ports for PCI. A user can
43 * override this using ide= but we must default safe.
44 */
45 if (no_pci_devices()) {
46 switch(index) {
47 case 2: return 0x1e8;
48 case 3: return 0x168;
49 case 4: return 0x1e0;
50 case 5: return 0x160;
51 }
52 }
53 switch (index) {
54 case 0: return 0x1f0;
55 case 1: return 0x170;
56 default:
57 return 0;
58 }
59}
60
61#include <asm-generic/ide_iops.h>
62
63#endif /* __KERNEL__ */
64
65#endif /* __ASMi386_IDE_H */
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index d240e5b30a45..baa3f783d27d 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IDLE_H 1#ifndef ASM_X86__IDLE_H
2#define _ASM_X86_64_IDLE_H 1 2#define ASM_X86__IDLE_H
3 3
4#define IDLE_START 1 4#define IDLE_START 1
5#define IDLE_END 2 5#define IDLE_END 2
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n);
10void enter_idle(void); 10void enter_idle(void);
11void exit_idle(void); 11void exit_idle(void);
12 12
13#endif 13void c1e_remove_cpu(int cpu);
14
15#endif /* ASM_X86__IDLE_H */
diff --git a/include/asm-x86/intel_arch_perfmon.h b/include/asm-x86/intel_arch_perfmon.h
index fa0fd068bc2e..07c03c6c9a16 100644
--- a/include/asm-x86/intel_arch_perfmon.h
+++ b/include/asm-x86/intel_arch_perfmon.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H 1#ifndef ASM_X86__INTEL_ARCH_PERFMON_H
2#define _ASM_X86_INTEL_ARCH_PERFMON_H 2#define ASM_X86__INTEL_ARCH_PERFMON_H
3 3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
@@ -28,4 +28,4 @@ union cpuid10_eax {
28 unsigned int full; 28 unsigned int full;
29}; 29};
30 30
31#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */ 31#endif /* ASM_X86__INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index d5b11f60dbd0..72b7719523bf 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -1,8 +1,80 @@
1#ifndef _ASM_X86_IO_H 1#ifndef ASM_X86__IO_H
2#define _ASM_X86_IO_H 2#define ASM_X86__IO_H
3 3
4#define ARCH_HAS_IOREMAP_WC 4#define ARCH_HAS_IOREMAP_WC
5 5
6#include <linux/compiler.h>
7
8/*
9 * early_ioremap() and early_iounmap() are for temporary early boot-time
10 * mappings, before the real ioremap() is functional.
11 * A boot-time mapping is currently limited to at most 16 pages.
12 */
13#ifndef __ASSEMBLY__
14extern void early_ioremap_init(void);
15extern void early_ioremap_clear(void);
16extern void early_ioremap_reset(void);
17extern void *early_ioremap(unsigned long offset, unsigned long size);
18extern void early_iounmap(void *addr, unsigned long size);
19extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
20#endif
21
22#define build_mmio_read(name, size, type, reg, barrier) \
23static inline type name(const volatile void __iomem *addr) \
24{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
25:"m" (*(volatile type __force *)addr) barrier); return ret; }
26
27#define build_mmio_write(name, size, type, reg, barrier) \
28static inline void name(type val, volatile void __iomem *addr) \
29{ asm volatile("mov" size " %0,%1": :reg (val), \
30"m" (*(volatile type __force *)addr) barrier); }
31
32build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
33build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
34build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
35
36build_mmio_read(__readb, "b", unsigned char, "=q", )
37build_mmio_read(__readw, "w", unsigned short, "=r", )
38build_mmio_read(__readl, "l", unsigned int, "=r", )
39
40build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
41build_mmio_write(writew, "w", unsigned short, "r", :"memory")
42build_mmio_write(writel, "l", unsigned int, "r", :"memory")
43
44build_mmio_write(__writeb, "b", unsigned char, "q", )
45build_mmio_write(__writew, "w", unsigned short, "r", )
46build_mmio_write(__writel, "l", unsigned int, "r", )
47
48#define readb_relaxed(a) __readb(a)
49#define readw_relaxed(a) __readw(a)
50#define readl_relaxed(a) __readl(a)
51#define __raw_readb __readb
52#define __raw_readw __readw
53#define __raw_readl __readl
54
55#define __raw_writeb __writeb
56#define __raw_writew __writew
57#define __raw_writel __writel
58
59#define mmiowb() barrier()
60
61#ifdef CONFIG_X86_64
62build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
63build_mmio_read(__readq, "q", unsigned long, "=r", )
64build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
65build_mmio_write(__writeq, "q", unsigned long, "r", )
66
67#define readq_relaxed(a) __readq(a)
68#define __raw_readq __readq
69#define __raw_writeq writeq
70
71/* Let people know we have them */
72#define readq readq
73#define writeq writeq
74#endif
75
76extern int iommu_bio_merge;
77
6#ifdef CONFIG_X86_32 78#ifdef CONFIG_X86_32
7# include "io_32.h" 79# include "io_32.h"
8#else 80#else
@@ -16,4 +88,17 @@ extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
16 unsigned long prot_val); 88 unsigned long prot_val);
17extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); 89extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
18 90
19#endif /* _ASM_X86_IO_H */ 91/*
92 * early_ioremap() and early_iounmap() are for temporary early boot-time
93 * mappings, before the real ioremap() is functional.
94 * A boot-time mapping is currently limited to at most 16 pages.
95 */
96extern void early_ioremap_init(void);
97extern void early_ioremap_clear(void);
98extern void early_ioremap_reset(void);
99extern void *early_ioremap(unsigned long offset, unsigned long size);
100extern void early_iounmap(void *addr, unsigned long size);
101extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
102
103
104#endif /* ASM_X86__IO_H */
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 049e81e797a0..4f7d878bda18 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IO_H 1#ifndef ASM_X86__IO_32_H
2#define _ASM_IO_H 2#define ASM_X86__IO_32_H
3 3
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -110,6 +110,8 @@ static inline void *phys_to_virt(unsigned long address)
110 */ 110 */
111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
113extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
114 unsigned long prot_val);
113 115
114/* 116/*
115 * The default ioremap() behavior is non-cached: 117 * The default ioremap() behavior is non-cached:
@@ -122,18 +124,6 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
122extern void iounmap(volatile void __iomem *addr); 124extern void iounmap(volatile void __iomem *addr);
123 125
124/* 126/*
125 * early_ioremap() and early_iounmap() are for temporary early boot-time
126 * mappings, before the real ioremap() is functional.
127 * A boot-time mapping is currently limited to at most 16 pages.
128 */
129extern void early_ioremap_init(void);
130extern void early_ioremap_clear(void);
131extern void early_ioremap_reset(void);
132extern void *early_ioremap(unsigned long offset, unsigned long size);
133extern void early_iounmap(void *addr, unsigned long size);
134extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
135
136/*
137 * ISA I/O bus memory addresses are 1:1 with the physical address. 127 * ISA I/O bus memory addresses are 1:1 with the physical address.
138 */ 128 */
139#define isa_virt_to_bus virt_to_phys 129#define isa_virt_to_bus virt_to_phys
@@ -149,55 +139,6 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
149#define virt_to_bus virt_to_phys 139#define virt_to_bus virt_to_phys
150#define bus_to_virt phys_to_virt 140#define bus_to_virt phys_to_virt
151 141
152/*
153 * readX/writeX() are used to access memory mapped devices. On some
154 * architectures the memory mapped IO stuff needs to be accessed
155 * differently. On the x86 architecture, we just read/write the
156 * memory location directly.
157 */
158
159static inline unsigned char readb(const volatile void __iomem *addr)
160{
161 return *(volatile unsigned char __force *)addr;
162}
163
164static inline unsigned short readw(const volatile void __iomem *addr)
165{
166 return *(volatile unsigned short __force *)addr;
167}
168
169static inline unsigned int readl(const volatile void __iomem *addr)
170{
171 return *(volatile unsigned int __force *) addr;
172}
173
174#define readb_relaxed(addr) readb(addr)
175#define readw_relaxed(addr) readw(addr)
176#define readl_relaxed(addr) readl(addr)
177#define __raw_readb readb
178#define __raw_readw readw
179#define __raw_readl readl
180
181static inline void writeb(unsigned char b, volatile void __iomem *addr)
182{
183 *(volatile unsigned char __force *)addr = b;
184}
185
186static inline void writew(unsigned short b, volatile void __iomem *addr)
187{
188 *(volatile unsigned short __force *)addr = b;
189}
190
191static inline void writel(unsigned int b, volatile void __iomem *addr)
192{
193 *(volatile unsigned int __force *)addr = b;
194}
195#define __raw_writeb writeb
196#define __raw_writew writew
197#define __raw_writel writel
198
199#define mmiowb()
200
201static inline void 142static inline void
202memset_io(volatile void __iomem *addr, unsigned char val, int count) 143memset_io(volatile void __iomem *addr, unsigned char val, int count)
203{ 144{
@@ -340,4 +281,4 @@ BUILDIO(b, b, char)
340BUILDIO(w, w, short) 281BUILDIO(w, w, short)
341BUILDIO(l, , int) 282BUILDIO(l, , int)
342 283
343#endif 284#endif /* ASM_X86__IO_32_H */
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index 0930bedf9e4d..64429e9431a8 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IO_H 1#ifndef ASM_X86__IO_64_H
2#define _ASM_IO_H 2#define ASM_X86__IO_64_H
3 3
4 4
5/* 5/*
@@ -175,6 +175,8 @@ extern void early_iounmap(void *addr, unsigned long size);
175 */ 175 */
176extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 176extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
177extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 177extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
178extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
179 unsigned long prot_val);
178 180
179/* 181/*
180 * The default ioremap() behavior is non-cached: 182 * The default ioremap() behavior is non-cached:
@@ -204,77 +206,6 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
204#define virt_to_bus virt_to_phys 206#define virt_to_bus virt_to_phys
205#define bus_to_virt phys_to_virt 207#define bus_to_virt phys_to_virt
206 208
207/*
208 * readX/writeX() are used to access memory mapped devices. On some
209 * architectures the memory mapped IO stuff needs to be accessed
210 * differently. On the x86 architecture, we just read/write the
211 * memory location directly.
212 */
213
214static inline __u8 __readb(const volatile void __iomem *addr)
215{
216 return *(__force volatile __u8 *)addr;
217}
218
219static inline __u16 __readw(const volatile void __iomem *addr)
220{
221 return *(__force volatile __u16 *)addr;
222}
223
224static __always_inline __u32 __readl(const volatile void __iomem *addr)
225{
226 return *(__force volatile __u32 *)addr;
227}
228
229static inline __u64 __readq(const volatile void __iomem *addr)
230{
231 return *(__force volatile __u64 *)addr;
232}
233
234#define readb(x) __readb(x)
235#define readw(x) __readw(x)
236#define readl(x) __readl(x)
237#define readq(x) __readq(x)
238#define readb_relaxed(a) readb(a)
239#define readw_relaxed(a) readw(a)
240#define readl_relaxed(a) readl(a)
241#define readq_relaxed(a) readq(a)
242#define __raw_readb readb
243#define __raw_readw readw
244#define __raw_readl readl
245#define __raw_readq readq
246
247#define mmiowb()
248
249static inline void __writel(__u32 b, volatile void __iomem *addr)
250{
251 *(__force volatile __u32 *)addr = b;
252}
253
254static inline void __writeq(__u64 b, volatile void __iomem *addr)
255{
256 *(__force volatile __u64 *)addr = b;
257}
258
259static inline void __writeb(__u8 b, volatile void __iomem *addr)
260{
261 *(__force volatile __u8 *)addr = b;
262}
263
264static inline void __writew(__u16 b, volatile void __iomem *addr)
265{
266 *(__force volatile __u16 *)addr = b;
267}
268
269#define writeq(val, addr) __writeq((val), (addr))
270#define writel(val, addr) __writel((val), (addr))
271#define writew(val, addr) __writew((val), (addr))
272#define writeb(val, addr) __writeb((val), (addr))
273#define __raw_writeb writeb
274#define __raw_writew writew
275#define __raw_writel writel
276#define __raw_writeq writeq
277
278void __memcpy_fromio(void *, unsigned long, unsigned); 209void __memcpy_fromio(void *, unsigned long, unsigned);
279void __memcpy_toio(unsigned long, const void *, unsigned); 210void __memcpy_toio(unsigned long, const void *, unsigned);
280 211
@@ -304,7 +235,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
304 235
305#define flush_write_buffers() 236#define flush_write_buffers()
306 237
307extern int iommu_bio_merge;
308#define BIO_VMERGE_BOUNDARY iommu_bio_merge 238#define BIO_VMERGE_BOUNDARY iommu_bio_merge
309 239
310/* 240/*
@@ -314,4 +244,4 @@ extern int iommu_bio_merge;
314 244
315#endif /* __KERNEL__ */ 245#endif /* __KERNEL__ */
316 246
317#endif 247#endif /* ASM_X86__IO_64_H */
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index d593e14f0341..8ec68a50cf10 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_IO_APIC_H 1#ifndef ASM_X86__IO_APIC_H
2#define __ASM_IO_APIC_H 2#define ASM_X86__IO_APIC_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/mpspec.h> 5#include <asm/mpspec.h>
@@ -11,6 +11,15 @@
11 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar 11 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
12 */ 12 */
13 13
14/* I/O Unit Redirection Table */
15#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
16#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
17#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
18#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
19#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
20#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
21#define IO_APIC_REDIR_MASKED (1 << 16)
22
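These masks describe the low dword of an I/O APIC redirection table entry. Purely as an illustration (the kernel normally builds entries through struct IO_APIC_route_entry below rather than open-coded masks), a masked, logically addressed, level-triggered entry for vector 0x31 would be composed as:

    u32 low = (0x31 & IO_APIC_REDIR_VECTOR_MASK) |
              IO_APIC_REDIR_DEST_LOGICAL |
              IO_APIC_REDIR_LEVEL_TRIGGER |
              IO_APIC_REDIR_MASKED;
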
14/* 23/*
15 * The structure of the IO-APIC: 24 * The structure of the IO-APIC:
16 */ 25 */
@@ -98,6 +107,20 @@ struct IO_APIC_route_entry {
98 107
99} __attribute__ ((packed)); 108} __attribute__ ((packed));
100 109
110struct IR_IO_APIC_route_entry {
111 __u64 vector : 8,
112 zero : 3,
113 index2 : 1,
114 delivery_status : 1,
115 polarity : 1,
116 irr : 1,
117 trigger : 1,
118 mask : 1,
119 reserved : 31,
120 format : 1,
121 index : 15;
122} __attribute__ ((packed));
123
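The bit widths of the interrupt-remapping entry above sum to exactly 64 (8+3+1+1+1+1+1+1+31+1+15), so the packed struct occupies a single 64-bit slot. A compile-time sanity check, as a sketch (BUILD_BUG_ON comes from <linux/kernel.h> and must sit inside a function; the function name is hypothetical):

    static inline void ir_entry_size_check(void)
    {
            BUILD_BUG_ON(sizeof(struct IR_IO_APIC_route_entry) != sizeof(u64));
    }
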
101#ifdef CONFIG_X86_IO_APIC 124#ifdef CONFIG_X86_IO_APIC
102 125
103/* 126/*
@@ -112,21 +135,32 @@ extern int nr_ioapic_registers[MAX_IO_APICS];
112 135
113#define MP_MAX_IOAPIC_PIN 127 136#define MP_MAX_IOAPIC_PIN 127
114 137
115struct mp_ioapic_routing { 138struct mp_config_ioapic {
116 int apic_id; 139 unsigned long mp_apicaddr;
117 int gsi_base; 140 unsigned int mp_apicid;
118 int gsi_end; 141 unsigned char mp_type;
119 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); 142 unsigned char mp_apicver;
143 unsigned char mp_flags;
144};
145
146struct mp_config_intsrc {
147 unsigned int mp_dstapic;
148 unsigned char mp_type;
149 unsigned char mp_irqtype;
150 unsigned short mp_irqflag;
151 unsigned char mp_srcbus;
152 unsigned char mp_srcbusirq;
153 unsigned char mp_dstirq;
120}; 154};
121 155
122/* I/O APIC entries */ 156/* I/O APIC entries */
123extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; 157extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
124 158
125/* # of MP IRQ source entries */ 159/* # of MP IRQ source entries */
126extern int mp_irq_entries; 160extern int mp_irq_entries;
127 161
128/* MP IRQ source entries */ 162/* MP IRQ source entries */
129extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 163extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
130 164
131/* non-0 if default (table-less) MP configuration */ 165/* non-0 if default (table-less) MP configuration */
132extern int mpc_default_type; 166extern int mpc_default_type;
@@ -137,6 +171,9 @@ extern int sis_apic_bug;
137/* 1 if "noapic" boot option passed */ 171/* 1 if "noapic" boot option passed */
138extern int skip_ioapic_setup; 172extern int skip_ioapic_setup;
139 173
174/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
175extern int timer_through_8259;
176
140static inline void disable_ioapic_setup(void) 177static inline void disable_ioapic_setup(void)
141{ 178{
142 skip_ioapic_setup = 1; 179 skip_ioapic_setup = 1;
@@ -160,8 +197,16 @@ extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
160extern int (*ioapic_renumber_irq)(int ioapic, int irq); 197extern int (*ioapic_renumber_irq)(int ioapic, int irq);
161extern void ioapic_init_mappings(void); 198extern void ioapic_init_mappings(void);
162 199
200#ifdef CONFIG_X86_64
201extern int save_mask_IO_APIC_setup(void);
202extern void restore_IO_APIC_setup(void);
203extern void reinit_intr_remapped_IO_APIC(int);
204#endif
205
163#else /* !CONFIG_X86_IO_APIC */ 206#else /* !CONFIG_X86_IO_APIC */
164#define io_apic_assign_pci_irqs 0 207#define io_apic_assign_pci_irqs 0
208static const int timer_through_8259 = 0;
209static inline void ioapic_init_mappings(void) { }
165#endif 210#endif
166 211
167#endif 212#endif /* ASM_X86__IO_APIC_H */
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
index c0c338bd4068..336603512399 100644
--- a/include/asm-x86/ioctls.h
+++ b/include/asm-x86/ioctls.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IOCTLS_H 1#ifndef ASM_X86__IOCTLS_H
2#define _ASM_X86_IOCTLS_H 2#define ASM_X86__IOCTLS_H
3 3
4#include <asm/ioctl.h> 4#include <asm/ioctl.h>
5 5
@@ -85,4 +85,4 @@
85 85
86#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ 86#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
87 87
88#endif 88#endif /* ASM_X86__IOCTLS_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 07862fdd23c0..546ad3110fea 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -1,29 +1,46 @@
1#ifndef _ASM_X8664_GART_H 1#ifndef ASM_X86__IOMMU_H
2#define _ASM_X8664_GART_H 1 2#define ASM_X86__IOMMU_H
3 3
4extern void pci_iommu_shutdown(void); 4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void); 5extern void no_iommu_init(void);
6extern struct dma_mapping_ops nommu_dma_ops;
6extern int force_iommu, no_iommu; 7extern int force_iommu, no_iommu;
7extern int iommu_detected; 8extern int iommu_detected;
8#ifdef CONFIG_IOMMU 9extern int dmar_disabled;
10
11extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
12
13#ifdef CONFIG_GART_IOMMU
14extern int gart_iommu_aperture;
15extern int gart_iommu_aperture_allowed;
16extern int gart_iommu_aperture_disabled;
17
18extern void early_gart_iommu_check(void);
9extern void gart_iommu_init(void); 19extern void gart_iommu_init(void);
10extern void gart_iommu_shutdown(void); 20extern void gart_iommu_shutdown(void);
11extern void __init gart_parse_options(char *); 21extern void __init gart_parse_options(char *);
12extern void iommu_hole_init(void); 22extern void gart_iommu_hole_init(void);
13extern int fallback_aper_order; 23
14extern int fallback_aper_force;
15extern int iommu_aperture;
16extern int iommu_aperture_allowed;
17extern int iommu_aperture_disabled;
18extern int fix_aperture;
19#else 24#else
20#define iommu_aperture 0 25#define gart_iommu_aperture 0
21#define iommu_aperture_allowed 0 26#define gart_iommu_aperture_allowed 0
27#define gart_iommu_aperture_disabled 1
22 28
29static inline void early_gart_iommu_check(void)
30{
31}
32static inline void gart_iommu_init(void)
33{
34}
23static inline void gart_iommu_shutdown(void) 35static inline void gart_iommu_shutdown(void)
24{ 36{
25} 37}
26 38static inline void gart_parse_options(char *options)
39{
40}
41static inline void gart_iommu_hole_init(void)
42{
43}
27#endif 44#endif
28 45
29#endif 46#endif /* ASM_X86__IOMMU_H */
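The #else branch above follows the usual config-off idiom: the extern declarations become empty static inlines and the aperture flags become constants, so generic setup code can call into the GART layer unconditionally. A hypothetical caller:

    /* Editorial sketch; the function name is illustrative only. */
    void __init hypothetical_iommu_setup(void)
    {
            early_gart_iommu_check();       /* compiles to nothing if GART is off */
            if (gart_iommu_aperture)        /* constant 0 if GART is off */
                    gart_iommu_hole_init();
    }
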
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
index ee678fd51594..910304fbdc8f 100644
--- a/include/asm-x86/ipcbuf.h
+++ b/include/asm-x86/ipcbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IPCBUF_H 1#ifndef ASM_X86__IPCBUF_H
2#define _ASM_X86_IPCBUF_H 2#define ASM_X86__IPCBUF_H
3 3
4/* 4/*
5 * The ipc64_perm structure for x86 architecture. 5 * The ipc64_perm structure for x86 architecture.
@@ -25,4 +25,4 @@ struct ipc64_perm {
25 unsigned long __unused2; 25 unsigned long __unused2;
26}; 26};
27 27
28#endif /* _ASM_X86_IPCBUF_H */ 28#endif /* ASM_X86__IPCBUF_H */
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index ecc80f341f37..30a692cfaff8 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_IPI_H 1#ifndef ASM_X86__IPI_H
2#define __ASM_IPI_H 2#define ASM_X86__IPI_H
3 3
4/* 4/*
5 * Copyright 2004 James Cleverdon, IBM. 5 * Copyright 2004 James Cleverdon, IBM.
@@ -20,6 +20,7 @@
20 20
21#include <asm/hw_irq.h> 21#include <asm/hw_irq.h>
22#include <asm/apic.h> 22#include <asm/apic.h>
23#include <asm/smp.h>
23 24
24/* 25/*
25 * the following functions deal with sending IPIs between CPUs. 26 * the following functions deal with sending IPIs between CPUs.
@@ -48,6 +49,12 @@ static inline int __prepare_ICR2(unsigned int mask)
48 return SET_APIC_DEST_FIELD(mask); 49 return SET_APIC_DEST_FIELD(mask);
49} 50}
50 51
52static inline void __xapic_wait_icr_idle(void)
53{
54 while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
55 cpu_relax();
56}
57
51static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, 58static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
52 unsigned int dest) 59 unsigned int dest)
53{ 60{
@@ -63,7 +70,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
63 /* 70 /*
64 * Wait for idle. 71 * Wait for idle.
65 */ 72 */
66 apic_wait_icr_idle(); 73 __xapic_wait_icr_idle();
67 74
68 /* 75 /*
69 * No need to touch the target chip field 76 * No need to touch the target chip field
@@ -73,7 +80,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
73 /* 80 /*
74 * Send the IPI. The write to APIC_ICR fires this off. 81 * Send the IPI. The write to APIC_ICR fires this off.
75 */ 82 */
76 apic_write(APIC_ICR, cfg); 83 native_apic_mem_write(APIC_ICR, cfg);
77} 84}
78 85
79/* 86/*
@@ -91,13 +98,13 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
91 if (unlikely(vector == NMI_VECTOR)) 98 if (unlikely(vector == NMI_VECTOR))
92 safe_apic_wait_icr_idle(); 99 safe_apic_wait_icr_idle();
93 else 100 else
94 apic_wait_icr_idle(); 101 __xapic_wait_icr_idle();
95 102
96 /* 103 /*
97 * prepare target chip field 104 * prepare target chip field
98 */ 105 */
99 cfg = __prepare_ICR2(mask); 106 cfg = __prepare_ICR2(mask);
100 apic_write(APIC_ICR2, cfg); 107 native_apic_mem_write(APIC_ICR2, cfg);
101 108
102 /* 109 /*
103 * program the ICR 110 * program the ICR
@@ -107,7 +114,7 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
107 /* 114 /*
108 * Send the IPI. The write to APIC_ICR fires this off. 115 * Send the IPI. The write to APIC_ICR fires this off.
109 */ 116 */
110 apic_write(APIC_ICR, cfg); 117 native_apic_mem_write(APIC_ICR, cfg);
111} 118}
112 119
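Pulled together, the xAPIC send protocol these helpers implement is: spin until the ICR delivery-status bit clears, program the destination into ICR2, then write ICR, which fires the IPI. A condensed sketch using the same <asm/apicdef.h> names (the wrapper itself is illustrative, not part of the patch):

    static inline void hypothetical_send_fixed_ipi(unsigned int apicid, int vector)
    {
            while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
                    cpu_relax();                    /* 1: wait for ICR idle */
            native_apic_mem_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                                                    /* 2: target chip field */
            native_apic_mem_write(APIC_ICR,
                                  APIC_DEST_PHYSICAL | APIC_DM_FIXED | vector);
                                                    /* 3: this write sends it */
    }
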
113static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) 120static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
@@ -121,11 +128,11 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
121 * - mbligh 128 * - mbligh
122 */ 129 */
123 local_irq_save(flags); 130 local_irq_save(flags);
124 for_each_cpu_mask(query_cpu, mask) { 131 for_each_cpu_mask_nr(query_cpu, mask) {
125 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 132 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
126 vector, APIC_DEST_PHYSICAL); 133 vector, APIC_DEST_PHYSICAL);
127 } 134 }
128 local_irq_restore(flags); 135 local_irq_restore(flags);
129} 136}
130 137
131#endif /* __ASM_IPI_H */ 138#endif /* ASM_X86__IPI_H */
diff --git a/include/asm-x86/irq.h b/include/asm-x86/irq.h
index 7ba905465a53..1e5f2909c1db 100644
--- a/include/asm-x86/irq.h
+++ b/include/asm-x86/irq.h
@@ -1,5 +1,50 @@
1#ifdef CONFIG_X86_32 1#ifndef ASM_X86__IRQ_H
2# include "irq_32.h" 2#define ASM_X86__IRQ_H
3/*
4 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
5 *
6 * IRQ/IPI changes taken from work by Thomas Radke
7 * <tomsoft@informatik.tu-chemnitz.de>
8 */
9
10#include <asm/apicdef.h>
11#include <asm/irq_vectors.h>
12
13static inline int irq_canonicalize(int irq)
14{
15 return ((irq == 2) ? 9 : irq);
16}
17
18#ifdef CONFIG_X86_LOCAL_APIC
19# define ARCH_HAS_NMI_WATCHDOG
20#endif
21
22#ifdef CONFIG_4KSTACKS
23 extern void irq_ctx_init(int cpu);
24 extern void irq_ctx_exit(int cpu);
25# define __ARCH_HAS_DO_SOFTIRQ
3#else 26#else
4# include "irq_64.h" 27# define irq_ctx_init(cpu) do { } while (0)
28# define irq_ctx_exit(cpu) do { } while (0)
29# ifdef CONFIG_X86_64
30# define __ARCH_HAS_DO_SOFTIRQ
31# endif
32#endif
33
34#ifdef CONFIG_IRQBALANCE
35extern int irqbalance_disable(char *str);
36#endif
37
38#ifdef CONFIG_HOTPLUG_CPU
39#include <linux/cpumask.h>
40extern void fixup_irqs(cpumask_t map);
5#endif 41#endif
42
43extern unsigned int do_IRQ(struct pt_regs *regs);
44extern void init_IRQ(void);
45extern void native_init_IRQ(void);
46
47/* Interrupt vector management */
48extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
49
50#endif /* ASM_X86__IRQ_H */
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
deleted file mode 100644
index 0b79f3185243..000000000000
--- a/include/asm-x86/irq_32.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H
3
4/*
5 * linux/include/asm/irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 */
12
13#include <linux/sched.h>
14/* include comes from machine specific directory */
15#include "irq_vectors.h"
16#include <asm/thread_info.h>
17
18static inline int irq_canonicalize(int irq)
19{
20 return ((irq == 2) ? 9 : irq);
21}
22
23#ifdef CONFIG_X86_LOCAL_APIC
24# define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
25#endif
26
27#ifdef CONFIG_4KSTACKS
28 extern void irq_ctx_init(int cpu);
29 extern void irq_ctx_exit(int cpu);
30# define __ARCH_HAS_DO_SOFTIRQ
31#else
32# define irq_ctx_init(cpu) do { } while (0)
33# define irq_ctx_exit(cpu) do { } while (0)
34#endif
35
36#ifdef CONFIG_IRQBALANCE
37extern int irqbalance_disable(char *str);
38#endif
39
40#ifdef CONFIG_HOTPLUG_CPU
41extern void fixup_irqs(cpumask_t map);
42#endif
43
44unsigned int do_IRQ(struct pt_regs *regs);
45void init_IRQ(void);
46void __init native_init_IRQ(void);
47
48/* Interrupt vector management */
49extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
50
51#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h
deleted file mode 100644
index 083d35a62c94..000000000000
--- a/include/asm-x86/irq_64.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H
3
4/*
5 * linux/include/asm/irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 */
12
13#define TIMER_IRQ 0
14
15/*
16 * 16 8259A IRQ's, 208 potential APIC interrupt sources.
17 * Right now the APIC is mostly only used for SMP.
18 * 256 vectors is an architectural limit. (we can have
19 * more than 256 devices theoretically, but they will
20 * have to use shared interrupts)
21 * Since vectors 0x00-0x1f are used/reserved for the CPU,
22 * the usable vector space is 0x20-0xff (224 vectors)
23 */
24
25/*
26 * The maximum number of vectors supported by x86_64 processors
27 * is limited to 256. For processors other than x86_64, NR_VECTORS
28 * should be changed accordingly.
29 */
30#define NR_VECTORS 256
31
32#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
33
34#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
35#define NR_IRQ_VECTORS NR_IRQS
36
37static inline int irq_canonicalize(int irq)
38{
39 return ((irq == 2) ? 9 : irq);
40}
41
42#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
43
44#ifdef CONFIG_HOTPLUG_CPU
45#include <linux/cpumask.h>
46extern void fixup_irqs(cpumask_t map);
47#endif
48
49#define __ARCH_HAS_DO_SOFTIRQ 1
50
51#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86/irq_regs_32.h b/include/asm-x86/irq_regs_32.h
index 3368b20c0b48..316a3b258871 100644
--- a/include/asm-x86/irq_regs_32.h
+++ b/include/asm-x86/irq_regs_32.h
@@ -4,8 +4,8 @@
4 * 4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org> 5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */ 6 */
7#ifndef _ASM_I386_IRQ_REGS_H 7#ifndef ASM_X86__IRQ_REGS_32_H
8#define _ASM_I386_IRQ_REGS_H 8#define ASM_X86__IRQ_REGS_32_H
9 9
10#include <asm/percpu.h> 10#include <asm/percpu.h>
11 11
@@ -26,4 +26,4 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
26 return old_regs; 26 return old_regs;
27} 27}
28 28
29#endif /* _ASM_I386_IRQ_REGS_H */ 29#endif /* ASM_X86__IRQ_REGS_32_H */
diff --git a/include/asm-x86/irq_remapping.h b/include/asm-x86/irq_remapping.h
new file mode 100644
index 000000000000..78242c6ffa58
--- /dev/null
+++ b/include/asm-x86/irq_remapping.h
@@ -0,0 +1,8 @@
1#ifndef _ASM_IRQ_REMAPPING_H
2#define _ASM_IRQ_REMAPPING_H
3
4extern int x2apic;
5
6#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
7
8#endif
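Worked example: with x2apic == 0 the destination is an 8-bit xAPIC ID that lives in bits 8-15 of the IRTE destination field, so IRTE_DEST(0x05) evaluates to 0x05 << 8 = 0x0500; with x2apic == 1 the full 32-bit x2APIC ID is used unshifted and IRTE_DEST(0x05) is simply 0x05.
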
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
new file mode 100644
index 000000000000..c5d2d767a1f3
--- /dev/null
+++ b/include/asm-x86/irq_vectors.h
@@ -0,0 +1,182 @@
1#ifndef ASM_X86__IRQ_VECTORS_H
2#define ASM_X86__IRQ_VECTORS_H
3
4#include <linux/threads.h>
5
6#define NMI_VECTOR 0x02
7
8/*
9 * IDT vectors usable for external interrupt sources start
10 * at 0x20:
11 */
12#define FIRST_EXTERNAL_VECTOR 0x20
13
14#ifdef CONFIG_X86_32
15# define SYSCALL_VECTOR 0x80
16#else
17# define IA32_SYSCALL_VECTOR 0x80
18#endif
19
20/*
21 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
22 * cleanup after irq migration on 64 bit.
23 */
24#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
25
26/*
27 * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
28 * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
29 */
30#ifdef CONFIG_X86_32
31#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR)
32#else
33#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
34#endif
35#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
36#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
37#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
38#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
39#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
40#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
41#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
42#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
43#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
44#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
45#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
46#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
47#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
48#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
49#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
50
51/*
52 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
53 *
54 * some of the following vectors are 'rare', they are merged
55 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
56 * TLB, reschedule and local APIC vectors are performance-critical.
57 *
58 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
59 */
60#ifdef CONFIG_X86_32
61
62# define SPURIOUS_APIC_VECTOR 0xff
63# define ERROR_APIC_VECTOR 0xfe
64# define INVALIDATE_TLB_VECTOR 0xfd
65# define RESCHEDULE_VECTOR 0xfc
66# define CALL_FUNCTION_VECTOR 0xfb
67# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
68# define THERMAL_APIC_VECTOR 0xf0
69
70#else
71
72#define SPURIOUS_APIC_VECTOR 0xff
73#define ERROR_APIC_VECTOR 0xfe
74#define RESCHEDULE_VECTOR 0xfd
75#define CALL_FUNCTION_VECTOR 0xfc
76#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
77#define THERMAL_APIC_VECTOR 0xfa
78#define THRESHOLD_APIC_VECTOR 0xf9
79#define UV_BAU_MESSAGE 0xf8
80#define INVALIDATE_TLB_VECTOR_END 0xf7
81#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
82
83#define NUM_INVALIDATE_TLB_VECTORS 8
84
85#endif
86
87/*
88 * Local APIC timer IRQ vector is on a different priority level,
89 * to work around the 'lost local interrupt if more than 2 IRQ
90 * sources per level' errata.
91 */
92#define LOCAL_TIMER_VECTOR 0xef
93
94/*
95 * First APIC vector available to drivers: (vectors 0x30-0xee) we
96 * start at 0x31(0x41) to spread out vectors evenly between priority
97 * levels. (0x80 is the syscall vector)
98 */
99#ifdef CONFIG_X86_32
100# define FIRST_DEVICE_VECTOR 0x31
101#else
102# define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
103#endif
104
105#define NR_VECTORS 256
106
107#define FPU_IRQ 13
108
109#define FIRST_VM86_IRQ 3
110#define LAST_VM86_IRQ 15
111#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
112
113#ifdef CONFIG_X86_64
114# if NR_CPUS < MAX_IO_APICS
115# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
116# else
117# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
118# endif
119# define NR_IRQ_VECTORS NR_IRQS
120
121#elif !defined(CONFIG_X86_VOYAGER)
122
123# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
124
125# define NR_IRQS 224
126
127# if (224 >= 32 * NR_CPUS)
128# define NR_IRQ_VECTORS NR_IRQS
129# else
130# define NR_IRQ_VECTORS (32 * NR_CPUS)
131# endif
132
133# else /* IO_APIC || PARAVIRT */
134
135# define NR_IRQS 16
136# define NR_IRQ_VECTORS NR_IRQS
137
138# endif
139
140#else /* !VISWS && !VOYAGER */
141
142# define NR_IRQS 224
143# define NR_IRQ_VECTORS NR_IRQS
144
145#endif /* VISWS */
146
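Worked example of the 64-bit sizing above, taking NR_VECTORS = 256 and assuming MAX_IO_APICS = 128 (its contemporary value in <asm/apicdef.h>): a NR_CPUS = 8 build gets NR_IRQS = 256 + 32 * 8 = 512, while a NR_CPUS = 255 build is capped at NR_IRQS = 256 + 32 * 128 = 4352.
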
147/* Voyager specific defines */
148/* These define the CPIs we use in linux */
149#define VIC_CPI_LEVEL0 0
150#define VIC_CPI_LEVEL1 1
151/* now the fake CPIs */
152#define VIC_TIMER_CPI 2
153#define VIC_INVALIDATE_CPI 3
154#define VIC_RESCHEDULE_CPI 4
155#define VIC_ENABLE_IRQ_CPI 5
156#define VIC_CALL_FUNCTION_CPI 6
157#define VIC_CALL_FUNCTION_SINGLE_CPI 7
158
159/* Now the QIC CPIs: since we don't need the two initial levels,
160 * these are two less than the VIC CPIs. */
161#define QIC_CPI_OFFSET 1
162#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
163#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
164#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
165#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
166#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
167#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
168
169#define VIC_START_FAKE_CPI VIC_TIMER_CPI
170#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
171
172/* this is the SYS_INT CPI. */
173#define VIC_SYS_INT 8
174#define VIC_CMN_INT 15
175
176/* This is the boot CPI for alternate processors. It gets overwritten
177 * by the above CPIs once the system has activated all available processors. */
178#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
179#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
180
181
182#endif /* ASM_X86__IRQ_VECTORS_H */
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index c242527f970e..424acb48cd61 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -111,14 +111,35 @@ static inline unsigned long __raw_local_irq_save(void)
111#define DISABLE_INTERRUPTS(x) cli 111#define DISABLE_INTERRUPTS(x) cli
112 112
113#ifdef CONFIG_X86_64 113#ifdef CONFIG_X86_64
114#define SWAPGS swapgs
115/*
116 * Currently paravirt can't handle swapgs nicely when we
117 * don't have a stack we can rely on (such as a user space
118 * stack). So we either find a way around these or just fault
119 * and emulate if a guest tries to call swapgs directly.
120 *
121 * Either way, this is a good way to document that we don't
122 * have a reliable stack. x86_64 only.
123 */
124#define SWAPGS_UNSAFE_STACK swapgs
125
126#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
127
114#define INTERRUPT_RETURN iretq 128#define INTERRUPT_RETURN iretq
115#define ENABLE_INTERRUPTS_SYSCALL_RET \ 129#define USERGS_SYSRET64 \
116 movq %gs:pda_oldrsp, %rsp; \ 130 swapgs; \
117 swapgs; \ 131 sysretq;
118 sysretq; 132#define USERGS_SYSRET32 \
133 swapgs; \
134 sysretl
135#define ENABLE_INTERRUPTS_SYSEXIT32 \
136 swapgs; \
137 sti; \
138 sysexit
139
119#else 140#else
120#define INTERRUPT_RETURN iret 141#define INTERRUPT_RETURN iret
121#define ENABLE_INTERRUPTS_SYSCALL_RET sti; sysexit 142#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
122#define GET_CR0_INTO_EAX movl %cr0, %eax 143#define GET_CR0_INTO_EAX movl %cr0, %eax
123#endif 144#endif
124 145
@@ -169,18 +190,6 @@ static inline void trace_hardirqs_fixup(void)
169#else 190#else
170 191
171#ifdef CONFIG_X86_64 192#ifdef CONFIG_X86_64
172/*
173 * Currently paravirt can't handle swapgs nicely when we
174 * don't have a stack we can rely on (such as a user space
175 * stack). So we either find a way around these or just fault
176 * and emulate if a guest tries to call swapgs directly.
177 *
178 * Either way, this is a good way to document that we don't
179 * have a reliable stack. x86_64 only.
180 */
181#define SWAPGS_UNSAFE_STACK swapgs
182#define ARCH_TRACE_IRQS_ON call trace_hardirqs_on_thunk
183#define ARCH_TRACE_IRQS_OFF call trace_hardirqs_off_thunk
184#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk 193#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
185#define ARCH_LOCKDEP_SYS_EXIT_IRQ \ 194#define ARCH_LOCKDEP_SYS_EXIT_IRQ \
186 TRACE_IRQS_ON; \ 195 TRACE_IRQS_ON; \
@@ -192,24 +201,6 @@ static inline void trace_hardirqs_fixup(void)
192 TRACE_IRQS_OFF; 201 TRACE_IRQS_OFF;
193 202
194#else 203#else
195#define ARCH_TRACE_IRQS_ON \
196 pushl %eax; \
197 pushl %ecx; \
198 pushl %edx; \
199 call trace_hardirqs_on; \
200 popl %edx; \
201 popl %ecx; \
202 popl %eax;
203
204#define ARCH_TRACE_IRQS_OFF \
205 pushl %eax; \
206 pushl %ecx; \
207 pushl %edx; \
208 call trace_hardirqs_off; \
209 popl %edx; \
210 popl %ecx; \
211 popl %eax;
212
213#define ARCH_LOCKDEP_SYS_EXIT \ 204#define ARCH_LOCKDEP_SYS_EXIT \
214 pushl %eax; \ 205 pushl %eax; \
215 pushl %ecx; \ 206 pushl %ecx; \
@@ -223,8 +214,8 @@ static inline void trace_hardirqs_fixup(void)
223#endif 214#endif
224 215
225#ifdef CONFIG_TRACE_IRQFLAGS 216#ifdef CONFIG_TRACE_IRQFLAGS
226# define TRACE_IRQS_ON ARCH_TRACE_IRQS_ON 217# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
227# define TRACE_IRQS_OFF ARCH_TRACE_IRQS_OFF 218# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
228#else 219#else
229# define TRACE_IRQS_ON 220# define TRACE_IRQS_ON
230# define TRACE_IRQS_OFF 221# define TRACE_IRQS_OFF
diff --git a/include/asm-x86/ist.h b/include/asm-x86/ist.h
index 6ec6ceed95a7..35a2fe9bc921 100644
--- a/include/asm-x86/ist.h
+++ b/include/asm-x86/ist.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IST_H 1#ifndef ASM_X86__IST_H
2#define _ASM_IST_H 2#define ASM_X86__IST_H
3 3
4/* 4/*
5 * Include file for the interface to IST BIOS 5 * Include file for the interface to IST BIOS
@@ -31,4 +31,4 @@ struct ist_info {
31extern struct ist_info ist_info; 31extern struct ist_info ist_info;
32 32
33#endif /* __KERNEL__ */ 33#endif /* __KERNEL__ */
34#endif /* _ASM_IST_H */ 34#endif /* ASM_X86__IST_H */
diff --git a/include/asm-x86/k8.h b/include/asm-x86/k8.h
index 452e2b696ff4..2bbaf4370a55 100644
--- a/include/asm-x86/k8.h
+++ b/include/asm-x86/k8.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_K8_H 1#ifndef ASM_X86__K8_H
2#define _ASM_K8_H 1 2#define ASM_X86__K8_H
3 3
4#include <linux/pci.h> 4#include <linux/pci.h>
5 5
@@ -12,4 +12,4 @@ extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void); 12extern void k8_flush_garts(void);
13extern int k8_scan_nodes(unsigned long start, unsigned long end); 13extern int k8_scan_nodes(unsigned long start, unsigned long end);
14 14
15#endif 15#endif /* ASM_X86__K8_H */
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index 96651bb59ba1..5ec3ad3e825c 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_KDEBUG_H 1#ifndef ASM_X86__KDEBUG_H
2#define _ASM_X86_KDEBUG_H 2#define ASM_X86__KDEBUG_H
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5 5
@@ -35,4 +35,4 @@ extern void show_regs(struct pt_regs *regs);
35extern unsigned long oops_begin(void); 35extern unsigned long oops_begin(void);
36extern void oops_end(unsigned long, struct pt_regs *, int signr); 36extern void oops_end(unsigned long, struct pt_regs *, int signr);
37 37
38#endif 38#endif /* ASM_X86__KDEBUG_H */
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 8f855a15f64d..ea09600d6129 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -1,5 +1,5 @@
1#ifndef _KEXEC_H 1#ifndef ASM_X86__KEXEC_H
2#define _KEXEC_H 2#define ASM_X86__KEXEC_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# define PA_CONTROL_PAGE 0 5# define PA_CONTROL_PAGE 0
@@ -10,14 +10,15 @@
10# define VA_PTE_0 5 10# define VA_PTE_0 5
11# define PA_PTE_1 6 11# define PA_PTE_1 6
12# define VA_PTE_1 7 12# define VA_PTE_1 7
13# define PA_SWAP_PAGE 8
13# ifdef CONFIG_X86_PAE 14# ifdef CONFIG_X86_PAE
14# define PA_PMD_0 8 15# define PA_PMD_0 9
15# define VA_PMD_0 9 16# define VA_PMD_0 10
16# define PA_PMD_1 10 17# define PA_PMD_1 11
17# define VA_PMD_1 11 18# define VA_PMD_1 12
18# define PAGES_NR 12 19# define PAGES_NR 13
19# else 20# else
20# define PAGES_NR 8 21# define PAGES_NR 9
21# endif 22# endif
22#else 23#else
23# define PA_CONTROL_PAGE 0 24# define PA_CONTROL_PAGE 0
@@ -40,6 +41,10 @@
40# define PAGES_NR 17 41# define PAGES_NR 17
41#endif 42#endif
42 43
44#ifdef CONFIG_X86_32
45# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
46#endif
47
43#ifndef __ASSEMBLY__ 48#ifndef __ASSEMBLY__
44 49
45#include <linux/string.h> 50#include <linux/string.h>
@@ -62,7 +67,7 @@
62/* Maximum address we can use for the control code buffer */ 67/* Maximum address we can use for the control code buffer */
63# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE 68# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
64 69
65# define KEXEC_CONTROL_CODE_SIZE 4096 70# define KEXEC_CONTROL_PAGE_SIZE 4096
66 71
67/* The native architecture */ 72/* The native architecture */
68# define KEXEC_ARCH KEXEC_ARCH_386 73# define KEXEC_ARCH KEXEC_ARCH_386
@@ -78,7 +83,7 @@
78# define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) 83# define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
79 84
80/* Allocate one page for the pdp and the second for the code */ 85/* Allocate one page for the pdp and the second for the code */
81# define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) 86# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)
82 87
83/* The native architecture */ 88/* The native architecture */
84# define KEXEC_ARCH KEXEC_ARCH_X86_64 89# define KEXEC_ARCH KEXEC_ARCH_X86_64
@@ -152,11 +157,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
152} 157}
153 158
154#ifdef CONFIG_X86_32 159#ifdef CONFIG_X86_32
155asmlinkage NORET_TYPE void 160asmlinkage unsigned long
156relocate_kernel(unsigned long indirection_page, 161relocate_kernel(unsigned long indirection_page,
157 unsigned long control_page, 162 unsigned long control_page,
158 unsigned long start_address, 163 unsigned long start_address,
159 unsigned int has_pae) ATTRIB_NORET; 164 unsigned int has_pae,
165 unsigned int preserve_context);
160#else 166#else
161NORET_TYPE void 167NORET_TYPE void
162relocate_kernel(unsigned long indirection_page, 168relocate_kernel(unsigned long indirection_page,
@@ -166,4 +172,4 @@ relocate_kernel(unsigned long indirection_page,
166 172
167#endif /* __ASSEMBLY__ */ 173#endif /* __ASSEMBLY__ */
168 174
169#endif /* _KEXEC_H */ 175#endif /* ASM_X86__KEXEC_H */
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
index 484c47554f3b..d283863354de 100644
--- a/include/asm-x86/kgdb.h
+++ b/include/asm-x86/kgdb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_KGDB_H_ 1#ifndef ASM_X86__KGDB_H
2#define _ASM_KGDB_H_ 2#define ASM_X86__KGDB_H
3 3
4/* 4/*
5 * Copyright (C) 2001-2004 Amit S. Kale 5 * Copyright (C) 2001-2004 Amit S. Kale
@@ -39,12 +39,13 @@ enum regnames {
39 GDB_FS, /* 14 */ 39 GDB_FS, /* 14 */
40 GDB_GS, /* 15 */ 40 GDB_GS, /* 15 */
41}; 41};
42#define NUMREGBYTES ((GDB_GS+1)*4)
42#else /* ! CONFIG_X86_32 */ 43#else /* ! CONFIG_X86_32 */
43enum regnames { 44enum regnames64 {
44 GDB_AX, /* 0 */ 45 GDB_AX, /* 0 */
45 GDB_DX, /* 1 */ 46 GDB_BX, /* 1 */
46 GDB_CX, /* 2 */ 47 GDB_CX, /* 2 */
47 GDB_BX, /* 3 */ 48 GDB_DX, /* 3 */
48 GDB_SI, /* 4 */ 49 GDB_SI, /* 4 */
49 GDB_DI, /* 5 */ 50 GDB_DI, /* 5 */
50 GDB_BP, /* 6 */ 51 GDB_BP, /* 6 */
@@ -58,18 +59,15 @@ enum regnames {
58 GDB_R14, /* 14 */ 59 GDB_R14, /* 14 */
59 GDB_R15, /* 15 */ 60 GDB_R15, /* 15 */
60 GDB_PC, /* 16 */ 61 GDB_PC, /* 16 */
61 GDB_PS, /* 17 */
62}; 62};
63#endif /* CONFIG_X86_32 */
64 63
65/* 64enum regnames32 {
66 * Number of bytes of registers: 65 GDB_PS = 34,
67 */ 66 GDB_CS,
68#ifdef CONFIG_X86_32 67 GDB_SS,
69# define NUMREGBYTES 64 68};
70#else 69#define NUMREGBYTES ((GDB_SS+1)*4)
71# define NUMREGBYTES ((GDB_PS+1)*8) 70#endif /* CONFIG_X86_32 */
72#endif
73 71
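As a cross-check of the new buffer sizing: on 32-bit, GDB_GS = 15, so NUMREGBYTES = (15 + 1) * 4 = 64, matching the hard-coded 64 the patch removes. On 64-bit, NUMREGBYTES = (36 + 1) * 4 = 148: sixteen 8-byte GP registers plus the 8-byte rip make 136 bytes, followed by 4 bytes each for eflags, cs and ss — which is also why GDB_PS starts at slot 34 (136 / 4) in the 4-byte-unit enum.
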
74static inline void arch_kgdb_breakpoint(void) 72static inline void arch_kgdb_breakpoint(void)
75{ 73{
@@ -78,4 +76,4 @@ static inline void arch_kgdb_breakpoint(void)
78#define BREAK_INSTR_SIZE 1 76#define BREAK_INSTR_SIZE 1
79#define CACHE_FLUSH_IS_SAFE 1 77#define CACHE_FLUSH_IS_SAFE 1
80 78
81#endif /* _ASM_KGDB_H_ */ 79#endif /* ASM_X86__KGDB_H */
diff --git a/include/asm-x86/kmap_types.h b/include/asm-x86/kmap_types.h
index 5f4174132a22..89f44493e643 100644
--- a/include/asm-x86/kmap_types.h
+++ b/include/asm-x86/kmap_types.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_KMAP_TYPES_H 1#ifndef ASM_X86__KMAP_TYPES_H
2#define _ASM_X86_KMAP_TYPES_H 2#define ASM_X86__KMAP_TYPES_H
3 3
4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) 4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
5# define D(n) __KM_FENCE_##n , 5# define D(n) __KM_FENCE_##n ,
@@ -26,4 +26,4 @@ D(13) KM_TYPE_NR
26 26
27#undef D 27#undef D
28 28
29#endif 29#endif /* ASM_X86__KMAP_TYPES_H */
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
index 54980b0b3892..bd8407863c13 100644
--- a/include/asm-x86/kprobes.h
+++ b/include/asm-x86/kprobes.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_KPROBES_H 1#ifndef ASM_X86__KPROBES_H
2#define _ASM_KPROBES_H 2#define ASM_X86__KPROBES_H
3/* 3/*
4 * Kernel Probes (KProbes) 4 * Kernel Probes (KProbes)
5 * 5 *
@@ -94,4 +94,4 @@ static inline void restore_interrupts(struct pt_regs *regs)
94extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 94extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
95extern int kprobe_exceptions_notify(struct notifier_block *self, 95extern int kprobe_exceptions_notify(struct notifier_block *self,
96 unsigned long val, void *data); 96 unsigned long val, void *data);
97#endif /* _ASM_KPROBES_H */ 97#endif /* ASM_X86__KPROBES_H */
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 80eefef2cc76..78e954db1e7f 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -1,5 +1,5 @@
1#ifndef __LINUX_KVM_X86_H 1#ifndef ASM_X86__KVM_H
2#define __LINUX_KVM_X86_H 2#define ASM_X86__KVM_H
3 3
4/* 4/*
5 * KVM x86 specific structures and definitions 5 * KVM x86 specific structures and definitions
@@ -228,5 +228,6 @@ struct kvm_pit_state {
228#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) 228#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
229#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) 229#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) 230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
231#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
231 232
232#endif 233#endif /* ASM_X86__KVM_H */
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 1d8cd01fa514..69794547f514 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -1,4 +1,4 @@
1#/* 1/*
2 * Kernel-based Virtual Machine driver for Linux 2 * Kernel-based Virtual Machine driver for Linux
3 * 3 *
4 * This header defines architecture specific interfaces, x86 version 4 * This header defines architecture specific interfaces, x86 version
@@ -8,16 +8,18 @@
8 * 8 *
9 */ 9 */
10 10
11#ifndef ASM_KVM_HOST_H 11#ifndef ASM_X86__KVM_HOST_H
12#define ASM_KVM_HOST_H 12#define ASM_X86__KVM_HOST_H
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/mmu_notifier.h>
16 17
17#include <linux/kvm.h> 18#include <linux/kvm.h>
18#include <linux/kvm_para.h> 19#include <linux/kvm_para.h>
19#include <linux/kvm_types.h> 20#include <linux/kvm_types.h>
20 21
22#include <asm/pvclock-abi.h>
21#include <asm/desc.h> 23#include <asm/desc.h>
22 24
23#define KVM_MAX_VCPUS 16 25#define KVM_MAX_VCPUS 16
@@ -26,6 +28,7 @@
26#define KVM_PRIVATE_MEM_SLOTS 4 28#define KVM_PRIVATE_MEM_SLOTS 4
27 29
28#define KVM_PIO_PAGE_OFFSET 1 30#define KVM_PIO_PAGE_OFFSET 1
31#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
29 32
30#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) 33#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
31#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) 34#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
@@ -78,6 +81,7 @@
78#define KVM_MIN_FREE_MMU_PAGES 5 81#define KVM_MIN_FREE_MMU_PAGES 5
79#define KVM_REFILL_PAGES 25 82#define KVM_REFILL_PAGES 25
80#define KVM_MAX_CPUID_ENTRIES 40 83#define KVM_MAX_CPUID_ENTRIES 40
84#define KVM_NR_VAR_MTRR 8
81 85
82extern spinlock_t kvm_lock; 86extern spinlock_t kvm_lock;
83extern struct list_head vm_list; 87extern struct list_head vm_list;
@@ -108,12 +112,12 @@ enum {
108}; 112};
109 113
110enum { 114enum {
115 VCPU_SREG_ES,
111 VCPU_SREG_CS, 116 VCPU_SREG_CS,
117 VCPU_SREG_SS,
112 VCPU_SREG_DS, 118 VCPU_SREG_DS,
113 VCPU_SREG_ES,
114 VCPU_SREG_FS, 119 VCPU_SREG_FS,
115 VCPU_SREG_GS, 120 VCPU_SREG_GS,
116 VCPU_SREG_SS,
117 VCPU_SREG_TR, 121 VCPU_SREG_TR,
118 VCPU_SREG_LDTR, 122 VCPU_SREG_LDTR,
119}; 123};
@@ -242,11 +246,13 @@ struct kvm_vcpu_arch {
242 gfn_t last_pt_write_gfn; 246 gfn_t last_pt_write_gfn;
243 int last_pt_write_count; 247 int last_pt_write_count;
244 u64 *last_pte_updated; 248 u64 *last_pte_updated;
249 gfn_t last_pte_gfn;
245 250
246 struct { 251 struct {
247 gfn_t gfn; /* presumed gfn during guest pte update */ 252 gfn_t gfn; /* presumed gfn during guest pte update */
248 pfn_t pfn; /* pfn corresponding to that gfn */ 253 pfn_t pfn; /* pfn corresponding to that gfn */
249 int largepage; 254 int largepage;
255 unsigned long mmu_seq;
250 } update_pte; 256 } update_pte;
251 257
252 struct i387_fxsave_struct host_fx_image; 258 struct i387_fxsave_struct host_fx_image;
@@ -282,9 +288,14 @@ struct kvm_vcpu_arch {
282 struct x86_emulate_ctxt emulate_ctxt; 288 struct x86_emulate_ctxt emulate_ctxt;
283 289
284 gpa_t time; 290 gpa_t time;
285 struct kvm_vcpu_time_info hv_clock; 291 struct pvclock_vcpu_time_info hv_clock;
292 unsigned int hv_clock_tsc_khz;
286 unsigned int time_offset; 293 unsigned int time_offset;
287 struct page *time_page; 294 struct page *time_page;
295
296 bool nmi_pending;
297
298 u64 mtrr[0x100];
288}; 299};
289 300
290struct kvm_mem_alias { 301struct kvm_mem_alias {
@@ -342,6 +353,7 @@ struct kvm_vcpu_stat {
342 u32 mmio_exits; 353 u32 mmio_exits;
343 u32 signal_exits; 354 u32 signal_exits;
344 u32 irq_window_exits; 355 u32 irq_window_exits;
356 u32 nmi_window_exits;
345 u32 halt_exits; 357 u32 halt_exits;
346 u32 halt_wakeup; 358 u32 halt_wakeup;
347 u32 request_irq_exits; 359 u32 request_irq_exits;
@@ -377,7 +389,6 @@ struct kvm_x86_ops {
377 void (*prepare_guest_switch)(struct kvm_vcpu *vcpu); 389 void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
378 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); 390 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
379 void (*vcpu_put)(struct kvm_vcpu *vcpu); 391 void (*vcpu_put)(struct kvm_vcpu *vcpu);
380 void (*vcpu_decache)(struct kvm_vcpu *vcpu);
381 392
382 int (*set_guest_debug)(struct kvm_vcpu *vcpu, 393 int (*set_guest_debug)(struct kvm_vcpu *vcpu,
383 struct kvm_debug_guest *dbg); 394 struct kvm_debug_guest *dbg);
@@ -495,6 +506,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
495int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 506int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
496 unsigned long value); 507 unsigned long value);
497 508
509void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
510int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
511 int type_bits, int seg);
512
498int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); 513int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
499 514
500void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); 515void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
@@ -513,6 +528,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
513void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, 528void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
514 u32 error_code); 529 u32 error_code);
515 530
531void kvm_inject_nmi(struct kvm_vcpu *vcpu);
532
516void fx_init(struct kvm_vcpu *vcpu); 533void fx_init(struct kvm_vcpu *vcpu);
517 534
518int emulator_read_std(unsigned long addr, 535int emulator_read_std(unsigned long addr,
@@ -541,6 +558,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
541int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); 558int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
542 559
543void kvm_enable_tdp(void); 560void kvm_enable_tdp(void);
561void kvm_disable_tdp(void);
544 562
545int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 563int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
546int complete_pio(struct kvm_vcpu *vcpu); 564int complete_pio(struct kvm_vcpu *vcpu);
@@ -552,55 +570,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
552 return (struct kvm_mmu_page *)page_private(page); 570 return (struct kvm_mmu_page *)page_private(page);
553} 571}
554 572
555static inline u16 read_fs(void) 573static inline u16 kvm_read_fs(void)
556{ 574{
557 u16 seg; 575 u16 seg;
558 asm("mov %%fs, %0" : "=g"(seg)); 576 asm("mov %%fs, %0" : "=g"(seg));
559 return seg; 577 return seg;
560} 578}
561 579
562static inline u16 read_gs(void) 580static inline u16 kvm_read_gs(void)
563{ 581{
564 u16 seg; 582 u16 seg;
565 asm("mov %%gs, %0" : "=g"(seg)); 583 asm("mov %%gs, %0" : "=g"(seg));
566 return seg; 584 return seg;
567} 585}
568 586
569static inline u16 read_ldt(void) 587static inline u16 kvm_read_ldt(void)
570{ 588{
571 u16 ldt; 589 u16 ldt;
572 asm("sldt %0" : "=g"(ldt)); 590 asm("sldt %0" : "=g"(ldt));
573 return ldt; 591 return ldt;
574} 592}
575 593
576static inline void load_fs(u16 sel) 594static inline void kvm_load_fs(u16 sel)
577{ 595{
578 asm("mov %0, %%fs" : : "rm"(sel)); 596 asm("mov %0, %%fs" : : "rm"(sel));
579} 597}
580 598
581static inline void load_gs(u16 sel) 599static inline void kvm_load_gs(u16 sel)
582{ 600{
583 asm("mov %0, %%gs" : : "rm"(sel)); 601 asm("mov %0, %%gs" : : "rm"(sel));
584} 602}
585 603
586#ifndef load_ldt 604static inline void kvm_load_ldt(u16 sel)
587static inline void load_ldt(u16 sel)
588{ 605{
589 asm("lldt %0" : : "rm"(sel)); 606 asm("lldt %0" : : "rm"(sel));
590} 607}
591#endif
592 608
593static inline void get_idt(struct descriptor_table *table) 609static inline void kvm_get_idt(struct descriptor_table *table)
594{ 610{
595 asm("sidt %0" : "=m"(*table)); 611 asm("sidt %0" : "=m"(*table));
596} 612}
597 613
598static inline void get_gdt(struct descriptor_table *table) 614static inline void kvm_get_gdt(struct descriptor_table *table)
599{ 615{
600 asm("sgdt %0" : "=m"(*table)); 616 asm("sgdt %0" : "=m"(*table));
601} 617}
602 618
603static inline unsigned long read_tr_base(void) 619static inline unsigned long kvm_read_tr_base(void)
604{ 620{
605 u16 tr; 621 u16 tr;
606 asm("str %0" : "=g"(tr)); 622 asm("str %0" : "=g"(tr));
@@ -617,17 +633,17 @@ static inline unsigned long read_msr(unsigned long msr)
617} 633}
618#endif 634#endif
619 635
620static inline void fx_save(struct i387_fxsave_struct *image) 636static inline void kvm_fx_save(struct i387_fxsave_struct *image)
621{ 637{
622 asm("fxsave (%0)":: "r" (image)); 638 asm("fxsave (%0)":: "r" (image));
623} 639}
624 640
625static inline void fx_restore(struct i387_fxsave_struct *image) 641static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
626{ 642{
627 asm("fxrstor (%0)":: "r" (image)); 643 asm("fxrstor (%0)":: "r" (image));
628} 644}
629 645
630static inline void fx_finit(void) 646static inline void kvm_fx_finit(void)
631{ 647{
632 asm("finit"); 648 asm("finit");
633} 649}
@@ -689,4 +705,34 @@ enum {
689 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 705 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
690 vcpu, 0, 0, 0, 0, 0, 0) 706 vcpu, 0, 0, 0, 0, 0, 0)
691 707
708#ifdef CONFIG_64BIT
709# define KVM_EX_ENTRY ".quad"
710# define KVM_EX_PUSH "pushq"
711#else
712# define KVM_EX_ENTRY ".long"
713# define KVM_EX_PUSH "pushl"
692#endif 714#endif
715
716/*
717 * Hardware virtualization extension instructions may fault if a
718 * reboot turns off virtualization while processes are running.
719 * Trap the fault and ignore the instruction if that happens.
720 */
721asmlinkage void kvm_handle_fault_on_reboot(void);
722
723#define __kvm_handle_fault_on_reboot(insn) \
724 "666: " insn "\n\t" \
725 ".pushsection .fixup, \"ax\" \n" \
726 "667: \n\t" \
727 KVM_EX_PUSH " $666b \n\t" \
728 "jmp kvm_handle_fault_on_reboot \n\t" \
729 ".popsection \n\t" \
730 ".pushsection __ex_table, \"a\" \n\t" \
731 KVM_EX_ENTRY " 666b, 667b \n\t" \
732 ".popsection"
733
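Usage, as the kvm code does via a local __ex() alias around its hardware-virtualization asm; the vmxon mnemonic and wrapper function below are illustrative only, not the driver's actual code:

    /* Editorial sketch; hypothetical wrapper around a VMX instruction. */
    static inline void hypothetical_vmxon(u64 phys_addr)
    {
            asm volatile(__kvm_handle_fault_on_reboot("vmxon %0")
                         : : "m" (phys_addr) : "memory", "cc");
    }

If the instruction faults — virtualization already switched off by a concurrent reboot — control transfers through the generated exception-table entry to kvm_handle_fault_on_reboot() instead of taking an oops.
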
734#define KVM_ARCH_WANT_MMU_NOTIFIER
735int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
736int kvm_age_hva(struct kvm *kvm, unsigned long hva);
737
738#endif /* ASM_X86__KVM_HOST_H */
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index 509845942070..30054fded4fb 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -1,5 +1,5 @@
1#ifndef __X86_KVM_PARA_H 1#ifndef ASM_X86__KVM_PARA_H
2#define __X86_KVM_PARA_H 2#define ASM_X86__KVM_PARA_H
3 3
4/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It 4/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
5 * should be used to determine that a VM is running under KVM. 5 * should be used to determine that a VM is running under KVM.
@@ -48,24 +48,6 @@ struct kvm_mmu_op_release_pt {
48#ifdef __KERNEL__ 48#ifdef __KERNEL__
49#include <asm/processor.h> 49#include <asm/processor.h>
50 50
51/* xen binary-compatible interface. See xen headers for details */
52struct kvm_vcpu_time_info {
53 uint32_t version;
54 uint32_t pad0;
55 uint64_t tsc_timestamp;
56 uint64_t system_time;
57 uint32_t tsc_to_system_mul;
58 int8_t tsc_shift;
59 int8_t pad[3];
60} __attribute__((__packed__)); /* 32 bytes */
61
62struct kvm_wall_clock {
63 uint32_t wc_version;
64 uint32_t wc_sec;
65 uint32_t wc_nsec;
66} __attribute__((__packed__));
67
68
69extern void kvmclock_init(void); 51extern void kvmclock_init(void);
70 52
71 53
@@ -89,7 +71,8 @@ static inline long kvm_hypercall0(unsigned int nr)
89 long ret; 71 long ret;
90 asm volatile(KVM_HYPERCALL 72 asm volatile(KVM_HYPERCALL
91 : "=a"(ret) 73 : "=a"(ret)
92 : "a"(nr)); 74 : "a"(nr)
75 : "memory");
93 return ret; 76 return ret;
94} 77}
95 78
@@ -98,7 +81,8 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
98 long ret; 81 long ret;
99 asm volatile(KVM_HYPERCALL 82 asm volatile(KVM_HYPERCALL
100 : "=a"(ret) 83 : "=a"(ret)
101 : "a"(nr), "b"(p1)); 84 : "a"(nr), "b"(p1)
85 : "memory");
102 return ret; 86 return ret;
103} 87}
104 88
@@ -108,7 +92,8 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
108 long ret; 92 long ret;
109 asm volatile(KVM_HYPERCALL 93 asm volatile(KVM_HYPERCALL
110 : "=a"(ret) 94 : "=a"(ret)
111 : "a"(nr), "b"(p1), "c"(p2)); 95 : "a"(nr), "b"(p1), "c"(p2)
96 : "memory");
112 return ret; 97 return ret;
113} 98}
114 99
@@ -118,7 +103,8 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
118 long ret; 103 long ret;
119 asm volatile(KVM_HYPERCALL 104 asm volatile(KVM_HYPERCALL
120 : "=a"(ret) 105 : "=a"(ret)
121 : "a"(nr), "b"(p1), "c"(p2), "d"(p3)); 106 : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
107 : "memory");
122 return ret; 108 return ret;
123} 109}
124 110
@@ -129,7 +115,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
129 long ret; 115 long ret;
130 asm volatile(KVM_HYPERCALL 116 asm volatile(KVM_HYPERCALL
131 : "=a"(ret) 117 : "=a"(ret)
132 : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)); 118 : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
119 : "memory");
133 return ret; 120 return ret;
134} 121}
135 122
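The added "memory" clobber makes each hypercall a compiler barrier: the hypervisor may read or write guest memory passed by address, so the compiler must flush pending stores before the vmcall and reload values afterwards. A sketch of the kind of caller this protects (the wrapper name and buffer are hypothetical; KVM_HC_MMU_OP is the existing hypercall number from <linux/kvm_para.h>):

    /* Editorial sketch: stores into 'buf' must not be sunk past the hypercall. */
    static long hypothetical_mmu_op(void *buf, unsigned long len)
    {
            return kvm_hypercall2(KVM_HC_MMU_OP, __pa(buf), len);
    }
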
@@ -157,4 +144,4 @@ static inline unsigned int kvm_arch_para_features(void)
157 144
158#endif 145#endif
159 146
160#endif 147#endif /* ASM_X86__KVM_PARA_H */
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index b877bbd2d3a7..e2d9b030c1ac 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -8,8 +8,8 @@
8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
9 */ 9 */
10 10
11#ifndef __X86_EMULATE_H__ 11#ifndef ASM_X86__KVM_X86_EMULATE_H
12#define __X86_EMULATE_H__ 12#define ASM_X86__KVM_X86_EMULATE_H
13 13
14struct x86_emulate_ctxt; 14struct x86_emulate_ctxt;
15 15
@@ -124,7 +124,8 @@ struct decode_cache {
124 u8 rex_prefix; 124 u8 rex_prefix;
125 struct operand src; 125 struct operand src;
126 struct operand dst; 126 struct operand dst;
127 unsigned long *override_base; 127 bool has_seg_override;
128 u8 seg_override;
128 unsigned int d; 129 unsigned int d;
129 unsigned long regs[NR_VCPU_REGS]; 130 unsigned long regs[NR_VCPU_REGS];
130 unsigned long eip; 131 unsigned long eip;
@@ -134,6 +135,7 @@ struct decode_cache {
134 u8 modrm_reg; 135 u8 modrm_reg;
135 u8 modrm_rm; 136 u8 modrm_rm;
136 u8 use_modrm_ea; 137 u8 use_modrm_ea;
138 bool rip_relative;
137 unsigned long modrm_ea; 139 unsigned long modrm_ea;
138 void *modrm_ptr; 140 void *modrm_ptr;
139 unsigned long modrm_val; 141 unsigned long modrm_val;
@@ -150,12 +152,7 @@ struct x86_emulate_ctxt {
150 /* Emulated execution mode, represented by an X86EMUL_MODE value. */ 152 /* Emulated execution mode, represented by an X86EMUL_MODE value. */
151 int mode; 153 int mode;
152 154
153 unsigned long cs_base; 155 u32 cs_base;
154 unsigned long ds_base;
155 unsigned long es_base;
156 unsigned long ss_base;
157 unsigned long gs_base;
158 unsigned long fs_base;
159 156
160 /* decode cache */ 157 /* decode cache */
161 158
@@ -184,4 +181,4 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
184int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, 181int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
185 struct x86_emulate_ops *ops); 182 struct x86_emulate_ops *ops);
186 183
187#endif /* __X86_EMULATE_H__ */ 184#endif /* ASM_X86__KVM_X86_EMULATE_H */
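
[Editorial note] Beyond the guard rename, this hunk replaces the per-segment base fields of x86_emulate_ctxt with a pair of decode-cache fields: has_seg_override records whether a segment override prefix was decoded, and seg_override names the segment. A hedged sketch of how a consumer might combine them; seg_base() and VCPU_SREG_DS are stand-ins here, not names confirmed by this patch:

        static unsigned long ea_to_linear(struct decode_cache *c,
                                          unsigned long ea)
        {
                /* default to DS unless an override prefix was seen */
                unsigned seg = c->has_seg_override ? c->seg_override
                                                   : VCPU_SREG_DS;
                return seg_base(seg) + ea;      /* seg_base() is hypothetical */
        }
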
diff --git a/include/asm-x86/ldt.h b/include/asm-x86/ldt.h
index 20c597242b53..a5228504d867 100644
--- a/include/asm-x86/ldt.h
+++ b/include/asm-x86/ldt.h
@@ -3,8 +3,8 @@
3 * 3 *
4 * Definitions of structures used with the modify_ldt system call. 4 * Definitions of structures used with the modify_ldt system call.
5 */ 5 */
6#ifndef _ASM_X86_LDT_H 6#ifndef ASM_X86__LDT_H
7#define _ASM_X86_LDT_H 7#define ASM_X86__LDT_H
8 8
9/* Maximum number of LDT entries supported. */ 9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192 10#define LDT_ENTRIES 8192
@@ -37,4 +37,4 @@ struct user_desc {
37#define MODIFY_LDT_CONTENTS_CODE 2 37#define MODIFY_LDT_CONTENTS_CODE 2
38 38
39#endif /* !__ASSEMBLY__ */ 39#endif /* !__ASSEMBLY__ */
40#endif 40#endif /* ASM_X86__LDT_H */
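
[Editorial note] The guard renames in this series follow one mechanical rule: take the header's path below include/asm-x86, uppercase it, turn directory separators into double underscores, and turn the .h suffix into _H. Applied to include/asm-x86/mach-default/apm.h (whose diff appears further below), the skeleton becomes:

        #ifndef ASM_X86__MACH_DEFAULT__APM_H
        #define ASM_X86__MACH_DEFAULT__APM_H

        /* ... declarations ... */

        #endif /* ASM_X86__MACH_DEFAULT__APM_H */
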
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index be4a7247fa2b..7505e947ed27 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -1,5 +1,5 @@
1#ifndef _X86_LGUEST_H 1#ifndef ASM_X86__LGUEST_H
2#define _X86_LGUEST_H 2#define ASM_X86__LGUEST_H
3 3
4#define GDT_ENTRY_LGUEST_CS 10 4#define GDT_ENTRY_LGUEST_CS 10
5#define GDT_ENTRY_LGUEST_DS 11 5#define GDT_ENTRY_LGUEST_DS 11
@@ -91,4 +91,4 @@ static inline void lguest_set_ts(void)
91 91
92#endif /* __ASSEMBLY__ */ 92#endif /* __ASSEMBLY__ */
93 93
94#endif 94#endif /* ASM_X86__LGUEST_H */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index a3241f28e34a..8f034ba4b53e 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -1,6 +1,6 @@
1/* Architecture specific portion of the lguest hypercalls */ 1/* Architecture specific portion of the lguest hypercalls */
2#ifndef _X86_LGUEST_HCALL_H 2#ifndef ASM_X86__LGUEST_HCALL_H
3#define _X86_LGUEST_HCALL_H 3#define ASM_X86__LGUEST_HCALL_H
4 4
5#define LHCALL_FLUSH_ASYNC 0 5#define LHCALL_FLUSH_ASYNC 0
6#define LHCALL_LGUEST_INIT 1 6#define LHCALL_LGUEST_INIT 1
@@ -68,4 +68,4 @@ struct hcall_args {
68}; 68};
69 69
70#endif /* !__ASSEMBLY__ */ 70#endif /* !__ASSEMBLY__ */
71#endif /* _I386_LGUEST_HCALL_H */ 71#endif /* ASM_X86__LGUEST_HCALL_H */
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index 64e444f8e85b..42d8b62ee8ab 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_LINKAGE_H 1#ifndef ASM_X86__LINKAGE_H
2#define __ASM_LINKAGE_H 2#define ASM_X86__LINKAGE_H
3 3
4#undef notrace 4#undef notrace
5#define notrace __attribute__((no_instrument_function)) 5#define notrace __attribute__((no_instrument_function))
@@ -57,5 +57,5 @@
57#define __ALIGN_STR ".align 16,0x90" 57#define __ALIGN_STR ".align 16,0x90"
58#endif 58#endif
59 59
60#endif 60#endif /* ASM_X86__LINKAGE_H */
61 61
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index 330a72496abd..ae91994fd6c9 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -1,5 +1,5 @@
1#ifndef _ARCH_LOCAL_H 1#ifndef ASM_X86__LOCAL_H
2#define _ARCH_LOCAL_H 2#define ASM_X86__LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5 5
@@ -232,4 +232,4 @@ static inline long local_sub_return(long i, local_t *l)
232#define __cpu_local_add(i, l) cpu_local_add((i), (l)) 232#define __cpu_local_add(i, l) cpu_local_add((i), (l))
233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) 233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
234 234
235#endif /* _ARCH_LOCAL_H */ 235#endif /* ASM_X86__LOCAL_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_apicdef.h b/include/asm-x86/mach-bigsmp/mach_apicdef.h
deleted file mode 100644
index a58ab5a75c8c..000000000000
--- a/include/asm-x86/mach-bigsmp/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-x86/mach-bigsmp/mach_mpspec.h b/include/asm-x86/mach-bigsmp/mach_mpspec.h
deleted file mode 100644
index 6b5dadcf1d0e..000000000000
--- a/include/asm-x86/mach-bigsmp/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#define MAX_MP_BUSSES 32
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-default/apm.h b/include/asm-x86/mach-default/apm.h
index 989f34c37d32..2aa61b54fbd5 100644
--- a/include/asm-x86/mach-default/apm.h
+++ b/include/asm-x86/mach-default/apm.h
@@ -3,8 +3,8 @@
3 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> 3 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5 5
6#ifndef _ASM_APM_H 6#ifndef ASM_X86__MACH_DEFAULT__APM_H
7#define _ASM_APM_H 7#define ASM_X86__MACH_DEFAULT__APM_H
8 8
9#ifdef APM_ZERO_SEGS 9#ifdef APM_ZERO_SEGS
10# define APM_DO_ZERO_SEGS \ 10# define APM_DO_ZERO_SEGS \
@@ -70,4 +70,4 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
70 return error; 70 return error;
71} 71}
72 72
73#endif /* _ASM_APM_H */ 73#endif /* ASM_X86__MACH_DEFAULT__APM_H */
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc861469bdba..9283b60a1dd2 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) 13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
14BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) 14BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
15BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) 15BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
16BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
16#endif 17#endif
17 18
18/* 19/*
diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h
deleted file mode 100644
index 881c63ca61ad..000000000000
--- a/include/asm-x86/mach-default/irq_vectors.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * This file should contain #defines for all of the interrupt vector
3 * numbers used by this architecture.
4 *
5 * In addition, there are some standard defines:
6 *
7 * FIRST_EXTERNAL_VECTOR:
8 * The first free place for external interrupts
9 *
10 * SYSCALL_VECTOR:
11 * The IRQ vector through which a syscall makes the user-to-kernel
12 * transition.
13 *
14 * TIMER_IRQ:
15 * The IRQ number the timer interrupt comes in at.
16 *
17 * NR_IRQS:
18 * The total number of interrupt vectors (including all the
19 * architecture specific interrupts) needed.
20 *
21 */
22#ifndef _ASM_IRQ_VECTORS_H
23#define _ASM_IRQ_VECTORS_H
24
25/*
26 * IDT vectors usable for external interrupt sources start
27 * at 0x20:
28 */
29#define FIRST_EXTERNAL_VECTOR 0x20
30
31#define SYSCALL_VECTOR 0x80
32
33/*
34 * Vectors 0x20-0x2f are used for ISA interrupts.
35 */
36
37/*
38 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
39 *
40 * some of the following vectors are 'rare', they are merged
41 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
42 * TLB, reschedule and local APIC vectors are performance-critical.
43 *
44 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
45 */
46#define SPURIOUS_APIC_VECTOR 0xff
47#define ERROR_APIC_VECTOR 0xfe
48#define INVALIDATE_TLB_VECTOR 0xfd
49#define RESCHEDULE_VECTOR 0xfc
50#define CALL_FUNCTION_VECTOR 0xfb
51
52#define THERMAL_APIC_VECTOR 0xf0
53/*
54 * Local APIC timer IRQ vector is on a different priority level,
55 * to work around the 'lost local interrupt if more than 2 IRQ
56 * sources per level' errata.
57 */
58#define LOCAL_TIMER_VECTOR 0xef
59
60/*
61 * First APIC vector available to drivers: (vectors 0x30-0xee)
62 * we start at 0x31 to spread out vectors evenly between priority
63 * levels. (0x80 is the syscall vector)
64 */
65#define FIRST_DEVICE_VECTOR 0x31
66#define FIRST_SYSTEM_VECTOR 0xef
67
68#define TIMER_IRQ 0
69
70/*
71 * 16 8259A IRQ's, 208 potential APIC interrupt sources.
72 * Right now the APIC is mostly only used for SMP.
73 * 256 vectors is an architectural limit. (we can have
74 * more than 256 devices theoretically, but they will
75 * have to use shared interrupts)
76 * Since vectors 0x00-0x1f are used/reserved for the CPU,
77 * the usable vector space is 0x20-0xff (224 vectors)
78 */
79
80/*
81 * The maximum number of vectors supported by i386 processors
82 * is limited to 256. For processors other than i386, NR_VECTORS
83 * should be changed accordingly.
84 */
85#define NR_VECTORS 256
86
87#include "irq_vectors_limits.h"
88
89#define FPU_IRQ 13
90
91#define FIRST_VM86_IRQ 3
92#define LAST_VM86_IRQ 15
93#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
94
95
96#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/mach-default/irq_vectors_limits.h b/include/asm-x86/mach-default/irq_vectors_limits.h
deleted file mode 100644
index a90c7a60109f..000000000000
--- a/include/asm-x86/mach-default/irq_vectors_limits.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H
3
4#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
5#define NR_IRQS 224
6# if (224 >= 32 * NR_CPUS)
7# define NR_IRQ_VECTORS NR_IRQS
8# else
9# define NR_IRQ_VECTORS (32 * NR_CPUS)
10# endif
11#else
12#define NR_IRQS 16
13#define NR_IRQ_VECTORS NR_IRQS
14#endif
15
16#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index 21003b56ae95..2a330a41b3dd 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_DEFAULT__MACH_APIC_H
3 3
4#ifdef CONFIG_X86_LOCAL_APIC 4#ifdef CONFIG_X86_LOCAL_APIC
5 5
@@ -30,6 +30,8 @@ static inline cpumask_t target_cpus(void)
30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) 30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
31#define phys_pkg_id (genapic->phys_pkg_id) 31#define phys_pkg_id (genapic->phys_pkg_id)
32#define vector_allocation_domain (genapic->vector_allocation_domain) 32#define vector_allocation_domain (genapic->vector_allocation_domain)
33#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
34#define send_IPI_self (genapic->send_IPI_self)
33extern void setup_apic_routing(void); 35extern void setup_apic_routing(void);
34#else 36#else
35#define INT_DELIVERY_MODE dest_LowestPrio 37#define INT_DELIVERY_MODE dest_LowestPrio
@@ -46,15 +48,15 @@ static inline void init_apic_ldr(void)
46{ 48{
47 unsigned long val; 49 unsigned long val;
48 50
49 apic_write_around(APIC_DFR, APIC_DFR_VALUE); 51 apic_write(APIC_DFR, APIC_DFR_VALUE);
50 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 52 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
51 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); 53 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
52 apic_write_around(APIC_LDR, val); 54 apic_write(APIC_LDR, val);
53} 55}
54 56
55static inline int apic_id_registered(void) 57static inline int apic_id_registered(void)
56{ 58{
57 return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); 59 return physid_isset(read_apic_id(), phys_cpu_present_map);
58} 60}
59 61
60static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 62static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
@@ -77,7 +79,11 @@ static inline void setup_apic_routing(void)
77 79
78static inline int apicid_to_node(int logical_apicid) 80static inline int apicid_to_node(int logical_apicid)
79{ 81{
82#ifdef CONFIG_SMP
83 return apicid_2_node[hard_smp_processor_id()];
84#else
80 return 0; 85 return 0;
86#endif
81} 87}
82#endif 88#endif
83 89
@@ -134,4 +140,4 @@ static inline void enable_apic_mode(void)
134} 140}
135 141
136#endif /* CONFIG_X86_LOCAL_APIC */ 142#endif /* CONFIG_X86_LOCAL_APIC */
137#endif /* __ASM_MACH_APIC_H */ 143#endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
index e4b29ba37de6..0c2d41c41b20 100644
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ b/include/asm-x86/mach-default/mach_apicdef.h
@@ -1,12 +1,12 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
3 3
4#include <asm/apic.h> 4#include <asm/apic.h>
5 5
6#ifdef CONFIG_X86_64 6#ifdef CONFIG_X86_64
7#define APIC_ID_MASK (0xFFu<<24) 7#define APIC_ID_MASK (genapic->apic_id_mask)
8#define GET_APIC_ID(x) (((x)>>24)&0xFFu) 8#define GET_APIC_ID(x) (genapic->get_apic_id(x))
9#define SET_APIC_ID(x) (((x)<<24)) 9#define SET_APIC_ID(x) (genapic->set_apic_id(x))
10#else 10#else
11#define APIC_ID_MASK (0xF<<24) 11#define APIC_ID_MASK (0xF<<24)
12static inline unsigned get_apic_id(unsigned long x) 12static inline unsigned get_apic_id(unsigned long x)
@@ -21,4 +21,4 @@ static inline unsigned get_apic_id(unsigned long x)
21#define GET_APIC_ID(x) get_apic_id(x) 21#define GET_APIC_ID(x) get_apic_id(x)
22#endif 22#endif
23 23
24#endif 24#endif /* ASM_X86__MACH_DEFAULT__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h
index be323364e68f..674bc7e50c35 100644
--- a/include/asm-x86/mach-default/mach_ipi.h
+++ b/include/asm-x86/mach-default/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_DEFAULT__MACH_IPI_H
3 3
4/* Avoid include hell */ 4/* Avoid include hell */
5#define NMI_VECTOR 0x02 5#define NMI_VECTOR 0x02
@@ -61,4 +61,4 @@ static inline void send_IPI_all(int vector)
61} 61}
62#endif 62#endif
63 63
64#endif /* __ASM_MACH_IPI_H */ 64#endif /* ASM_X86__MACH_DEFAULT__MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h
index d14108505bb8..9c381f2815ac 100644
--- a/include/asm-x86/mach-default/mach_mpparse.h
+++ b/include/asm-x86/mach-default/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
3 3
4static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 4static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid) 5 char *productid)
@@ -14,4 +14,4 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
14} 14}
15 15
16 16
17#endif /* __ASM_MACH_MPPARSE_H */ 17#endif /* ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-default/mach_mpspec.h b/include/asm-x86/mach-default/mach_mpspec.h
index 51c9a9775932..d77646f011f1 100644
--- a/include/asm-x86/mach-default/mach_mpspec.h
+++ b/include/asm-x86/mach-default/mach_mpspec.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPSPEC_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H 2#define ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
3 3
4#define MAX_IRQ_SOURCES 256 4#define MAX_IRQ_SOURCES 256
5 5
@@ -9,4 +9,4 @@
9#define MAX_MP_BUSSES 32 9#define MAX_MP_BUSSES 32
10#endif 10#endif
11 11
12#endif /* __ASM_MACH_MPSPEC_H */ 12#endif /* ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-default/mach_timer.h b/include/asm-x86/mach-default/mach_timer.h
index 4b76e536cd98..990b15833834 100644
--- a/include/asm-x86/mach-default/mach_timer.h
+++ b/include/asm-x86/mach-default/mach_timer.h
@@ -10,8 +10,8 @@
10 * directly because of the awkward 8-bit access mechanism of the 82C54 10 * directly because of the awkward 8-bit access mechanism of the 82C54
11 * device. 11 * device.
12 */ 12 */
13#ifndef _MACH_TIMER_H 13#ifndef ASM_X86__MACH_DEFAULT__MACH_TIMER_H
14#define _MACH_TIMER_H 14#define ASM_X86__MACH_DEFAULT__MACH_TIMER_H
15 15
16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ 16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
17#define CALIBRATE_LATCH \ 17#define CALIBRATE_LATCH \
@@ -45,4 +45,4 @@ static inline void mach_countup(unsigned long *count_p)
45 *count_p = count; 45 *count_p = count;
46} 46}
47 47
48#endif /* !_MACH_TIMER_H */ 48#endif /* ASM_X86__MACH_DEFAULT__MACH_TIMER_H */
diff --git a/include/asm-x86/mach-default/mach_traps.h b/include/asm-x86/mach-default/mach_traps.h
index 2fe7705c0484..de9ac3f5c4ce 100644
--- a/include/asm-x86/mach-default/mach_traps.h
+++ b/include/asm-x86/mach-default/mach_traps.h
@@ -2,8 +2,8 @@
2 * Machine specific NMI handling for generic. 2 * Machine specific NMI handling for generic.
3 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> 3 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5#ifndef _MACH_TRAPS_H 5#ifndef ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
6#define _MACH_TRAPS_H 6#define ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
7 7
8#include <asm/mc146818rtc.h> 8#include <asm/mc146818rtc.h>
9 9
@@ -36,4 +36,4 @@ static inline void reassert_nmi(void)
36 unlock_cmos(); 36 unlock_cmos();
37} 37}
38 38
39#endif /* !_MACH_TRAPS_H */ 39#endif /* ASM_X86__MACH_DEFAULT__MACH_TRAPS_H */
diff --git a/include/asm-x86/mach-default/mach_wakecpu.h b/include/asm-x86/mach-default/mach_wakecpu.h
index 3ebb17893aa5..361b810f5160 100644
--- a/include/asm-x86/mach-default/mach_wakecpu.h
+++ b/include/asm-x86/mach-default/mach_wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
3 3
4/* 4/*
5 * This file copes with machines that wake up secondary CPUs by the 5 * This file copes with machines that wake up secondary CPUs by the
@@ -39,4 +39,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
39 #define inquire_remote_apic(apicid) {} 39 #define inquire_remote_apic(apicid) {}
40#endif 40#endif
41 41
42#endif /* __ASM_MACH_WAKECPU_H */ 42#endif /* ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-default/setup_arch.h b/include/asm-x86/mach-default/setup_arch.h
index 605e3ccb991b..38846208b548 100644
--- a/include/asm-x86/mach-default/setup_arch.h
+++ b/include/asm-x86/mach-default/setup_arch.h
@@ -1,7 +1,3 @@
1/* Hook to call BIOS initialisation function */ 1/* Hook to call BIOS initialisation function */
2 2
3/* no action for generic */ 3/* no action for generic */
4
5#ifndef ARCH_SETUP
6#define ARCH_SETUP
7#endif
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 56d0e1fa0258..dbab36d64d48 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -3,18 +3,20 @@
3 3
4static inline void smpboot_clear_io_apic_irqs(void) 4static inline void smpboot_clear_io_apic_irqs(void)
5{ 5{
6#ifdef CONFIG_X86_IO_APIC
6 io_apic_irqs = 0; 7 io_apic_irqs = 0;
8#endif
7} 9}
8 10
9static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) 11static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
10{ 12{
11 CMOS_WRITE(0xa, 0xf); 13 CMOS_WRITE(0xa, 0xf);
12 local_flush_tlb(); 14 local_flush_tlb();
13 Dprintk("1.\n"); 15 pr_debug("1.\n");
14 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; 16 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
15 Dprintk("2.\n"); 17 pr_debug("2.\n");
16 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; 18 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
17 Dprintk("3.\n"); 19 pr_debug("3.\n");
18} 20}
19 21
20static inline void smpboot_restore_warm_reset_vector(void) 22static inline void smpboot_restore_warm_reset_vector(void)
@@ -35,17 +37,23 @@ static inline void smpboot_restore_warm_reset_vector(void)
35 37
36static inline void __init smpboot_setup_io_apic(void) 38static inline void __init smpboot_setup_io_apic(void)
37{ 39{
40#ifdef CONFIG_X86_IO_APIC
38 /* 41 /*
39 * Here we can be sure that there is an IO-APIC in the system. Let's 42 * Here we can be sure that there is an IO-APIC in the system. Let's
40 * go and set it up: 43 * go and set it up:
41 */ 44 */
42 if (!skip_ioapic_setup && nr_ioapics) 45 if (!skip_ioapic_setup && nr_ioapics)
43 setup_IO_APIC(); 46 setup_IO_APIC();
44 else 47 else {
45 nr_ioapics = 0; 48 nr_ioapics = 0;
49 localise_nmi_watchdog();
50 }
51#endif
46} 52}
47 53
48static inline void smpboot_clear_io_apic(void) 54static inline void smpboot_clear_io_apic(void)
49{ 55{
56#ifdef CONFIG_X86_IO_APIC
50 nr_ioapics = 0; 57 nr_ioapics = 0;
58#endif
51} 59}
diff --git a/include/asm-x86/mach-es7000/mach_mpspec.h b/include/asm-x86/mach-es7000/mach_mpspec.h
deleted file mode 100644
index b1f5039d4506..000000000000
--- a/include/asm-x86/mach-es7000/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#define MAX_MP_BUSSES 256
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-generic/gpio.h b/include/asm-x86/mach-generic/gpio.h
index 5305dcb96df2..6ce0f7786ef8 100644
--- a/include/asm-x86/mach-generic/gpio.h
+++ b/include/asm-x86/mach-generic/gpio.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_GENERIC_GPIO_H 1#ifndef ASM_X86__MACH_GENERIC__GPIO_H
2#define __ASM_MACH_GENERIC_GPIO_H 2#define ASM_X86__MACH_GENERIC__GPIO_H
3 3
4int gpio_request(unsigned gpio, const char *label); 4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio); 5void gpio_free(unsigned gpio);
@@ -12,4 +12,4 @@ int irq_to_gpio(unsigned irq);
12 12
13#include <asm-generic/gpio.h> /* cansleep wrappers */ 13#include <asm-generic/gpio.h> /* cansleep wrappers */
14 14
15#endif /* __ASM_MACH_GENERIC_GPIO_H */ 15#endif /* ASM_X86__MACH_GENERIC__GPIO_H */
diff --git a/include/asm-x86/mach-generic/irq_vectors_limits.h b/include/asm-x86/mach-generic/irq_vectors_limits.h
index 890ce3f5e09a..f7870e1a220d 100644
--- a/include/asm-x86/mach-generic/irq_vectors_limits.h
+++ b/include/asm-x86/mach-generic/irq_vectors_limits.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H 1#ifndef ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H 2#define ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
3 3
4/* 4/*
5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, 5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
@@ -11,4 +11,4 @@
11#define NR_IRQS 224 11#define NR_IRQS 224
12#define NR_IRQ_VECTORS 1024 12#define NR_IRQ_VECTORS 1024
13 13
14#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ 14#endif /* ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h
index 6eff343e1233..5d010c6881dd 100644
--- a/include/asm-x86/mach-generic/mach_apic.h
+++ b/include/asm-x86/mach-generic/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_GENERIC__MACH_APIC_H
3 3
4#include <asm/genapic.h> 4#include <asm/genapic.h>
5 5
@@ -29,4 +29,4 @@
29 29
30extern void generic_bigsmp_probe(void); 30extern void generic_bigsmp_probe(void);
31 31
32#endif /* __ASM_MACH_APIC_H */ 32#endif /* ASM_X86__MACH_GENERIC__MACH_APIC_H */
diff --git a/include/asm-x86/mach-generic/mach_apicdef.h b/include/asm-x86/mach-generic/mach_apicdef.h
index 28ed98972ca8..1657f38b8f27 100644
--- a/include/asm-x86/mach-generic/mach_apicdef.h
+++ b/include/asm-x86/mach-generic/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef _GENAPIC_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_APICDEF_H
2#define _GENAPIC_MACH_APICDEF_H 1 2#define ASM_X86__MACH_GENERIC__MACH_APICDEF_H
3 3
4#ifndef APIC_DEFINITION 4#ifndef APIC_DEFINITION
5#include <asm/genapic.h> 5#include <asm/genapic.h>
@@ -8,4 +8,4 @@
8#define APIC_ID_MASK (genapic->apic_id_mask) 8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif 9#endif
10 10
11#endif 11#endif /* ASM_X86__MACH_GENERIC__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-generic/mach_ipi.h b/include/asm-x86/mach-generic/mach_ipi.h
index 441b0fe3ed1d..f67433dbd65f 100644
--- a/include/asm-x86/mach-generic/mach_ipi.h
+++ b/include/asm-x86/mach-generic/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef _MACH_IPI_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_IPI_H
2#define _MACH_IPI_H 1 2#define ASM_X86__MACH_GENERIC__MACH_IPI_H
3 3
4#include <asm/genapic.h> 4#include <asm/genapic.h>
5 5
@@ -7,4 +7,4 @@
7#define send_IPI_allbutself (genapic->send_IPI_allbutself) 7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all) 8#define send_IPI_all (genapic->send_IPI_all)
9 9
10#endif 10#endif /* ASM_X86__MACH_GENERIC__MACH_IPI_H */
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h
index 0d0b5ba2e9d1..3115564e557c 100644
--- a/include/asm-x86/mach-generic/mach_mpparse.h
+++ b/include/asm-x86/mach-generic/mach_mpparse.h
@@ -1,7 +1,10 @@
1#ifndef _MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
2#define _MACH_MPPARSE_H 1 2#define ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
3 3
4int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid);
5int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
6 4
7#endif 5extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
6 char *productid);
7
8extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
9
10#endif /* ASM_X86__MACH_GENERIC__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-generic/mach_mpspec.h b/include/asm-x86/mach-generic/mach_mpspec.h
index 9ef0b941bb22..6061b153613e 100644
--- a/include/asm-x86/mach-generic/mach_mpspec.h
+++ b/include/asm-x86/mach-generic/mach_mpspec.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPSPEC_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H 2#define ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
3 3
4#define MAX_IRQ_SOURCES 256 4#define MAX_IRQ_SOURCES 256
5 5
@@ -7,4 +7,6 @@
7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ 7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
8#define MAX_MP_BUSSES 260 8#define MAX_MP_BUSSES 260
9 9
10#endif /* __ASM_MACH_MPSPEC_H */ 10extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
11 char *productid);
12#endif /* ASM_X86__MACH_GENERIC__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h
deleted file mode 100644
index 459b12401187..000000000000
--- a/include/asm-x86/mach-numaq/mach_mpparse.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H
3
4extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
5 struct mpc_config_translation *translation);
6extern void mpc_oem_pci_bus(struct mpc_config_bus *m,
7 struct mpc_config_translation *translation);
8
9/* Hook from generic ACPI tables.c */
10static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
11{
12}
13
14#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-numaq/mach_mpspec.h b/include/asm-x86/mach-numaq/mach_mpspec.h
deleted file mode 100644
index dffb09856f8f..000000000000
--- a/include/asm-x86/mach-numaq/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 512
5
6#define MAX_MP_BUSSES 32
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index acce0b7d397b..94b6cdf532e2 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -1,5 +1,7 @@
1#ifndef _RDC321X_GPIO_H 1#ifndef ASM_X86__MACH_RDC321X__GPIO_H
2#define _RDC321X_GPIO_H 2#define ASM_X86__MACH_RDC321X__GPIO_H
3
4#include <linux/kernel.h>
3 5
4extern int rdc_gpio_get_value(unsigned gpio); 6extern int rdc_gpio_get_value(unsigned gpio);
5extern void rdc_gpio_set_value(unsigned gpio, int value); 7extern void rdc_gpio_set_value(unsigned gpio, int value);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)
18 20
19static inline void gpio_free(unsigned gpio) 21static inline void gpio_free(unsigned gpio)
20{ 22{
23 might_sleep();
21 rdc_gpio_free(gpio); 24 rdc_gpio_free(gpio);
22} 25}
23 26
@@ -54,4 +57,4 @@ static inline int irq_to_gpio(unsigned irq)
54/* For cansleep */ 57/* For cansleep */
55#include <asm-generic/gpio.h> 58#include <asm-generic/gpio.h>
56 59
57#endif /* _RDC321X_GPIO_H_ */ 60#endif /* ASM_X86__MACH_RDC321X__GPIO_H */
diff --git a/include/asm-x86/mach-summit/mach_apicdef.h b/include/asm-x86/mach-summit/mach_apicdef.h
deleted file mode 100644
index a58ab5a75c8c..000000000000
--- a/include/asm-x86/mach-summit/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-x86/mach-summit/mach_mpspec.h b/include/asm-x86/mach-summit/mach_mpspec.h
deleted file mode 100644
index bd765523511a..000000000000
--- a/include/asm-x86/mach-summit/mach_mpspec.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
7#define MAX_MP_BUSSES 260
8
9#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
deleted file mode 100644
index b183fa6d83d9..000000000000
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * The following vectors are part of the Linux architecture, there
3 * is no hardware IRQ pin equivalent for them, they are triggered
4 * through the ICC by us (IPIs)
5 */
6#ifdef CONFIG_X86_SMP
7BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
8BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
9BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
10#endif
11
12/*
13 * every pentium local APIC has two 'local interrupts', with a
14 * soft-definable vector attached to both interrupts, one of
15 * which is a timer interrupt, the other one is error counter
16 * overflow. Linux uses the local APIC timer interrupt to get
17 * a much simpler SMP time architecture:
18 */
19#ifdef CONFIG_X86_LOCAL_APIC
20BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
21BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
22BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
23#endif
diff --git a/include/asm-x86/mach-visws/irq_vectors.h b/include/asm-x86/mach-visws/irq_vectors.h
deleted file mode 100644
index cb572d8db505..000000000000
--- a/include/asm-x86/mach-visws/irq_vectors.h
+++ /dev/null
@@ -1,62 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_H
2#define _ASM_IRQ_VECTORS_H
3
4/*
5 * IDT vectors usable for external interrupt sources start
6 * at 0x20:
7 */
8#define FIRST_EXTERNAL_VECTOR 0x20
9
10#define SYSCALL_VECTOR 0x80
11
12/*
13 * Vectors 0x20-0x2f are used for ISA interrupts.
14 */
15
16/*
17 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
18 *
19 * some of the following vectors are 'rare', they are merged
20 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
21 * TLB, reschedule and local APIC vectors are performance-critical.
22 *
23 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
24 */
25#define SPURIOUS_APIC_VECTOR 0xff
26#define ERROR_APIC_VECTOR 0xfe
27#define INVALIDATE_TLB_VECTOR 0xfd
28#define RESCHEDULE_VECTOR 0xfc
29#define CALL_FUNCTION_VECTOR 0xfb
30
31#define THERMAL_APIC_VECTOR 0xf0
32/*
33 * Local APIC timer IRQ vector is on a different priority level,
34 * to work around the 'lost local interrupt if more than 2 IRQ
35 * sources per level' errata.
36 */
37#define LOCAL_TIMER_VECTOR 0xef
38
39/*
40 * First APIC vector available to drivers: (vectors 0x30-0xee)
41 * we start at 0x31 to spread out vectors evenly between priority
42 * levels. (0x80 is the syscall vector)
43 */
44#define FIRST_DEVICE_VECTOR 0x31
45#define FIRST_SYSTEM_VECTOR 0xef
46
47#define TIMER_IRQ 0
48
49/*
50 * IRQ definitions
51 */
52#define NR_VECTORS 256
53#define NR_IRQS 224
54#define NR_IRQ_VECTORS NR_IRQS
55
56#define FPU_IRQ 13
57
58#define FIRST_VM86_IRQ 3
59#define LAST_VM86_IRQ 15
60#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
61
62#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h
deleted file mode 100644
index a9ef33a8a995..000000000000
--- a/include/asm-x86/mach-visws/mach_apic.h
+++ /dev/null
@@ -1,103 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#include <mach_apicdef.h>
5#include <asm/smp.h>
6
7#define APIC_DFR_VALUE (APIC_DFR_FLAT)
8
9#define no_balance_irq (0)
10#define esr_disable (0)
11
12#define INT_DELIVERY_MODE dest_LowestPrio
13#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
14
15#ifdef CONFIG_SMP
16 #define TARGET_CPUS cpu_online_map
17#else
18 #define TARGET_CPUS cpumask_of_cpu(0)
19#endif
20
21#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap)
22#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map)
23
24static inline int apic_id_registered(void)
25{
26 return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
27}
28
29/*
30 * Set up the logical destination ID.
31 *
32 * Intel recommends to set DFR, LDR and TPR before enabling
33 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
34 * document number 292116). So here it goes...
35 */
36static inline void init_apic_ldr(void)
37{
38 unsigned long val;
39
40 apic_write_around(APIC_DFR, APIC_DFR_VALUE);
41 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
42 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
43 apic_write_around(APIC_LDR, val);
44}
45
46static inline void summit_check(char *oem, char *productid)
47{
48}
49
50static inline void setup_apic_routing(void)
51{
52}
53
54static inline int apicid_to_node(int logical_apicid)
55{
56 return 0;
57}
58
59/* Mapping from cpu number to logical apicid */
60static inline int cpu_to_logical_apicid(int cpu)
61{
62 return 1 << cpu;
63}
64
65static inline int cpu_present_to_apicid(int mps_cpu)
66{
67 if (mps_cpu < get_physical_broadcast())
68 return mps_cpu;
69 else
70 return BAD_APICID;
71}
72
73static inline physid_mask_t apicid_to_cpu_present(int apicid)
74{
75 return physid_mask_of_physid(apicid);
76}
77
78#define WAKE_SECONDARY_VIA_INIT
79
80static inline void setup_portio_remap(void)
81{
82}
83
84static inline void enable_apic_mode(void)
85{
86}
87
88static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
89{
90 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
91}
92
93static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
94{
95 return cpus_addr(cpumask)[0];
96}
97
98static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
99{
100 return cpuid_apic >> index_msb;
101}
102
103#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/mach-visws/mach_apicdef.h b/include/asm-x86/mach-visws/mach_apicdef.h
deleted file mode 100644
index 826cfa97d778..000000000000
--- a/include/asm-x86/mach-visws/mach_apicdef.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xF);
9}
10#define GET_APIC_ID(x) get_apic_id(x)
11
12#endif
diff --git a/include/asm-x86/mach-visws/setup_arch.h b/include/asm-x86/mach-visws/setup_arch.h
deleted file mode 100644
index 33f700ef6831..000000000000
--- a/include/asm-x86/mach-visws/setup_arch.h
+++ /dev/null
@@ -1,8 +0,0 @@
1/* Hook to call BIOS initialisation function */
2
3extern unsigned long sgivwfb_mem_phys;
4extern unsigned long sgivwfb_mem_size;
5
6/* no action for visws */
7
8#define ARCH_SETUP
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h
deleted file mode 100644
index c9b83e395a2e..000000000000
--- a/include/asm-x86/mach-visws/smpboot_hooks.h
+++ /dev/null
@@ -1,28 +0,0 @@
1static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
2{
3 CMOS_WRITE(0xa, 0xf);
4 local_flush_tlb();
5 Dprintk("1.\n");
6 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
7 Dprintk("2.\n");
8 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
9 Dprintk("3.\n");
10}
11
12/* for visws do nothing for any of these */
13
14static inline void smpboot_clear_io_apic_irqs(void)
15{
16}
17
18static inline void smpboot_restore_warm_reset_vector(void)
19{
20}
21
22static inline void smpboot_setup_io_apic(void)
23{
24}
25
26static inline void smpboot_clear_io_apic(void)
27{
28}
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8c10b6..ae52624b5937 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
23BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); 23BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
24BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); 24BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
25BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); 25BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
26 26BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h
deleted file mode 100644
index 165421f5821c..000000000000
--- a/include/asm-x86/mach-voyager/irq_vectors.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2002
4 *
5 * Author: James.Bottomley@HansenPartnership.com
6 *
7 * linux/arch/i386/voyager/irq_vectors.h
8 *
9 * This file provides definitions for the VIC and QIC CPIs
10 */
11
12#ifndef _ASM_IRQ_VECTORS_H
13#define _ASM_IRQ_VECTORS_H
14
15/*
16 * IDT vectors usable for external interrupt sources start
17 * at 0x20:
18 */
19#define FIRST_EXTERNAL_VECTOR 0x20
20
21#define SYSCALL_VECTOR 0x80
22
23/*
24 * Vectors 0x20-0x2f are used for ISA interrupts.
25 */
26
27/* These define the CPIs we use in linux */
28#define VIC_CPI_LEVEL0 0
29#define VIC_CPI_LEVEL1 1
30/* now the fake CPIs */
31#define VIC_TIMER_CPI 2
32#define VIC_INVALIDATE_CPI 3
33#define VIC_RESCHEDULE_CPI 4
34#define VIC_ENABLE_IRQ_CPI 5
35#define VIC_CALL_FUNCTION_CPI 6
36
37/* Now the QIC CPIs: Since we don't need the two initial levels,
38 * these are 2 less than the VIC CPIs */
39#define QIC_CPI_OFFSET 1
40#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
41#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
42#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
43#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
44#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
45
46#define VIC_START_FAKE_CPI VIC_TIMER_CPI
47#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
48
49/* this is the SYS_INT CPI. */
50#define VIC_SYS_INT 8
51#define VIC_CMN_INT 15
52
53/* This is the boot CPI for alternate processors. It gets overwritten
54 * by the above once the system has activated all available processors */
55#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
56#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
57
58#define NR_VECTORS 256
59#define NR_IRQS 224
60#define NR_IRQ_VECTORS NR_IRQS
61
62#define FPU_IRQ 13
63
64#define FIRST_VM86_IRQ 3
65#define LAST_VM86_IRQ 15
66#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
67
68#ifndef __ASSEMBLY__
69extern asmlinkage void vic_cpi_interrupt(void);
70extern asmlinkage void vic_sys_interrupt(void);
71extern asmlinkage void vic_cmn_interrupt(void);
72extern asmlinkage void qic_timer_interrupt(void);
73extern asmlinkage void qic_invalidate_interrupt(void);
74extern asmlinkage void qic_reschedule_interrupt(void);
75extern asmlinkage void qic_enable_irq_interrupt(void);
76extern asmlinkage void qic_call_function_interrupt(void);
77#endif /* !__ASSEMBLY__ */
78
79#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/math_emu.h b/include/asm-x86/math_emu.h
index 9bf4ae93ab10..5768d8e95c8c 100644
--- a/include/asm-x86/math_emu.h
+++ b/include/asm-x86/math_emu.h
@@ -1,5 +1,5 @@
1#ifndef _I386_MATH_EMU_H 1#ifndef ASM_X86__MATH_EMU_H
2#define _I386_MATH_EMU_H 2#define ASM_X86__MATH_EMU_H
3 3
4/* This structure matches the layout of the data saved to the stack 4/* This structure matches the layout of the data saved to the stack
5 following a device-not-present interrupt, part of it saved 5 following a device-not-present interrupt, part of it saved
@@ -28,4 +28,4 @@ struct info {
28 long ___vm86_fs; 28 long ___vm86_fs;
29 long ___vm86_gs; 29 long ___vm86_gs;
30}; 30};
31#endif 31#endif /* ASM_X86__MATH_EMU_H */
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
index daf1ccde77af..a995f33176cd 100644
--- a/include/asm-x86/mc146818rtc.h
+++ b/include/asm-x86/mc146818rtc.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Machine dependent access functions for RTC registers. 2 * Machine dependent access functions for RTC registers.
3 */ 3 */
4#ifndef _ASM_MC146818RTC_H 4#ifndef ASM_X86__MC146818RTC_H
5#define _ASM_MC146818RTC_H 5#define ASM_X86__MC146818RTC_H
6 6
7#include <asm/io.h> 7#include <asm/io.h>
8#include <asm/system.h> 8#include <asm/system.h>
@@ -101,4 +101,4 @@ extern unsigned long mach_get_cmos_time(void);
101 101
102#define RTC_IRQ 8 102#define RTC_IRQ 8
103 103
104#endif /* _ASM_MC146818RTC_H */ 104#endif /* ASM_X86__MC146818RTC_H */
diff --git a/include/asm-x86/mca.h b/include/asm-x86/mca.h
index 09adf2eac4dc..60d1ed287b13 100644
--- a/include/asm-x86/mca.h
+++ b/include/asm-x86/mca.h
@@ -1,8 +1,8 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */ 1/* -*- mode: c; c-basic-offset: 8 -*- */
2 2
3/* Platform specific MCA defines */ 3/* Platform specific MCA defines */
4#ifndef _ASM_MCA_H 4#ifndef ASM_X86__MCA_H
5#define _ASM_MCA_H 5#define ASM_X86__MCA_H
6 6
7/* Maximal number of MCA slots - actually, some machines have fewer, but 7/* Maximal number of MCA slots - actually, some machines have fewer, but
8 * they all have sufficient number of POS registers to cover 8. 8 * they all have sufficient number of POS registers to cover 8.
@@ -40,4 +40,4 @@
40 */ 40 */
41#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) 41#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3)
42 42
43#endif 43#endif /* ASM_X86__MCA_H */
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
index c3dca6edc6b1..49f22be237d2 100644
--- a/include/asm-x86/mca_dma.h
+++ b/include/asm-x86/mca_dma.h
@@ -1,5 +1,5 @@
1#ifndef MCA_DMA_H 1#ifndef ASM_X86__MCA_DMA_H
2#define MCA_DMA_H 2#define ASM_X86__MCA_DMA_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5#include <linux/ioport.h> 5#include <linux/ioport.h>
@@ -198,4 +198,4 @@ static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
198 outb(mode, MCA_DMA_REG_EXE); 198 outb(mode, MCA_DMA_REG_EXE);
199} 199}
200 200
201#endif /* MCA_DMA_H */ 201#endif /* ASM_X86__MCA_DMA_H */
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h
index 94f1fd79e22a..036133eaf744 100644
--- a/include/asm-x86/mce.h
+++ b/include/asm-x86/mce.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MCE_H 1#ifndef ASM_X86__MCE_H
2#define _ASM_X86_MCE_H 2#define ASM_X86__MCE_H
3 3
4#ifdef __x86_64__ 4#ifdef __x86_64__
5 5
@@ -92,6 +92,7 @@ extern int mce_disabled;
92 92
93void mce_log(struct mce *m); 93void mce_log(struct mce *m);
94DECLARE_PER_CPU(struct sys_device, device_mce); 94DECLARE_PER_CPU(struct sys_device, device_mce);
95extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
95 96
96#ifdef CONFIG_X86_MCE_INTEL 97#ifdef CONFIG_X86_MCE_INTEL
97void mce_intel_feature_init(struct cpuinfo_x86 *c); 98void mce_intel_feature_init(struct cpuinfo_x86 *c);
@@ -126,4 +127,4 @@ extern void restart_mce(void);
126 127
127#endif /* __KERNEL__ */ 128#endif /* __KERNEL__ */
128 129
129#endif 130#endif /* ASM_X86__MCE_H */
diff --git a/include/asm-x86/microcode.h b/include/asm-x86/microcode.h
new file mode 100644
index 000000000000..62c793bb70ca
--- /dev/null
+++ b/include/asm-x86/microcode.h
@@ -0,0 +1,47 @@
1#ifndef ASM_X86__MICROCODE_H
2#define ASM_X86__MICROCODE_H
3
4struct cpu_signature {
5 unsigned int sig;
6 unsigned int pf;
7 unsigned int rev;
8};
9
10struct device;
11
12struct microcode_ops {
13 int (*request_microcode_user) (int cpu, const void __user *buf, size_t size);
14 int (*request_microcode_fw) (int cpu, struct device *device);
15
16 void (*apply_microcode) (int cpu);
17
18 int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
19 void (*microcode_fini_cpu) (int cpu);
20};
21
22struct ucode_cpu_info {
23 struct cpu_signature cpu_sig;
24 int valid;
25 void *mc;
26};
27extern struct ucode_cpu_info ucode_cpu_info[];
28
29#ifdef CONFIG_MICROCODE_INTEL
30extern struct microcode_ops * __init init_intel_microcode(void);
31#else
32static inline struct microcode_ops * __init init_intel_microcode(void)
33{
34 return NULL;
35}
36#endif /* CONFIG_MICROCODE_INTEL */
37
38#ifdef CONFIG_MICROCODE_AMD
39extern struct microcode_ops * __init init_amd_microcode(void);
40#else
41static inline struct microcode_ops * __init init_amd_microcode(void)
42{
43 return NULL;
44}
45#endif
46
47#endif /* ASM_X86__MICROCODE_H */
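
[Editorial note] init_intel_microcode() and init_amd_microcode() each return a struct microcode_ops * (or NULL), and the static-inline stubs make both calls safe under any config combination. A hedged sketch of how a driver core might select the vendor ops at init time; the function below is hypothetical, not part of this header:

        static struct microcode_ops *microcode_ops;

        static int __init microcode_init(void)
        {
                microcode_ops = init_intel_microcode();
                if (!microcode_ops)
                        microcode_ops = init_amd_microcode();
                return microcode_ops ? 0 : -ENODEV;     /* no vendor driver */
        }
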
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
index c1682b542daf..4ef28e6de383 100644
--- a/include/asm-x86/mman.h
+++ b/include/asm-x86/mman.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MMAN_H 1#ifndef ASM_X86__MMAN_H
2#define _ASM_X86_MMAN_H 2#define ASM_X86__MMAN_H
3 3
4#include <asm-generic/mman.h> 4#include <asm-generic/mman.h>
5 5
@@ -12,8 +12,9 @@
12#define MAP_NORESERVE 0x4000 /* don't check for reservations */ 12#define MAP_NORESERVE 0x4000 /* don't check for reservations */
13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
14#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
15#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
15 16
16#define MCL_CURRENT 1 /* lock all current mappings */ 17#define MCL_CURRENT 1 /* lock all current mappings */
17#define MCL_FUTURE 2 /* lock all future mappings */ 18#define MCL_FUTURE 2 /* lock all future mappings */
18 19
19#endif /* _ASM_X86_MMAN_H */ 20#endif /* ASM_X86__MMAN_H */
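
[Editorial note] MAP_STACK is a hint flag: the mapping is an ordinary private anonymous region, but it lets callers mark allocations intended as thread stacks. Illustrative userspace use, assuming a standard mmap(2) environment; the size is arbitrary:

        #include <sys/mman.h>

        /* MAP_PRIVATE and MAP_ANONYMOUS come from the generic mman header */
        void *stack = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
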
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
new file mode 100644
index 000000000000..fb79b1cf5d07
--- /dev/null
+++ b/include/asm-x86/mmconfig.h
@@ -0,0 +1,12 @@
1#ifndef ASM_X86__MMCONFIG_H
2#define ASM_X86__MMCONFIG_H
3
4#ifdef CONFIG_PCI_MMCONFIG
5extern void __cpuinit fam10h_check_enable_mmcfg(void);
6extern void __cpuinit check_enable_amd_mmconf_dmi(void);
7#else
8static inline void fam10h_check_enable_mmcfg(void) { }
9static inline void check_enable_amd_mmconf_dmi(void) { }
10#endif
11
12#endif /* ASM_X86__MMCONFIG_H */
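
[Editorial note] Because the #else branch supplies empty inlines, architecture setup code can call these unconditionally; an illustrative call site:

        fam10h_check_enable_mmcfg();            /* both are no-ops without */
        check_enable_amd_mmconf_dmi();          /* CONFIG_PCI_MMCONFIG */
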
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index 00e88679e11f..9d5aff14334a 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MMU_H 1#ifndef ASM_X86__MMU_H
2#define _ASM_X86_MMU_H 2#define ASM_X86__MMU_H
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <linux/mutex.h> 5#include <linux/mutex.h>
@@ -7,14 +7,9 @@
7/* 7/*
8 * The x86 doesn't have a mmu context, but 8 * The x86 doesn't have a mmu context, but
9 * we put the segment information here. 9 * we put the segment information here.
10 *
11 * cpu_vm_mask is used to optimize ldt flushing.
12 */ 10 */
13typedef struct { 11typedef struct {
14 void *ldt; 12 void *ldt;
15#ifdef CONFIG_X86_64
16 rwlock_t ldtlock;
17#endif
18 int size; 13 int size;
19 struct mutex lock; 14 struct mutex lock;
20 void *vdso; 15 void *vdso;
@@ -28,4 +23,4 @@ static inline void leave_mm(int cpu)
28} 23}
29#endif 24#endif
30 25
31#endif /* _ASM_X86_MMU_H */ 26#endif /* ASM_X86__MMU_H */
diff --git a/include/asm-x86/mmu_context.h b/include/asm-x86/mmu_context.h
index 6598450da6c6..8ec940bfd079 100644
--- a/include/asm-x86/mmu_context.h
+++ b/include/asm-x86/mmu_context.h
@@ -1,5 +1,37 @@
1#ifndef ASM_X86__MMU_CONTEXT_H
2#define ASM_X86__MMU_CONTEXT_H
3
4#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/paravirt.h>
9#ifndef CONFIG_PARAVIRT
10#include <asm-generic/mm_hooks.h>
11
12static inline void paravirt_activate_mm(struct mm_struct *prev,
13 struct mm_struct *next)
14{
15}
16#endif /* !CONFIG_PARAVIRT */
17
18/*
19 * Used for LDT copy/destruction.
20 */
21int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
22void destroy_context(struct mm_struct *mm);
23
1#ifdef CONFIG_X86_32 24#ifdef CONFIG_X86_32
2# include "mmu_context_32.h" 25# include "mmu_context_32.h"
3#else 26#else
4# include "mmu_context_64.h" 27# include "mmu_context_64.h"
5#endif 28#endif
29
30#define activate_mm(prev, next) \
31do { \
32 paravirt_activate_mm((prev), (next)); \
33 switch_mm((prev), (next), NULL); \
34} while (0)
35
36
37#endif /* ASM_X86__MMU_CONTEXT_H */
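
[Editorial note] activate_mm() is wrapped in do { ... } while (0) so the two-statement macro behaves as a single statement wherever it expands, notably as an unbraced if body followed by an else. Illustrative, with placeholder variables:

        if (need_switch)
                activate_mm(prev, next);        /* expands safely here */
        else
                enter_lazy_tlb(mm, tsk);
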
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 9756ae0f1dd3..cce6f6e4afd6 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -1,27 +1,5 @@
1#ifndef __I386_SCHED_H 1#ifndef ASM_X86__MMU_CONTEXT_32_H
2#define __I386_SCHED_H 2#define ASM_X86__MMU_CONTEXT_32_H
3
4#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/paravirt.h>
9#ifndef CONFIG_PARAVIRT
10#include <asm-generic/mm_hooks.h>
11
12static inline void paravirt_activate_mm(struct mm_struct *prev,
13 struct mm_struct *next)
14{
15}
16#endif /* !CONFIG_PARAVIRT */
17
18
19/*
20 * Used for LDT copy/destruction.
21 */
22int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
23void destroy_context(struct mm_struct *mm);
24
25 3
26static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 4static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
27{ 5{
@@ -75,10 +53,4 @@ static inline void switch_mm(struct mm_struct *prev,
75#define deactivate_mm(tsk, mm) \ 53#define deactivate_mm(tsk, mm) \
76 asm("movl %0,%%gs": :"r" (0)); 54 asm("movl %0,%%gs": :"r" (0));
77 55
78#define activate_mm(prev, next) \ 56#endif /* ASM_X86__MMU_CONTEXT_32_H */
79do { \
80 paravirt_activate_mm((prev), (next)); \
81 switch_mm((prev), (next), NULL); \
82} while (0);
83
84#endif
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index ca44c71e7fb3..26758673c828 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -1,21 +1,7 @@
1#ifndef __X86_64_MMU_CONTEXT_H 1#ifndef ASM_X86__MMU_CONTEXT_64_H
2#define __X86_64_MMU_CONTEXT_H 2#define ASM_X86__MMU_CONTEXT_64_H
3 3
4#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/pda.h> 4#include <asm/pda.h>
8#include <asm/pgtable.h>
9#include <asm/tlbflush.h>
10#ifndef CONFIG_PARAVIRT
11#include <asm-generic/mm_hooks.h>
12#endif
13
14/*
15 * possibly do the LDT unload here?
16 */
17int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
18void destroy_context(struct mm_struct *mm);
19 5
20static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 6static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
21{ 7{
@@ -65,8 +51,4 @@ do { \
65 asm volatile("movl %0,%%fs"::"r"(0)); \ 51 asm volatile("movl %0,%%fs"::"r"(0)); \
66} while (0) 52} while (0)
67 53
68#define activate_mm(prev, next) \ 54#endif /* ASM_X86__MMU_CONTEXT_64_H */
69 switch_mm((prev), (next), NULL)
70
71
72#endif
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
index 940881218ff8..2e7299bb3653 100644
--- a/include/asm-x86/mmx.h
+++ b/include/asm-x86/mmx.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_MMX_H 1#ifndef ASM_X86__MMX_H
2#define _ASM_MMX_H 2#define ASM_X86__MMX_H
3 3
4/* 4/*
5 * MMX 3Dnow! helper operations 5 * MMX 3Dnow! helper operations
@@ -11,4 +11,4 @@ extern void *_mmx_memcpy(void *to, const void *from, size_t size);
11extern void mmx_clear_page(void *page); 11extern void mmx_clear_page(void *page);
12extern void mmx_copy_page(void *to, void *from); 12extern void mmx_copy_page(void *to, void *from);
13 13
14#endif 14#endif /* ASM_X86__MMX_H */
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index cb2cad0b65a7..121b65d61d86 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -3,8 +3,8 @@
3 * 3 *
4 */ 4 */
5 5
6#ifndef _ASM_MMZONE_H_ 6#ifndef ASM_X86__MMZONE_32_H
7#define _ASM_MMZONE_H_ 7#define ASM_X86__MMZONE_32_H
8 8
9#include <asm/smp.h> 9#include <asm/smp.h>
10 10
@@ -12,11 +12,9 @@
12extern struct pglist_data *node_data[]; 12extern struct pglist_data *node_data[];
13#define NODE_DATA(nid) (node_data[nid]) 13#define NODE_DATA(nid) (node_data[nid])
14 14
15#ifdef CONFIG_X86_NUMAQ 15#include <asm/numaq.h>
16 #include <asm/numaq.h> 16/* summit or generic arch */
17#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */ 17#include <asm/srat.h>
18 #include <asm/srat.h>
19#endif
20 18
21extern int get_memcfg_numa_flat(void); 19extern int get_memcfg_numa_flat(void);
22/* 20/*
@@ -26,28 +24,20 @@ extern int get_memcfg_numa_flat(void);
26 */ 24 */
27static inline void get_memcfg_numa(void) 25static inline void get_memcfg_numa(void)
28{ 26{
29#ifdef CONFIG_X86_NUMAQ 27
30 if (get_memcfg_numaq()) 28 if (get_memcfg_numaq())
31 return; 29 return;
32#elif defined(CONFIG_ACPI_SRAT)
33 if (get_memcfg_from_srat()) 30 if (get_memcfg_from_srat())
34 return; 31 return;
35#endif
36
37 get_memcfg_numa_flat(); 32 get_memcfg_numa_flat();
38} 33}
39 34
40extern int early_pfn_to_nid(unsigned long pfn); 35extern int early_pfn_to_nid(unsigned long pfn);
41extern void numa_kva_reserve(void);
42 36
43#else /* !CONFIG_NUMA */ 37#else /* !CONFIG_NUMA */
44 38
45#define get_memcfg_numa get_memcfg_numa_flat 39#define get_memcfg_numa get_memcfg_numa_flat
46#define get_zholes_size(n) (0)
47 40
48static inline void numa_kva_reserve(void)
49{
50}
51#endif /* CONFIG_NUMA */ 41#endif /* CONFIG_NUMA */
52 42
53#ifdef CONFIG_DISCONTIGMEM 43#ifdef CONFIG_DISCONTIGMEM
@@ -55,14 +45,14 @@ static inline void numa_kva_reserve(void)
55/* 45/*
56 * generic node memory support, the following assumptions apply: 46 * generic node memory support, the following assumptions apply:
57 * 47 *
58 * 1) memory comes in 256Mb contiguous chunks which are either present or not 48 * 1) memory comes in 64Mb contiguous chunks which are either present or not
59 * 2) we will not have more than 64Gb in total 49 * 2) we will not have more than 64Gb in total
60 * 50 *
61 * for now assume that 64Gb is max amount of RAM for whole system 51 * for now assume that 64Gb is max amount of RAM for whole system
62 * 64Gb / 4096bytes/page = 16777216 pages 52 * 64Gb / 4096bytes/page = 16777216 pages
63 */ 53 */
64#define MAX_NR_PAGES 16777216 54#define MAX_NR_PAGES 16777216
65#define MAX_ELEMENTS 256 55#define MAX_ELEMENTS 1024
66#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS) 56#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
67 57
68extern s8 physnode_map[]; 58extern s8 physnode_map[];
@@ -87,9 +77,6 @@ static inline int pfn_to_nid(unsigned long pfn)
87 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ 77 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
88}) 78})
89 79
90#ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */
91#define pfn_valid(pfn) ((pfn) < num_physpages)
92#else
93static inline int pfn_valid(int pfn) 80static inline int pfn_valid(int pfn)
94{ 81{
95 int nid = pfn_to_nid(pfn); 82 int nid = pfn_to_nid(pfn);
@@ -98,7 +85,6 @@ static inline int pfn_valid(int pfn)
98 return (pfn < node_end_pfn(nid)); 85 return (pfn < node_end_pfn(nid));
99 return 0; 86 return 0;
100} 87}
101#endif /* CONFIG_X86_NUMAQ */
102 88
103#endif /* CONFIG_DISCONTIGMEM */ 89#endif /* CONFIG_DISCONTIGMEM */
104 90
@@ -111,10 +97,16 @@ static inline int pfn_valid(int pfn)
111 reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags)) 97 reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
112#define alloc_bootmem(x) \ 98#define alloc_bootmem(x) \
113 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 99 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
100#define alloc_bootmem_nopanic(x) \
101 __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
102 __pa(MAX_DMA_ADDRESS))
114#define alloc_bootmem_low(x) \ 103#define alloc_bootmem_low(x) \
115 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) 104 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
116#define alloc_bootmem_pages(x) \ 105#define alloc_bootmem_pages(x) \
117 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 106 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
107#define alloc_bootmem_pages_nopanic(x) \
108 __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
109 __pa(MAX_DMA_ADDRESS))
118#define alloc_bootmem_low_pages(x) \ 110#define alloc_bootmem_low_pages(x) \
119 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) 111 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
120#define alloc_bootmem_node(pgdat, x) \ 112#define alloc_bootmem_node(pgdat, x) \
@@ -139,4 +131,4 @@ static inline int pfn_valid(int pfn)
139}) 131})
140#endif /* CONFIG_NEED_MULTIPLE_NODES */ 132#endif /* CONFIG_NEED_MULTIPLE_NODES */
141 133
142#endif /* _ASM_MMZONE_H_ */ 134#endif /* ASM_X86__MMZONE_32_H */
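The MAX_ELEMENTS bump above follows from the finer granularity: 16777216 total pages split into 64Mb elements of 16384 pages each gives 16777216 / 16384 = 1024 map entries (the old 256 entries covered 256Mb chunks). A minimal sketch of the lookup physnode_map backs, with illustrative EXAMPLE_/example_ names (pfn_to_nid itself appears in the hunk above):

#define EXAMPLE_MAX_NR_PAGES      16777216	/* 64Gb / 4096 bytes per page */
#define EXAMPLE_MAX_ELEMENTS      1024		/* one entry per 64Mb chunk */
#define EXAMPLE_PAGES_PER_ELEMENT (EXAMPLE_MAX_NR_PAGES/EXAMPLE_MAX_ELEMENTS)

extern s8 physnode_map[];	/* node owning each chunk, or -1 if absent */

static inline int example_pfn_to_nid(unsigned long pfn)
{
	return (int)physnode_map[pfn / EXAMPLE_PAGES_PER_ELEMENT];
}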
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
index 5e3a6cbddb49..6480f3333b2a 100644
--- a/include/asm-x86/mmzone_64.h
+++ b/include/asm-x86/mmzone_64.h
@@ -1,8 +1,8 @@
1/* K8 NUMA support */ 1/* K8 NUMA support */
2/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */ 2/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
3/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */ 3/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
4#ifndef _ASM_X86_64_MMZONE_H 4#ifndef ASM_X86__MMZONE_64_H
5#define _ASM_X86_64_MMZONE_H 1 5#define ASM_X86__MMZONE_64_H
6 6
7 7
8#ifdef CONFIG_NUMA 8#ifdef CONFIG_NUMA
@@ -48,4 +48,4 @@ extern int early_pfn_to_nid(unsigned long pfn);
48#endif 48#endif
49 49
50#endif 50#endif
51#endif 51#endif /* ASM_X86__MMZONE_64_H */
diff --git a/include/asm-x86/module.h b/include/asm-x86/module.h
index bfedb247871c..48dc3e0c07d9 100644
--- a/include/asm-x86/module.h
+++ b/include/asm-x86/module.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_MODULE_H 1#ifndef ASM_X86__MODULE_H
2#define _ASM_MODULE_H 2#define ASM_X86__MODULE_H
3 3
4/* x86_32/64 are simple */ 4/* x86_32/64 are simple */
5struct mod_arch_specific {}; 5struct mod_arch_specific {};
@@ -79,4 +79,4 @@ struct mod_arch_specific {};
79# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE 79# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
80#endif 80#endif
81 81
82#endif /* _ASM_MODULE_H */ 82#endif /* ASM_X86__MODULE_H */
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 57a991b9c053..be2241a818f1 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -1,18 +1,25 @@
1#ifndef _AM_X86_MPSPEC_H 1#ifndef ASM_X86__MPSPEC_H
2#define _AM_X86_MPSPEC_H 2#define ASM_X86__MPSPEC_H
3 3
4#include <linux/init.h> 4#include <linux/init.h>
5 5
6#include <asm/mpspec_def.h> 6#include <asm/mpspec_def.h>
7 7
8extern int apic_version[MAX_APICS];
9
8#ifdef CONFIG_X86_32 10#ifdef CONFIG_X86_32
9#include <mach_mpspec.h> 11#include <mach_mpspec.h>
10 12
11extern unsigned int def_to_bigsmp; 13extern unsigned int def_to_bigsmp;
12extern int apic_version[MAX_APICS];
13extern u8 apicid_2_node[]; 14extern u8 apicid_2_node[];
14extern int pic_mode; 15extern int pic_mode;
15 16
17#ifdef CONFIG_X86_NUMAQ
18extern int mp_bus_id_to_node[MAX_MP_BUSSES];
19extern int mp_bus_id_to_local[MAX_MP_BUSSES];
20extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
21#endif
22
16#define MAX_APICID 256 23#define MAX_APICID 256
17 24
18#else 25#else
@@ -21,26 +28,30 @@ extern int pic_mode;
21/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ 28/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
22#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) 29#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
23 30
31#endif
32
24extern void early_find_smp_config(void); 33extern void early_find_smp_config(void);
25extern void early_get_smp_config(void); 34extern void early_get_smp_config(void);
26 35
27#endif
28
29#if defined(CONFIG_MCA) || defined(CONFIG_EISA) 36#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
30extern int mp_bus_id_to_type[MAX_MP_BUSSES]; 37extern int mp_bus_id_to_type[MAX_MP_BUSSES];
31#endif 38#endif
32 39
33extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); 40extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
34 41
35extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
36
37extern unsigned int boot_cpu_physical_apicid; 42extern unsigned int boot_cpu_physical_apicid;
43extern unsigned int max_physical_apicid;
38extern int smp_found_config; 44extern int smp_found_config;
39extern int mpc_default_type; 45extern int mpc_default_type;
40extern unsigned long mp_lapic_addr; 46extern unsigned long mp_lapic_addr;
41 47
42extern void find_smp_config(void); 48extern void find_smp_config(void);
43extern void get_smp_config(void); 49extern void get_smp_config(void);
50#ifdef CONFIG_X86_MPPARSE
51extern void early_reserve_e820_mpc_new(void);
52#else
53static inline void early_reserve_e820_mpc_new(void) { }
54#endif
44 55
45void __cpuinit generic_processor_info(int apicid, int version); 56void __cpuinit generic_processor_info(int apicid, int version);
46#ifdef CONFIG_ACPI 57#ifdef CONFIG_ACPI
@@ -49,6 +60,17 @@ extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
49 u32 gsi); 60 u32 gsi);
50extern void mp_config_acpi_legacy_irqs(void); 61extern void mp_config_acpi_legacy_irqs(void);
51extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); 62extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
63#ifdef CONFIG_X86_IO_APIC
64extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
65 u32 gsi, int triggering, int polarity);
66#else
67static inline int
68mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
69 u32 gsi, int triggering, int polarity)
70{
71 return 0;
72}
73#endif
52#endif /* CONFIG_ACPI */ 74#endif /* CONFIG_ACPI */
53 75
54#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) 76#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
@@ -101,6 +123,7 @@ typedef struct physid_mask physid_mask_t;
101 __physid_mask; \ 123 __physid_mask; \
102 }) 124 })
103 125
126/* Note: will create very large stack frames if physid_mask_t is big */
104#define physid_mask_of_physid(physid) \ 127#define physid_mask_of_physid(physid) \
105 ({ \ 128 ({ \
106 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ 129 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
@@ -108,9 +131,15 @@ typedef struct physid_mask physid_mask_t;
108 __physid_mask; \ 131 __physid_mask; \
109 }) 132 })
110 133
134static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
135{
136 physids_clear(*map);
137 physid_set(physid, *map);
138}
139
111#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } 140#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
112#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } 141#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
113 142
114extern physid_mask_t phys_cpu_present_map; 143extern physid_mask_t phys_cpu_present_map;
115 144
116#endif 145#endif /* ASM_X86__MPSPEC_H */
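The new physid_set_mask_of_physid() exists because of the stack-frame note added above: physid_mask_of_physid() is a statement expression that builds and then copies an entire physid_mask_t (PHYSID_ARRAY_SIZE longs). A sketch of the two patterns, with an illustrative example_ caller:

static void example_mark_present(int apicid)
{
	physid_mask_t tmp;

	/* Old pattern: materializes a full temporary mask, then copies it. */
	tmp = physid_mask_of_physid(apicid);

	/* New pattern: clears and sets in place, no oversized temporary. */
	physid_set_mask_of_physid(apicid, &tmp);

	physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
}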
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
index dc6ef85e3624..79166b048012 100644
--- a/include/asm-x86/mpspec_def.h
+++ b/include/asm-x86/mpspec_def.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MPSPEC_DEF_H 1#ifndef ASM_X86__MPSPEC_DEF_H
2#define __ASM_MPSPEC_DEF_H 2#define ASM_X86__MPSPEC_DEF_H
3 3
4/* 4/*
5 * Structure definitions for SMP machines following the 5 * Structure definitions for SMP machines following the
@@ -17,10 +17,11 @@
17# define MAX_MPC_ENTRY 1024 17# define MAX_MPC_ENTRY 1024
18# define MAX_APICS 256 18# define MAX_APICS 256
19#else 19#else
20/* 20# if NR_CPUS <= 255
21 * A maximum of 255 APICs with the current APIC ID architecture. 21# define MAX_APICS 255
22 */ 22# else
23# define MAX_APICS 255 23# define MAX_APICS 32768
24# endif
24#endif 25#endif
25 26
26struct intel_mp_floating { 27struct intel_mp_floating {
@@ -176,4 +177,4 @@ enum mp_bustype {
176 MP_BUS_PCI, 177 MP_BUS_PCI,
177 MP_BUS_MCA, 178 MP_BUS_MCA,
178}; 179};
179#endif 180#endif /* ASM_X86__MPSPEC_DEF_H */
diff --git a/include/asm-x86/msgbuf.h b/include/asm-x86/msgbuf.h
index 7e4e9481f51c..1b538c907a3d 100644
--- a/include/asm-x86/msgbuf.h
+++ b/include/asm-x86/msgbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MSGBUF_H 1#ifndef ASM_X86__MSGBUF_H
2#define _ASM_X86_MSGBUF_H 2#define ASM_X86__MSGBUF_H
3 3
4/* 4/*
5 * The msqid64_ds structure for i386 architecture. 5 * The msqid64_ds structure for i386 architecture.
@@ -36,4 +36,4 @@ struct msqid64_ds {
36 unsigned long __unused5; 36 unsigned long __unused5;
37}; 37};
38 38
39#endif /* _ASM_X86_MSGBUF_H */ 39#endif /* ASM_X86__MSGBUF_H */
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
index 296f29ce426d..ed9190246876 100644
--- a/include/asm-x86/msidef.h
+++ b/include/asm-x86/msidef.h
@@ -1,5 +1,5 @@
1#ifndef ASM_MSIDEF_H 1#ifndef ASM_X86__MSIDEF_H
2#define ASM_MSIDEF_H 2#define ASM_X86__MSIDEF_H
3 3
4/* 4/*
5 * Constants for Intel APIC based MSI messages. 5 * Constants for Intel APIC based MSI messages.
@@ -48,4 +48,8 @@
48#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ 48#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
49 MSI_ADDR_DEST_ID_MASK) 49 MSI_ADDR_DEST_ID_MASK)
50 50
51#endif /* ASM_MSIDEF_H */ 51#define MSI_ADDR_IR_EXT_INT (1 << 4)
52#define MSI_ADDR_IR_SHV (1 << 3)
53#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13)
54#define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5)
55#endif /* ASM_X86__MSIDEF_H */
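The new MSI_ADDR_IR_* bits describe the remappable MSI address layout: bit 15 of a 16-bit interrupt-remapping handle lands in address bit 2 ((index & 0x8000) >> 13), handle bits 14:0 land in address bits 19:5 ((index & 0x7fff) << 5), and SHV flags a subhandle. A sketch of composing such an address; it assumes MSI_ADDR_BASE_LO from earlier in this header, and the example_ helper is illustrative:

static inline u32 example_ir_msi_address(u16 index, int subhandle_valid)
{
	u32 addr = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT;

	addr |= MSI_ADDR_IR_INDEX1(index) | MSI_ADDR_IR_INDEX2(index);
	if (subhandle_valid)
		addr |= MSI_ADDR_IR_SHV;
	return addr;
}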
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 09413ad39d3c..0bb43301a202 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MSR_INDEX_H 1#ifndef ASM_X86__MSR_INDEX_H
2#define __ASM_MSR_INDEX_H 2#define ASM_X86__MSR_INDEX_H
3 3
4/* CPU model specific register (MSR) numbers */ 4/* CPU model specific register (MSR) numbers */
5 5
@@ -111,7 +111,9 @@
111#define MSR_K8_TOP_MEM2 0xc001001d 111#define MSR_K8_TOP_MEM2 0xc001001d
112#define MSR_K8_SYSCFG 0xc0010010 112#define MSR_K8_SYSCFG 0xc0010010
113#define MSR_K8_HWCR 0xc0010015 113#define MSR_K8_HWCR 0xc0010015
114#define MSR_K8_ENABLE_C1E 0xc0010055 114#define MSR_K8_INT_PENDING_MSG 0xc0010055
115/* C1E active bits in int pending message */
116#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
115#define MSR_K8_TSEG_ADDR 0xc0010112 117#define MSR_K8_TSEG_ADDR 0xc0010112
116#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ 118#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
117#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ 119#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
@@ -174,6 +176,7 @@
174#define MSR_IA32_TSC 0x00000010 176#define MSR_IA32_TSC 0x00000010
175#define MSR_IA32_PLATFORM_ID 0x00000017 177#define MSR_IA32_PLATFORM_ID 0x00000017
176#define MSR_IA32_EBL_CR_POWERON 0x0000002a 178#define MSR_IA32_EBL_CR_POWERON 0x0000002a
179#define MSR_IA32_FEATURE_CONTROL 0x0000003a
177 180
178#define MSR_IA32_APICBASE 0x0000001b 181#define MSR_IA32_APICBASE 0x0000001b
179#define MSR_IA32_APICBASE_BSP (1<<8) 182#define MSR_IA32_APICBASE_BSP (1<<8)
@@ -308,4 +311,19 @@
308/* Geode defined MSRs */ 311/* Geode defined MSRs */
309#define MSR_GEODE_BUSCONT_CONF0 0x00001900 312#define MSR_GEODE_BUSCONT_CONF0 0x00001900
310 313
311#endif /* __ASM_MSR_INDEX_H */ 314/* Intel VT MSRs */
315#define MSR_IA32_VMX_BASIC 0x00000480
316#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
317#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
318#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
319#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
320#define MSR_IA32_VMX_MISC 0x00000485
321#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
322#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
323#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
324#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
325#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
326#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
327#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
328
329#endif /* ASM_X86__MSR_INDEX_H */
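A usage note for the rename above: MSR 0xc0010055 is K8's interrupt pending message register, so C1E is detected by reading it and testing bits 27 and 28 (K8_INTP_C1E_ACTIVE_MASK = 0x18000000), not by toggling an enable bit. Illustrative sketch, assuming rdmsr() from <asm/msr.h>:

static inline int example_k8_c1e_active(void)
{
	u32 lo, hi;

	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	return (lo & K8_INTP_C1E_ACTIVE_MASK) != 0;	/* bit 27 or bit 28 */
}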
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 3707650a169b..530af1f6389e 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_X86_MSR_H_ 1#ifndef ASM_X86__MSR_H
2#define __ASM_X86_MSR_H_ 2#define ASM_X86__MSR_H
3 3
4#include <asm/msr-index.h> 4#include <asm/msr-index.h>
5 5
@@ -18,7 +18,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
18 unsigned long low, high; 18 unsigned long low, high;
19 asm volatile(".byte 0x0f,0x01,0xf9" 19 asm volatile(".byte 0x0f,0x01,0xf9"
20 : "=a" (low), "=d" (high), "=c" (*aux)); 20 : "=a" (low), "=d" (high), "=c" (*aux));
21 return low | ((u64)high >> 32); 21 return low | ((u64)high << 32);
22} 22}
23 23
24/* 24/*
@@ -52,6 +52,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
52{ 52{
53 DECLARE_ARGS(val, low, high); 53 DECLARE_ARGS(val, low, high);
54 54
55 asm volatile("2: rdmsr ; xor %[err],%[err]\n"
56 "1:\n\t"
57 ".section .fixup,\"ax\"\n\t"
58 "3: mov %[fault],%[err] ; jmp 1b\n\t"
59 ".previous\n\t"
60 _ASM_EXTABLE(2b, 3b)
61 : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
62 : "c" (msr), [fault] "i" (-EFAULT));
63 return EAX_EDX_VAL(val, low, high);
64}
65
66static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
67 int *err)
68{
69 DECLARE_ARGS(val, low, high);
70
55 asm volatile("2: rdmsr ; xor %0,%0\n" 71 asm volatile("2: rdmsr ; xor %0,%0\n"
56 "1:\n\t" 72 "1:\n\t"
57 ".section .fixup,\"ax\"\n\t" 73 ".section .fixup,\"ax\"\n\t"
@@ -59,29 +75,30 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
59 ".previous\n\t" 75 ".previous\n\t"
60 _ASM_EXTABLE(2b, 3b) 76 _ASM_EXTABLE(2b, 3b)
61 : "=r" (*err), EAX_EDX_RET(val, low, high) 77 : "=r" (*err), EAX_EDX_RET(val, low, high)
62 : "c" (msr), "i" (-EFAULT)); 78 : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
63 return EAX_EDX_VAL(val, low, high); 79 return EAX_EDX_VAL(val, low, high);
64} 80}
65 81
66static inline void native_write_msr(unsigned int msr, 82static inline void native_write_msr(unsigned int msr,
67 unsigned low, unsigned high) 83 unsigned low, unsigned high)
68{ 84{
69 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high)); 85 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
70} 86}
71 87
72static inline int native_write_msr_safe(unsigned int msr, 88static inline int native_write_msr_safe(unsigned int msr,
73 unsigned low, unsigned high) 89 unsigned low, unsigned high)
74{ 90{
75 int err; 91 int err;
76 asm volatile("2: wrmsr ; xor %0,%0\n" 92 asm volatile("2: wrmsr ; xor %[err],%[err]\n"
77 "1:\n\t" 93 "1:\n\t"
78 ".section .fixup,\"ax\"\n\t" 94 ".section .fixup,\"ax\"\n\t"
79 "3: mov %4,%0 ; jmp 1b\n\t" 95 "3: mov %[fault],%[err] ; jmp 1b\n\t"
80 ".previous\n\t" 96 ".previous\n\t"
81 _ASM_EXTABLE(2b, 3b) 97 _ASM_EXTABLE(2b, 3b)
82 : "=a" (err) 98 : [err] "=a" (err)
83 : "c" (msr), "0" (low), "d" (high), 99 : "c" (msr), "0" (low), "d" (high),
84 "i" (-EFAULT)); 100 [fault] "i" (-EFAULT)
101 : "memory");
85 return err; 102 return err;
86} 103}
87 104
@@ -157,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
157 *p = native_read_msr_safe(msr, &err); 174 *p = native_read_msr_safe(msr, &err);
158 return err; 175 return err;
159} 176}
177static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
178{
179 int err;
180
181 *p = native_read_msr_amd_safe(msr, &err);
182 return err;
183}
160 184
161#define rdtscl(low) \ 185#define rdtscl(low) \
162 ((low) = (u32)native_read_tsc()) 186 ((low) = (u32)native_read_tsc())
@@ -191,19 +215,20 @@ do { \
191#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) 215#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
192 216
193#ifdef CONFIG_SMP 217#ifdef CONFIG_SMP
194void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 218int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
195void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 219int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
196int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 220int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
197
198int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 221int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
199#else /* CONFIG_SMP */ 222#else /* CONFIG_SMP */
200static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 223static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
201{ 224{
202 rdmsr(msr_no, *l, *h); 225 rdmsr(msr_no, *l, *h);
226 return 0;
203} 227}
204static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 228static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
205{ 229{
206 wrmsr(msr_no, l, h); 230 wrmsr(msr_no, l, h);
231 return 0;
207} 232}
208static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, 233static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
209 u32 *l, u32 *h) 234 u32 *l, u32 *h)
@@ -219,4 +244,4 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
219#endif /* __KERNEL__ */ 244#endif /* __KERNEL__ */
220 245
221 246
222#endif 247#endif /* ASM_X86__MSR_H */
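The _safe variants above route rdmsr/wrmsr through an exception-table fixup, so touching an unimplemented MSR returns -EFAULT in *err instead of taking an unhandled #GP; that is what makes MSR probing possible. A hedged usage sketch (the example_ wrapper and its -ENODEV mapping are illustrative):

static int example_probe_msr(unsigned int msr, unsigned long long *val)
{
	if (rdmsrl_safe(msr, val))
		return -ENODEV;	/* MSR not implemented on this CPU */
	return 0;
}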
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index a69a01a51729..23a7f83da953 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -20,8 +20,8 @@
20 The postal address is: 20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/ 22*/
23#ifndef _ASM_X86_MTRR_H 23#ifndef ASM_X86__MTRR_H
24#define _ASM_X86_MTRR_H 24#define ASM_X86__MTRR_H
25 25
26#include <linux/ioctl.h> 26#include <linux/ioctl.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
@@ -170,4 +170,4 @@ struct mtrr_gentry32 {
170 170
171#endif /* __KERNEL__ */ 171#endif /* __KERNEL__ */
172 172
173#endif /* _ASM_X86_MTRR_H */ 173#endif /* ASM_X86__MTRR_H */
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
index 73e928ef5f03..25c16d8ba3c7 100644
--- a/include/asm-x86/mutex_32.h
+++ b/include/asm-x86/mutex_32.h
@@ -6,8 +6,8 @@
6 * 6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */ 8 */
9#ifndef _ASM_MUTEX_H 9#ifndef ASM_X86__MUTEX_32_H
10#define _ASM_MUTEX_H 10#define ASM_X86__MUTEX_32_H
11 11
12#include <asm/alternative.h> 12#include <asm/alternative.h>
13 13
@@ -122,4 +122,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
122#endif 122#endif
123} 123}
124 124
125#endif 125#endif /* ASM_X86__MUTEX_32_H */
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index f3fae9becb38..918ba21ab9d9 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -6,8 +6,8 @@
6 * 6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */ 8 */
9#ifndef _ASM_MUTEX_H 9#ifndef ASM_X86__MUTEX_64_H
10#define _ASM_MUTEX_H 10#define ASM_X86__MUTEX_64_H
11 11
12/** 12/**
13 * __mutex_fastpath_lock - decrement and call function if negative 13 * __mutex_fastpath_lock - decrement and call function if negative
@@ -97,4 +97,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
97 return 0; 97 return 0;
98} 98}
99 99
100#endif 100#endif /* ASM_X86__MUTEX_64_H */
diff --git a/include/asm-x86/namei.h b/include/asm-x86/namei.h
deleted file mode 100644
index 415ef5d9550e..000000000000
--- a/include/asm-x86/namei.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_X86_NAMEI_H
2#define _ASM_X86_NAMEI_H
3
4/* This dummy routine maybe changed to something useful
5 * for /usr/gnemul/ emulation stuff.
6 * Look at asm-sparc/namei.h for details.
7 */
8
9#define __emul_prefix() NULL
10
11#endif /* _ASM_X86_NAMEI_H */
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 1e363021e72f..d5e715f024dc 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_NMI_H_ 1#ifndef ASM_X86__NMI_H
2#define _ASM_X86_NMI_H_ 2#define ASM_X86__NMI_H
3 3
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <asm/irq.h> 5#include <asm/irq.h>
@@ -15,38 +15,13 @@
15 */ 15 */
16int do_nmi_callback(struct pt_regs *regs, int cpu); 16int do_nmi_callback(struct pt_regs *regs, int cpu);
17 17
18#ifdef CONFIG_PM
19
20/** Replace the PM callback routine for NMI. */
21struct pm_dev *set_nmi_pm_callback(pm_callback callback);
22
23/** Unset the PM callback routine back to the default. */
24void unset_nmi_pm_callback(struct pm_dev *dev);
25
26#else
27
28static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback)
29{
30 return 0;
31}
32
33static inline void unset_nmi_pm_callback(struct pm_dev *dev)
34{
35}
36
37#endif /* CONFIG_PM */
38
39#ifdef CONFIG_X86_64 18#ifdef CONFIG_X86_64
40extern void default_do_nmi(struct pt_regs *); 19extern void default_do_nmi(struct pt_regs *);
41extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
42extern void nmi_watchdog_default(void);
43#else
44#define nmi_watchdog_default() do {} while (0)
45#endif 20#endif
46 21
22extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
47extern int check_nmi_watchdog(void); 23extern int check_nmi_watchdog(void);
48extern int nmi_watchdog_enabled; 24extern int nmi_watchdog_enabled;
49extern int unknown_nmi_panic;
50extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); 25extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
51extern int avail_to_resrv_perfctr_nmi(unsigned int); 26extern int avail_to_resrv_perfctr_nmi(unsigned int);
52extern int reserve_perfctr_nmi(unsigned int); 27extern int reserve_perfctr_nmi(unsigned int);
@@ -59,15 +34,14 @@ extern void stop_apic_nmi_watchdog(void *);
59extern void disable_timer_nmi_watchdog(void); 34extern void disable_timer_nmi_watchdog(void);
60extern void enable_timer_nmi_watchdog(void); 35extern void enable_timer_nmi_watchdog(void);
61extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason); 36extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
37extern void cpu_nmi_set_wd_enabled(void);
62 38
63extern atomic_t nmi_active; 39extern atomic_t nmi_active;
64extern unsigned int nmi_watchdog; 40extern unsigned int nmi_watchdog;
65#define NMI_DISABLED -1
66#define NMI_NONE 0 41#define NMI_NONE 0
67#define NMI_IO_APIC 1 42#define NMI_IO_APIC 1
68#define NMI_LOCAL_APIC 2 43#define NMI_LOCAL_APIC 2
69#define NMI_INVALID 3 44#define NMI_INVALID 3
70#define NMI_DEFAULT NMI_DISABLED
71 45
72struct ctl_table; 46struct ctl_table;
73struct file; 47struct file;
@@ -78,6 +52,24 @@ extern int unknown_nmi_panic;
78void __trigger_all_cpu_backtrace(void); 52void __trigger_all_cpu_backtrace(void);
79#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() 53#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
80 54
55static inline void localise_nmi_watchdog(void)
56{
57 if (nmi_watchdog == NMI_IO_APIC)
58 nmi_watchdog = NMI_LOCAL_APIC;
59}
60
61/* check if nmi_watchdog is active (ie was specified at boot) */
62static inline int nmi_watchdog_active(void)
63{
64 /*
65 * actually it should be:
66 * return (nmi_watchdog == NMI_LOCAL_APIC ||
67 * nmi_watchdog == NMI_IO_APIC)
 68	 * but since they are powers of two we can use a
69 * cheaper way --cvg
70 */
71 return nmi_watchdog & 0x3;
72}
81#endif 73#endif
82 74
83void lapic_watchdog_stop(void); 75void lapic_watchdog_stop(void);
@@ -90,4 +82,4 @@ void enable_lapic_nmi_watchdog(void);
90void stop_nmi(void); 82void stop_nmi(void);
91void restart_nmi(void); 83void restart_nmi(void);
92 84
93#endif 85#endif /* ASM_X86__NMI_H */
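The & 0x3 shortcut in nmi_watchdog_active() above, as a worked table; NMI_INVALID is assumed never to be stored as the configured mode:

/*
 *   nmi_watchdog value     value & 0x3    watchdog active?
 *   NMI_NONE       (0)          0               no
 *   NMI_IO_APIC    (1)          1               yes
 *   NMI_LOCAL_APIC (2)          2               yes
 *   NMI_INVALID    (3)          3               (never configured)
 */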
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index ad0bedd10b89..ae742721ae73 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_NOPS_H 1#ifndef ASM_X86__NOPS_H
2#define _ASM_NOPS_H 1 2#define ASM_X86__NOPS_H
3 3
4/* Define nops for use with alternative() */ 4/* Define nops for use with alternative() */
5 5
@@ -115,4 +115,4 @@
115 115
116#define ASM_NOP_MAX 8 116#define ASM_NOP_MAX 8
117 117
118#endif 118#endif /* ASM_X86__NOPS_H */
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h
index 03d0f7a9bf02..44cb07855c5b 100644
--- a/include/asm-x86/numa_32.h
+++ b/include/asm-x86/numa_32.h
@@ -1,15 +1,11 @@
1#ifndef _ASM_X86_32_NUMA_H 1#ifndef ASM_X86__NUMA_32_H
2#define _ASM_X86_32_NUMA_H 1 2#define ASM_X86__NUMA_32_H
3 3
4extern int pxm_to_nid(int pxm); 4extern int pxm_to_nid(int pxm);
5extern void numa_remove_cpu(int cpu);
5 6
6#ifdef CONFIG_NUMA 7#ifdef CONFIG_NUMA
7extern void __init remap_numa_kva(void); 8extern void set_highmem_pages_init(void);
8extern void set_highmem_pages_init(int);
9#else
10static inline void remap_numa_kva(void)
11{
12}
13#endif 9#endif
14 10
15#endif /* _ASM_X86_32_NUMA_H */ 11#endif /* ASM_X86__NUMA_32_H */
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 22e87c9f6a80..15c990395b02 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_NUMA_H 1#ifndef ASM_X86__NUMA_64_H
2#define _ASM_X8664_NUMA_H 1 2#define ASM_X86__NUMA_64_H
3 3
4#include <linux/nodemask.h> 4#include <linux/nodemask.h>
5#include <asm/apicdef.h> 5#include <asm/apicdef.h>
@@ -14,32 +14,30 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks,
14 14
15#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) 15#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
16 16
17extern void numa_add_cpu(int cpu);
18extern void numa_init_array(void); 17extern void numa_init_array(void);
19extern int numa_off; 18extern int numa_off;
20 19
21extern void numa_set_node(int cpu, int node);
22extern void srat_reserve_add_area(int nodeid); 20extern void srat_reserve_add_area(int nodeid);
23extern int hotadd_percent; 21extern int hotadd_percent;
24 22
25extern s16 apicid_to_node[MAX_LOCAL_APIC]; 23extern s16 apicid_to_node[MAX_LOCAL_APIC];
26 24
27extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
28extern unsigned long numa_free_all_bootmem(void); 25extern unsigned long numa_free_all_bootmem(void);
29extern void setup_node_bootmem(int nodeid, unsigned long start, 26extern void setup_node_bootmem(int nodeid, unsigned long start,
30 unsigned long end); 27 unsigned long end);
31 28
32#ifdef CONFIG_NUMA 29#ifdef CONFIG_NUMA
33extern void __init init_cpu_to_node(void); 30extern void __init init_cpu_to_node(void);
34 31extern void __cpuinit numa_set_node(int cpu, int node);
35static inline void clear_node_cpumask(int cpu) 32extern void __cpuinit numa_clear_node(int cpu);
36{ 33extern void __cpuinit numa_add_cpu(int cpu);
37 clear_bit(cpu, (unsigned long *)&node_to_cpumask_map[cpu_to_node(cpu)]); 34extern void __cpuinit numa_remove_cpu(int cpu);
38}
39
40#else 35#else
41#define init_cpu_to_node() do {} while (0) 36static inline void init_cpu_to_node(void) { }
42#define clear_node_cpumask(cpu) do {} while (0) 37static inline void numa_set_node(int cpu, int node) { }
38static inline void numa_clear_node(int cpu) { }
39 static inline void numa_add_cpu(int cpu) { }
40static inline void numa_remove_cpu(int cpu) { }
43#endif 41#endif
44 42
45#endif 43#endif /* ASM_X86__NUMA_64_H */
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
index 94b86c31239a..124bf7d4b70a 100644
--- a/include/asm-x86/numaq.h
+++ b/include/asm-x86/numaq.h
@@ -23,11 +23,12 @@
23 * Send feedback to <gone@us.ibm.com> 23 * Send feedback to <gone@us.ibm.com>
24 */ 24 */
25 25
26#ifndef NUMAQ_H 26#ifndef ASM_X86__NUMAQ_H
27#define NUMAQ_H 27#define ASM_X86__NUMAQ_H
28 28
29#ifdef CONFIG_X86_NUMAQ 29#ifdef CONFIG_X86_NUMAQ
30 30
31extern int found_numaq;
31extern int get_memcfg_numaq(void); 32extern int get_memcfg_numaq(void);
32 33
33/* 34/*
@@ -156,10 +157,13 @@ struct sys_cfg_data {
156 struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ 157 struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
157}; 158};
158 159
159static inline unsigned long *get_zholes_size(int nid) 160void numaq_tsc_disable(void);
161
162#else
163static inline int get_memcfg_numaq(void)
160{ 164{
161 return NULL; 165 return 0;
162} 166}
163#endif /* CONFIG_X86_NUMAQ */ 167#endif /* CONFIG_X86_NUMAQ */
164#endif /* NUMAQ_H */ 168#endif /* ASM_X86__NUMAQ_H */
165 169
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/numaq/apic.h
index 75a56e5afbe7..a8344ba6ea15 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/numaq/apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef __ASM_NUMAQ_APIC_H
2#define __ASM_MACH_APIC_H 2#define __ASM_NUMAQ_APIC_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5#include <linux/mmzone.h> 5#include <linux/mmzone.h>
@@ -20,8 +20,14 @@ static inline cpumask_t target_cpus(void)
20#define INT_DELIVERY_MODE dest_LowestPrio 20#define INT_DELIVERY_MODE dest_LowestPrio
21#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */ 21#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
22 22
23#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) 23static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
24#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) 24{
25 return physid_isset(apicid, bitmap);
26}
27static inline unsigned long check_apicid_present(int bit)
28{
29 return physid_isset(bit, phys_cpu_present_map);
30}
25#define apicid_cluster(apicid) (apicid & 0xF0) 31#define apicid_cluster(apicid) (apicid & 0xF0)
26 32
27static inline int apic_id_registered(void) 33static inline int apic_id_registered(void)
@@ -77,11 +83,6 @@ static inline int cpu_present_to_apicid(int mps_cpu)
77 return BAD_APICID; 83 return BAD_APICID;
78} 84}
79 85
80static inline int generate_logical_apicid(int quad, int phys_apicid)
81{
82 return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
83}
84
85static inline int apicid_to_node(int logical_apicid) 86static inline int apicid_to_node(int logical_apicid)
86{ 87{
87 return logical_apicid >> 4; 88 return logical_apicid >> 4;
@@ -95,30 +96,6 @@ static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
95 return physid_mask_of_physid(cpu + 4*node); 96 return physid_mask_of_physid(cpu + 4*node);
96} 97}
97 98
98struct mpc_config_translation {
99 unsigned char mpc_type;
100 unsigned char trans_len;
101 unsigned char trans_type;
102 unsigned char trans_quad;
103 unsigned char trans_global;
104 unsigned char trans_local;
105 unsigned short trans_reserved;
106};
107
108static inline int mpc_apic_id(struct mpc_config_processor *m,
109 struct mpc_config_translation *translation_record)
110{
111 int quad = translation_record->trans_quad;
112 int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
113
114 printk("Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
115 m->mpc_apicid,
116 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
117 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
118 m->mpc_apicver, quad, logical_apicid);
119 return logical_apicid;
120}
121
122extern void *xquad_portio; 99extern void *xquad_portio;
123 100
124static inline void setup_portio_remap(void) 101static inline void setup_portio_remap(void)
@@ -158,4 +135,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
158 return cpuid_apic >> index_msb; 135 return cpuid_apic >> index_msb;
159} 136}
160 137
161#endif /* __ASM_MACH_APIC_H */ 138#endif /* __ASM_NUMAQ_APIC_H */
diff --git a/include/asm-x86/mach-numaq/mach_apicdef.h b/include/asm-x86/numaq/apicdef.h
index bf439d0690f5..e012a46cc22a 100644
--- a/include/asm-x86/mach-numaq/mach_apicdef.h
+++ b/include/asm-x86/numaq/apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef __ASM_NUMAQ_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define __ASM_NUMAQ_APICDEF_H
3 3
4 4
5#define APIC_ID_MASK (0xF<<24) 5#define APIC_ID_MASK (0xF<<24)
diff --git a/include/asm-x86/mach-numaq/mach_ipi.h b/include/asm-x86/numaq/ipi.h
index c6044488e9e6..935588d286cf 100644
--- a/include/asm-x86/mach-numaq/mach_ipi.h
+++ b/include/asm-x86/numaq/ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef __ASM_NUMAQ_IPI_H
2#define __ASM_MACH_IPI_H 2#define __ASM_NUMAQ_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t, int vector); 4void send_IPI_mask_sequence(cpumask_t, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/include/asm-x86/numaq/mpparse.h b/include/asm-x86/numaq/mpparse.h
new file mode 100644
index 000000000000..252292e077b6
--- /dev/null
+++ b/include/asm-x86/numaq/mpparse.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_NUMAQ_MPPARSE_H
2#define __ASM_NUMAQ_MPPARSE_H
3
4extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid);
6
7#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/include/asm-x86/mach-numaq/mach_wakecpu.h b/include/asm-x86/numaq/wakecpu.h
index 00530041a991..c577bda5b1c5 100644
--- a/include/asm-x86/mach-numaq/mach_wakecpu.h
+++ b/include/asm-x86/numaq/wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef __ASM_NUMAQ_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define __ASM_NUMAQ_WAKECPU_H
3 3
4/* This file copes with machines that wake up secondary CPUs by NMIs */ 4/* This file copes with machines that wake up secondary CPUs by NMIs */
5 5
@@ -40,4 +40,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
40 40
41#define inquire_remote_apic(apicid) {} 41#define inquire_remote_apic(apicid) {}
42 42
43#endif /* __ASM_MACH_WAKECPU_H */ 43#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h
index 97d47133486f..d7328b1a05c1 100644
--- a/include/asm-x86/olpc.h
+++ b/include/asm-x86/olpc.h
@@ -1,7 +1,7 @@
1/* OLPC machine specific definitions */ 1/* OLPC machine specific definitions */
2 2
3#ifndef ASM_OLPC_H_ 3#ifndef ASM_X86__OLPC_H
4#define ASM_OLPC_H_ 4#define ASM_X86__OLPC_H
5 5
6#include <asm/geode.h> 6#include <asm/geode.h>
7 7
@@ -129,4 +129,4 @@ extern int olpc_ec_mask_unset(uint8_t bits);
129#define OLPC_GPIO_LID geode_gpio(26) 129#define OLPC_GPIO_LID geode_gpio(26)
130#define OLPC_GPIO_ECSCI geode_gpio(27) 130#define OLPC_GPIO_ECSCI geode_gpio(27)
131 131
132#endif 132#endif /* ASM_X86__OLPC_H */
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index dc936dddf161..c91574776751 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PAGE_H 1#ifndef ASM_X86__PAGE_H
2#define _ASM_X86_PAGE_H 2#define ASM_X86__PAGE_H
3 3
4#include <linux/const.h> 4#include <linux/const.h>
5 5
@@ -18,8 +18,11 @@
18 (ie, 32-bit PAE). */ 18 (ie, 32-bit PAE). */
19#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 19#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
20 20
21/* PTE_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ 21/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
22#define PTE_MASK ((pteval_t)PHYSICAL_PAGE_MASK) 22#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
23
24/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
25#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
23 26
24#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 27#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
25#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 28#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
@@ -29,8 +32,7 @@
29#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 32#define HPAGE_MASK (~(HPAGE_SIZE - 1))
30#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 33#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
31 34
32/* to align the pointer to the (next) page boundary */ 35#define HUGE_MAX_HSTATE 2
33#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
34 36
35#ifndef __ASSEMBLY__ 37#ifndef __ASSEMBLY__
36#include <linux/types.h> 38#include <linux/types.h>
@@ -51,9 +53,18 @@
51 53
52#ifndef __ASSEMBLY__ 54#ifndef __ASSEMBLY__
53 55
56typedef struct { pgdval_t pgd; } pgd_t;
57typedef struct { pgprotval_t pgprot; } pgprot_t;
58
54extern int page_is_ram(unsigned long pagenr); 59extern int page_is_ram(unsigned long pagenr);
60extern int pagerange_is_ram(unsigned long start, unsigned long end);
55extern int devmem_is_allowed(unsigned long pagenr); 61extern int devmem_is_allowed(unsigned long pagenr);
62extern void map_devmem(unsigned long pfn, unsigned long size,
63 pgprot_t vma_prot);
64extern void unmap_devmem(unsigned long pfn, unsigned long size,
65 pgprot_t vma_prot);
56 66
67extern unsigned long max_low_pfn_mapped;
57extern unsigned long max_pfn_mapped; 68extern unsigned long max_pfn_mapped;
58 69
59struct page; 70struct page;
@@ -74,9 +85,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
74 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) 85 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
75#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 86#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
76 87
77typedef struct { pgdval_t pgd; } pgd_t;
78typedef struct { pgprotval_t pgprot; } pgprot_t;
79
80static inline pgd_t native_make_pgd(pgdval_t val) 88static inline pgd_t native_make_pgd(pgdval_t val)
81{ 89{
82 return (pgd_t) { val }; 90 return (pgd_t) { val };
@@ -139,6 +147,11 @@ static inline pteval_t native_pte_val(pte_t pte)
139 return pte.pte; 147 return pte.pte;
140} 148}
141 149
150static inline pteval_t native_pte_flags(pte_t pte)
151{
152 return native_pte_val(pte) & PTE_FLAGS_MASK;
153}
154
142#define pgprot_val(x) ((x).pgprot) 155#define pgprot_val(x) ((x).pgprot)
143#define __pgprot(x) ((pgprot_t) { (x) } ) 156#define __pgprot(x) ((pgprot_t) { (x) } )
144 157
@@ -160,6 +173,7 @@ static inline pteval_t native_pte_val(pte_t pte)
160#endif 173#endif
161 174
162#define pte_val(x) native_pte_val(x) 175#define pte_val(x) native_pte_val(x)
176#define pte_flags(x) native_pte_flags(x)
163#define __pte(x) native_make_pte(x) 177#define __pte(x) native_make_pte(x)
164 178
165#endif /* CONFIG_PARAVIRT */ 179#endif /* CONFIG_PARAVIRT */
@@ -186,4 +200,4 @@ static inline pteval_t native_pte_val(pte_t pte)
186#define __HAVE_ARCH_GATE_AREA 1 200#define __HAVE_ARCH_GATE_AREA 1
187 201
188#endif /* __KERNEL__ */ 202#endif /* __KERNEL__ */
189#endif /* _ASM_X86_PAGE_H */ 203#endif /* ASM_X86__PAGE_H */
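The PTE_MASK to PTE_PFN_MASK/PTE_FLAGS_MASK split gives both halves of a pte value a name. A minimal sketch of the two extractions; the example_ helpers are illustrative, and native_pte_flags() in the hunk above is the in-tree form:

static inline unsigned long example_pte_pfn(pte_t pte)
{
	return (unsigned long)((pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT);
}

static inline pteval_t example_pte_flags(pte_t pte)
{
	return pte_val(pte) & PTE_FLAGS_MASK;	/* present, rw, NX, ... */
}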
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 71a2e424e584..9c5a737a9af9 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PAGE_32_H 1#ifndef ASM_X86__PAGE_32_H
2#define _ASM_X86_PAGE_32_H 2#define ASM_X86__PAGE_32_H
3 3
4/* 4/*
5 * This handles the memory map. 5 * This handles the memory map.
@@ -13,8 +13,17 @@
13 */ 13 */
14#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) 14#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
15 15
16#ifdef CONFIG_4KSTACKS
17#define THREAD_ORDER 0
18#else
19#define THREAD_ORDER 1
20#endif
21#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
22
23
16#ifdef CONFIG_X86_PAE 24#ifdef CONFIG_X86_PAE
17#define __PHYSICAL_MASK_SHIFT 36 25/* 44=32+12, the limit we can fit into an unsigned long pfn */
26#define __PHYSICAL_MASK_SHIFT 44
18#define __VIRTUAL_MASK_SHIFT 32 27#define __VIRTUAL_MASK_SHIFT 32
19#define PAGETABLE_LEVELS 3 28#define PAGETABLE_LEVELS 3
20 29
@@ -85,8 +94,13 @@ extern int nx_enabled;
85extern unsigned int __VMALLOC_RESERVE; 94extern unsigned int __VMALLOC_RESERVE;
86extern int sysctl_legacy_va_layout; 95extern int sysctl_legacy_va_layout;
87 96
88#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) 97extern void find_low_pfn_range(void);
89#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE) 98extern unsigned long init_memory_mapping(unsigned long start,
99 unsigned long end);
100extern void initmem_init(unsigned long, unsigned long);
101extern void free_initmem(void);
102extern void setup_bootmem_allocator(void);
103
90 104
91#ifdef CONFIG_X86_USE_3DNOW 105#ifdef CONFIG_X86_USE_3DNOW
92#include <asm/mmx.h> 106#include <asm/mmx.h>
@@ -115,4 +129,4 @@ static inline void copy_page(void *to, void *from)
115#endif /* CONFIG_X86_3DNOW */ 129#endif /* CONFIG_X86_3DNOW */
116#endif /* !__ASSEMBLY__ */ 130#endif /* !__ASSEMBLY__ */
117 131
118#endif /* _ASM_X86_PAGE_32_H */ 132#endif /* ASM_X86__PAGE_32_H */
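Worked numbers for two of the additions above (illustrative arithmetic only):

/*
 * __PHYSICAL_MASK_SHIFT 44: a pfn must fit in a 32-bit unsigned long,
 * so the largest reachable physical byte is
 *     2^32 pages * 2^12 bytes/page = 2^44 bytes (16 TB),
 * hence 44 = 32 + 12, comfortably above the literal 36-bit PAE minimum.
 *
 * THREAD_SIZE = PAGE_SIZE << THREAD_ORDER: 4 KB kernel stacks with
 * CONFIG_4KSTACKS (order 0), 8 KB otherwise (order 1).
 */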
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 6ea72859c491..5e64acfed0a4 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_PAGE_H 1#ifndef ASM_X86__PAGE_64_H
2#define _X86_64_PAGE_H 2#define ASM_X86__PAGE_64_H
3 3
4#define PAGETABLE_LEVELS 4 4#define PAGETABLE_LEVELS 4
5 5
@@ -26,7 +26,13 @@
26#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 26#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
27#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 27#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
28 28
29#define __PAGE_OFFSET _AC(0xffff810000000000, UL) 29/*
30 * Set __PAGE_OFFSET to the most negative possible address +
31 * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
32 * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
33 * what Xen requires.
34 */
35#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
30 36
31#define __PHYSICAL_START CONFIG_PHYSICAL_START 37#define __PHYSICAL_START CONFIG_PHYSICAL_START
32#define __KERNEL_ALIGN 0x200000 38#define __KERNEL_ALIGN 0x200000
@@ -58,7 +64,8 @@
58void clear_page(void *page); 64void clear_page(void *page);
59void copy_page(void *to, void *from); 65void copy_page(void *to, void *from);
60 66
61extern unsigned long end_pfn; 67/* duplicated to the one in bootmem.h */
68extern unsigned long max_pfn;
62extern unsigned long phys_base; 69extern unsigned long phys_base;
63 70
64extern unsigned long __phys_addr(unsigned long); 71extern unsigned long __phys_addr(unsigned long);
@@ -83,11 +90,17 @@ typedef struct { pteval_t pte; } pte_t;
83extern unsigned long init_memory_mapping(unsigned long start, 90extern unsigned long init_memory_mapping(unsigned long start,
84 unsigned long end); 91 unsigned long end);
85 92
93extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
94extern void free_initmem(void);
95
96extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
97extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
98
86#endif /* !__ASSEMBLY__ */ 99#endif /* !__ASSEMBLY__ */
87 100
88#ifdef CONFIG_FLATMEM 101#ifdef CONFIG_FLATMEM
89#define pfn_valid(pfn) ((pfn) < end_pfn) 102#define pfn_valid(pfn) ((pfn) < max_pfn)
90#endif 103#endif
91 104
92 105
93#endif /* _X86_64_PAGE_H */ 106#endif /* ASM_X86__PAGE_64_H */
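Checking the new __PAGE_OFFSET against its comment (illustrative arithmetic):

/*
 * Most negative 48-bit canonical address:   0xffff800000000000
 * One pgd slot spans PGDIR_SIZE = 2^39 bytes (512 GB), so the 16-slot
 * hypervisor gap is 16 * 0x8000000000 = 0x80000000000 (8 TB), and
 *     0xffff800000000000 + 0x80000000000 = 0xffff880000000000,
 * whose pgd index is (0xffff880000000000 >> 39) & 511 = 272 = 256 + 16.
 */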
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
index 6f0d0422f4ca..0009cfb11a5f 100644
--- a/include/asm-x86/param.h
+++ b/include/asm-x86/param.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PARAM_H 1#ifndef ASM_X86__PARAM_H
2#define _ASM_X86_PARAM_H 2#define ASM_X86__PARAM_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */ 5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
@@ -19,4 +19,4 @@
19 19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */ 20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21 21
22#endif /* _ASM_X86_PARAM_H */ 22#endif /* ASM_X86__PARAM_H */
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 0f13b945e240..8d6ae2f760d0 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_PARAVIRT_H 1#ifndef ASM_X86__PARAVIRT_H
2#define __ASM_PARAVIRT_H 2#define ASM_X86__PARAVIRT_H
3/* Various instructions on x86 need to be replaced for 3/* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */ 4 * para-virtualization: those hooks are defined here. */
5 5
@@ -84,7 +84,7 @@ struct pv_time_ops {
84 int (*set_wallclock)(unsigned long); 84 int (*set_wallclock)(unsigned long);
85 85
86 unsigned long long (*sched_clock)(void); 86 unsigned long long (*sched_clock)(void);
87 unsigned long (*get_cpu_khz)(void); 87 unsigned long (*get_tsc_khz)(void);
88}; 88};
89 89
90struct pv_cpu_ops { 90struct pv_cpu_ops {
@@ -115,12 +115,18 @@ struct pv_cpu_ops {
115 void (*set_ldt)(const void *desc, unsigned entries); 115 void (*set_ldt)(const void *desc, unsigned entries);
116 unsigned long (*store_tr)(void); 116 unsigned long (*store_tr)(void);
117 void (*load_tls)(struct thread_struct *t, unsigned int cpu); 117 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
118#ifdef CONFIG_X86_64
119 void (*load_gs_index)(unsigned int idx);
120#endif
118 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, 121 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
119 const void *desc); 122 const void *desc);
120 void (*write_gdt_entry)(struct desc_struct *, 123 void (*write_gdt_entry)(struct desc_struct *,
121 int entrynum, const void *desc, int size); 124 int entrynum, const void *desc, int size);
122 void (*write_idt_entry)(gate_desc *, 125 void (*write_idt_entry)(gate_desc *,
123 int entrynum, const gate_desc *gate); 126 int entrynum, const gate_desc *gate);
127 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
128 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
129
124 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); 130 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
125 131
126 void (*set_iopl_mask)(unsigned mask); 132 void (*set_iopl_mask)(unsigned mask);
@@ -134,6 +140,7 @@ struct pv_cpu_ops {
134 140
135 /* MSR, PMC and TSR operations. 141 /* MSR, PMC and TSR operations.
136 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ 142 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
143 u64 (*read_msr_amd)(unsigned int msr, int *err);
137 u64 (*read_msr)(unsigned int msr, int *err); 144 u64 (*read_msr)(unsigned int msr, int *err);
138 int (*write_msr)(unsigned int msr, unsigned low, unsigned high); 145 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
139 146
@@ -141,8 +148,32 @@ struct pv_cpu_ops {
141 u64 (*read_pmc)(int counter); 148 u64 (*read_pmc)(int counter);
142 unsigned long long (*read_tscp)(unsigned int *aux); 149 unsigned long long (*read_tscp)(unsigned int *aux);
143 150
144 /* These two are jmp to, not actually called. */ 151 /*
145 void (*irq_enable_syscall_ret)(void); 152 * Atomically enable interrupts and return to userspace. This
153 * is only ever used to return to 32-bit processes; in a
154 * 64-bit kernel, it's used for 32-on-64 compat processes, but
155 * never native 64-bit processes. (Jump, not call.)
156 */
157 void (*irq_enable_sysexit)(void);
158
159 /*
160 * Switch to usermode gs and return to 64-bit usermode using
161 * sysret. Only used in 64-bit kernels to return to 64-bit
162 * processes. Usermode register state, including %rsp, must
163 * already be restored.
164 */
165 void (*usergs_sysret64)(void);
166
167 /*
168 * Switch to usermode gs and return to 32-bit usermode using
169 * sysret. Used to return to 32-on-64 compat processes.
170 * Other usermode register state, including %esp, must already
171 * be restored.
172 */
173 void (*usergs_sysret32)(void);
174
175 /* Normal iret. Jump to this with the standard iret stack
176 frame set up. */
146 void (*iret)(void); 177 void (*iret)(void);
147 178
148 void (*swapgs)(void); 179 void (*swapgs)(void);
@@ -165,17 +196,14 @@ struct pv_irq_ops {
165 void (*irq_enable)(void); 196 void (*irq_enable)(void);
166 void (*safe_halt)(void); 197 void (*safe_halt)(void);
167 void (*halt)(void); 198 void (*halt)(void);
199
200#ifdef CONFIG_X86_64
201 void (*adjust_exception_frame)(void);
202#endif
168}; 203};
169 204
170struct pv_apic_ops { 205struct pv_apic_ops {
171#ifdef CONFIG_X86_LOCAL_APIC 206#ifdef CONFIG_X86_LOCAL_APIC
172 /*
173 * Direct APIC operations, principally for VMI. Ideally
174 * these shouldn't be in this interface.
175 */
176 void (*apic_write)(unsigned long reg, u32 v);
177 void (*apic_write_atomic)(unsigned long reg, u32 v);
178 u32 (*apic_read)(unsigned long reg);
179 void (*setup_boot_clock)(void); 207 void (*setup_boot_clock)(void);
180 void (*setup_secondary_clock)(void); 208 void (*setup_secondary_clock)(void);
181 209
@@ -219,14 +247,21 @@ struct pv_mmu_ops {
219 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, 247 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
220 unsigned long va); 248 unsigned long va);
221 249
222 /* Hooks for allocating/releasing pagetable pages */ 250 /* Hooks for allocating and freeing a pagetable top-level */
223 void (*alloc_pte)(struct mm_struct *mm, u32 pfn); 251 int (*pgd_alloc)(struct mm_struct *mm);
224 void (*alloc_pmd)(struct mm_struct *mm, u32 pfn); 252 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
225 void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); 253
226 void (*alloc_pud)(struct mm_struct *mm, u32 pfn); 254 /*
227 void (*release_pte)(u32 pfn); 255 * Hooks for allocating/releasing pagetable pages when they're
228 void (*release_pmd)(u32 pfn); 256 * attached to a pagetable
229 void (*release_pud)(u32 pfn); 257 */
258 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
259 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
260 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
261 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
262 void (*release_pte)(unsigned long pfn);
263 void (*release_pmd)(unsigned long pfn);
264 void (*release_pud)(unsigned long pfn);
230 265
231 /* Pagetable manipulation functions */ 266 /* Pagetable manipulation functions */
232 void (*set_pte)(pte_t *ptep, pte_t pteval); 267 void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -238,7 +273,13 @@ struct pv_mmu_ops {
238 void (*pte_update_defer)(struct mm_struct *mm, 273 void (*pte_update_defer)(struct mm_struct *mm,
239 unsigned long addr, pte_t *ptep); 274 unsigned long addr, pte_t *ptep);
240 275
276 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
277 pte_t *ptep);
278 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
279 pte_t *ptep, pte_t pte);
280
241 pteval_t (*pte_val)(pte_t); 281 pteval_t (*pte_val)(pte_t);
282 pteval_t (*pte_flags)(pte_t);
242 pte_t (*make_pte)(pteval_t pte); 283 pte_t (*make_pte)(pteval_t pte);
243 284
244 pgdval_t (*pgd_val)(pgd_t); 285 pgdval_t (*pgd_val)(pgd_t);
@@ -273,6 +314,23 @@ struct pv_mmu_ops {
273#endif 314#endif
274 315
275 struct pv_lazy_ops lazy_mode; 316 struct pv_lazy_ops lazy_mode;
317
318 /* dom0 ops */
319
320 /* Sometimes the physical address is a pfn, and sometimes it's
321 an mfn. We can tell which is which from the index. */
322 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
323 unsigned long phys, pgprot_t flags);
324};
325
326struct raw_spinlock;
327struct pv_lock_ops {
328 int (*spin_is_locked)(struct raw_spinlock *lock);
329 int (*spin_is_contended)(struct raw_spinlock *lock);
330 void (*spin_lock)(struct raw_spinlock *lock);
331 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
332 int (*spin_trylock)(struct raw_spinlock *lock);
333 void (*spin_unlock)(struct raw_spinlock *lock);
276}; 334};
277 335
278/* This contains all the paravirt structures: we get a convenient 336/* This contains all the paravirt structures: we get a convenient
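pv_lock_ops hooks spinlocks into the same patchable PVOP dispatch used by every other op table in this header. A hedged sketch of how callers would route through it; the example_ wrappers are illustrative, and the real ones live in the spinlock headers:

static inline void example_spin_lock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);	/* indirect, patchable */
}

static inline int example_spin_trylock(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}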
@@ -285,6 +343,7 @@ struct paravirt_patch_template {
285 struct pv_irq_ops pv_irq_ops; 343 struct pv_irq_ops pv_irq_ops;
286 struct pv_apic_ops pv_apic_ops; 344 struct pv_apic_ops pv_apic_ops;
287 struct pv_mmu_ops pv_mmu_ops; 345 struct pv_mmu_ops pv_mmu_ops;
346 struct pv_lock_ops pv_lock_ops;
288}; 347};
289 348
290extern struct pv_info pv_info; 349extern struct pv_info pv_info;
@@ -294,6 +353,7 @@ extern struct pv_cpu_ops pv_cpu_ops;
294extern struct pv_irq_ops pv_irq_ops; 353extern struct pv_irq_ops pv_irq_ops;
295extern struct pv_apic_ops pv_apic_ops; 354extern struct pv_apic_ops pv_apic_ops;
296extern struct pv_mmu_ops pv_mmu_ops; 355extern struct pv_mmu_ops pv_mmu_ops;
356extern struct pv_lock_ops pv_lock_ops;
297 357
298#define PARAVIRT_PATCH(x) \ 358#define PARAVIRT_PATCH(x) \
299 (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) 359 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
@@ -439,10 +499,17 @@ int paravirt_disable_iospace(void);
439#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" 499#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
440#endif 500#endif
441 501
502#ifdef CONFIG_PARAVIRT_DEBUG
503#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
504#else
505#define PVOP_TEST_NULL(op) ((void)op)
506#endif
507
442#define __PVOP_CALL(rettype, op, pre, post, ...) \ 508#define __PVOP_CALL(rettype, op, pre, post, ...) \
443 ({ \ 509 ({ \
444 rettype __ret; \ 510 rettype __ret; \
445 PVOP_CALL_ARGS; \ 511 PVOP_CALL_ARGS; \
512 PVOP_TEST_NULL(op); \
446 /* This is 32-bit specific, but is okay in 64-bit */ \ 513 /* This is 32-bit specific, but is okay in 64-bit */ \
447 /* since this condition will never hold */ \ 514 /* since this condition will never hold */ \
448 if (sizeof(rettype) > sizeof(unsigned long)) { \ 515 if (sizeof(rettype) > sizeof(unsigned long)) { \
@@ -471,6 +538,7 @@ int paravirt_disable_iospace(void);
471#define __PVOP_VCALL(op, pre, post, ...) \ 538#define __PVOP_VCALL(op, pre, post, ...) \
472 ({ \ 539 ({ \
473 PVOP_VCALL_ARGS; \ 540 PVOP_VCALL_ARGS; \
541 PVOP_TEST_NULL(op); \
474 asm volatile(pre \ 542 asm volatile(pre \
475 paravirt_alt(PARAVIRT_CALL) \ 543 paravirt_alt(PARAVIRT_CALL) \
476 post \ 544 post \
@@ -657,6 +725,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
657{ 725{
658 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); 726 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
659} 727}
728static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
729{
730 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
731}
660static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) 732static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
661{ 733{
662 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); 734 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -702,6 +774,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
702 *p = paravirt_read_msr(msr, &err); 774 *p = paravirt_read_msr(msr, &err);
703 return err; 775 return err;
704} 776}
777static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
778{
779 int err;
780
781 *p = paravirt_read_msr_amd(msr, &err);
782 return err;
783}
705 784
706static inline u64 paravirt_read_tsc(void) 785static inline u64 paravirt_read_tsc(void)
707{ 786{
@@ -720,7 +799,7 @@ static inline unsigned long long paravirt_sched_clock(void)
720{ 799{
721 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); 800 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
722} 801}
723#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz()) 802#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
724 803
725static inline unsigned long long paravirt_read_pmc(int counter) 804static inline unsigned long long paravirt_read_pmc(int counter)
726{ 805{
@@ -755,6 +834,16 @@ do { \
755 (aux) = __aux; \ 834 (aux) = __aux; \
756} while (0) 835} while (0)
757 836
837static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
838{
839 PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
840}
841
842static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
843{
844 PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
845}
846
758static inline void load_TR_desc(void) 847static inline void load_TR_desc(void)
759{ 848{
760 PVOP_VCALL0(pv_cpu_ops.load_tr_desc); 849 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
@@ -789,6 +878,13 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
789 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu); 878 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
790} 879}
791 880
881#ifdef CONFIG_X86_64
882static inline void load_gs_index(unsigned int gs)
883{
884 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
885}
886#endif
887
792static inline void write_ldt_entry(struct desc_struct *dt, int entry, 888static inline void write_ldt_entry(struct desc_struct *dt, int entry,
793 const void *desc) 889 const void *desc)
794{ 890{
@@ -822,24 +918,6 @@ static inline void slow_down_io(void)
822} 918}
823 919
824#ifdef CONFIG_X86_LOCAL_APIC 920#ifdef CONFIG_X86_LOCAL_APIC
825/*
826 * Basic functions accessing APICs.
827 */
828static inline void apic_write(unsigned long reg, u32 v)
829{
830 PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
831}
832
833static inline void apic_write_atomic(unsigned long reg, u32 v)
834{
835 PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
836}
837
838static inline u32 apic_read(unsigned long reg)
839{
840 return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
841}
842
843static inline void setup_boot_clock(void) 921static inline void setup_boot_clock(void)
844{ 922{
845 PVOP_VCALL0(pv_apic_ops.setup_boot_clock); 923 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
@@ -912,35 +990,45 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
912 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); 990 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
913} 991}
914 992
915static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn) 993static inline int paravirt_pgd_alloc(struct mm_struct *mm)
994{
995 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
996}
997
998static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
999{
1000 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1001}
1002
1003static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
916{ 1004{
917 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn); 1005 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
918} 1006}
919static inline void paravirt_release_pte(unsigned pfn) 1007static inline void paravirt_release_pte(unsigned long pfn)
920{ 1008{
921 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn); 1009 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
922} 1010}
923 1011
924static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn) 1012static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
925{ 1013{
926 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); 1014 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
927} 1015}
928 1016
929static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn, 1017static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
930 unsigned start, unsigned count) 1018 unsigned long start, unsigned long count)
931{ 1019{
932 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); 1020 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
933} 1021}
934static inline void paravirt_release_pmd(unsigned pfn) 1022static inline void paravirt_release_pmd(unsigned long pfn)
935{ 1023{
936 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); 1024 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
937} 1025}
938 1026
939static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn) 1027static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
940{ 1028{
941 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn); 1029 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
942} 1030}
943static inline void paravirt_release_pud(unsigned pfn) 1031static inline void paravirt_release_pud(unsigned long pfn)
944{ 1032{
945 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); 1033 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
946} 1034}
@@ -996,6 +1084,23 @@ static inline pteval_t pte_val(pte_t pte)
996 return ret; 1084 return ret;
997} 1085}
998 1086
1087static inline pteval_t pte_flags(pte_t pte)
1088{
1089 pteval_t ret;
1090
1091 if (sizeof(pteval_t) > sizeof(long))
1092 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1093 pte.pte, (u64)pte.pte >> 32);
1094 else
1095 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
1096 pte.pte);
1097
1098#ifdef CONFIG_PARAVIRT_DEBUG
1099 BUG_ON(ret & PTE_PFN_MASK);
1100#endif
1101 return ret;
1102}
1103
999static inline pgd_t __pgd(pgdval_t val) 1104static inline pgd_t __pgd(pgdval_t val)
1000{ 1105{
1001 pgdval_t ret; 1106 pgdval_t ret;
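
pte_flags() added above is the flags-only counterpart of pte_val(): under paravirt the pfn part of a pte may need translation (e.g. a Xen mfn-to-pfn conversion) while the flag bits never do, so a dedicated op lets flag tests skip that cost. Conceptually, assuming PTE_PFN_MASK and PTE_FLAGS_MASK partition the word as elsewhere in this series:

        /* pte_val(pte)   == pfn bits | flag bits  (pfn possibly translated) */
        /* pte_flags(pte) == flag bits only        (no translation needed)   */
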
@@ -1024,6 +1129,29 @@ static inline pgdval_t pgd_val(pgd_t pgd)
1024 return ret; 1129 return ret;
1025} 1130}
1026 1131
1132#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1133static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1134 pte_t *ptep)
1135{
1136 pteval_t ret;
1137
1138 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1139 mm, addr, ptep);
1140
1141 return (pte_t) { .pte = ret };
1142}
1143
1144static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1145 pte_t *ptep, pte_t pte)
1146{
1147 if (sizeof(pteval_t) > sizeof(long))
1148 /* 5 arg words */
1149 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1150 else
1151 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1152 mm, addr, ptep, pte.pte);
1153}
1154
1027static inline void set_pte(pte_t *ptep, pte_t pte) 1155static inline void set_pte(pte_t *ptep, pte_t pte)
1028{ 1156{
1029 if (sizeof(pteval_t) > sizeof(long)) 1157 if (sizeof(pteval_t) > sizeof(long))
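
The start/commit pair above brackets a read-modify-write of a single pte so a hypervisor can batch or otherwise optimize the update. A hedged sketch of the intended pattern (the wrprotect step is just an example modification):

        pte_t pte = ptep_modify_prot_start(mm, addr, ptep);
        pte = pte_wrprotect(pte);
        ptep_modify_prot_commit(mm, addr, ptep, pte);
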
@@ -1252,9 +1380,52 @@ static inline void arch_flush_lazy_mmu_mode(void)
1252 } 1380 }
1253} 1381}
1254 1382
1383static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1384 unsigned long phys, pgprot_t flags)
1385{
1386 pv_mmu_ops.set_fixmap(idx, phys, flags);
1387}
1388
1255void _paravirt_nop(void); 1389void _paravirt_nop(void);
1256#define paravirt_nop ((void *)_paravirt_nop) 1390#define paravirt_nop ((void *)_paravirt_nop)
1257 1391
1392void paravirt_use_bytelocks(void);
1393
1394#ifdef CONFIG_SMP
1395
1396static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1397{
1398 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1399}
1400
1401static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1402{
1403 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1404}
1405
1406static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1407{
1408 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1409}
1410
1411static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
1412 unsigned long flags)
1413{
1414 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
1415}
1416
1417static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1418{
1419 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1420}
1421
1422static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1423{
1424 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1425}
1426
1427#endif
1428
1258/* These all sit in the .parainstructions section to tell us what to patch. */ 1429/* These all sit in the .parainstructions section to tell us what to patch. */
1259struct paravirt_patch_site { 1430struct paravirt_patch_site {
1260 u8 *instr; /* original instructions */ 1431 u8 *instr; /* original instructions */
@@ -1277,8 +1448,8 @@ extern struct paravirt_patch_site __parainstructions[],
1277 * caller saved registers but the argument parameter */ 1448 * caller saved registers but the argument parameter */
1278#define PV_SAVE_REGS "pushq %%rdi;" 1449#define PV_SAVE_REGS "pushq %%rdi;"
1279#define PV_RESTORE_REGS "popq %%rdi;" 1450#define PV_RESTORE_REGS "popq %%rdi;"
1280#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx" 1451#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1281#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx" 1452#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
1282#define PV_FLAGS_ARG "D" 1453#define PV_FLAGS_ARG "D"
1283#endif 1454#endif
1284 1455
@@ -1339,6 +1510,7 @@ static inline unsigned long __raw_local_irq_save(void)
1339 return f; 1510 return f;
1340} 1511}
1341 1512
1513

1342/* Make sure as little as possible of this mess escapes. */ 1514/* Make sure as little as possible of this mess escapes. */
1343#undef PARAVIRT_CALL 1515#undef PARAVIRT_CALL
1344#undef __PVOP_CALL 1516#undef __PVOP_CALL
@@ -1370,59 +1542,109 @@ static inline unsigned long __raw_local_irq_save(void)
1370 1542
1371 1543
1372#ifdef CONFIG_X86_64 1544#ifdef CONFIG_X86_64
1373#define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx 1545#define PV_SAVE_REGS \
1374#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax 1546 push %rax; \
1547 push %rcx; \
1548 push %rdx; \
1549 push %rsi; \
1550 push %rdi; \
1551 push %r8; \
1552 push %r9; \
1553 push %r10; \
1554 push %r11
1555#define PV_RESTORE_REGS \
1556 pop %r11; \
1557 pop %r10; \
1558 pop %r9; \
1559 pop %r8; \
1560 pop %rdi; \
1561 pop %rsi; \
1562 pop %rdx; \
1563 pop %rcx; \
1564 pop %rax
1375#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) 1565#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1376#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) 1566#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1567#define PARA_INDIRECT(addr) *addr(%rip)
1377#else 1568#else
1378#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx 1569#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
1379#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax 1570#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
1380#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) 1571#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1381#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) 1572#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1573#define PARA_INDIRECT(addr) *%cs:addr
1382#endif 1574#endif
1383 1575
1384#define INTERRUPT_RETURN \ 1576#define INTERRUPT_RETURN \
1385 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ 1577 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
1386 jmp *%cs:pv_cpu_ops+PV_CPU_iret) 1578 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1387 1579
1388#define DISABLE_INTERRUPTS(clobbers) \ 1580#define DISABLE_INTERRUPTS(clobbers) \
1389 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ 1581 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1390 PV_SAVE_REGS; \ 1582 PV_SAVE_REGS; \
1391 call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \ 1583 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1392 PV_RESTORE_REGS;) \ 1584 PV_RESTORE_REGS;) \
1393 1585
1394#define ENABLE_INTERRUPTS(clobbers) \ 1586#define ENABLE_INTERRUPTS(clobbers) \
1395 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ 1587 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1396 PV_SAVE_REGS; \ 1588 PV_SAVE_REGS; \
1397 call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \ 1589 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1398 PV_RESTORE_REGS;) 1590 PV_RESTORE_REGS;)
1399 1591
1400#define ENABLE_INTERRUPTS_SYSCALL_RET \ 1592#define USERGS_SYSRET32 \
1401 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\ 1593 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
1402 CLBR_NONE, \ 1594 CLBR_NONE, \
1403 jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret) 1595 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1404
1405 1596
1406#ifdef CONFIG_X86_32 1597#ifdef CONFIG_X86_32
1407#define GET_CR0_INTO_EAX \ 1598#define GET_CR0_INTO_EAX \
1408 push %ecx; push %edx; \ 1599 push %ecx; push %edx; \
1409 call *pv_cpu_ops+PV_CPU_read_cr0; \ 1600 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1410 pop %edx; pop %ecx 1601 pop %edx; pop %ecx
1411#else 1602
1603#define ENABLE_INTERRUPTS_SYSEXIT \
1604 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1605 CLBR_NONE, \
1606 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1607
1608
1609#else /* !CONFIG_X86_32 */
1610
1611/*
1612 * If swapgs is used while the userspace stack is still current,
1613 * there's no way to call a pvop. The PV replacement *must* be
1614 * inlined, or the swapgs instruction must be trapped and emulated.
1615 */
1616#define SWAPGS_UNSAFE_STACK \
1617 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1618 swapgs)
1619
1412#define SWAPGS \ 1620#define SWAPGS \
1413 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1621 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1414 PV_SAVE_REGS; \ 1622 PV_SAVE_REGS; \
1415 call *pv_cpu_ops+PV_CPU_swapgs; \ 1623 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1416 PV_RESTORE_REGS \ 1624 PV_RESTORE_REGS \
1417 ) 1625 )
1418 1626
1419#define GET_CR2_INTO_RCX \ 1627#define GET_CR2_INTO_RCX \
1420 call *pv_mmu_ops+PV_MMU_read_cr2; \ 1628 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1421 movq %rax, %rcx; \ 1629 movq %rax, %rcx; \
1422 xorq %rax, %rax; 1630 xorq %rax, %rax;
1423 1631
1424#endif 1632#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
1633 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1634 CLBR_NONE, \
1635 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1636
1637#define USERGS_SYSRET64 \
1638 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
1639 CLBR_NONE, \
1640 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1641
1642#define ENABLE_INTERRUPTS_SYSEXIT32 \
1643 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1644 CLBR_NONE, \
1645 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1646#endif /* CONFIG_X86_32 */
1425 1647
1426#endif /* __ASSEMBLY__ */ 1648#endif /* __ASSEMBLY__ */
1427#endif /* CONFIG_PARAVIRT */ 1649#endif /* CONFIG_PARAVIRT */
1428#endif /* __ASM_PARAVIRT_H */ 1650#endif /* ASM_X86__PARAVIRT_H */
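
Taken together, the paravirt.h changes add a fifth ops group, pv_lock_ops, so a guest can replace the raw spinlock primitives wholesale (the byte-lock opt-in goes through paravirt_use_bytelocks()). A hedged sketch of how a hypervisor backend might install its own implementations; the guest-side function names are illustrative, not from this diff:

        pv_lock_ops.spin_lock    = my_guest_spin_lock;      /* hypothetical */
        pv_lock_ops.spin_unlock  = my_guest_spin_unlock;    /* hypothetical */
        pv_lock_ops.spin_trylock = my_guest_spin_trylock;   /* hypothetical */
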
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 3c4ffeb467e9..2e3dda4dc3d9 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PARPORT_H 1#ifndef ASM_X86__PARPORT_H
2#define _ASM_X86_PARPORT_H 2#define ASM_X86__PARPORT_H
3 3
4static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); 4static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
5static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) 5static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
@@ -7,4 +7,4 @@ static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
7 return parport_pc_find_isa_ports(autoirq, autodma); 7 return parport_pc_find_isa_ports(autoirq, autodma);
8} 8}
9 9
10#endif /* _ASM_X86_PARPORT_H */ 10#endif /* ASM_X86__PARPORT_H */
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 88f60cc6a227..482c3e3f9879 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -1,14 +1,13 @@
1 1#ifndef ASM_X86__PAT_H
2#ifndef _ASM_PAT_H 2#define ASM_X86__PAT_H
3#define _ASM_PAT_H 1
4 3
5#include <linux/types.h> 4#include <linux/types.h>
6 5
7#ifdef CONFIG_X86_PAT 6#ifdef CONFIG_X86_PAT
8extern int pat_wc_enabled; 7extern int pat_enabled;
9extern void validate_pat_support(struct cpuinfo_x86 *c); 8extern void validate_pat_support(struct cpuinfo_x86 *c);
10#else 9#else
11static const int pat_wc_enabled = 0; 10static const int pat_enabled;
12static inline void validate_pat_support(struct cpuinfo_x86 *c) { } 11static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
13#endif 12#endif
14 13
@@ -20,5 +19,4 @@ extern int free_memtype(u64 start, u64 end);
20 19
21extern void pat_disable(char *reason); 20extern void pat_disable(char *reason);
22 21
23#endif 22#endif /* ASM_X86__PAT_H */
24
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 5b21485be573..da42be07b690 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -1,5 +1,5 @@
1#ifndef ASM_PCI_DIRECT_H 1#ifndef ASM_X86__PCI_DIRECT_H
2#define ASM_PCI_DIRECT_H 1 2#define ASM_X86__PCI_DIRECT_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
@@ -11,7 +11,11 @@ extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); 11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); 12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
13extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val); 13extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
14extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
14 15
15extern int early_pci_allowed(void); 16extern int early_pci_allowed(void);
16 17
17#endif 18extern unsigned int pci_early_dump_regs;
19extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
20extern void early_dump_pci_devices(void);
21#endif /* ASM_X86__PCI_DIRECT_H */
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index 30bbde0cb34b..602583192991 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -1,5 +1,5 @@
1#ifndef __x86_PCI_H 1#ifndef ASM_X86__PCI_H
2#define __x86_PCI_H 2#define ASM_X86__PCI_H
3 3
4#include <linux/mm.h> /* for struct page */ 4#include <linux/mm.h> /* for struct page */
5#include <linux/types.h> 5#include <linux/types.h>
@@ -18,6 +18,8 @@ struct pci_sysdata {
18#endif 18#endif
19}; 19};
20 20
21extern int pci_routeirq;
22
21/* scan a bus after allocating a pci_sysdata for it */ 23/* scan a bus after allocating a pci_sysdata for it */
22extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, 24extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
23 int node); 25 int node);
@@ -109,4 +111,4 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
109} 111}
110#endif 112#endif
111 113
112#endif 114#endif /* ASM_X86__PCI_H */
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h
index 8c4c3a0368e2..3f2288207c0c 100644
--- a/include/asm-x86/pci_32.h
+++ b/include/asm-x86/pci_32.h
@@ -1,5 +1,5 @@
1#ifndef __i386_PCI_H 1#ifndef ASM_X86__PCI_32_H
2#define __i386_PCI_H 2#define ASM_X86__PCI_32_H
3 3
4 4
5#ifdef __KERNEL__ 5#ifdef __KERNEL__
@@ -18,15 +18,17 @@ struct pci_dev;
18#define PCI_DMA_BUS_IS_PHYS (1) 18#define PCI_DMA_BUS_IS_PHYS (1)
19 19
20/* pci_unmap_{page,single} is a nop so... */ 20/* pci_unmap_{page,single} is a nop so... */
21#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) 21#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0];
22#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) 22#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0];
23#define pci_unmap_addr(PTR, ADDR_NAME) (0) 23#define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME)
24#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) 24#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
25#define pci_unmap_len(PTR, LEN_NAME) (0) 25 do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
26#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) 26#define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME)
27#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
28 do { break; } while (pci_unmap_len(PTR, LEN_NAME))
27 29
28 30
29#endif /* __KERNEL__ */ 31#endif /* __KERNEL__ */
30 32
31 33
32#endif /* __i386_PCI_H */ 34#endif /* ASM_X86__PCI_32_H */
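
The replacement pci_unmap_* macros above look odd but are deliberate: the zero-length array members occupy no storage, and sizeof((PTR)->NAME) is a compile-time constant 0, so the macros still cost nothing at runtime while now type-checking their pointer argument (the old (0)/empty versions silently accepted typos). A sketch of what a driver call expands to:

        /* pci_unmap_addr(ptr, mapping)  ->  sizeof((ptr)->mapping)          == 0 */
        /* pci_unmap_addr_set(p, m, v)   ->  do { break; } while (sizeof((p)->m)) */
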
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index f330234ffa5c..f72e12d5770e 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -1,5 +1,5 @@
1#ifndef __x8664_PCI_H 1#ifndef ASM_X86__PCI_64_H
2#define __x8664_PCI_H 2#define ASM_X86__PCI_64_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -63,4 +63,4 @@ extern void pci_iommu_alloc(void);
63 63
64#endif /* __KERNEL__ */ 64#endif /* __KERNEL__ */
65 65
66#endif /* __x8664_PCI_H */ 66#endif /* ASM_X86__PCI_64_H */
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index 101fb9e11954..80860afffbdb 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -1,5 +1,5 @@
1#ifndef X86_64_PDA_H 1#ifndef ASM_X86__PDA_H
2#define X86_64_PDA_H 2#define ASM_X86__PDA_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/stddef.h> 5#include <linux/stddef.h>
@@ -22,6 +22,8 @@ struct x8664_pda {
22 offset 40!!! */ 22 offset 40!!! */
23#endif 23#endif
24 char *irqstackptr; 24 char *irqstackptr;
25 short nodenumber; /* number of current node (32k max) */
26 short in_bootmem; /* pda lives in bootmem */
25 unsigned int __softirq_pending; 27 unsigned int __softirq_pending;
26 unsigned int __nmi_count; /* number of NMIs on this CPU */ 28 unsigned int __nmi_count; /* number of NMIs on this CPU */
27 short mmu_state; 29 short mmu_state;
@@ -37,8 +39,7 @@ struct x8664_pda {
37 unsigned irq_spurious_count; 39 unsigned irq_spurious_count;
38} ____cacheline_aligned_in_smp; 40} ____cacheline_aligned_in_smp;
39 41
40extern struct x8664_pda *_cpu_pda[]; 42extern struct x8664_pda **_cpu_pda;
41extern struct x8664_pda boot_cpu_pda[];
42extern void pda_init(int); 43extern void pda_init(int);
43 44
44#define cpu_pda(i) (_cpu_pda[i]) 45#define cpu_pda(i) (_cpu_pda[i])
@@ -133,4 +134,4 @@ do { \
133 134
134#define PDA_STACKOFFSET (5*8) 135#define PDA_STACKOFFSET (5*8)
135 136
136#endif 137#endif /* ASM_X86__PDA_H */
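
Dropping boot_cpu_pda[] and making _cpu_pda a pointer means the pda table no longer has to be an NR_CPUS-sized array fixed at compile time; it can be sized at boot (e.g. from bootmem, hence the new in_bootmem flag) once the real CPU count is known. A hedged sketch of what the setup side can now do:

        /* illustrative, not the actual allocation code in this series: */
        _cpu_pda = alloc_bootmem(nr_cpu_ids * sizeof(struct x8664_pda *));
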
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 736fc3bb8e1e..e10a1d0678cf 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PERCPU_H_ 1#ifndef ASM_X86__PERCPU_H
2#define _ASM_X86_PERCPU_H_ 2#define ASM_X86__PERCPU_H
3 3
4#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -22,6 +22,32 @@
22 22
23DECLARE_PER_CPU(struct x8664_pda, pda); 23DECLARE_PER_CPU(struct x8664_pda, pda);
24 24
25/*
26 * These are supposed to be implemented as a single instruction which
27 * operates on the per-cpu data base segment. x86-64 doesn't have
28 * that yet, so this is a fairly inefficient workaround for the
29 * meantime. The single instruction is atomic with respect to
30 * preemption and interrupts, so matching it exactly would mean
31 * disabling both here. However, because these accessors can be
32 * used from within interrupt-disable/enable pairs, we can't toggle
33 * interrupts; disabling preemption alone is enough.
34 */
35#define x86_read_percpu(var) \
36 ({ \
37 typeof(per_cpu_var(var)) __tmp; \
38 preempt_disable(); \
39 __tmp = __get_cpu_var(var); \
40 preempt_enable(); \
41 __tmp; \
42 })
43
44#define x86_write_percpu(var, val) \
45 do { \
46 preempt_disable(); \
47 __get_cpu_var(var) = (val); \
48 preempt_enable(); \
49 } while(0)
50
25#else /* CONFIG_X86_64 */ 51#else /* CONFIG_X86_64 */
26 52
27#ifdef __ASSEMBLY__ 53#ifdef __ASSEMBLY__
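
These fallbacks give x86-64 the same x86_read_percpu()/x86_write_percpu() interface that 32-bit already provides via segment-relative single instructions, at the cost of a preempt_disable/enable pair. A hedged usage sketch ('nr_widgets' is a made-up per-cpu variable):

        DEFINE_PER_CPU(int, nr_widgets);

        x86_write_percpu(nr_widgets, 0);
        int n = x86_read_percpu(nr_widgets);    /* preemption-safe snapshot */
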
@@ -143,4 +169,50 @@ do { \
143#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) 169#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
144#endif /* !__ASSEMBLY__ */ 170#endif /* !__ASSEMBLY__ */
145#endif /* !CONFIG_X86_64 */ 171#endif /* !CONFIG_X86_64 */
146#endif /* _ASM_X86_PERCPU_H_ */ 172
173#ifdef CONFIG_SMP
174
175/*
176 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
177 * variables that are initialized and accessed before there are per_cpu
178 * areas allocated.
179 */
180
181#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
182 DEFINE_PER_CPU(_type, _name) = _initvalue; \
183 __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
184 { [0 ... NR_CPUS-1] = _initvalue }; \
185 __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
186
187#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
188 EXPORT_PER_CPU_SYMBOL(_name)
189
190#define DECLARE_EARLY_PER_CPU(_type, _name) \
191 DECLARE_PER_CPU(_type, _name); \
192 extern __typeof__(_type) *_name##_early_ptr; \
193 extern __typeof__(_type) _name##_early_map[]
194
195#define early_per_cpu_ptr(_name) (_name##_early_ptr)
196#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
197#define early_per_cpu(_name, _cpu) \
198 (early_per_cpu_ptr(_name) ? \
199 early_per_cpu_ptr(_name)[_cpu] : \
200 per_cpu(_name, _cpu))
201
202#else /* !CONFIG_SMP */
203#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
204 DEFINE_PER_CPU(_type, _name) = _initvalue
205
206#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
207 EXPORT_PER_CPU_SYMBOL(_name)
208
209#define DECLARE_EARLY_PER_CPU(_type, _name) \
210 DECLARE_PER_CPU(_type, _name)
211
212#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
213#define early_per_cpu_ptr(_name) NULL
214/* no early_per_cpu_map() */
215
216#endif /* !CONFIG_SMP */
217
218#endif /* ASM_X86__PERCPU_H */
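
The early-per-cpu machinery keeps an NR_CPUS-sized __initdata map alive until the real per-cpu areas exist; setup code is then expected to clear the _early_ptr so early_per_cpu() falls through to per_cpu(). A hedged usage sketch (the variable name is illustrative):

        DEFINE_EARLY_PER_CPU(u16, cpu_to_apicid, BAD_APICID);

        /* the same call works before and after per-cpu areas are set up: */
        u16 apicid = early_per_cpu(cpu_to_apicid, some_cpu);
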
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index 91e4641f3f31..3cd23adedae8 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -1,13 +1,17 @@
1#ifndef _ASM_X86_PGALLOC_H 1#ifndef ASM_X86__PGALLOC_H
2#define _ASM_X86_PGALLOC_H 2#define ASM_X86__PGALLOC_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */ 5#include <linux/mm.h> /* for struct page */
6#include <linux/pagemap.h> 6#include <linux/pagemap.h>
7 7
8static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
9
8#ifdef CONFIG_PARAVIRT 10#ifdef CONFIG_PARAVIRT
9#include <asm/paravirt.h> 11#include <asm/paravirt.h>
10#else 12#else
13#define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)
14static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
11static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} 15static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
12static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} 16static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
13static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn, 17static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
@@ -107,4 +111,4 @@ extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
107#endif /* PAGETABLE_LEVELS > 3 */ 111#endif /* PAGETABLE_LEVELS > 3 */
108#endif /* PAGETABLE_LEVELS > 2 */ 112#endif /* PAGETABLE_LEVELS > 2 */
109 113
110#endif /* _ASM_X86_PGALLOC_H */ 114#endif /* ASM_X86__PGALLOC_H */
diff --git a/include/asm-x86/pgtable-2level-defs.h b/include/asm-x86/pgtable-2level-defs.h
index 0f71c9f13da4..7ec48f4e5347 100644
--- a/include/asm-x86/pgtable-2level-defs.h
+++ b/include/asm-x86/pgtable-2level-defs.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_2LEVEL_DEFS_H 1#ifndef ASM_X86__PGTABLE_2LEVEL_DEFS_H
2#define _I386_PGTABLE_2LEVEL_DEFS_H 2#define ASM_X86__PGTABLE_2LEVEL_DEFS_H
3 3
4#define SHARED_KERNEL_PMD 0 4#define SHARED_KERNEL_PMD 0
5 5
@@ -17,4 +17,4 @@
17 17
18#define PTRS_PER_PTE 1024 18#define PTRS_PER_PTE 1024
19 19
20#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ 20#endif /* ASM_X86__PGTABLE_2LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 46bc52c0eae1..81762081dcd8 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_2LEVEL_H 1#ifndef ASM_X86__PGTABLE_2LEVEL_H
2#define _I386_PGTABLE_2LEVEL_H 2#define ASM_X86__PGTABLE_2LEVEL_H
3 3
4#define pte_ERROR(e) \ 4#define pte_ERROR(e) \
5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) 5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
54#endif 54#endif
55 55
56#define pte_page(x) pfn_to_page(pte_pfn(x))
57#define pte_none(x) (!(x).pte_low) 56#define pte_none(x) (!(x).pte_low)
58#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
59 57
60/* 58/*
61 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset 59 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
@@ -78,4 +76,4 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
78#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) 76#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
79#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) 77#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
80 78
81#endif /* _I386_PGTABLE_2LEVEL_H */ 79#endif /* ASM_X86__PGTABLE_2LEVEL_H */
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h
index 448ac9516314..c05fe6ff3720 100644
--- a/include/asm-x86/pgtable-3level-defs.h
+++ b/include/asm-x86/pgtable-3level-defs.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_3LEVEL_DEFS_H 1#ifndef ASM_X86__PGTABLE_3LEVEL_DEFS_H
2#define _I386_PGTABLE_3LEVEL_DEFS_H 2#define ASM_X86__PGTABLE_3LEVEL_DEFS_H
3 3
4#ifdef CONFIG_PARAVIRT 4#ifdef CONFIG_PARAVIRT
5#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) 5#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
@@ -25,4 +25,4 @@
25 */ 25 */
26#define PTRS_PER_PTE 512 26#define PTRS_PER_PTE 512
27 27
28#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ 28#endif /* ASM_X86__PGTABLE_3LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index c93dbb6c2624..75f4276b5ddb 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_3LEVEL_H 1#ifndef ASM_X86__PGTABLE_3LEVEL_H
2#define _I386_PGTABLE_3LEVEL_H 2#define ASM_X86__PGTABLE_3LEVEL_H
3 3
4/* 4/*
5 * Intel Physical Address Extension (PAE) Mode - three-level page 5 * Intel Physical Address Extension (PAE) Mode - three-level page
@@ -25,7 +25,7 @@ static inline int pud_none(pud_t pud)
25 25
26static inline int pud_bad(pud_t pud) 26static inline int pud_bad(pud_t pud)
27{ 27{
28 return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; 28 return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
29} 29}
30 30
31static inline int pud_present(pud_t pud) 31static inline int pud_present(pud_t pud)
@@ -120,9 +120,9 @@ static inline void pud_clear(pud_t *pudp)
120 write_cr3(pgd); 120 write_cr3(pgd);
121} 121}
122 122
123#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_MASK)) 123#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
124 124
125#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_MASK)) 125#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
126 126
127 127
128/* Find an entry in the second-level page table.. */ 128/* Find an entry in the second-level page table.. */
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
151 return a.pte_low == b.pte_low && a.pte_high == b.pte_high; 151 return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
152} 152}
153 153
154#define pte_page(x) pfn_to_page(pte_pfn(x))
155
156static inline int pte_none(pte_t pte) 154static inline int pte_none(pte_t pte)
157{ 155{
158 return !pte.pte_low && !pte.pte_high; 156 return !pte.pte_low && !pte.pte_high;
159} 157}
160 158
161static inline unsigned long pte_pfn(pte_t pte)
162{
163 return (pte_val(pte) & PTE_MASK) >> PAGE_SHIFT;
164}
165
166/* 159/*
167 * Bits 0, 6 and 7 are taken in the low part of the pte, 160 * Bits 0, 6 and 7 are taken in the low part of the pte,
168 * put the 32 bits of offset into the high part. 161 * put the 32 bits of offset into the high part.
@@ -179,4 +172,4 @@ static inline unsigned long pte_pfn(pte_t pte)
179#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) 172#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
180#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) 173#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
181 174
182#endif /* _I386_PGTABLE_3LEVEL_H */ 175#endif /* ASM_X86__PGTABLE_3LEVEL_H */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 97c271b2910b..ed932453ef26 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PGTABLE_H 1#ifndef ASM_X86__PGTABLE_H
2#define _ASM_X86_PGTABLE_H 2#define ASM_X86__PGTABLE_H
3 3
4#define FIRST_USER_ADDRESS 0 4#define FIRST_USER_ADDRESS 0
5 5
@@ -18,32 +18,32 @@
18#define _PAGE_BIT_UNUSED2 10 18#define _PAGE_BIT_UNUSED2 10
19#define _PAGE_BIT_UNUSED3 11 19#define _PAGE_BIT_UNUSED3 11
20#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ 20#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
21#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
22#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
21#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ 23#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
22 24
23/* 25#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
24 * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a 26#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
25 * sign-extended value on 32-bit with all 1's in the upper word, 27#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
26 * which preserves the upper pte values on 64-bit ptes: 28#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
27 */ 29#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
28#define _PAGE_PRESENT (_AC(1, L)<<_PAGE_BIT_PRESENT) 30#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
29#define _PAGE_RW (_AC(1, L)<<_PAGE_BIT_RW) 31#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
30#define _PAGE_USER (_AC(1, L)<<_PAGE_BIT_USER) 32#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
31#define _PAGE_PWT (_AC(1, L)<<_PAGE_BIT_PWT) 33#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
32#define _PAGE_PCD (_AC(1, L)<<_PAGE_BIT_PCD) 34#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
33#define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED) 35#define _PAGE_UNUSED2 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
34#define _PAGE_DIRTY (_AC(1, L)<<_PAGE_BIT_DIRTY) 36#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
35#define _PAGE_PSE (_AC(1, L)<<_PAGE_BIT_PSE) /* 2MB page */ 37#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
36#define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */ 38#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
37#define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1) 39#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
38#define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2) 40#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
39#define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3) 41#define __HAVE_ARCH_PTE_SPECIAL
40#define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT)
41#define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
42 42
43#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 43#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
44#define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX) 44#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
45#else 45#else
46#define _PAGE_NX 0 46#define _PAGE_NX (_AT(pteval_t, 0))
47#endif 47#endif
48 48
49/* If _PAGE_PRESENT is clear, we use these: */ 49/* If _PAGE_PRESENT is clear, we use these: */
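
The switch from _AC(1, L) to _AT(pteval_t, 1) above replaces the sign-extension trick described in the deleted comment with an explicit cast: _AT(T, X) from <linux/const.h> is ((T)(X)) in C and plain X in assembly, so every flag constant is already pte-width on both PAE and 64-bit. This is also why later hunks can drop the explicit (pteval_t) casts in pte_mkclean() and friends. Conceptually:

        /* _AT(pteval_t, 1) << _PAGE_BIT_NX  ==  ((pteval_t)1) << 63        */
        /* safe because pteval_t is 64-bit wherever _PAGE_NX is non-zero    */
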
@@ -58,8 +58,8 @@
58 _PAGE_DIRTY) 58 _PAGE_DIRTY)
59 59
60/* Set of bits not changed in pte_modify */ 60/* Set of bits not changed in pte_modify */
61#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_PCD | _PAGE_PWT | \ 61#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
62 _PAGE_ACCESSED | _PAGE_DIRTY) 62 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
63 63
64#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) 64#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
65#define _PAGE_CACHE_WB (0) 65#define _PAGE_CACHE_WB (0)
@@ -83,19 +83,9 @@
83#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ 83#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
84 _PAGE_ACCESSED) 84 _PAGE_ACCESSED)
85 85
86#ifdef CONFIG_X86_32
87#define _PAGE_KERNEL_EXEC \
88 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
89#define _PAGE_KERNEL (_PAGE_KERNEL_EXEC | _PAGE_NX)
90
91#ifndef __ASSEMBLY__
92extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
93#endif /* __ASSEMBLY__ */
94#else
95#define __PAGE_KERNEL_EXEC \ 86#define __PAGE_KERNEL_EXEC \
96 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) 87 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
97#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) 88#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
98#endif
99 89
100#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) 90#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
101#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) 91#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
@@ -106,26 +96,22 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
106#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) 96#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
107#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) 97#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
108#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) 98#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
99#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
109#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) 100#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
110 101
111#ifdef CONFIG_X86_32 102#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
112# define MAKE_GLOBAL(x) __pgprot((x)) 103#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
113#else 104#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
114# define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL) 105#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
115#endif 106#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
116 107#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
117#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) 108#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
118#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) 109#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
119#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) 110#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
120#define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) 111#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
121#define PAGE_KERNEL_WC MAKE_GLOBAL(__PAGE_KERNEL_WC) 112#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
122#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) 113#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
123#define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS) 114#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
124#define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
125#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
126#define PAGE_KERNEL_LARGE_EXEC MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
127#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
128#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
129 115
130/* xwr */ 116/* xwr */
131#define __P000 PAGE_NONE 117#define __P000 PAGE_NONE
@@ -146,6 +132,17 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
146#define __S110 PAGE_SHARED_EXEC 132#define __S110 PAGE_SHARED_EXEC
147#define __S111 PAGE_SHARED_EXEC 133#define __S111 PAGE_SHARED_EXEC
148 134
135/*
136 * early identity-mapping pte attribute macros.
137 */
138#ifdef CONFIG_X86_64
139#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
140#else
141#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
142#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
143#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
144#endif
145
149#ifndef __ASSEMBLY__ 146#ifndef __ASSEMBLY__
150 147
151/* 148/*
@@ -164,44 +161,51 @@ extern struct list_head pgd_list;
164 */ 161 */
165static inline int pte_dirty(pte_t pte) 162static inline int pte_dirty(pte_t pte)
166{ 163{
167 return pte_val(pte) & _PAGE_DIRTY; 164 return pte_flags(pte) & _PAGE_DIRTY;
168} 165}
169 166
170static inline int pte_young(pte_t pte) 167static inline int pte_young(pte_t pte)
171{ 168{
172 return pte_val(pte) & _PAGE_ACCESSED; 169 return pte_flags(pte) & _PAGE_ACCESSED;
173} 170}
174 171
175static inline int pte_write(pte_t pte) 172static inline int pte_write(pte_t pte)
176{ 173{
177 return pte_val(pte) & _PAGE_RW; 174 return pte_flags(pte) & _PAGE_RW;
178} 175}
179 176
180static inline int pte_file(pte_t pte) 177static inline int pte_file(pte_t pte)
181{ 178{
182 return pte_val(pte) & _PAGE_FILE; 179 return pte_flags(pte) & _PAGE_FILE;
183} 180}
184 181
185static inline int pte_huge(pte_t pte) 182static inline int pte_huge(pte_t pte)
186{ 183{
187 return pte_val(pte) & _PAGE_PSE; 184 return pte_flags(pte) & _PAGE_PSE;
188} 185}
189 186
190static inline int pte_global(pte_t pte) 187static inline int pte_global(pte_t pte)
191{ 188{
192 return pte_val(pte) & _PAGE_GLOBAL; 189 return pte_flags(pte) & _PAGE_GLOBAL;
193} 190}
194 191
195static inline int pte_exec(pte_t pte) 192static inline int pte_exec(pte_t pte)
196{ 193{
197 return !(pte_val(pte) & _PAGE_NX); 194 return !(pte_flags(pte) & _PAGE_NX);
198} 195}
199 196
200static inline int pte_special(pte_t pte) 197static inline int pte_special(pte_t pte)
201{ 198{
202 return 0; 199 return pte_val(pte) & _PAGE_SPECIAL;
200}
201
202static inline unsigned long pte_pfn(pte_t pte)
203{
204 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
203} 205}
204 206
207#define pte_page(pte) pfn_to_page(pte_pfn(pte))
208
205static inline int pmd_large(pmd_t pte) 209static inline int pmd_large(pmd_t pte)
206{ 210{
207 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == 211 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -210,22 +214,22 @@ static inline int pmd_large(pmd_t pte)
210 214
211static inline pte_t pte_mkclean(pte_t pte) 215static inline pte_t pte_mkclean(pte_t pte)
212{ 216{
213 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); 217 return __pte(pte_val(pte) & ~_PAGE_DIRTY);
214} 218}
215 219
216static inline pte_t pte_mkold(pte_t pte) 220static inline pte_t pte_mkold(pte_t pte)
217{ 221{
218 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); 222 return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
219} 223}
220 224
221static inline pte_t pte_wrprotect(pte_t pte) 225static inline pte_t pte_wrprotect(pte_t pte)
222{ 226{
223 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); 227 return __pte(pte_val(pte) & ~_PAGE_RW);
224} 228}
225 229
226static inline pte_t pte_mkexec(pte_t pte) 230static inline pte_t pte_mkexec(pte_t pte)
227{ 231{
228 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); 232 return __pte(pte_val(pte) & ~_PAGE_NX);
229} 233}
230 234
231static inline pte_t pte_mkdirty(pte_t pte) 235static inline pte_t pte_mkdirty(pte_t pte)
@@ -250,7 +254,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
250 254
251static inline pte_t pte_clrhuge(pte_t pte) 255static inline pte_t pte_clrhuge(pte_t pte)
252{ 256{
253 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); 257 return __pte(pte_val(pte) & ~_PAGE_PSE);
254} 258}
255 259
256static inline pte_t pte_mkglobal(pte_t pte) 260static inline pte_t pte_mkglobal(pte_t pte)
@@ -260,12 +264,12 @@ static inline pte_t pte_mkglobal(pte_t pte)
260 264
261static inline pte_t pte_clrglobal(pte_t pte) 265static inline pte_t pte_clrglobal(pte_t pte)
262{ 266{
263 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); 267 return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
264} 268}
265 269
266static inline pte_t pte_mkspecial(pte_t pte) 270static inline pte_t pte_mkspecial(pte_t pte)
267{ 271{
268 return pte; 272 return __pte(pte_val(pte) | _PAGE_SPECIAL);
269} 273}
270 274
271extern pteval_t __supported_pte_mask; 275extern pteval_t __supported_pte_mask;
@@ -305,7 +309,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
305 return __pgprot(preservebits | addbits); 309 return __pgprot(preservebits | addbits);
306} 310}
307 311
308#define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK) 312#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
309 313
310#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) 314#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
311 315
@@ -318,6 +322,19 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
318 unsigned long size, pgprot_t *vma_prot); 322 unsigned long size, pgprot_t *vma_prot);
319#endif 323#endif
320 324
325/* Install a pte for a particular vaddr in kernel space. */
326void set_pte_vaddr(unsigned long vaddr, pte_t pte);
327
328#ifdef CONFIG_X86_32
329extern void native_pagetable_setup_start(pgd_t *base);
330extern void native_pagetable_setup_done(pgd_t *base);
331#else
332static inline void native_pagetable_setup_start(pgd_t *base) {}
333static inline void native_pagetable_setup_done(pgd_t *base) {}
334#endif
335
336extern int arch_report_meminfo(char *page);
337
321#ifdef CONFIG_PARAVIRT 338#ifdef CONFIG_PARAVIRT
322#include <asm/paravirt.h> 339#include <asm/paravirt.h>
323#else /* !CONFIG_PARAVIRT */ 340#else /* !CONFIG_PARAVIRT */
@@ -349,6 +366,16 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
349 366
350#define pte_update(mm, addr, ptep) do { } while (0) 367#define pte_update(mm, addr, ptep) do { } while (0)
351#define pte_update_defer(mm, addr, ptep) do { } while (0) 368#define pte_update_defer(mm, addr, ptep) do { } while (0)
369
370static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
371{
372 native_pagetable_setup_start(base);
373}
374
375static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
376{
377 native_pagetable_setup_done(base);
378}
352#endif /* CONFIG_PARAVIRT */ 379#endif /* CONFIG_PARAVIRT */
353 380
354#endif /* __ASSEMBLY__ */ 381#endif /* __ASSEMBLY__ */
@@ -359,6 +386,26 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
359# include "pgtable_64.h" 386# include "pgtable_64.h"
360#endif 387#endif
361 388
389/*
390 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
391 *
392 * this macro returns the index of the entry in the pgd page which would
393 * control the given virtual address
394 */
395#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
396
397/*
398 * pgd_offset() returns a (pgd_t *)
399 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
400 */
401#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
402/*
403 * a shortcut which implies the use of the kernel's pgd, instead
404 * of a process's
405 */
406#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
407
408
362#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) 409#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
363#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) 410#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
364 411
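
These page-directory helpers were lifted out of the 32- and 64-bit headers into the shared one. A worked example on x86-64 (PGDIR_SHIFT == 39, PTRS_PER_PGD == 512, and assuming the 0xffff880000000000 direct-map base used around this time):

        /* pgd_index(0xffff880000000000)
         *   = (0xffff880000000000 >> 39) & 511
         *   = 272
         * so pgd_offset_k(0xffff880000000000) == init_mm.pgd + 272,
         * which is also KERNEL_PGD_BOUNDARY for that PAGE_OFFSET.
         */
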
@@ -369,8 +416,15 @@ enum {
369 PG_LEVEL_4K, 416 PG_LEVEL_4K,
370 PG_LEVEL_2M, 417 PG_LEVEL_2M,
371 PG_LEVEL_1G, 418 PG_LEVEL_1G,
419 PG_LEVEL_NUM
372}; 420};
373 421
422#ifdef CONFIG_PROC_FS
423extern void update_page_count(int level, unsigned long pages);
424#else
425static inline void update_page_count(int level, unsigned long pages) { }
426#endif
427
374/* 428/*
375 * Helper function that returns the kernel pagetable entry controlling 429 * Helper function that returns the kernel pagetable entry controlling
376 * the virtual address 'address'. NULL means no pagetable entry present. 430 * the virtual address 'address'. NULL means no pagetable entry present.
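
The helper that comment refers to is lookup_address(); together with the new PG_LEVEL_NUM and update_page_count() bits it feeds the direct-map statistics reported through arch_report_meminfo(). A hedged usage sketch (_text is just an example kernel symbol):

        unsigned int level;
        pte_t *kpte = lookup_address((unsigned long)_text, &level);

        if (kpte && level == PG_LEVEL_2M)
                ;       /* kernel text is mapped with a 2MB page */
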
@@ -420,6 +474,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
420 * race with other CPU's that might be updating the dirty 474 * race with other CPU's that might be updating the dirty
421 * bit at the same time. 475 * bit at the same time.
422 */ 476 */
477struct vm_area_struct;
478
423#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 479#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
424extern int ptep_set_access_flags(struct vm_area_struct *vma, 480extern int ptep_set_access_flags(struct vm_area_struct *vma,
425 unsigned long address, pte_t *ptep, 481 unsigned long address, pte_t *ptep,
@@ -487,4 +543,4 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
487#include <asm-generic/pgtable.h> 543#include <asm-generic/pgtable.h>
488#endif /* __ASSEMBLY__ */ 544#endif /* __ASSEMBLY__ */
489 545
490#endif /* _ASM_X86_PGTABLE_H */ 546#endif /* ASM_X86__PGTABLE_H */
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 32ca03109a4c..8de702dc7d62 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_H 1#ifndef ASM_X86__PGTABLE_32_H
2#define _I386_PGTABLE_H 2#define ASM_X86__PGTABLE_32_H
3 3
4 4
5/* 5/*
@@ -31,6 +31,7 @@ static inline void pgtable_cache_init(void) { }
31static inline void check_pgt_cache(void) { } 31static inline void check_pgt_cache(void) { }
32void paging_init(void); 32void paging_init(void);
33 33
34extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
34 35
35/* 36/*
36 * The Linux x86 paging architecture is 'compile-time dual-mode', it 37 * The Linux x86 paging architecture is 'compile-time dual-mode', it
@@ -56,8 +57,7 @@ void paging_init(void);
56 * area for the same reason. ;) 57 * area for the same reason. ;)
57 */ 58 */
58#define VMALLOC_OFFSET (8 * 1024 * 1024) 59#define VMALLOC_OFFSET (8 * 1024 * 1024)
59#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \ 60#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
60 & ~(VMALLOC_OFFSET - 1))
61#ifdef CONFIG_X86_PAE 61#ifdef CONFIG_X86_PAE
62#define LAST_PKMAP 512 62#define LAST_PKMAP 512
63#else 63#else
@@ -73,6 +73,8 @@ void paging_init(void);
73# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) 73# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
74#endif 74#endif
75 75
76#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
77
76/* 78/*
77 * Define this if things work differently on an i386 and an i486: 79 * Define this if things work differently on an i386 and an i486:
78 * it will (on an i486) warn about kernel memory accesses that are 80 * it will (on an i486) warn about kernel memory accesses that are
@@ -88,7 +90,7 @@ extern unsigned long pg0[];
88/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ 90/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
89#define pmd_none(x) (!(unsigned long)pmd_val((x))) 91#define pmd_none(x) (!(unsigned long)pmd_val((x)))
90#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) 92#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
91#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) 93#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
92 94
93#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) 95#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
94 96
@@ -113,26 +115,6 @@ extern unsigned long pg0[];
113 */ 115 */
114#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 116#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
115 117
116/*
117 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
118 *
119 * this macro returns the index of the entry in the pgd page which would
120 * control the given virtual address
121 */
122#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
123#define pgd_index_k(addr) pgd_index((addr))
124
125/*
126 * pgd_offset() returns a (pgd_t *)
127 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
128 */
129#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
130
131/*
132 * a shortcut which implies the use of the kernel's pgd, instead
133 * of a process's
134 */
135#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
136 118
137static inline int pud_large(pud_t pud) { return 0; } 119static inline int pud_large(pud_t pud) { return 0; }
138 120
@@ -159,7 +141,7 @@ static inline int pud_large(pud_t pud) { return 0; }
159#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) 141#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
160 142
161#define pmd_page_vaddr(pmd) \ 143#define pmd_page_vaddr(pmd) \
162 ((unsigned long)__va(pmd_val((pmd)) & PTE_MASK)) 144 ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
163 145
164#if defined(CONFIG_HIGHPTE) 146#if defined(CONFIG_HIGHPTE)
165#define pte_offset_map(dir, address) \ 147#define pte_offset_map(dir, address) \
@@ -191,21 +173,6 @@ do { \
191 */ 173 */
192#define update_mmu_cache(vma, address, pte) do { } while (0) 174#define update_mmu_cache(vma, address, pte) do { } while (0)
193 175
194extern void native_pagetable_setup_start(pgd_t *base);
195extern void native_pagetable_setup_done(pgd_t *base);
196
197#ifndef CONFIG_PARAVIRT
198static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
199{
200 native_pagetable_setup_start(base);
201}
202
203static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
204{
205 native_pagetable_setup_done(base);
206}
207#endif /* !CONFIG_PARAVIRT */
208
209#endif /* !__ASSEMBLY__ */ 176#endif /* !__ASSEMBLY__ */
210 177
211/* 178/*
@@ -221,4 +188,4 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
221#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 188#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
222 remap_pfn_range(vma, vaddr, pfn, size, prot) 189 remap_pfn_range(vma, vaddr, pfn, size, prot)
223 190
224#endif /* _I386_PGTABLE_H */ 191#endif /* ASM_X86__PGTABLE_32_H */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 1cc50d22d735..fde9770e53d1 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_PGTABLE_H 1#ifndef ASM_X86__PGTABLE_64_H
2#define _X86_64_PGTABLE_H 2#define ASM_X86__PGTABLE_64_H
3 3
4#include <linux/const.h> 4#include <linux/const.h>
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
@@ -16,6 +16,8 @@
16extern pud_t level3_kernel_pgt[512]; 16extern pud_t level3_kernel_pgt[512];
17extern pud_t level3_ident_pgt[512]; 17extern pud_t level3_ident_pgt[512];
18extern pmd_t level2_kernel_pgt[512]; 18extern pmd_t level2_kernel_pgt[512];
19extern pmd_t level2_fixmap_pgt[512];
20extern pmd_t level2_ident_pgt[512];
19extern pgd_t init_level4_pgt[]; 21extern pgd_t init_level4_pgt[];
20 22
21#define swapper_pg_dir init_level4_pgt 23#define swapper_pg_dir init_level4_pgt
@@ -70,6 +72,9 @@ extern void paging_init(void);
70 72
71struct mm_struct; 73struct mm_struct;
72 74
75void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
76
77
73static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, 78static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
74 pte_t *ptep) 79 pte_t *ptep)
75{ 80{
@@ -146,32 +151,30 @@ static inline void native_pgd_clear(pgd_t *pgd)
146#define VMALLOC_END _AC(0xffffe1ffffffffff, UL) 151#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
147#define VMEMMAP_START _AC(0xffffe20000000000, UL) 152#define VMEMMAP_START _AC(0xffffe20000000000, UL)
148#define MODULES_VADDR _AC(0xffffffffa0000000, UL) 153#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
149#define MODULES_END _AC(0xfffffffffff00000, UL) 154#define MODULES_END _AC(0xffffffffff000000, UL)
150#define MODULES_LEN (MODULES_END - MODULES_VADDR) 155#define MODULES_LEN (MODULES_END - MODULES_VADDR)
151 156
152#ifndef __ASSEMBLY__ 157#ifndef __ASSEMBLY__
153 158
154static inline int pgd_bad(pgd_t pgd) 159static inline int pgd_bad(pgd_t pgd)
155{ 160{
156 return (pgd_val(pgd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; 161 return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
157} 162}
158 163
159static inline int pud_bad(pud_t pud) 164static inline int pud_bad(pud_t pud)
160{ 165{
161 return (pud_val(pud) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; 166 return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
162} 167}
163 168
164static inline int pmd_bad(pmd_t pmd) 169static inline int pmd_bad(pmd_t pmd)
165{ 170{
166 return (pmd_val(pmd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; 171 return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
167} 172}
168 173
169#define pte_none(x) (!pte_val((x))) 174#define pte_none(x) (!pte_val((x)))
170#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) 175#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
171 176
172#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ 177#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
173#define pte_page(x) pfn_to_page(pte_pfn((x)))
174#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
175 178
176/* 179/*
177 * Macro to mark a page protection value as "uncacheable". 180 * Macro to mark a page protection value as "uncacheable".
@@ -188,14 +191,11 @@ static inline int pmd_bad(pmd_t pmd)
188 * Level 4 access. 191 * Level 4 access.
189 */ 192 */
190#define pgd_page_vaddr(pgd) \ 193#define pgd_page_vaddr(pgd) \
191 ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK)) 194 ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
192#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) 195#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
193#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
194#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
195#define pgd_offset_k(address) (init_level4_pgt + pgd_index((address)))
196#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) 196#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
197static inline int pgd_large(pgd_t pgd) { return 0; } 197static inline int pgd_large(pgd_t pgd) { return 0; }
198#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) 198#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
199 199
200/* PUD - Level3 access */ 200/* PUD - Level3 access */
201/* to find an entry in a page-table-directory. */ 201/* to find an entry in a page-table-directory. */
@@ -214,7 +214,7 @@ static inline int pud_large(pud_t pte)
214} 214}
215 215
216/* PMD - Level 2 access */ 216/* PMD - Level 2 access */
217#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK)) 217#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
218#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) 218#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
219 219
220#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) 220#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
@@ -282,4 +282,4 @@ extern void cleanup_highmap(void);
282#define __HAVE_ARCH_PTE_SAME 282#define __HAVE_ARCH_PTE_SAME
283#endif /* !__ASSEMBLY__ */ 283#endif /* !__ASSEMBLY__ */
284 284
285#endif /* _X86_64_PGTABLE_H */ 285#endif /* ASM_X86__PGTABLE_64_H */
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
index b031efda37ec..70cf2bb05939 100644
--- a/include/asm-x86/posix_types_32.h
+++ b/include/asm-x86/posix_types_32.h
@@ -1,5 +1,5 @@
1#ifndef __ARCH_I386_POSIX_TYPES_H 1#ifndef ASM_X86__POSIX_TYPES_32_H
2#define __ARCH_I386_POSIX_TYPES_H 2#define ASM_X86__POSIX_TYPES_32_H
3 3
4/* 4/*
5 * This file is generally used by user-level software, so you need to 5 * This file is generally used by user-level software, so you need to
@@ -82,4 +82,4 @@ do { \
82 82
83#endif /* defined(__KERNEL__) */ 83#endif /* defined(__KERNEL__) */
84 84
85#endif 85#endif /* ASM_X86__POSIX_TYPES_32_H */
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
index d6624c95854a..388b4e7f4a44 100644
--- a/include/asm-x86/posix_types_64.h
+++ b/include/asm-x86/posix_types_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_POSIX_TYPES_H 1#ifndef ASM_X86__POSIX_TYPES_64_H
2#define _ASM_X86_64_POSIX_TYPES_H 2#define ASM_X86__POSIX_TYPES_64_H
3 3
4/* 4/*
5 * This file is generally used by user-level software, so you need to 5 * This file is generally used by user-level software, so you need to
@@ -116,4 +116,4 @@ static inline void __FD_ZERO(__kernel_fd_set *p)
116 116
117#endif /* defined(__KERNEL__) */ 117#endif /* defined(__KERNEL__) */
118 118
119#endif 119#endif /* ASM_X86__POSIX_TYPES_64_H */
diff --git a/include/asm-x86/prctl.h b/include/asm-x86/prctl.h
index 52952adef1ca..e7ae34eb4103 100644
--- a/include/asm-x86/prctl.h
+++ b/include/asm-x86/prctl.h
@@ -1,5 +1,5 @@
1#ifndef X86_64_PRCTL_H 1#ifndef ASM_X86__PRCTL_H
2#define X86_64_PRCTL_H 1 2#define ASM_X86__PRCTL_H
3 3
4#define ARCH_SET_GS 0x1001 4#define ARCH_SET_GS 0x1001
5#define ARCH_SET_FS 0x1002 5#define ARCH_SET_FS 0x1002
@@ -7,4 +7,4 @@
7#define ARCH_GET_GS 0x1004 7#define ARCH_GET_GS 0x1004
8 8
9 9
10#endif 10#endif /* ASM_X86__PRCTL_H */
diff --git a/include/asm-x86/processor-cyrix.h b/include/asm-x86/processor-cyrix.h
index 97568ada1f97..1198f2a0e42c 100644
--- a/include/asm-x86/processor-cyrix.h
+++ b/include/asm-x86/processor-cyrix.h
@@ -28,3 +28,11 @@ static inline void setCx86(u8 reg, u8 data)
28 outb(reg, 0x22); 28 outb(reg, 0x22);
29 outb(data, 0x23); 29 outb(data, 0x23);
30} 30}
31
32#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
33
34#define setCx86_old(reg, data) do { \
35 outb((reg), 0x22); \
36 outb((data), 0x23); \
37} while (0)
38
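The getCx86_old()/setCx86_old() macros added above capture the classic index/data port idiom on Cyrix parts: the register index is written to port 0x22, then the data byte is read or written on port 0x23. A minimal sketch of a read-modify-write built on them; cx86_set_reg_bit() is a name invented here for illustration, not part of the patch:

static void cx86_set_reg_bit(u8 reg, u8 bit)
{
        u8 val = getCx86_old(reg);              /* outb(reg, 0x22); inb(0x23) */

        /* write back with the requested bit set */
        setCx86_old(reg, val | (1 << bit));     /* outb to 0x22, then 0x23 */
}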
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 199cab107d85..dc5f0712f9fa 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I386_PROCESSOR_FLAGS_H 1#ifndef ASM_X86__PROCESSOR_FLAGS_H
2#define __ASM_I386_PROCESSOR_FLAGS_H 2#define ASM_X86__PROCESSOR_FLAGS_H
3/* Various flags defined: can be included from assembler. */ 3/* Various flags defined: can be included from assembler. */
4 4
5/* 5/*
@@ -59,6 +59,7 @@
59#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ 59#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
60#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ 60#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
61#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ 61#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
62#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
62 63
63/* 64/*
64 * x86-64 Task Priority Register, CR8 65 * x86-64 Task Priority Register, CR8
@@ -88,4 +89,12 @@
88#define CX86_ARR_BASE 0xc4 89#define CX86_ARR_BASE 0xc4
89#define CX86_RCR_BASE 0xdc 90#define CX86_RCR_BASE 0xdc
90 91
91#endif /* __ASM_I386_PROCESSOR_FLAGS_H */ 92#ifdef __KERNEL__
93#ifdef CONFIG_VM86
94#define X86_VM_MASK X86_EFLAGS_VM
95#else
96#define X86_VM_MASK 0 /* No VM86 support */
97#endif
98#endif
99
100#endif /* ASM_X86__PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 559105220a47..ee7cbb30773a 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_X86_PROCESSOR_H 1#ifndef ASM_X86__PROCESSOR_H
2#define __ASM_X86_PROCESSOR_H 2#define ASM_X86__PROCESSOR_H
3 3
4#include <asm/processor-flags.h> 4#include <asm/processor-flags.h>
5 5
@@ -20,6 +20,7 @@ struct mm_struct;
20#include <asm/msr.h> 20#include <asm/msr.h>
21#include <asm/desc_defs.h> 21#include <asm/desc_defs.h>
22#include <asm/nops.h> 22#include <asm/nops.h>
23#include <asm/ds.h>
23 24
24#include <linux/personality.h> 25#include <linux/personality.h>
25#include <linux/cpumask.h> 26#include <linux/cpumask.h>
@@ -75,11 +76,11 @@ struct cpuinfo_x86 {
75 int x86_tlbsize; 76 int x86_tlbsize;
76 __u8 x86_virt_bits; 77 __u8 x86_virt_bits;
77 __u8 x86_phys_bits; 78 __u8 x86_phys_bits;
79#endif
78 /* CPUID returned core id bits: */ 80 /* CPUID returned core id bits: */
79 __u8 x86_coreid_bits; 81 __u8 x86_coreid_bits;
80 /* Max extended CPUID function supported: */ 82 /* Max extended CPUID function supported: */
81 __u32 extended_cpuid_level; 83 __u32 extended_cpuid_level;
82#endif
83 /* Maximum supported CPUID level, -1=no CPUID: */ 84 /* Maximum supported CPUID level, -1=no CPUID: */
84 int cpuid_level; 85 int cpuid_level;
85 __u32 x86_capability[NCAPINTS]; 86 __u32 x86_capability[NCAPINTS];
@@ -134,12 +135,14 @@ extern __u32 cleared_cpu_caps[NCAPINTS];
134#ifdef CONFIG_SMP 135#ifdef CONFIG_SMP
135DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); 136DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
136#define cpu_data(cpu) per_cpu(cpu_info, cpu) 137#define cpu_data(cpu) per_cpu(cpu_info, cpu)
137#define current_cpu_data cpu_data(smp_processor_id()) 138#define current_cpu_data __get_cpu_var(cpu_info)
138#else 139#else
139#define cpu_data(cpu) boot_cpu_data 140#define cpu_data(cpu) boot_cpu_data
140#define current_cpu_data boot_cpu_data 141#define current_cpu_data boot_cpu_data
141#endif 142#endif
142 143
144extern const struct seq_operations cpuinfo_op;
145
143static inline int hlt_works(int cpu) 146static inline int hlt_works(int cpu)
144{ 147{
145#ifdef CONFIG_X86_32 148#ifdef CONFIG_X86_32
@@ -153,7 +156,9 @@ static inline int hlt_works(int cpu)
153 156
154extern void cpu_detect(struct cpuinfo_x86 *c); 157extern void cpu_detect(struct cpuinfo_x86 *c);
155 158
156extern void identify_cpu(struct cpuinfo_x86 *); 159extern struct pt_regs *idle_regs(struct pt_regs *);
160
161extern void early_cpu_init(void);
157extern void identify_boot_cpu(void); 162extern void identify_boot_cpu(void);
158extern void identify_secondary_cpu(struct cpuinfo_x86 *); 163extern void identify_secondary_cpu(struct cpuinfo_x86 *);
159extern void print_cpu_info(struct cpuinfo_x86 *); 164extern void print_cpu_info(struct cpuinfo_x86 *);
@@ -161,11 +166,8 @@ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
161extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 166extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
162extern unsigned short num_cache_leaves; 167extern unsigned short num_cache_leaves;
163 168
164#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) 169extern void detect_extended_topology(struct cpuinfo_x86 *c);
165extern void detect_ht(struct cpuinfo_x86 *c); 170extern void detect_ht(struct cpuinfo_x86 *c);
166#else
167static inline void detect_ht(struct cpuinfo_x86 *c) {}
168#endif
169 171
170static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, 172static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
171 unsigned int *ecx, unsigned int *edx) 173 unsigned int *ecx, unsigned int *edx)
@@ -263,15 +265,11 @@ struct tss_struct {
263 struct thread_struct *io_bitmap_owner; 265 struct thread_struct *io_bitmap_owner;
264 266
265 /* 267 /*
266 * Pad the TSS to be cacheline-aligned (size is 0x100):
267 */
268 unsigned long __cacheline_filler[35];
269 /*
270 * .. and then another 0x100 bytes for the emergency kernel stack: 268 * .. and then another 0x100 bytes for the emergency kernel stack:
271 */ 269 */
272 unsigned long stack[64]; 270 unsigned long stack[64];
273 271
274} __attribute__((packed)); 272} ____cacheline_aligned;
275 273
276DECLARE_PER_CPU(struct tss_struct, init_tss); 274DECLARE_PER_CPU(struct tss_struct, init_tss);
277 275
@@ -326,7 +324,12 @@ struct i387_fxsave_struct {
326 /* 16*16 bytes for each XMM-reg = 256 bytes: */ 324 /* 16*16 bytes for each XMM-reg = 256 bytes: */
327 u32 xmm_space[64]; 325 u32 xmm_space[64];
328 326
329 u32 padding[24]; 327 u32 padding[12];
328
329 union {
330 u32 padding1[12];
331 u32 sw_reserved[12];
332 };
330 333
331} __attribute__((aligned(16))); 334} __attribute__((aligned(16)));
332 335
@@ -350,10 +353,23 @@ struct i387_soft_struct {
350 u32 entry_eip; 353 u32 entry_eip;
351}; 354};
352 355
356struct xsave_hdr_struct {
357 u64 xstate_bv;
358 u64 reserved1[2];
359 u64 reserved2[5];
360} __attribute__((packed));
361
362struct xsave_struct {
363 struct i387_fxsave_struct i387;
364 struct xsave_hdr_struct xsave_hdr;
365 /* new processor state extensions will go here */
366} __attribute__ ((packed, aligned (64)));
367
353union thread_xstate { 368union thread_xstate {
354 struct i387_fsave_struct fsave; 369 struct i387_fsave_struct fsave;
355 struct i387_fxsave_struct fxsave; 370 struct i387_fxsave_struct fxsave;
356 struct i387_soft_struct soft; 371 struct i387_soft_struct soft;
372 struct xsave_struct xsave;
357}; 373};
358 374
359#ifdef CONFIG_X86_64 375#ifdef CONFIG_X86_64
@@ -415,9 +431,14 @@ struct thread_struct {
415 unsigned io_bitmap_max; 431 unsigned io_bitmap_max;
416/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ 432/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
417 unsigned long debugctlmsr; 433 unsigned long debugctlmsr;
418/* Debug Store - if not 0 points to a DS Save Area configuration; 434#ifdef CONFIG_X86_DS
419 * goes into MSR_IA32_DS_AREA */ 435/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
420 unsigned long ds_area_msr; 436 struct ds_context *ds_ctx;
437#endif /* CONFIG_X86_DS */
438#ifdef CONFIG_X86_PTRACE_BTS
439/* the signal to send on a bts buffer overflow */
440 unsigned int bts_ovfl_signal;
441#endif /* CONFIG_X86_PTRACE_BTS */
421}; 442};
422 443
423static inline unsigned long native_get_debugreg(int regno) 444static inline unsigned long native_get_debugreg(int regno)
@@ -535,7 +556,6 @@ static inline void load_sp0(struct tss_struct *tss,
535} 556}
536 557
537#define set_iopl_mask native_set_iopl_mask 558#define set_iopl_mask native_set_iopl_mask
538#define SWAPGS swapgs
539#endif /* CONFIG_PARAVIRT */ 559#endif /* CONFIG_PARAVIRT */
540 560
541/* 561/*
@@ -566,41 +586,6 @@ static inline void clear_in_cr4(unsigned long mask)
566 write_cr4(cr4); 586 write_cr4(cr4);
567} 587}
568 588
569struct microcode_header {
570 unsigned int hdrver;
571 unsigned int rev;
572 unsigned int date;
573 unsigned int sig;
574 unsigned int cksum;
575 unsigned int ldrver;
576 unsigned int pf;
577 unsigned int datasize;
578 unsigned int totalsize;
579 unsigned int reserved[3];
580};
581
582struct microcode {
583 struct microcode_header hdr;
584 unsigned int bits[0];
585};
586
587typedef struct microcode microcode_t;
588typedef struct microcode_header microcode_header_t;
589
590/* microcode format is extended from prescott processors */
591struct extended_signature {
592 unsigned int sig;
593 unsigned int pf;
594 unsigned int cksum;
595};
596
597struct extended_sigtable {
598 unsigned int count;
599 unsigned int cksum;
600 unsigned int reserved[3];
601 struct extended_signature sigs[0];
602};
603
604typedef struct { 589typedef struct {
605 unsigned long seg; 590 unsigned long seg;
606} mm_segment_t; 591} mm_segment_t;
@@ -727,11 +712,34 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
727 712
728extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); 713extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
729 714
730extern int force_mwait;
731
732extern void select_idle_routine(const struct cpuinfo_x86 *c); 715extern void select_idle_routine(const struct cpuinfo_x86 *c);
733 716
734extern unsigned long boot_option_idle_override; 717extern unsigned long boot_option_idle_override;
718extern unsigned long idle_halt;
719extern unsigned long idle_nomwait;
720
721/*
722 * on systems with caches, caches must be flushed as the absolute
723 * last instruction before going into a suspended halt. Otherwise,
724 * dirty data can linger in the cache and become stale on resume,
725 * leading to strange errors.
726 *
727 * perform a variety of operations to guarantee that the compiler
728 * will not reorder instructions. wbinvd itself is serializing
729 * so the processor will not reorder.
730 *
731 * Systems without cache can just go into halt.
732 */
733static inline void wbinvd_halt(void)
734{
735 mb();
736 /* check for clflush to determine if wbinvd is legal */
737 if (cpu_has_clflush)
738 asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
739 else
740 while (1)
741 halt();
742}
735 743
736extern void enable_sep_cpu(void); 744extern void enable_sep_cpu(void);
737extern int sysenter_setup(void); 745extern int sysenter_setup(void);
@@ -925,4 +933,4 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
925extern int get_tsc_mode(unsigned long adr); 933extern int get_tsc_mode(unsigned long adr);
926extern int set_tsc_mode(unsigned int val); 934extern int set_tsc_mode(unsigned int val);
927 935
928#endif 936#endif /* ASM_X86__PROCESSOR_H */
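The xsave_struct introduced above only works because its header sits immediately after the legacy 512-byte fxsave image; that is exactly why the patch shrinks the i387_fxsave_struct padding to 12 words and adds the sw_reserved union, keeping the total at 512 bytes. A hedged sketch of the resulting invariants, assuming the kernel's BUILD_BUG_ON()/offsetof() and an illustrative helper name:

#include <linux/kernel.h>
#include <linux/stddef.h>

static int xstate_has_fp_or_sse(const struct xsave_struct *x)
{
        /* XSAVE/XRSTOR expect the header at offset 512, right after
         * the architecturally 512-byte fxsave image */
        BUILD_BUG_ON(sizeof(struct i387_fxsave_struct) != 512);
        BUILD_BUG_ON(offsetof(struct xsave_struct, xsave_hdr) != 512);

        /* xstate_bv bit 0 tracks x87 state, bit 1 SSE state */
        return (x->xsave_hdr.xstate_bv & 0x3) != 0;
}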
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 6c8b41b03f6d..6e89e8b4de0e 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_PROTO_H 1#ifndef ASM_X86__PROTO_H
2#define _ASM_X8664_PROTO_H 1 2#define ASM_X86__PROTO_H
3 3
4#include <asm/ldt.h> 4#include <asm/ldt.h>
5 5
@@ -14,8 +14,6 @@ extern void ia32_syscall(void);
14extern void ia32_cstar_target(void); 14extern void ia32_cstar_target(void);
15extern void ia32_sysenter_target(void); 15extern void ia32_sysenter_target(void);
16 16
17extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
18
19extern void syscall32_cpu_init(void); 17extern void syscall32_cpu_init(void);
20 18
21extern void check_efer(void); 19extern void check_efer(void);
@@ -31,4 +29,4 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
31#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) 29#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
32#define round_down(x, y) ((x) & ~((y) - 1)) 30#define round_down(x, y) ((x) & ~((y) - 1))
33 31
34#endif 32#endif /* ASM_X86__PROTO_H */
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index f224eb3c3157..4298b8882a78 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PTRACE_ABI_H 1#ifndef ASM_X86__PTRACE_ABI_H
2#define _ASM_X86_PTRACE_ABI_H 2#define ASM_X86__PTRACE_ABI_H
3 3
4#ifdef __i386__ 4#ifdef __i386__
5 5
@@ -73,15 +73,16 @@
73 73
74#ifdef __x86_64__ 74#ifdef __x86_64__
75# define PTRACE_ARCH_PRCTL 30 75# define PTRACE_ARCH_PRCTL 30
76#else
77# define PTRACE_SYSEMU 31
78# define PTRACE_SYSEMU_SINGLESTEP 32
79#endif 76#endif
80 77
78#define PTRACE_SYSEMU 31
79#define PTRACE_SYSEMU_SINGLESTEP 32
80
81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ 81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
82 82
83#ifndef __ASSEMBLY__ 83#ifdef CONFIG_X86_PTRACE_BTS
84 84
85#ifndef __ASSEMBLY__
85#include <asm/types.h> 86#include <asm/types.h>
86 87
87/* configuration/status structure used in PTRACE_BTS_CONFIG and 88/* configuration/status structure used in PTRACE_BTS_CONFIG and
@@ -97,20 +98,20 @@ struct ptrace_bts_config {
97 /* actual size of bts_struct in bytes */ 98 /* actual size of bts_struct in bytes */
98 __u32 bts_size; 99 __u32 bts_size;
99}; 100};
100#endif 101#endif /* __ASSEMBLY__ */
101 102
102#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ 103#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
103#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ 104#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
104#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow 105#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
105 instead of wrapping around */ 106 instead of wrapping around */
106#define PTRACE_BTS_O_CUT_SIZE 0x8 /* cut requested size to max available 107#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
107 instead of failing */
108 108
109#define PTRACE_BTS_CONFIG 40 109#define PTRACE_BTS_CONFIG 40
110/* Configure branch trace recording. 110/* Configure branch trace recording.
111 ADDR points to a struct ptrace_bts_config. 111 ADDR points to a struct ptrace_bts_config.
112 DATA gives the size of that buffer. 112 DATA gives the size of that buffer.
113 A new buffer is allocated, iff the size changes. 113 A new buffer is allocated, if requested in the flags.
114 An overflow signal may only be requested for new buffers.
114 Returns the number of bytes read. 115 Returns the number of bytes read.
115*/ 116*/
116#define PTRACE_BTS_STATUS 41 117#define PTRACE_BTS_STATUS 41
@@ -119,7 +120,7 @@ struct ptrace_bts_config {
119 Returns the number of bytes written. 120 Returns the number of bytes written.
120*/ 121*/
121#define PTRACE_BTS_SIZE 42 122#define PTRACE_BTS_SIZE 42
122/* Return the number of available BTS records. 123/* Return the number of available BTS records for draining.
123 DATA and ADDR are ignored. 124 DATA and ADDR are ignored.
124*/ 125*/
125#define PTRACE_BTS_GET 43 126#define PTRACE_BTS_GET 43
@@ -139,5 +140,6 @@ struct ptrace_bts_config {
139 BTS records are read from oldest to newest. 140 BTS records are read from oldest to newest.
140 Returns number of BTS records drained. 141 Returns number of BTS records drained.
141*/ 142*/
143#endif /* CONFIG_X86_PTRACE_BTS */
142 144
143#endif 145#endif /* ASM_X86__PTRACE_ABI_H */
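Read together, the revised request comments above give the tracer-side flow: fill in a ptrace_bts_config, request (re)allocation via PTRACE_BTS_O_ALLOC, then drain records. A userspace sketch under explicit assumptions: the .size and .flags field names belong to the part of the struct this hunk does not show, and error handling is omitted:

#include <sys/ptrace.h>
#include <sys/types.h>

static long bts_start_trace(pid_t child)
{
        struct ptrace_bts_config cfg = {
                .size  = 4096,  /* requested buffer size (assumed field) */
                .flags = PTRACE_BTS_O_TRACE | PTRACE_BTS_O_ALLOC,
        };

        /* ADDR points at the config and DATA gives its size, per the
         * PTRACE_BTS_CONFIG comment above; returns bytes read */
        return ptrace((enum __ptrace_request)PTRACE_BTS_CONFIG,
                      child, &cfg, sizeof(cfg));
}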
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 9f922b0b95d6..ac578f11c1c5 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -1,9 +1,14 @@
1#ifndef _ASM_X86_PTRACE_H 1#ifndef ASM_X86__PTRACE_H
2#define _ASM_X86_PTRACE_H 2#define ASM_X86__PTRACE_H
3 3
4#include <linux/compiler.h> /* For __user */ 4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h> 5#include <asm/ptrace-abi.h>
6#include <asm/processor-flags.h>
6 7
8#ifdef __KERNEL__
9#include <asm/ds.h> /* the DS BTS struct is used for ptrace too */
10#include <asm/segment.h>
11#endif
7 12
8#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
9 14
@@ -55,9 +60,6 @@ struct pt_regs {
55 unsigned long ss; 60 unsigned long ss;
56}; 61};
57 62
58#include <asm/vm86.h>
59#include <asm/segment.h>
60
61#endif /* __KERNEL__ */ 63#endif /* __KERNEL__ */
62 64
63#else /* __i386__ */ 65#else /* __i386__ */
@@ -125,14 +127,48 @@ struct pt_regs {
125#endif /* __KERNEL__ */ 127#endif /* __KERNEL__ */
126#endif /* !__i386__ */ 128#endif /* !__i386__ */
127 129
130
131#ifdef CONFIG_X86_PTRACE_BTS
132/* a branch trace record entry
133 *
134 * In order to unify the interface between various processor versions,
135 * we use the below data structure for all processors.
136 */
137enum bts_qualifier {
138 BTS_INVALID = 0,
139 BTS_BRANCH,
140 BTS_TASK_ARRIVES,
141 BTS_TASK_DEPARTS
142};
143
144struct bts_struct {
145 __u64 qualifier;
146 union {
147 /* BTS_BRANCH */
148 struct {
149 __u64 from_ip;
150 __u64 to_ip;
151 } lbr;
152 /* BTS_TASK_ARRIVES or
153 BTS_TASK_DEPARTS */
154 __u64 jiffies;
155 } variant;
156};
157#endif /* CONFIG_X86_PTRACE_BTS */
158
128#ifdef __KERNEL__ 159#ifdef __KERNEL__
129 160
130/* the DS BTS struct is used for ptrace as well */ 161#include <linux/init.h>
131#include <asm/ds.h>
132 162
163struct cpuinfo_x86;
133struct task_struct; 164struct task_struct;
134 165
166#ifdef CONFIG_X86_PTRACE_BTS
167extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
135extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier); 168extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
169#else
170#define ptrace_bts_init_intel(config) do {} while (0)
171#endif /* CONFIG_X86_PTRACE_BTS */
136 172
137extern unsigned long profile_pc(struct pt_regs *regs); 173extern unsigned long profile_pc(struct pt_regs *regs);
138 174
@@ -141,11 +177,14 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
141 177
142#ifdef CONFIG_X86_32 178#ifdef CONFIG_X86_32
143extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 179extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
144 int error_code); 180 int error_code, int si_code);
145#else
146void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
147#endif 181#endif
148 182
183void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
184
185extern long syscall_trace_enter(struct pt_regs *);
186extern void syscall_trace_leave(struct pt_regs *);
187
149static inline unsigned long regs_return_value(struct pt_regs *regs) 188static inline unsigned long regs_return_value(struct pt_regs *regs)
150{ 189{
151 return regs->ax; 190 return regs->ax;
@@ -211,6 +250,11 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
211 return regs->bp; 250 return regs->bp;
212} 251}
213 252
253static inline unsigned long user_stack_pointer(struct pt_regs *regs)
254{
255 return regs->sp;
256}
257
214/* 258/*
215 * These are defined as per linux/ptrace.h, which see. 259 * These are defined as per linux/ptrace.h, which see.
216 */ 260 */
@@ -237,4 +281,4 @@ extern int do_set_thread_area(struct task_struct *p, int idx,
237 281
238#endif /* !__ASSEMBLY__ */ 282#endif /* !__ASSEMBLY__ */
239 283
240#endif 284#endif /* ASM_X86__PTRACE_H */
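Since bts_struct above unifies branch records and scheduler timestamps behind a single qualifier, consumers are expected to switch on it before touching the variant union. A minimal decoding sketch using only the definitions from this hunk (the function name is illustrative):

static void print_bts_record(const struct bts_struct *bts)
{
        switch (bts->qualifier) {
        case BTS_BRANCH:
                printk(KERN_DEBUG "branch %llx -> %llx\n",
                       (unsigned long long)bts->variant.lbr.from_ip,
                       (unsigned long long)bts->variant.lbr.to_ip);
                break;
        case BTS_TASK_ARRIVES:
        case BTS_TASK_DEPARTS:
                printk(KERN_DEBUG "sched event at jiffies %llu\n",
                       (unsigned long long)bts->variant.jiffies);
                break;
        default:
                break;          /* BTS_INVALID or future qualifiers */
        }
}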
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
new file mode 100644
index 000000000000..edb3b4ecfc81
--- /dev/null
+++ b/include/asm-x86/pvclock-abi.h
@@ -0,0 +1,42 @@
1#ifndef ASM_X86__PVCLOCK_ABI_H
2#define ASM_X86__PVCLOCK_ABI_H
3#ifndef __ASSEMBLY__
4
5/*
6 * These structs MUST NOT be changed.
7 * They are the ABI between hypervisor and guest OS.
8 * Both Xen and KVM use this.
9 *
10 * pvclock_vcpu_time_info holds the system time and the tsc timestamp
11 * of the last update. So the guest can use the tsc delta to get a
12 * more precise system time. There is one per virtual cpu.
13 *
14 * pvclock_wall_clock references the point in time when the system
15 * time was zero (usually boot time), thus the guest calculates the
16 * current wall clock by adding the system time.
17 *
18 * Protocol for the "version" fields is: hypervisor raises it (making
19 * it uneven) before it starts updating the fields and raises it again
20 * (making it even) when it is done. Thus the guest can make sure the
21 * time values it got are consistent by checking the version before
22 * and after reading them.
23 */
24
25struct pvclock_vcpu_time_info {
26 u32 version;
27 u32 pad0;
28 u64 tsc_timestamp;
29 u64 system_time;
30 u32 tsc_to_system_mul;
31 s8 tsc_shift;
32 u8 pad[3];
33} __attribute__((__packed__)); /* 32 bytes */
34
35struct pvclock_wall_clock {
36 u32 version;
37 u32 sec;
38 u32 nsec;
39} __attribute__((__packed__));
40
41#endif /* __ASSEMBLY__ */
42#endif /* ASM_X86__PVCLOCK_ABI_H */
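The even/odd version protocol documented above is a writer-side seqlock in all but name, so the matching guest read is a retry loop: snapshot the version, copy the fields, and retry if the snapshot was odd (update in flight) or the version moved underneath the copy. A sketch of that loop, assuming the kernel's rmb() read barrier:

static void pvclock_copy_time_info(const struct pvclock_vcpu_time_info *src,
                                   struct pvclock_vcpu_time_info *dst)
{
        u32 version;

        do {
                version = src->version;
                rmb();          /* the copy must not pass the version read */
                *dst = *src;
                rmb();          /* ...nor the re-check pass the copy */
        } while ((version & 1) || version != src->version);
}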
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
new file mode 100644
index 000000000000..1a38f6834800
--- /dev/null
+++ b/include/asm-x86/pvclock.h
@@ -0,0 +1,13 @@
1#ifndef ASM_X86__PVCLOCK_H
2#define ASM_X86__PVCLOCK_H
3
4#include <linux/clocksource.h>
5#include <asm/pvclock-abi.h>
6
7/* some helper functions for xen and kvm pv clock sources */
8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
9void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
10 struct pvclock_vcpu_time_info *vcpu,
11 struct timespec *ts);
12
13#endif /* ASM_X86__PVCLOCK_H */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index e63741f19392..1c2f0ce9e31e 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_REBOOT_H 1#ifndef ASM_X86__REBOOT_H
2#define _ASM_REBOOT_H 2#define ASM_X86__REBOOT_H
3 3
4struct pt_regs; 4struct pt_regs;
5 5
@@ -14,8 +14,8 @@ struct machine_ops {
14 14
15extern struct machine_ops machine_ops; 15extern struct machine_ops machine_ops;
16 16
17void machine_real_restart(unsigned char *code, int length);
18void native_machine_crash_shutdown(struct pt_regs *regs); 17void native_machine_crash_shutdown(struct pt_regs *regs);
19void native_machine_shutdown(void); 18void native_machine_shutdown(void);
19void machine_real_restart(const unsigned char *code, int length);
20 20
21#endif /* _ASM_REBOOT_H */ 21#endif /* ASM_X86__REBOOT_H */
diff --git a/include/asm-x86/reboot_fixups.h b/include/asm-x86/reboot_fixups.h
index 0cb7d87c2b68..2c2987d97570 100644
--- a/include/asm-x86/reboot_fixups.h
+++ b/include/asm-x86/reboot_fixups.h
@@ -1,6 +1,6 @@
1#ifndef _LINUX_REBOOT_FIXUPS_H 1#ifndef ASM_X86__REBOOT_FIXUPS_H
2#define _LINUX_REBOOT_FIXUPS_H 2#define ASM_X86__REBOOT_FIXUPS_H
3 3
4extern void mach_reboot_fixups(void); 4extern void mach_reboot_fixups(void);
5 5
6#endif /* _LINUX_REBOOT_FIXUPS_H */ 6#endif /* ASM_X86__REBOOT_FIXUPS_H */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index 7400d3ad75c6..a01c4e376331 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_REQUIRED_FEATURES_H 1#ifndef ASM_X86__REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1 2#define ASM_X86__REQUIRED_FEATURES_H
3 3
4/* Define minimum CPUID feature set for kernel These bits are checked 4/* Define minimum CPUID feature set for kernel These bits are checked
5 really early to actually display a visible error message before the 5 really early to actually display a visible error message before the
@@ -19,9 +19,13 @@
19 19
20#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) 20#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
21# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) 21# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
22# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
23#else 22#else
24# define NEED_PAE 0 23# define NEED_PAE 0
24#endif
25
26#ifdef CONFIG_X86_CMPXCHG64
27# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
28#else
25# define NEED_CX8 0 29# define NEED_CX8 0
26#endif 30#endif
27 31
@@ -37,8 +41,14 @@
37# define NEED_3DNOW 0 41# define NEED_3DNOW 0
38#endif 42#endif
39 43
44#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
45# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
46#else
47# define NEED_NOPL 0
48#endif
49
40#ifdef CONFIG_X86_64 50#ifdef CONFIG_X86_64
41#define NEED_PSE (1<<(X86_FEATURE_PSE & 31)) 51#define NEED_PSE 0
42#define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) 52#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
43#define NEED_PGE (1<<(X86_FEATURE_PGE & 31)) 53#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
44#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) 54#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
@@ -63,10 +73,10 @@
63#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) 73#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
64 74
65#define REQUIRED_MASK2 0 75#define REQUIRED_MASK2 0
66#define REQUIRED_MASK3 0 76#define REQUIRED_MASK3 (NEED_NOPL)
67#define REQUIRED_MASK4 0 77#define REQUIRED_MASK4 0
68#define REQUIRED_MASK5 0 78#define REQUIRED_MASK5 0
69#define REQUIRED_MASK6 0 79#define REQUIRED_MASK6 0
70#define REQUIRED_MASK7 0 80#define REQUIRED_MASK7 0
71 81
72#endif 82#endif /* ASM_X86__REQUIRED_FEATURES_H */
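The recurring 1<<(X86_FEATURE_xxx & 31) pattern works because each X86_FEATURE_* constant encodes word * 32 + bit: masking with 31 extracts the bit position within one 32-bit capability word, while the word index picks the REQUIRED_MASKn it must be OR-ed into, which is why NEED_NOPL lands in REQUIRED_MASK3 above. A tiny worked example, quoting the X86_FEATURE_PAE encoding from cpufeature.h:

#include <assert.h>

#define X86_FEATURE_PAE (0*32 + 6)      /* capability word 0, bit 6 */
#define NEED_PAE (1 << (X86_FEATURE_PAE & 31))

int main(void)
{
        assert(NEED_PAE == 0x40);       /* bit 6 of capability word 0, */
        return 0;                       /* so it belongs in REQUIRED_MASK0 */
}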
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 2557514d7ef6..e39376d7de50 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -1,13 +1,13 @@
1#ifndef _ASM_X86_RESUME_TRACE_H 1#ifndef ASM_X86__RESUME_TRACE_H
2#define _ASM_X86_RESUME_TRACE_H 2#define ASM_X86__RESUME_TRACE_H
3 3
4#include <asm/asm.h> 4#include <asm/asm.h>
5 5
6#define TRACE_RESUME(user) \ 6#define TRACE_RESUME(user) \
7do { \ 7do { \
8 if (pm_trace_enabled) { \ 8 if (pm_trace_enabled) { \
9 void *tracedata; \ 9 const void *tracedata; \
10 asm volatile(_ASM_MOV_UL " $1f,%0\n" \ 10 asm volatile(_ASM_MOV " $1f,%0\n" \
11 ".section .tracedata,\"a\"\n" \ 11 ".section .tracedata,\"a\"\n" \
12 "1:\t.word %c1\n\t" \ 12 "1:\t.word %c1\n\t" \
13 _ASM_PTR " %c2\n" \ 13 _ASM_PTR " %c2\n" \
@@ -18,4 +18,4 @@ do { \
18 } \ 18 } \
19} while (0) 19} while (0)
20 20
21#endif 21#endif /* ASM_X86__RESUME_TRACE_H */
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index c9448bd8968f..5e1256bdee83 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -5,8 +5,8 @@
5 * Author: Laurent Vivier <Laurent.Vivier@bull.net> 5 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
6 */ 6 */
7 7
8#ifndef __ASM_RIO_H 8#ifndef ASM_X86__RIO_H
9#define __ASM_RIO_H 9#define ASM_X86__RIO_H
10 10
11#define RIO_TABLE_VERSION 3 11#define RIO_TABLE_VERSION 3
12 12
@@ -60,4 +60,4 @@ enum {
60 ALT_CALGARY = 5, /* Second Planar Calgary */ 60 ALT_CALGARY = 5, /* Second Planar Calgary */
61}; 61};
62 62
63#endif /* __ASM_RIO_H */ 63#endif /* ASM_X86__RIO_H */
diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h
index 6a8c0d645108..48a3109e1a7d 100644
--- a/include/asm-x86/rwlock.h
+++ b/include/asm-x86/rwlock.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_RWLOCK_H 1#ifndef ASM_X86__RWLOCK_H
2#define _ASM_X86_RWLOCK_H 2#define ASM_X86__RWLOCK_H
3 3
4#define RW_LOCK_BIAS 0x01000000 4#define RW_LOCK_BIAS 0x01000000
5 5
6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ 6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
7 7
8#endif /* _ASM_X86_RWLOCK_H */ 8#endif /* ASM_X86__RWLOCK_H */
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
index 750f2a3542b3..3ff3015b71a8 100644
--- a/include/asm-x86/rwsem.h
+++ b/include/asm-x86/rwsem.h
@@ -29,8 +29,8 @@
29 * front, then they'll all be woken up, but no other readers will be. 29 * front, then they'll all be woken up, but no other readers will be.
30 */ 30 */
31 31
32#ifndef _I386_RWSEM_H 32#ifndef ASM_X86__RWSEM_H
33#define _I386_RWSEM_H 33#define ASM_X86__RWSEM_H
34 34
35#ifndef _LINUX_RWSEM_H 35#ifndef _LINUX_RWSEM_H
36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
@@ -262,4 +262,4 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
262} 262}
263 263
264#endif /* __KERNEL__ */ 264#endif /* __KERNEL__ */
265#endif /* _I386_RWSEM_H */ 265#endif /* ASM_X86__RWSEM_H */
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h
index c0432061f81a..ee48f880005d 100644
--- a/include/asm-x86/scatterlist.h
+++ b/include/asm-x86/scatterlist.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef ASM_X86__SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define ASM_X86__SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5 5
@@ -30,4 +30,4 @@ struct scatterlist {
30# define sg_dma_len(sg) ((sg)->dma_length) 30# define sg_dma_len(sg) ((sg)->dma_length)
31#endif 31#endif
32 32
33#endif 33#endif /* ASM_X86__SCATTERLIST_H */
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h
index 18da19e89bff..cf9ab2dbcef1 100644
--- a/include/asm-x86/seccomp_32.h
+++ b/include/asm-x86/seccomp_32.h
@@ -1,4 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef ASM_X86__SECCOMP_32_H
2#define ASM_X86__SECCOMP_32_H
2 3
3#include <linux/thread_info.h> 4#include <linux/thread_info.h>
4 5
@@ -13,4 +14,4 @@
13#define __NR_seccomp_exit __NR_exit 14#define __NR_seccomp_exit __NR_exit
14#define __NR_seccomp_sigreturn __NR_sigreturn 15#define __NR_seccomp_sigreturn __NR_sigreturn
15 16
16#endif /* _ASM_SECCOMP_H */ 17#endif /* ASM_X86__SECCOMP_32_H */
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65a2287..03274cea751f 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef ASM_X86__SECCOMP_64_H
2#define ASM_X86__SECCOMP_64_H
2 3
3#include <linux/thread_info.h> 4#include <linux/thread_info.h>
4 5
@@ -21,4 +22,4 @@
21#define __NR_seccomp_exit_32 __NR_ia32_exit 22#define __NR_seccomp_exit_32 __NR_ia32_exit
22#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn 23#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
23 24
24#endif /* _ASM_SECCOMP_H */ 25#endif /* ASM_X86__SECCOMP_64_H */
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
index ed5131dd7d92..ea5f0a8686f7 100644
--- a/include/asm-x86/segment.h
+++ b/include/asm-x86/segment.h
@@ -1,5 +1,14 @@
1#ifndef _ASM_X86_SEGMENT_H_ 1#ifndef ASM_X86__SEGMENT_H
2#define _ASM_X86_SEGMENT_H_ 2#define ASM_X86__SEGMENT_H
3
4/* Constructor for a conventional segment GDT (or LDT) entry */
5/* This is a macro so it can be used in initializers */
6#define GDT_ENTRY(flags, base, limit) \
7 ((((base) & 0xff000000ULL) << (56-24)) | \
8 (((flags) & 0x0000f0ffULL) << 40) | \
9 (((limit) & 0x000f0000ULL) << (48-16)) | \
10 (((base) & 0x00ffffffULL) << 16) | \
11 (((limit) & 0x0000ffffULL)))
3 12
4/* Simple and small GDT entries for booting only */ 13/* Simple and small GDT entries for booting only */
5 14
@@ -61,18 +70,14 @@
61#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) 70#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
62 71
63#define GDT_ENTRY_DEFAULT_USER_CS 14 72#define GDT_ENTRY_DEFAULT_USER_CS 14
64#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
65 73
66#define GDT_ENTRY_DEFAULT_USER_DS 15 74#define GDT_ENTRY_DEFAULT_USER_DS 15
67#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
68 75
69#define GDT_ENTRY_KERNEL_BASE 12 76#define GDT_ENTRY_KERNEL_BASE 12
70 77
71#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) 78#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
72#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
73 79
74#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) 80#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
75#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
76 81
77#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) 82#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
78#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) 83#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
@@ -139,10 +144,11 @@
139#else 144#else
140#include <asm/cache.h> 145#include <asm/cache.h>
141 146
142#define __KERNEL_CS 0x10 147#define GDT_ENTRY_KERNEL32_CS 1
143#define __KERNEL_DS 0x18 148#define GDT_ENTRY_KERNEL_CS 2
149#define GDT_ENTRY_KERNEL_DS 3
144 150
145#define __KERNEL32_CS 0x08 151#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS * 8)
146 152
147/* 153/*
148 * we cannot use the same code segment descriptor for user and kernel 154 * we cannot use the same code segment descriptor for user and kernel
@@ -150,10 +156,10 @@
150 * The segment offset needs to contain a RPL. Grr. -AK 156 * The segment offset needs to contain a RPL. Grr. -AK
151 * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) 157 * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
152 */ 158 */
153 159#define GDT_ENTRY_DEFAULT_USER32_CS 4
154#define __USER32_CS 0x23 /* 4*8+3 */ 160#define GDT_ENTRY_DEFAULT_USER_DS 5
155#define __USER_DS 0x2b /* 5*8+3 */ 161#define GDT_ENTRY_DEFAULT_USER_CS 6
156#define __USER_CS 0x33 /* 6*8+3 */ 162#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
157#define __USER32_DS __USER_DS 163#define __USER32_DS __USER_DS
158 164
159#define GDT_ENTRY_TSS 8 /* needs two entries */ 165#define GDT_ENTRY_TSS 8 /* needs two entries */
@@ -175,6 +181,10 @@
175 181
176#endif 182#endif
177 183
184#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
185#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
186#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
187#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
178#ifndef CONFIG_PARAVIRT 188#ifndef CONFIG_PARAVIRT
179#define get_kernel_rpl() 0 189#define get_kernel_rpl() 0
180#endif 190#endif
@@ -202,4 +212,4 @@ extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
202#endif 212#endif
203#endif 213#endif
204 214
205#endif 215#endif /* ASM_X86__SEGMENT_H */
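The GDT_ENTRY() constructor added at the top of this file packs the historically split descriptor fields (base 24..31, flags, limit 16..19, base 0..23, limit 0..15) into one 64-bit literal, and the selector macros that now derive __KERNEL_CS and friends are plain index * 8 arithmetic with the requested privilege level in the low two bits. A worked expansion, assuming the conventional flags value for a flat 4 GiB ring-0 code segment:

/* flags 0xc09a: present, DPL 0, 32-bit code, 4 KiB granularity */
u64 flat_code = GDT_ENTRY(0xc09a, 0, 0xfffff);
/* evaluates to 0x00cf9a000000ffff: base 0, page-granular limit 0xfffff */

/* selectors are (GDT index * 8) | RPL, so with the layout above
 * __KERNEL_CS = 2 * 8 = 0x10 and __USER_CS = 6 * 8 + 3 = 0x33,
 * matching the raw constants the patch removes */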
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-x86/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-x86/sembuf.h b/include/asm-x86/sembuf.h
index ee50c801f7b7..81f06b7e5a3f 100644
--- a/include/asm-x86/sembuf.h
+++ b/include/asm-x86/sembuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SEMBUF_H 1#ifndef ASM_X86__SEMBUF_H
2#define _ASM_X86_SEMBUF_H 2#define ASM_X86__SEMBUF_H
3 3
4/* 4/*
5 * The semid64_ds structure for x86 architecture. 5 * The semid64_ds structure for x86 architecture.
@@ -21,4 +21,4 @@ struct semid64_ds {
21 unsigned long __unused4; 21 unsigned long __unused4;
22}; 22};
23 23
24#endif /* _ASM_X86_SEMBUF_H */ 24#endif /* ASM_X86__SEMBUF_H */
diff --git a/include/asm-x86/serial.h b/include/asm-x86/serial.h
index 628c801535ea..303660b671e5 100644
--- a/include/asm-x86/serial.h
+++ b/include/asm-x86/serial.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SERIAL_H 1#ifndef ASM_X86__SERIAL_H
2#define _ASM_X86_SERIAL_H 2#define ASM_X86__SERIAL_H
3 3
4/* 4/*
5 * This assumes you have a 1.8432 MHz clock for your UART. 5 * This assumes you have a 1.8432 MHz clock for your UART.
@@ -26,4 +26,4 @@
26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ 26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ 27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
28 28
29#endif /* _ASM_X86_SERIAL_H */ 29#endif /* ASM_X86__SERIAL_H */
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index fa6763af8d26..11b6cc14b289 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef ASM_X86__SETUP_H
2#define _ASM_X86_SETUP_H 2#define ASM_X86__SETUP_H
3 3
4#define COMMAND_LINE_SIZE 2048 4#define COMMAND_LINE_SIZE 2048
5 5
@@ -8,7 +8,42 @@
8/* Interrupt control for vSMPowered x86_64 systems */ 8/* Interrupt control for vSMPowered x86_64 systems */
9void vsmp_init(void); 9void vsmp_init(void);
10 10
11char *machine_specific_memory_setup(void); 11#ifdef CONFIG_X86_VISWS
12extern void visws_early_detect(void);
13extern int is_visws_box(void);
14#else
15static inline void visws_early_detect(void) { }
16static inline int is_visws_box(void) { return 0; }
17#endif
18
19/*
20 * Any setup quirks to be performed?
21 */
22struct mpc_config_processor;
23struct mpc_config_bus;
24struct mp_config_oemtable;
25struct x86_quirks {
26 int (*arch_pre_time_init)(void);
27 int (*arch_time_init)(void);
28 int (*arch_pre_intr_init)(void);
29 int (*arch_intr_init)(void);
30 int (*arch_trap_init)(void);
31 char * (*arch_memory_setup)(void);
32 int (*mach_get_smp_config)(unsigned int early);
33 int (*mach_find_smp_config)(unsigned int reserve);
34
35 int *mpc_record;
36 int (*mpc_apic_id)(struct mpc_config_processor *m);
37 void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name);
38 void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
39 void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
40 unsigned short oemsize);
41 int (*setup_ioapic_ids)(void);
42};
43
44extern struct x86_quirks *x86_quirks;
45extern unsigned long saved_video_mode;
46
12#ifndef CONFIG_PARAVIRT 47#ifndef CONFIG_PARAVIRT
13#define paravirt_post_allocator_init() do {} while (0) 48#define paravirt_post_allocator_init() do {} while (0)
14#endif 49#endif
@@ -43,30 +78,28 @@ char *machine_specific_memory_setup(void);
43 */ 78 */
44extern struct boot_params boot_params; 79extern struct boot_params boot_params;
45 80
46#ifdef __i386__
47/* 81/*
48 * Do NOT EVER look at the BIOS memory size location. 82 * Do NOT EVER look at the BIOS memory size location.
49 * It does not work on many machines. 83 * It does not work on many machines.
50 */ 84 */
51#define LOWMEMSIZE() (0x9f000) 85#define LOWMEMSIZE() (0x9f000)
52 86
53struct e820entry; 87#ifdef __i386__
54
55char * __init machine_specific_memory_setup(void);
56char *memory_setup(void);
57 88
58int __init copy_e820_map(struct e820entry *biosmap, int nr_map); 89void __init i386_start_kernel(void);
59int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map); 90extern void probe_roms(void);
60void __init add_memory_region(unsigned long long start,
61 unsigned long long size, int type);
62 91
92extern unsigned long init_pg_tables_start;
63extern unsigned long init_pg_tables_end; 93extern unsigned long init_pg_tables_end;
64 94
65 95#else
96void __init x86_64_init_pda(void);
97void __init x86_64_start_kernel(char *real_mode);
98void __init x86_64_start_reservations(char *real_mode_data);
66 99
67#endif /* __i386__ */ 100#endif /* __i386__ */
68#endif /* _SETUP */ 101#endif /* _SETUP */
69#endif /* __ASSEMBLY__ */ 102#endif /* __ASSEMBLY__ */
70#endif /* __KERNEL__ */ 103#endif /* __KERNEL__ */
71 104
72#endif /* _ASM_X86_SETUP_H */ 105#endif /* ASM_X86__SETUP_H */
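The new x86_quirks table gathers the subarch hooks behind one ops structure that a platform points the global x86_quirks pointer at during early boot. A hypothetical sketch with invented myplat_* names, assuming (as in the callers this series touches) that a nonzero return from a hook suppresses the corresponding default path:

static int myplat_pre_intr_init(void)
{
        /* platform-specific interrupt setup would go here; a nonzero
         * return is assumed to skip the generic pre-IRQ init */
        return 0;
}

static struct x86_quirks myplat_quirks = {
        .arch_pre_intr_init = myplat_pre_intr_init,
        /* hooks left NULL fall back to the default behavior */
};

void __init myplat_early_setup(void)
{
        x86_quirks = &myplat_quirks;
}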
diff --git a/include/asm-x86/shmbuf.h b/include/asm-x86/shmbuf.h
index b51413b74971..f51aec2298e9 100644
--- a/include/asm-x86/shmbuf.h
+++ b/include/asm-x86/shmbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SHMBUF_H 1#ifndef ASM_X86__SHMBUF_H
2#define _ASM_X86_SHMBUF_H 2#define ASM_X86__SHMBUF_H
3 3
4/* 4/*
5 * The shmid64_ds structure for x86 architecture. 5 * The shmid64_ds structure for x86 architecture.
@@ -48,4 +48,4 @@ struct shminfo64 {
48 unsigned long __unused4; 48 unsigned long __unused4;
49}; 49};
50 50
51#endif /* _ASM_X86_SHMBUF_H */ 51#endif /* ASM_X86__SHMBUF_H */
diff --git a/include/asm-x86/shmparam.h b/include/asm-x86/shmparam.h
index 0880cf0917b9..a83a1fd96a0e 100644
--- a/include/asm-x86/shmparam.h
+++ b/include/asm-x86/shmparam.h
@@ -1,6 +1,6 @@
1#ifndef _ASM_X86_SHMPARAM_H 1#ifndef ASM_X86__SHMPARAM_H
2#define _ASM_X86_SHMPARAM_H 2#define ASM_X86__SHMPARAM_H
3 3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ 4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5 5
6#endif /* _ASM_X86_SHMPARAM_H */ 6#endif /* ASM_X86__SHMPARAM_H */
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 2f9c884d2c0f..ee813f4fe5d5 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -1,9 +1,43 @@
1#ifndef _ASM_X86_SIGCONTEXT_H 1#ifndef ASM_X86__SIGCONTEXT_H
2#define _ASM_X86_SIGCONTEXT_H 2#define ASM_X86__SIGCONTEXT_H
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7#define FP_XSTATE_MAGIC1 0x46505853U
8#define FP_XSTATE_MAGIC2 0x46505845U
9#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
10
11/*
12 * bytes 464..511 in the current 512-byte layout of the fxsave/fxrstor frame
13 * are reserved for SW usage. On CPUs supporting xsave/xrstor, these bytes
14 * are used to extend the fpstate pointer in the sigcontext, which now
15 * includes the extended state information along with fpstate information.
16 *
17 * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
18 * area and FP_XSTATE_MAGIC2 at the end of memory layout
19 * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
20 * extended state information in the memory layout pointed by the fpstate
21 * pointer in sigcontext.
22 */
23struct _fpx_sw_bytes {
24 __u32 magic1; /* FP_XSTATE_MAGIC1 */
25 __u32 extended_size; /* total size of the layout referred to by
26 * fpstate pointer in the sigcontext.
27 */
28 __u64 xstate_bv;
29 /* feature bit mask (including fp/sse/extended
30 * state) that is present in the memory
31 * layout.
32 */
33 __u32 xstate_size; /* actual xsave state size, based on the
34 * features saved in the layout.
35 * 'extended_size' will be greater than
36 * 'xstate_size'.
37 */
38 __u32 padding[7]; /* for future use. */
39};
40
7#ifdef __i386__ 41#ifdef __i386__
8/* 42/*
9 * As documented in the iBCS2 standard.. 43 * As documented in the iBCS2 standard..
@@ -53,7 +87,13 @@ struct _fpstate {
53 unsigned long reserved; 87 unsigned long reserved;
54 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 88 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
55 struct _xmmreg _xmm[8]; 89 struct _xmmreg _xmm[8];
56 unsigned long padding[56]; 90 unsigned long padding1[44];
91
92 union {
93 unsigned long padding2[12];
94 struct _fpx_sw_bytes sw_reserved; /* represents the extended
95 * state info */
96 };
57}; 97};
58 98
59#define X86_FXSR_MAGIC 0x0000 99#define X86_FXSR_MAGIC 0x0000
@@ -79,7 +119,15 @@ struct sigcontext {
79 unsigned long flags; 119 unsigned long flags;
80 unsigned long sp_at_signal; 120 unsigned long sp_at_signal;
81 unsigned short ss, __ssh; 121 unsigned short ss, __ssh;
82 struct _fpstate __user *fpstate; 122
123 /*
124 * fpstate is really (struct _fpstate *) or (struct _xstate *)
125 * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
126 * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
127 * of extended memory layout. See comments at the definition of
128 * (struct _fpx_sw_bytes)
129 */
130 void __user *fpstate; /* zero when no FPU/extended context */
83 unsigned long oldmask; 131 unsigned long oldmask;
84 unsigned long cr2; 132 unsigned long cr2;
85}; 133};
@@ -130,7 +178,12 @@ struct _fpstate {
130 __u32 mxcsr_mask; 178 __u32 mxcsr_mask;
131 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ 179 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
132 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ 180 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
133 __u32 reserved2[24]; 181 __u32 reserved2[12];
182 union {
183 __u32 reserved3[12];
184 struct _fpx_sw_bytes sw_reserved; /* represents the extended
185 * state information */
186 };
134}; 187};
135 188
136#ifdef __KERNEL__ 189#ifdef __KERNEL__
@@ -161,7 +214,15 @@ struct sigcontext {
161 unsigned long trapno; 214 unsigned long trapno;
162 unsigned long oldmask; 215 unsigned long oldmask;
163 unsigned long cr2; 216 unsigned long cr2;
164 struct _fpstate __user *fpstate; /* zero when no FPU context */ 217
218 /*
219 * fpstate is really (struct _fpstate *) or (struct _xstate *)
220 * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
221 * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
222 * of extended memory layout. See comments at the definition of
223 * (struct _fpx_sw_bytes)
224 */
225 void __user *fpstate; /* zero when no FPU/extended context */
165 unsigned long reserved1[8]; 226 unsigned long reserved1[8];
166}; 227};
167#else /* __KERNEL__ */ 228#else /* __KERNEL__ */
@@ -202,4 +263,22 @@ struct sigcontext {
202 263
203#endif /* !__i386__ */ 264#endif /* !__i386__ */
204 265
205#endif 266struct _xsave_hdr {
267 __u64 xstate_bv;
268 __u64 reserved1[2];
269 __u64 reserved2[5];
270};
271
272/*
273 * Extended state pointed by the fpstate pointer in the sigcontext.
274 * In addition to the fpstate, information encoded in the xstate_hdr
275 * indicates the presence of other extended state information
276 * supported by the processor and OS.
277 */
278struct _xstate {
279 struct _fpstate fpstate;
280 struct _xsave_hdr xstate_hdr;
281 /* new processor state extensions go here */
282};
283
284#endif /* ASM_X86__SIGCONTEXT_H */
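Because fpstate is now a bare void __user pointer, consumers distinguish a legacy _fpstate frame from the extended _xstate layout by the magic words described above: FP_XSTATE_MAGIC1 inside sw_reserved, and FP_XSTATE_MAGIC2 sitting extended_size - FP_XSTATE_MAGIC2_SIZE bytes past the frame start. A user-side sketch of that check (the helper name is illustrative, and it trusts extended_size for brevity):

static int fpstate_is_extended(const struct _fpstate *fp)
{
        const struct _fpx_sw_bytes *sw = &fp->sw_reserved;
        const __u32 *magic2;

        if (sw->magic1 != FP_XSTATE_MAGIC1)
                return 0;               /* legacy fxsave-only frame */

        magic2 = (const __u32 *)((const char *)fp +
                                 sw->extended_size - FP_XSTATE_MAGIC2_SIZE);
        return *magic2 == FP_XSTATE_MAGIC2;
}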
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 57a9686fb491..8c347032c2f2 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -1,5 +1,5 @@
1#ifndef _SIGCONTEXT32_H 1#ifndef ASM_X86__SIGCONTEXT32_H
2#define _SIGCONTEXT32_H 1 2#define ASM_X86__SIGCONTEXT32_H
3 3
4/* signal context for 32bit programs. */ 4/* signal context for 32bit programs. */
5 5
@@ -40,7 +40,11 @@ struct _fpstate_ia32 {
40 __u32 reserved; 40 __u32 reserved;
41 struct _fpxreg _fxsr_st[8]; 41 struct _fpxreg _fxsr_st[8];
42 struct _xmmreg _xmm[8]; /* It's actually 16 */ 42 struct _xmmreg _xmm[8]; /* It's actually 16 */
43 __u32 padding[56]; 43 __u32 padding[44];
44 union {
45 __u32 padding2[12];
46 struct _fpx_sw_bytes sw_reserved;
47 };
44}; 48};
45 49
46struct sigcontext_ia32 { 50struct sigcontext_ia32 {
@@ -68,4 +72,4 @@ struct sigcontext_ia32 {
68 unsigned int cr2; 72 unsigned int cr2;
69}; 73};
70 74
71#endif 75#endif /* ASM_X86__SIGCONTEXT32_H */
diff --git a/include/asm-x86/siginfo.h b/include/asm-x86/siginfo.h
index a477bea0c2a1..808bdfb2958c 100644
--- a/include/asm-x86/siginfo.h
+++ b/include/asm-x86/siginfo.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGINFO_H 1#ifndef ASM_X86__SIGINFO_H
2#define _ASM_X86_SIGINFO_H 2#define ASM_X86__SIGINFO_H
3 3
4#ifdef __x86_64__ 4#ifdef __x86_64__
5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) 5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
@@ -7,4 +7,4 @@
7 7
8#include <asm-generic/siginfo.h> 8#include <asm-generic/siginfo.h>
9 9
10#endif 10#endif /* ASM_X86__SIGINFO_H */
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
index f15186d39c69..65acc82d267a 100644
--- a/include/asm-x86/signal.h
+++ b/include/asm-x86/signal.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGNAL_H 1#ifndef ASM_X86__SIGNAL_H
2#define _ASM_X86_SIGNAL_H 2#define ASM_X86__SIGNAL_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/types.h> 5#include <linux/types.h>
@@ -140,6 +140,9 @@ struct sigaction {
140struct k_sigaction { 140struct k_sigaction {
141 struct sigaction sa; 141 struct sigaction sa;
142}; 142};
143
144extern void do_notify_resume(struct pt_regs *, void *, __u32);
145
143# else /* __KERNEL__ */ 146# else /* __KERNEL__ */
144/* Here we must cater to libcs that poke about in kernel headers. */ 147/* Here we must cater to libcs that poke about in kernel headers. */
145 148
@@ -181,12 +184,12 @@ typedef struct sigaltstack {
181#ifdef __KERNEL__ 184#ifdef __KERNEL__
182#include <asm/sigcontext.h> 185#include <asm/sigcontext.h>
183 186
184#ifdef __386__ 187#ifdef __i386__
185 188
186#define __HAVE_ARCH_SIG_BITOPS 189#define __HAVE_ARCH_SIG_BITOPS
187 190
188#define sigaddset(set,sig) \ 191#define sigaddset(set,sig) \
189 (__builtin_constantp(sig) \ 192 (__builtin_constant_p(sig) \
190 ? __const_sigaddset((set), (sig)) \ 193 ? __const_sigaddset((set), (sig)) \
191 : __gen_sigaddset((set), (sig))) 194 : __gen_sigaddset((set), (sig)))
192 195
@@ -256,4 +259,4 @@ struct pt_regs;
256#endif /* __KERNEL__ */ 259#endif /* __KERNEL__ */
257#endif /* __ASSEMBLY__ */ 260#endif /* __ASSEMBLY__ */
258 261
259#endif 262#endif /* ASM_X86__SIGNAL_H */
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 1ebaa5cd3112..6df2615f9138 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SMP_H_ 1#ifndef ASM_X86__SMP_H
2#define _ASM_X86_SMP_H_ 2#define ASM_X86__SMP_H
3#ifndef __ASSEMBLY__ 3#ifndef __ASSEMBLY__
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <linux/init.h> 5#include <linux/init.h>
@@ -25,25 +25,21 @@ extern cpumask_t cpu_callin_map;
25extern void (*mtrr_hook)(void); 25extern void (*mtrr_hook)(void);
26extern void zap_low_mappings(void); 26extern void zap_low_mappings(void);
27 27
28extern int __cpuinit get_local_pda(int cpu);
29
28extern int smp_num_siblings; 30extern int smp_num_siblings;
29extern unsigned int num_processors; 31extern unsigned int num_processors;
30extern cpumask_t cpu_initialized; 32extern cpumask_t cpu_initialized;
31 33
32#ifdef CONFIG_SMP
33extern u16 x86_cpu_to_apicid_init[];
34extern u16 x86_bios_cpu_apicid_init[];
35extern void *x86_cpu_to_apicid_early_ptr;
36extern void *x86_bios_cpu_apicid_early_ptr;
37#else
38#define x86_cpu_to_apicid_early_ptr NULL
39#define x86_bios_cpu_apicid_early_ptr NULL
40#endif
41
42DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 34DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
43DECLARE_PER_CPU(cpumask_t, cpu_core_map); 35DECLARE_PER_CPU(cpumask_t, cpu_core_map);
44DECLARE_PER_CPU(u16, cpu_llc_id); 36DECLARE_PER_CPU(u16, cpu_llc_id);
45DECLARE_PER_CPU(u16, x86_cpu_to_apicid); 37#ifdef CONFIG_X86_32
46DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); 38DECLARE_PER_CPU(int, cpu_number);
39#endif
40
41DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
42DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
47 43
48/* Static state in head.S used to set up a CPU */ 44/* Static state in head.S used to set up a CPU */
49extern struct { 45extern struct {
@@ -54,14 +50,18 @@ extern struct {
54struct smp_ops { 50struct smp_ops {
55 void (*smp_prepare_boot_cpu)(void); 51 void (*smp_prepare_boot_cpu)(void);
56 void (*smp_prepare_cpus)(unsigned max_cpus); 52 void (*smp_prepare_cpus)(unsigned max_cpus);
57 int (*cpu_up)(unsigned cpu);
58 void (*smp_cpus_done)(unsigned max_cpus); 53 void (*smp_cpus_done)(unsigned max_cpus);
59 54
60 void (*smp_send_stop)(void); 55 void (*smp_send_stop)(void);
61 void (*smp_send_reschedule)(int cpu); 56 void (*smp_send_reschedule)(int cpu);
62 int (*smp_call_function_mask)(cpumask_t mask, 57
63 void (*func)(void *info), void *info, 58 int (*cpu_up)(unsigned cpu);
64 int wait); 59 int (*cpu_disable)(void);
60 void (*cpu_die)(unsigned int cpu);
61 void (*play_dead)(void);
62
63 void (*send_call_func_ipi)(cpumask_t mask);
64 void (*send_call_func_single_ipi)(int cpu);
65}; 65};
66 66
67/* Globals due to paravirt */ 67/* Globals due to paravirt */
@@ -98,27 +98,48 @@ static inline int __cpu_up(unsigned int cpu)
98 return smp_ops.cpu_up(cpu); 98 return smp_ops.cpu_up(cpu);
99} 99}
100 100
101static inline int __cpu_disable(void)
102{
103 return smp_ops.cpu_disable();
104}
105
106static inline void __cpu_die(unsigned int cpu)
107{
108 smp_ops.cpu_die(cpu);
109}
110
111static inline void play_dead(void)
112{
113 smp_ops.play_dead();
114}
115
101static inline void smp_send_reschedule(int cpu) 116static inline void smp_send_reschedule(int cpu)
102{ 117{
103 smp_ops.smp_send_reschedule(cpu); 118 smp_ops.smp_send_reschedule(cpu);
104} 119}
105 120
106static inline int smp_call_function_mask(cpumask_t mask, 121static inline void arch_send_call_function_single_ipi(int cpu)
107 void (*func) (void *info), void *info,
108 int wait)
109{ 122{
110 return smp_ops.smp_call_function_mask(mask, func, info, wait); 123 smp_ops.send_call_func_single_ipi(cpu);
111} 124}
112 125
126static inline void arch_send_call_function_ipi(cpumask_t mask)
127{
128 smp_ops.send_call_func_ipi(mask);
129}
130
131void cpu_disable_common(void);
113void native_smp_prepare_boot_cpu(void); 132void native_smp_prepare_boot_cpu(void);
114void native_smp_prepare_cpus(unsigned int max_cpus); 133void native_smp_prepare_cpus(unsigned int max_cpus);
115void native_smp_cpus_done(unsigned int max_cpus); 134void native_smp_cpus_done(unsigned int max_cpus);
116int native_cpu_up(unsigned int cpunum); 135int native_cpu_up(unsigned int cpunum);
136int native_cpu_disable(void);
137void native_cpu_die(unsigned int cpu);
138void native_play_dead(void);
139void play_dead_common(void);
117 140
118extern int __cpu_disable(void); 141void native_send_call_func_ipi(cpumask_t mask);
119extern void __cpu_die(unsigned int cpu); 142void native_send_call_func_single_ipi(int cpu);
120
121extern void prefill_possible_map(void);
122 143
123void smp_store_cpu_info(int id); 144void smp_store_cpu_info(int id);
124#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) 145#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
@@ -130,6 +151,14 @@ static inline int num_booting_cpus(void)
130} 151}
131#endif /* CONFIG_SMP */ 152#endif /* CONFIG_SMP */
132 153
154#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
155extern void prefill_possible_map(void);
156#else
157static inline void prefill_possible_map(void)
158{
159}
160#endif
161
133extern unsigned disabled_cpus __cpuinitdata; 162extern unsigned disabled_cpus __cpuinitdata;
134 163
135#ifdef CONFIG_X86_32_SMP 164#ifdef CONFIG_X86_32_SMP
@@ -138,7 +167,6 @@ extern unsigned disabled_cpus __cpuinitdata;
138 * from the initial startup. We map APIC_BASE very early in page_setup(), 167 * from the initial startup. We map APIC_BASE very early in page_setup(),
139 * so this is correct in the x86 case. 168 * so this is correct in the x86 case.
140 */ 169 */
141DECLARE_PER_CPU(int, cpu_number);
142#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) 170#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
143extern int safe_smp_processor_id(void); 171extern int safe_smp_processor_id(void);
144 172
@@ -161,30 +189,33 @@ extern int safe_smp_processor_id(void);
161 189
162#ifdef CONFIG_X86_LOCAL_APIC 190#ifdef CONFIG_X86_LOCAL_APIC
163 191
192#ifndef CONFIG_X86_64
164static inline int logical_smp_processor_id(void) 193static inline int logical_smp_processor_id(void)
165{ 194{
166 /* we don't want to mark this access volatile - bad code generation */ 195 /* we don't want to mark this access volatile - bad code generation */
167 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); 196 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
168} 197}
169 198
170#ifndef CONFIG_X86_64 199#include <mach_apicdef.h>
171static inline unsigned int read_apic_id(void) 200static inline unsigned int read_apic_id(void)
172{ 201{
173 return *(u32 *)(APIC_BASE + APIC_ID); 202 unsigned int reg;
203
204 reg = *(u32 *)(APIC_BASE + APIC_ID);
205
206 return GET_APIC_ID(reg);
174} 207}
175#else
176extern unsigned int read_apic_id(void);
177#endif 208#endif
178 209
179 210
180# ifdef APIC_DEFINITION 211# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
181extern int hard_smp_processor_id(void); 212extern int hard_smp_processor_id(void);
182# else 213# else
183# include <mach_apicdef.h> 214#include <mach_apicdef.h>
184static inline int hard_smp_processor_id(void) 215static inline int hard_smp_processor_id(void)
185{ 216{
186 /* we don't want to mark this access volatile - bad code generation */ 217 /* we don't want to mark this access volatile - bad code generation */
187 return GET_APIC_ID(read_apic_id()); 218 return read_apic_id();
188} 219}
189# endif /* APIC_DEFINITION */ 220# endif /* APIC_DEFINITION */
190 221
@@ -196,13 +227,5 @@ static inline int hard_smp_processor_id(void)
196 227
197#endif /* CONFIG_X86_LOCAL_APIC */ 228#endif /* CONFIG_X86_LOCAL_APIC */
198 229
199#ifdef CONFIG_HOTPLUG_CPU
200extern void cpu_exit_clear(void);
201extern void cpu_uninit(void);
202#endif
203
204extern void smp_alloc_memory(void);
205extern void lock_ipi_call_lock(void);
206extern void unlock_ipi_call_lock(void);
207#endif /* __ASSEMBLY__ */ 230#endif /* __ASSEMBLY__ */
208#endif 231#endif /* ASM_X86__SMP_H */
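
The reworked smp_ops above is the usual paravirt indirection: a table of
function pointers that thin inline wrappers (__cpu_up(), play_dead(), the
IPI senders) dispatch through, so a hypervisor can install its own
implementations at boot. A stand-alone miniature of the pattern, with
hypothetical names throughout:

#include <stdio.h>

struct demo_smp_ops {
        int  (*cpu_up)(unsigned int cpu);
        void (*play_dead)(void);
};

static int native_demo_cpu_up(unsigned int cpu)
{
        printf("native bring-up of cpu %u\n", cpu);
        return 0;
}

static void native_demo_play_dead(void)
{
        printf("native idle-forever path\n");
}

/* boot code (or a hypervisor) installs its table once, early */
static struct demo_smp_ops demo_ops = {
        .cpu_up    = native_demo_cpu_up,
        .play_dead = native_demo_play_dead,
};

static inline int demo_cpu_up(unsigned int cpu)
{
        return demo_ops.cpu_up(cpu);    /* mirrors __cpu_up() above */
}

int main(void)
{
        return demo_cpu_up(1);
}
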
diff --git a/include/asm-x86/socket.h b/include/asm-x86/socket.h
index 80af9c4ccad7..db73274c83c3 100644
--- a/include/asm-x86/socket.h
+++ b/include/asm-x86/socket.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SOCKET_H 1#ifndef ASM_X86__SOCKET_H
2#define _ASM_SOCKET_H 2#define ASM_X86__SOCKET_H
3 3
4#include <asm/sockios.h> 4#include <asm/sockios.h>
5 5
@@ -54,4 +54,4 @@
54 54
55#define SO_MARK 36 55#define SO_MARK 36
56 56
57#endif /* _ASM_SOCKET_H */ 57#endif /* ASM_X86__SOCKET_H */
diff --git a/include/asm-x86/sockios.h b/include/asm-x86/sockios.h
index 49cc72b5d3c9..a006704fdc84 100644
--- a/include/asm-x86/sockios.h
+++ b/include/asm-x86/sockios.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SOCKIOS_H 1#ifndef ASM_X86__SOCKIOS_H
2#define _ASM_X86_SOCKIOS_H 2#define ASM_X86__SOCKIOS_H
3 3
4/* Socket-level I/O control calls. */ 4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901 5#define FIOSETOWN 0x8901
@@ -10,4 +10,4 @@
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ 10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ 11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12 12
13#endif /* _ASM_X86_SOCKIOS_H */ 13#endif /* ASM_X86__SOCKIOS_H */
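
These are ordinary ioctl request numbers; SIOCGSTAMP, for instance, asks a
socket for the arrival time of the most recently received packet. A short,
illustrative userspace use (error handling trimmed; assumes a socket that
has already received data):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>

/* print when the last packet arrived on an already-connected socket */
static int print_last_rx_stamp(int sockfd)
{
        struct timeval tv;

        if (ioctl(sockfd, SIOCGSTAMP, &tv) < 0)
                return -1;      /* nothing received yet, or unsupported */

        printf("last packet: %ld.%06ld\n",
               (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}
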
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h
index 9bd48b0a534b..38f8e6bc3186 100644
--- a/include/asm-x86/sparsemem.h
+++ b/include/asm-x86/sparsemem.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SPARSEMEM_H 1#ifndef ASM_X86__SPARSEMEM_H
2#define _ASM_X86_SPARSEMEM_H 2#define ASM_X86__SPARSEMEM_H
3 3
4#ifdef CONFIG_SPARSEMEM 4#ifdef CONFIG_SPARSEMEM
5/* 5/*
@@ -31,4 +31,4 @@
31#endif 31#endif
32 32
33#endif /* CONFIG_SPARSEMEM */ 33#endif /* CONFIG_SPARSEMEM */
34#endif 34#endif /* ASM_X86__SPARSEMEM_H */
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 21e89bf92f1c..157ff7fab97a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,12 +1,12 @@
1#ifndef _X86_SPINLOCK_H_ 1#ifndef ASM_X86__SPINLOCK_H
2#define _X86_SPINLOCK_H_ 2#define ASM_X86__SPINLOCK_H
3 3
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <asm/rwlock.h> 5#include <asm/rwlock.h>
6#include <asm/page.h> 6#include <asm/page.h>
7#include <asm/processor.h> 7#include <asm/processor.h>
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9 9#include <asm/paravirt.h>
10/* 10/*
11 * Your basic SMP spinlocks, allowing only a single CPU anywhere 11 * Your basic SMP spinlocks, allowing only a single CPU anywhere
12 * 12 *
@@ -21,8 +21,10 @@
21 21
22#ifdef CONFIG_X86_32 22#ifdef CONFIG_X86_32
23# define LOCK_PTR_REG "a" 23# define LOCK_PTR_REG "a"
24# define REG_PTR_MODE "k"
24#else 25#else
25# define LOCK_PTR_REG "D" 26# define LOCK_PTR_REG "D"
27# define REG_PTR_MODE "q"
26#endif 28#endif
27 29
28#if defined(CONFIG_X86_32) && \ 30#if defined(CONFIG_X86_32) && \
@@ -54,21 +56,9 @@
54 * much between them in performance though, especially as locks are out of line. 56 * much between them in performance though, especially as locks are out of line.
55 */ 57 */
56#if (NR_CPUS < 256) 58#if (NR_CPUS < 256)
57static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 59#define TICKET_SHIFT 8
58{
59 int tmp = ACCESS_ONCE(lock->slock);
60
61 return (((tmp >> 8) & 0xff) != (tmp & 0xff));
62}
63
64static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
65{
66 int tmp = ACCESS_ONCE(lock->slock);
67
68 return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
69}
70 60
71static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 61static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
72{ 62{
73 short inc = 0x0100; 63 short inc = 0x0100;
74 64
@@ -87,30 +77,26 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
87 : "memory", "cc"); 77 : "memory", "cc");
88} 78}
89 79
90#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 80static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
91
92static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
93{ 81{
94 int tmp; 82 int tmp, new;
95 short new;
96 83
97 asm volatile("movw %2,%w0\n\t" 84 asm volatile("movzwl %2, %0\n\t"
98 "cmpb %h0,%b0\n\t" 85 "cmpb %h0,%b0\n\t"
86 "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
99 "jne 1f\n\t" 87 "jne 1f\n\t"
100 "movw %w0,%w1\n\t" 88 LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
101 "incb %h1\n\t"
102 "lock ; cmpxchgw %w1,%2\n\t"
103 "1:" 89 "1:"
104 "sete %b1\n\t" 90 "sete %b1\n\t"
105 "movzbl %b1,%0\n\t" 91 "movzbl %b1,%0\n\t"
106 : "=&a" (tmp), "=Q" (new), "+m" (lock->slock) 92 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
107 : 93 :
108 : "memory", "cc"); 94 : "memory", "cc");
109 95
110 return tmp; 96 return tmp;
111} 97}
112 98
113static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 99static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
114{ 100{
115 asm volatile(UNLOCK_LOCK_PREFIX "incb %0" 101 asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
116 : "+m" (lock->slock) 102 : "+m" (lock->slock)
@@ -118,26 +104,14 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
118 : "memory", "cc"); 104 : "memory", "cc");
119} 105}
120#else 106#else
121static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 107#define TICKET_SHIFT 16
122{
123 int tmp = ACCESS_ONCE(lock->slock);
124
125 return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
126}
127 108
128static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 109static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
129{
130 int tmp = ACCESS_ONCE(lock->slock);
131
132 return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
133}
134
135static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
136{ 110{
137 int inc = 0x00010000; 111 int inc = 0x00010000;
138 int tmp; 112 int tmp;
139 113
140 asm volatile("lock ; xaddl %0, %1\n" 114 asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
141 "movzwl %w0, %2\n\t" 115 "movzwl %w0, %2\n\t"
142 "shrl $16, %0\n\t" 116 "shrl $16, %0\n\t"
143 "1:\t" 117 "1:\t"
@@ -148,14 +122,12 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
148 /* don't need lfence here, because loads are in-order */ 122 /* don't need lfence here, because loads are in-order */
149 "jmp 1b\n" 123 "jmp 1b\n"
150 "2:" 124 "2:"
151 : "+Q" (inc), "+m" (lock->slock), "=r" (tmp) 125 : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
152 : 126 :
153 : "memory", "cc"); 127 : "memory", "cc");
154} 128}
155 129
156#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 130static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
157
158static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
159{ 131{
160 int tmp; 132 int tmp;
161 int new; 133 int new;
@@ -164,20 +136,20 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
164 "movl %0,%1\n\t" 136 "movl %0,%1\n\t"
165 "roll $16, %0\n\t" 137 "roll $16, %0\n\t"
166 "cmpl %0,%1\n\t" 138 "cmpl %0,%1\n\t"
139 "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
167 "jne 1f\n\t" 140 "jne 1f\n\t"
168 "addl $0x00010000, %1\n\t" 141 LOCK_PREFIX "cmpxchgl %1,%2\n\t"
169 "lock ; cmpxchgl %1,%2\n\t"
170 "1:" 142 "1:"
171 "sete %b1\n\t" 143 "sete %b1\n\t"
172 "movzbl %b1,%0\n\t" 144 "movzbl %b1,%0\n\t"
173 : "=&a" (tmp), "=r" (new), "+m" (lock->slock) 145 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
174 : 146 :
175 : "memory", "cc"); 147 : "memory", "cc");
176 148
177 return tmp; 149 return tmp;
178} 150}
179 151
180static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 152static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
181{ 153{
182 asm volatile(UNLOCK_LOCK_PREFIX "incw %0" 154 asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
183 : "+m" (lock->slock) 155 : "+m" (lock->slock)
@@ -186,6 +158,117 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
186} 158}
187#endif 159#endif
188 160
161static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
162{
163 int tmp = ACCESS_ONCE(lock->slock);
164
165 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
166}
167
168static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
169{
170 int tmp = ACCESS_ONCE(lock->slock);
171
172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
173}
174
175#ifdef CONFIG_PARAVIRT
176/*
177 * Define virtualization-friendly old-style lock byte lock, for use in
178 * pv_lock_ops if desired.
179 *
180 * This differs from the pre-2.6.24 spinlock by always using xchgb
181 * rather than decb to take the lock; this allows it to use a
182 * zero-initialized lock structure. It also maintains a 1-byte
183 * contention counter, so that we can implement
184 * __byte_spin_is_contended.
185 */
186struct __byte_spinlock {
187 s8 lock;
188 s8 spinners;
189};
190
191static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
192{
193 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
194 return bl->lock != 0;
195}
196
197static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
198{
199 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
200 return bl->spinners != 0;
201}
202
203static inline void __byte_spin_lock(raw_spinlock_t *lock)
204{
205 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
206 s8 val = 1;
207
208 asm("1: xchgb %1, %0\n"
209 " test %1,%1\n"
210 " jz 3f\n"
211 " " LOCK_PREFIX "incb %2\n"
212 "2: rep;nop\n"
213 " cmpb $1, %0\n"
214 " je 2b\n"
215 " " LOCK_PREFIX "decb %2\n"
216 " jmp 1b\n"
217 "3:"
218 : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
219}
220
221static inline int __byte_spin_trylock(raw_spinlock_t *lock)
222{
223 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
224 u8 old = 1;
225
226 asm("xchgb %1,%0"
227 : "+m" (bl->lock), "+q" (old) : : "memory");
228
229 return old == 0;
230}
231
232static inline void __byte_spin_unlock(raw_spinlock_t *lock)
233{
234 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
235 smp_wmb();
236 bl->lock = 0;
237}
238#else /* !CONFIG_PARAVIRT */
239static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
240{
241 return __ticket_spin_is_locked(lock);
242}
243
244static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
245{
246 return __ticket_spin_is_contended(lock);
247}
248
249static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
250{
251 __ticket_spin_lock(lock);
252}
253
254static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
255{
256 return __ticket_spin_trylock(lock);
257}
258
259static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
260{
261 __ticket_spin_unlock(lock);
262}
263
264static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
265 unsigned long flags)
266{
267 __raw_spin_lock(lock);
268}
269
270#endif /* CONFIG_PARAVIRT */
271
189static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 272static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
190{ 273{
191 while (__raw_spin_is_locked(lock)) 274 while (__raw_spin_is_locked(lock))
@@ -278,4 +361,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
278#define _raw_read_relax(lock) cpu_relax() 361#define _raw_read_relax(lock) cpu_relax()
279#define _raw_write_relax(lock) cpu_relax() 362#define _raw_write_relax(lock) cpu_relax()
280 363
281#endif 364#endif /* ASM_X86__SPINLOCK_H */
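
For readers of the hunk above: a ticket lock packs a "next ticket" counter
and a "current owner" counter into one word (TICKET_SHIFT selects the 8-bit
or 16-bit layout); lockers atomically take a ticket with xadd and spin until
the owner field reaches it, and unlock simply advances the owner. A portable
sketch of the same algorithm using GCC atomic builtins -- note it keeps two
separate counters, whereas the kernel's asm packs both into one word so a
single xadd takes a ticket and reads the owner together:

#include <stdint.h>

struct demo_ticket_lock {
        uint16_t next;   /* next ticket to hand out */
        uint16_t owner;  /* ticket currently being served */
};

static void demo_lock(struct demo_ticket_lock *l)
{
        /* take a ticket: atomic fetch-and-add, like the xadd above */
        uint16_t me = __atomic_fetch_add(&l->next, 1, __ATOMIC_RELAXED);

        /* spin until it is our turn (cpu_relax()/"rep;nop" omitted) */
        while (__atomic_load_n(&l->owner, __ATOMIC_ACQUIRE) != me)
                ;
}

static void demo_unlock(struct demo_ticket_lock *l)
{
        /* hand the lock to the next waiter, like the incb/incw above */
        __atomic_fetch_add(&l->owner, 1, __ATOMIC_RELEASE);
}
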
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
index 9029cf78cf5d..6aa9b562c508 100644
--- a/include/asm-x86/spinlock_types.h
+++ b/include/asm-x86/spinlock_types.h
@@ -1,11 +1,11 @@
1#ifndef __ASM_SPINLOCK_TYPES_H 1#ifndef ASM_X86__SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H 2#define ASM_X86__SPINLOCK_TYPES_H
3 3
4#ifndef __LINUX_SPINLOCK_TYPES_H 4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly" 5# error "please don't include this file directly"
6#endif 6#endif
7 7
8typedef struct { 8typedef struct raw_spinlock {
9 unsigned int slock; 9 unsigned int slock;
10} raw_spinlock_t; 10} raw_spinlock_t;
11 11
@@ -17,4 +17,4 @@ typedef struct {
17 17
18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19 19
20#endif 20#endif /* ASM_X86__SPINLOCK_TYPES_H */
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h
index f4bba131d068..5363e4f7e1cd 100644
--- a/include/asm-x86/srat.h
+++ b/include/asm-x86/srat.h
@@ -24,14 +24,16 @@
24 * Send feedback to Pat Gaughen <gone@us.ibm.com> 24 * Send feedback to Pat Gaughen <gone@us.ibm.com>
25 */ 25 */
26 26
27#ifndef _ASM_SRAT_H_ 27#ifndef ASM_X86__SRAT_H
28#define _ASM_SRAT_H_ 28#define ASM_X86__SRAT_H
29
30#ifndef CONFIG_ACPI_SRAT
31#error CONFIG_ACPI_SRAT not defined, and srat.h header has been included
32#endif
33 29
30#ifdef CONFIG_ACPI_NUMA
34extern int get_memcfg_from_srat(void); 31extern int get_memcfg_from_srat(void);
35extern unsigned long *get_zholes_size(int); 32#else
33static inline int get_memcfg_from_srat(void)
34{
35 return 0;
36}
37#endif
36 38
37#endif /* _ASM_SRAT_H_ */ 39#endif /* ASM_X86__SRAT_H */
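
The srat.h change above shows the standard idiom for config-dependent
interfaces: declare the real function when the option is enabled and provide
a trivial inline stub otherwise, so call sites need no #ifdefs. The shape of
the pattern, with a hypothetical CONFIG_FOO:

#ifdef CONFIG_FOO
extern int foo_probe(void);
#else
static inline int foo_probe(void)
{
        return 0;       /* feature compiled out: report "nothing found" */
}
#endif
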
diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h
index 30f82526a8e2..f43517e28532 100644
--- a/include/asm-x86/stacktrace.h
+++ b/include/asm-x86/stacktrace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_STACKTRACE_H 1#ifndef ASM_X86__STACKTRACE_H
2#define _ASM_STACKTRACE_H 1 2#define ASM_X86__STACKTRACE_H
3 3
4extern int kstack_depth_to_print; 4extern int kstack_depth_to_print;
5 5
@@ -18,4 +18,4 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18 unsigned long *stack, unsigned long bp, 18 unsigned long *stack, unsigned long bp,
19 const struct stacktrace_ops *ops, void *data); 19 const struct stacktrace_ops *ops, void *data);
20 20
21#endif 21#endif /* ASM_X86__STACKTRACE_H */
diff --git a/include/asm-x86/stat.h b/include/asm-x86/stat.h
index 5c22dcb5d17e..1e120f628905 100644
--- a/include/asm-x86/stat.h
+++ b/include/asm-x86/stat.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_STAT_H 1#ifndef ASM_X86__STAT_H
2#define _ASM_X86_STAT_H 2#define ASM_X86__STAT_H
3 3
4#define STAT_HAVE_NSEC 1 4#define STAT_HAVE_NSEC 1
5 5
@@ -111,4 +111,4 @@ struct __old_kernel_stat {
111#endif 111#endif
112}; 112};
113 113
114#endif 114#endif /* ASM_X86__STAT_H */
diff --git a/include/asm-x86/statfs.h b/include/asm-x86/statfs.h
index 7c651aa97252..3f005bc3aa5b 100644
--- a/include/asm-x86/statfs.h
+++ b/include/asm-x86/statfs.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_STATFS_H 1#ifndef ASM_X86__STATFS_H
2#define _ASM_X86_STATFS_H 2#define ASM_X86__STATFS_H
3 3
4#ifdef __i386__ 4#ifdef __i386__
5#include <asm-generic/statfs.h> 5#include <asm-generic/statfs.h>
@@ -60,4 +60,4 @@ struct compat_statfs64 {
60} __attribute__((packed)); 60} __attribute__((packed));
61 61
62#endif /* !__i386__ */ 62#endif /* !__i386__ */
63#endif 63#endif /* ASM_X86__STATFS_H */
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index b49369ad9a61..487843ed245a 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_STRING_H_ 1#ifndef ASM_X86__STRING_32_H
2#define _I386_STRING_H_ 2#define ASM_X86__STRING_32_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -29,81 +29,116 @@ extern char *strchr(const char *s, int c);
29#define __HAVE_ARCH_STRLEN 29#define __HAVE_ARCH_STRLEN
30extern size_t strlen(const char *s); 30extern size_t strlen(const char *s);
31 31
32static __always_inline void * __memcpy(void * to, const void * from, size_t n) 32static __always_inline void *__memcpy(void *to, const void *from, size_t n)
33{ 33{
34int d0, d1, d2; 34 int d0, d1, d2;
35__asm__ __volatile__( 35 asm volatile("rep ; movsl\n\t"
36 "rep ; movsl\n\t" 36 "movl %4,%%ecx\n\t"
37 "movl %4,%%ecx\n\t" 37 "andl $3,%%ecx\n\t"
38 "andl $3,%%ecx\n\t" 38 "jz 1f\n\t"
39 "jz 1f\n\t" 39 "rep ; movsb\n\t"
40 "rep ; movsb\n\t" 40 "1:"
41 "1:" 41 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
42 : "=&c" (d0), "=&D" (d1), "=&S" (d2) 42 : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
43 : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from) 43 : "memory");
44 : "memory"); 44 return to;
45return (to);
46} 45}
47 46
48/* 47/*
49 * This looks ugly, but the compiler can optimize it totally, 48 * This looks ugly, but the compiler can optimize it totally,
50 * as the count is constant. 49 * as the count is constant.
51 */ 50 */
52static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n) 51static __always_inline void *__constant_memcpy(void *to, const void *from,
52 size_t n)
53{ 53{
54 long esi, edi; 54 long esi, edi;
55 if (!n) return to; 55 if (!n)
56#if 1 /* want to do small copies with non-string ops? */ 56 return to;
57
57 switch (n) { 58 switch (n) {
58 case 1: *(char*)to = *(char*)from; return to; 59 case 1:
59 case 2: *(short*)to = *(short*)from; return to; 60 *(char *)to = *(char *)from;
60 case 4: *(int*)to = *(int*)from; return to; 61 return to;
61#if 1 /* including those doable with two moves? */ 62 case 2:
62 case 3: *(short*)to = *(short*)from; 63 *(short *)to = *(short *)from;
63 *((char*)to+2) = *((char*)from+2); return to; 64 return to;
64 case 5: *(int*)to = *(int*)from; 65 case 4:
65 *((char*)to+4) = *((char*)from+4); return to; 66 *(int *)to = *(int *)from;
66 case 6: *(int*)to = *(int*)from; 67 return to;
67 *((short*)to+2) = *((short*)from+2); return to; 68
68 case 8: *(int*)to = *(int*)from; 69 case 3:
69 *((int*)to+1) = *((int*)from+1); return to; 70 *(short *)to = *(short *)from;
70#endif 71 *((char *)to + 2) = *((char *)from + 2);
72 return to;
73 case 5:
74 *(int *)to = *(int *)from;
75 *((char *)to + 4) = *((char *)from + 4);
76 return to;
77 case 6:
78 *(int *)to = *(int *)from;
79 *((short *)to + 2) = *((short *)from + 2);
80 return to;
81 case 8:
82 *(int *)to = *(int *)from;
83 *((int *)to + 1) = *((int *)from + 1);
84 return to;
71 } 85 }
72#endif 86
73 esi = (long) from; 87 esi = (long)from;
74 edi = (long) to; 88 edi = (long)to;
75 if (n >= 5*4) { 89 if (n >= 5 * 4) {
76 /* large block: use rep prefix */ 90 /* large block: use rep prefix */
77 int ecx; 91 int ecx;
78 __asm__ __volatile__( 92 asm volatile("rep ; movsl"
79 "rep ; movsl" 93 : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
80 : "=&c" (ecx), "=&D" (edi), "=&S" (esi) 94 : "0" (n / 4), "1" (edi), "2" (esi)
81 : "0" (n/4), "1" (edi),"2" (esi) 95 : "memory"
82 : "memory"
83 ); 96 );
84 } else { 97 } else {
85 /* small block: don't clobber ecx + smaller code */ 98 /* small block: don't clobber ecx + smaller code */
86 if (n >= 4*4) __asm__ __volatile__("movsl" 99 if (n >= 4 * 4)
87 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 100 asm volatile("movsl"
88 if (n >= 3*4) __asm__ __volatile__("movsl" 101 : "=&D"(edi), "=&S"(esi)
89 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 102 : "0"(edi), "1"(esi)
90 if (n >= 2*4) __asm__ __volatile__("movsl" 103 : "memory");
91 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 104 if (n >= 3 * 4)
92 if (n >= 1*4) __asm__ __volatile__("movsl" 105 asm volatile("movsl"
93 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 106 : "=&D"(edi), "=&S"(esi)
107 : "0"(edi), "1"(esi)
108 : "memory");
109 if (n >= 2 * 4)
110 asm volatile("movsl"
111 : "=&D"(edi), "=&S"(esi)
112 : "0"(edi), "1"(esi)
113 : "memory");
114 if (n >= 1 * 4)
115 asm volatile("movsl"
116 : "=&D"(edi), "=&S"(esi)
117 : "0"(edi), "1"(esi)
118 : "memory");
94 } 119 }
95 switch (n % 4) { 120 switch (n % 4) {
96 /* tail */ 121 /* tail */
97 case 0: return to; 122 case 0:
98 case 1: __asm__ __volatile__("movsb" 123 return to;
99 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 124 case 1:
100 return to; 125 asm volatile("movsb"
101 case 2: __asm__ __volatile__("movsw" 126 : "=&D"(edi), "=&S"(esi)
102 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 127 : "0"(edi), "1"(esi)
103 return to; 128 : "memory");
104 default: __asm__ __volatile__("movsw\n\tmovsb" 129 return to;
105 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 130 case 2:
106 return to; 131 asm volatile("movsw"
132 : "=&D"(edi), "=&S"(esi)
133 : "0"(edi), "1"(esi)
134 : "memory");
135 return to;
136 default:
137 asm volatile("movsw\n\tmovsb"
138 : "=&D"(edi), "=&S"(esi)
139 : "0"(edi), "1"(esi)
140 : "memory");
141 return to;
107 } 142 }
108} 143}
109 144
@@ -117,87 +152,86 @@ static __always_inline void * __constant_memcpy(void * to, const void * from, si
117 * This CPU favours 3DNow strongly (eg AMD Athlon) 152 * This CPU favours 3DNow strongly (eg AMD Athlon)
118 */ 153 */
119 154
120static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) 155static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
121{ 156{
122 if (len < 512) 157 if (len < 512)
123 return __constant_memcpy(to, from, len); 158 return __constant_memcpy(to, from, len);
124 return _mmx_memcpy(to, from, len); 159 return _mmx_memcpy(to, from, len);
125} 160}
126 161
127static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) 162static inline void *__memcpy3d(void *to, const void *from, size_t len)
128{ 163{
129 if (len < 512) 164 if (len < 512)
130 return __memcpy(to, from, len); 165 return __memcpy(to, from, len);
131 return _mmx_memcpy(to, from, len); 166 return _mmx_memcpy(to, from, len);
132} 167}
133 168
134#define memcpy(t, f, n) \ 169#define memcpy(t, f, n) \
135(__builtin_constant_p(n) ? \ 170 (__builtin_constant_p((n)) \
136 __constant_memcpy3d((t),(f),(n)) : \ 171 ? __constant_memcpy3d((t), (f), (n)) \
137 __memcpy3d((t),(f),(n))) 172 : __memcpy3d((t), (f), (n)))
138 173
139#else 174#else
140 175
141/* 176/*
142 * No 3D Now! 177 * No 3D Now!
143 */ 178 */
144 179
145#define memcpy(t, f, n) \ 180#define memcpy(t, f, n) \
146(__builtin_constant_p(n) ? \ 181 (__builtin_constant_p((n)) \
147 __constant_memcpy((t),(f),(n)) : \ 182 ? __constant_memcpy((t), (f), (n)) \
148 __memcpy((t),(f),(n))) 183 : __memcpy((t), (f), (n)))
149 184
150#endif 185#endif
151 186
152#define __HAVE_ARCH_MEMMOVE 187#define __HAVE_ARCH_MEMMOVE
153void *memmove(void * dest,const void * src, size_t n); 188void *memmove(void *dest, const void *src, size_t n);
154 189
155#define memcmp __builtin_memcmp 190#define memcmp __builtin_memcmp
156 191
157#define __HAVE_ARCH_MEMCHR 192#define __HAVE_ARCH_MEMCHR
158extern void *memchr(const void * cs,int c,size_t count); 193extern void *memchr(const void *cs, int c, size_t count);
159 194
160static inline void * __memset_generic(void * s, char c,size_t count) 195static inline void *__memset_generic(void *s, char c, size_t count)
161{ 196{
162int d0, d1; 197 int d0, d1;
163__asm__ __volatile__( 198 asm volatile("rep\n\t"
164 "rep\n\t" 199 "stosb"
165 "stosb" 200 : "=&c" (d0), "=&D" (d1)
166 : "=&c" (d0), "=&D" (d1) 201 : "a" (c), "1" (s), "0" (count)
167 :"a" (c),"1" (s),"0" (count) 202 : "memory");
168 :"memory"); 203 return s;
169return s;
170} 204}
171 205
172/* we might want to write optimized versions of these later */ 206/* we might want to write optimized versions of these later */
173#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) 207#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
174 208
175/* 209/*
176 * memset(x,0,y) is a reasonably common thing to do, so we want to fill 210 * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
177 * things 32 bits at a time even when we don't know the size of the 211 * things 32 bits at a time even when we don't know the size of the
178 * area at compile-time.. 212 * area at compile-time..
179 */ 213 */
180static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count) 214static __always_inline
215void *__constant_c_memset(void *s, unsigned long c, size_t count)
181{ 216{
182int d0, d1; 217 int d0, d1;
183__asm__ __volatile__( 218 asm volatile("rep ; stosl\n\t"
184 "rep ; stosl\n\t" 219 "testb $2,%b3\n\t"
185 "testb $2,%b3\n\t" 220 "je 1f\n\t"
186 "je 1f\n\t" 221 "stosw\n"
187 "stosw\n" 222 "1:\ttestb $1,%b3\n\t"
188 "1:\ttestb $1,%b3\n\t" 223 "je 2f\n\t"
189 "je 2f\n\t" 224 "stosb\n"
190 "stosb\n" 225 "2:"
191 "2:" 226 : "=&c" (d0), "=&D" (d1)
192 :"=&c" (d0), "=&D" (d1) 227 : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
193 :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) 228 : "memory");
194 :"memory"); 229 return s;
195return (s);
196} 230}
197 231
198/* Added by Gertjan van Wingerde to make minix and sysv module work */ 232/* Added by Gertjan van Wingerde to make minix and sysv module work */
199#define __HAVE_ARCH_STRNLEN 233#define __HAVE_ARCH_STRNLEN
200extern size_t strnlen(const char * s, size_t count); 234extern size_t strnlen(const char *s, size_t count);
201/* end of additional stuff */ 235/* end of additional stuff */
202 236
203#define __HAVE_ARCH_STRSTR 237#define __HAVE_ARCH_STRSTR
@@ -207,67 +241,86 @@ extern char *strstr(const char *cs, const char *ct);
207 * This looks horribly ugly, but the compiler can optimize it totally, 241 * This looks horribly ugly, but the compiler can optimize it totally,
208 * as we by now know that both pattern and count are constant.. 242 * as we by now know that both pattern and count are constant..
209 */ 243 */
210static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) 244static __always_inline
245void *__constant_c_and_count_memset(void *s, unsigned long pattern,
246 size_t count)
211{ 247{
212 switch (count) { 248 switch (count) {
249 case 0:
250 return s;
251 case 1:
252 *(unsigned char *)s = pattern & 0xff;
253 return s;
254 case 2:
255 *(unsigned short *)s = pattern & 0xffff;
256 return s;
257 case 3:
258 *(unsigned short *)s = pattern & 0xffff;
259 *((unsigned char *)s + 2) = pattern & 0xff;
260 return s;
261 case 4:
262 *(unsigned long *)s = pattern;
263 return s;
264 }
265
266#define COMMON(x) \
267 asm volatile("rep ; stosl" \
268 x \
269 : "=&c" (d0), "=&D" (d1) \
270 : "a" (eax), "0" (count/4), "1" ((long)s) \
271 : "memory")
272
273 {
274 int d0, d1;
275#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
276 /* Workaround for broken gcc 4.0 */
277 register unsigned long eax asm("%eax") = pattern;
278#else
279 unsigned long eax = pattern;
280#endif
281
282 switch (count % 4) {
213 case 0: 283 case 0:
284 COMMON("");
214 return s; 285 return s;
215 case 1: 286 case 1:
216 *(unsigned char *)s = pattern & 0xff; 287 COMMON("\n\tstosb");
217 return s; 288 return s;
218 case 2: 289 case 2:
219 *(unsigned short *)s = pattern & 0xffff; 290 COMMON("\n\tstosw");
220 return s;
221 case 3:
222 *(unsigned short *)s = pattern & 0xffff;
223 *(2+(unsigned char *)s) = pattern & 0xff;
224 return s; 291 return s;
225 case 4: 292 default:
226 *(unsigned long *)s = pattern; 293 COMMON("\n\tstosw\n\tstosb");
227 return s; 294 return s;
295 }
228 } 296 }
229#define COMMON(x) \ 297
230__asm__ __volatile__( \
231 "rep ; stosl" \
232 x \
233 : "=&c" (d0), "=&D" (d1) \
234 : "a" (pattern),"0" (count/4),"1" ((long) s) \
235 : "memory")
236{
237 int d0, d1;
238 switch (count % 4) {
239 case 0: COMMON(""); return s;
240 case 1: COMMON("\n\tstosb"); return s;
241 case 2: COMMON("\n\tstosw"); return s;
242 default: COMMON("\n\tstosw\n\tstosb"); return s;
243 }
244}
245
246#undef COMMON 298#undef COMMON
247} 299}
248 300
249#define __constant_c_x_memset(s, c, count) \ 301#define __constant_c_x_memset(s, c, count) \
250(__builtin_constant_p(count) ? \ 302 (__builtin_constant_p(count) \
251 __constant_c_and_count_memset((s),(c),(count)) : \ 303 ? __constant_c_and_count_memset((s), (c), (count)) \
252 __constant_c_memset((s),(c),(count))) 304 : __constant_c_memset((s), (c), (count)))
253 305
254#define __memset(s, c, count) \ 306#define __memset(s, c, count) \
255(__builtin_constant_p(count) ? \ 307 (__builtin_constant_p(count) \
256 __constant_count_memset((s),(c),(count)) : \ 308 ? __constant_count_memset((s), (c), (count)) \
257 __memset_generic((s),(c),(count))) 309 : __memset_generic((s), (c), (count)))
258 310
259#define __HAVE_ARCH_MEMSET 311#define __HAVE_ARCH_MEMSET
260#define memset(s, c, count) \ 312#define memset(s, c, count) \
261(__builtin_constant_p(c) ? \ 313 (__builtin_constant_p(c) \
262 __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ 314 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
263 __memset((s),(c),(count))) 315 (count)) \
316 : __memset((s), (c), (count)))
264 317
265/* 318/*
266 * find the first occurrence of byte 'c', or 1 past the area if none 319 * find the first occurrence of byte 'c', or 1 past the area if none
267 */ 320 */
268#define __HAVE_ARCH_MEMSCAN 321#define __HAVE_ARCH_MEMSCAN
269extern void *memscan(void * addr, int c, size_t size); 322extern void *memscan(void *addr, int c, size_t size);
270 323
271#endif /* __KERNEL__ */ 324#endif /* __KERNEL__ */
272 325
273#endif 326#endif /* ASM_X86__STRING_32_H */
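
One detail worth spelling out from the memset() macro above: multiplying the
fill byte by 0x01010101 replicates it into all four bytes of a word, which
is what lets the rep;stosl paths store 32 bits per iteration. A stand-alone
check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned char c = 0xAB;
        unsigned long pattern = 0x01010101UL * c;

        /* 0xAB -> 0xABABABAB: each word store now fills four bytes */
        printf("%08lx\n", pattern);
        return 0;
}
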
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h
index 52b5ab383395..a2add11d3b66 100644
--- a/include/asm-x86/string_64.h
+++ b/include/asm-x86/string_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_STRING_H_ 1#ifndef ASM_X86__STRING_64_H
2#define _X86_64_STRING_H_ 2#define ASM_X86__STRING_64_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -57,4 +57,4 @@ int strcmp(const char *cs, const char *ct);
57 57
58#endif /* __KERNEL__ */ 58#endif /* __KERNEL__ */
59 59
60#endif 60#endif /* ASM_X86__STRING_64_H */
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/summit/apic.h
index 1f76c2e70232..c5b2e4b10358 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/summit/apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef __ASM_SUMMIT_APIC_H
2#define __ASM_MACH_APIC_H 2#define __ASM_SUMMIT_APIC_H
3 3
4#include <asm/smp.h> 4#include <asm/smp.h>
5 5
@@ -21,7 +21,7 @@ static inline cpumask_t target_cpus(void)
21 * Just start on cpu 0. IRQ balancing will spread load 21 * Just start on cpu 0. IRQ balancing will spread load
22 */ 22 */
23 return cpumask_of_cpu(0); 23 return cpumask_of_cpu(0);
24} 24}
25#define TARGET_CPUS (target_cpus()) 25#define TARGET_CPUS (target_cpus())
26 26
27#define INT_DELIVERY_MODE (dest_LowestPrio) 27#define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -30,10 +30,10 @@ static inline cpumask_t target_cpus(void)
30static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) 30static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
31{ 31{
32 return 0; 32 return 0;
33} 33}
34 34
35/* we don't use the phys_cpu_present_map to indicate apicid presence */ 35/* we don't use the phys_cpu_present_map to indicate apicid presence */
36static inline unsigned long check_apicid_present(int bit) 36static inline unsigned long check_apicid_present(int bit)
37{ 37{
38 return 1; 38 return 1;
39} 39}
@@ -63,10 +63,10 @@ static inline void init_apic_ldr(void)
63 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ 63 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
64 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); 64 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
65 id = my_cluster | (1UL << count); 65 id = my_cluster | (1UL << count);
66 apic_write_around(APIC_DFR, APIC_DFR_VALUE); 66 apic_write(APIC_DFR, APIC_DFR_VALUE);
67 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 67 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
68 val |= SET_APIC_LOGICAL_ID(id); 68 val |= SET_APIC_LOGICAL_ID(id);
69 apic_write_around(APIC_LDR, val); 69 apic_write(APIC_LDR, val);
70} 70}
71 71
72static inline int multi_timer_check(int apic, int irq) 72static inline int multi_timer_check(int apic, int irq)
@@ -143,22 +143,22 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
143 int num_bits_set; 143 int num_bits_set;
144 int cpus_found = 0; 144 int cpus_found = 0;
145 int cpu; 145 int cpu;
146 int apicid; 146 int apicid;
147 147
148 num_bits_set = cpus_weight(cpumask); 148 num_bits_set = cpus_weight(cpumask);
149 /* Return id to all */ 149 /* Return id to all */
150 if (num_bits_set == NR_CPUS) 150 if (num_bits_set == NR_CPUS)
151 return (int) 0xFF; 151 return (int) 0xFF;
152 /* 152 /*
153 * The cpus in the mask must all be on the apic cluster. If they are not 153 * The cpus in the mask must all be on the apic cluster. If they are not
154 * on the same apicid cluster, return the default value of TARGET_CPUS. 154 * on the same apicid cluster, return the default value of TARGET_CPUS.
155 */ 155 */
156 cpu = first_cpu(cpumask); 156 cpu = first_cpu(cpumask);
157 apicid = cpu_to_logical_apicid(cpu); 157 apicid = cpu_to_logical_apicid(cpu);
158 while (cpus_found < num_bits_set) { 158 while (cpus_found < num_bits_set) {
159 if (cpu_isset(cpu, cpumask)) { 159 if (cpu_isset(cpu, cpumask)) {
160 int new_apicid = cpu_to_logical_apicid(cpu); 160 int new_apicid = cpu_to_logical_apicid(cpu);
161 if (apicid_cluster(apicid) != 161 if (apicid_cluster(apicid) !=
162 apicid_cluster(new_apicid)){ 162 apicid_cluster(new_apicid)){
163 printk ("%s: Not a valid mask!\n",__FUNCTION__); 163 printk ("%s: Not a valid mask!\n",__FUNCTION__);
164 return 0xFF; 164 return 0xFF;
@@ -182,4 +182,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
182 return hard_smp_processor_id() >> index_msb; 182 return hard_smp_processor_id() >> index_msb;
183} 183}
184 184
185#endif /* __ASM_MACH_APIC_H */ 185#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/include/asm-x86/summit/apicdef.h b/include/asm-x86/summit/apicdef.h
new file mode 100644
index 000000000000..f3fbca1f61c1
--- /dev/null
+++ b/include/asm-x86/summit/apicdef.h
@@ -0,0 +1,13 @@
1#ifndef __ASM_SUMMIT_APICDEF_H
2#define __ASM_SUMMIT_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (x>>24)&0xFF;
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
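
On Summit the APIC ID occupies bits 24-31 of the APIC ID register, so the
get_apic_id() above is a plain shift-and-mask. A stand-alone check of the
extraction:

#include <stdio.h>

static unsigned get_apic_id_demo(unsigned long x)
{
        return (x >> 24) & 0xFF;
}

int main(void)
{
        /* register value carrying APIC ID 0x12 in the top byte */
        printf("%u\n", get_apic_id_demo(0x12345678UL));  /* prints 18 */
        return 0;
}
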
diff --git a/include/asm-x86/mach-summit/mach_ipi.h b/include/asm-x86/summit/ipi.h
index 9404c535b7ec..53bd1e7bd7b4 100644
--- a/include/asm-x86/mach-summit/mach_ipi.h
+++ b/include/asm-x86/summit/ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef __ASM_SUMMIT_IPI_H
2#define __ASM_MACH_IPI_H 2#define __ASM_SUMMIT_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/include/asm-x86/mach-summit/irq_vectors_limits.h b/include/asm-x86/summit/irq_vectors_limits.h
index 890ce3f5e09a..890ce3f5e09a 100644
--- a/include/asm-x86/mach-summit/irq_vectors_limits.h
+++ b/include/asm-x86/summit/irq_vectors_limits.h
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/summit/mpparse.h
index fdf591701339..013ce6fab2d5 100644
--- a/include/asm-x86/mach-summit/mach_mpparse.h
+++ b/include/asm-x86/summit/mpparse.h
@@ -1,7 +1,6 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef __ASM_SUMMIT_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define __ASM_SUMMIT_MPPARSE_H
3 3
4#include <mach_apic.h>
5#include <asm/tsc.h> 4#include <asm/tsc.h>
6 5
7extern int use_cyclone; 6extern int use_cyclone;
@@ -12,11 +11,11 @@ extern void setup_summit(void);
12#define setup_summit() {} 11#define setup_summit() {}
13#endif 12#endif
14 13
15static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 14static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
16 char *productid) 15 char *productid)
17{ 16{
18 if (!strncmp(oem, "IBM ENSW", 8) && 17 if (!strncmp(oem, "IBM ENSW", 8) &&
19 (!strncmp(productid, "VIGIL SMP", 9) 18 (!strncmp(productid, "VIGIL SMP", 9)
20 || !strncmp(productid, "EXA", 3) 19 || !strncmp(productid, "EXA", 3)
21 || !strncmp(productid, "RUTHLESS SMP", 12))){ 20 || !strncmp(productid, "RUTHLESS SMP", 12))){
22 mark_tsc_unstable("Summit based system"); 21 mark_tsc_unstable("Summit based system");
@@ -107,4 +106,4 @@ static inline int is_WPEG(struct rio_detail *rio){
107 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); 106 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
108} 107}
109 108
110#endif /* __ASM_MACH_MPPARSE_H */ 109#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c080aa8a..acb6d4d491f4 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef ASM_X86__SUSPEND_32_H
7#define ASM_X86__SUSPEND_32_H
8
6#include <asm/desc.h> 9#include <asm/desc.h>
7#include <asm/i387.h> 10#include <asm/i387.h>
8 11
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
44/* routines for saving/restoring kernel state */ 47/* routines for saving/restoring kernel state */
45extern int acpi_save_state_mem(void); 48extern int acpi_save_state_mem(void);
46#endif 49#endif
50
51#endif /* ASM_X86__SUSPEND_32_H */
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
index dc3262b43072..cf821dd310e8 100644
--- a/include/asm-x86/suspend_64.h
+++ b/include/asm-x86/suspend_64.h
@@ -3,8 +3,8 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_64_SUSPEND_H 6#ifndef ASM_X86__SUSPEND_64_H
7#define __ASM_X86_64_SUSPEND_H 7#define ASM_X86__SUSPEND_64_H
8 8
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
@@ -49,4 +49,4 @@ extern int acpi_save_state_mem(void);
49extern char core_restore_code; 49extern char core_restore_code;
50extern char restore_registers; 50extern char restore_registers;
51 51
52#endif /* __ASM_X86_64_SUSPEND_H */ 52#endif /* ASM_X86__SUSPEND_64_H */
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index f5d9e74b1e4a..1e20adbcad4b 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SWIOTLB_H 1#ifndef ASM_X86__SWIOTLB_H
2#define _ASM_SWIOTLB_H 1 2#define ASM_X86__SWIOTLB_H
3 3
4#include <asm/dma-mapping.h> 4#include <asm/dma-mapping.h>
5 5
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
35 int nents, int direction); 35 int nents, int direction);
36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, 36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
37 int nents, int direction); 37 int nents, int direction);
38extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); 38extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
39extern void swiotlb_free_coherent(struct device *hwdev, size_t size, 39extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
40 void *vaddr, dma_addr_t dma_handle); 40 void *vaddr, dma_addr_t dma_handle);
41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); 41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
@@ -45,12 +45,14 @@ extern int swiotlb_force;
45 45
46#ifdef CONFIG_SWIOTLB 46#ifdef CONFIG_SWIOTLB
47extern int swiotlb; 47extern int swiotlb;
48extern void pci_swiotlb_init(void);
48#else 49#else
49#define swiotlb 0 50#define swiotlb 0
51static inline void pci_swiotlb_init(void)
52{
53}
50#endif 54#endif
51 55
52extern void pci_swiotlb_init(void);
53
54static inline void dma_mark_clean(void *addr, size_t size) {} 56static inline void dma_mark_clean(void *addr, size_t size) {}
55 57
56#endif /* _ASM_SWIOTLB_H */ 58#endif /* ASM_X86__SWIOTLB_H */
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index b47a1d0b8a83..b689bee71104 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -1,5 +1,5 @@
1#ifndef _I386_SYNC_BITOPS_H 1#ifndef ASM_X86__SYNC_BITOPS_H
2#define _I386_SYNC_BITOPS_H 2#define ASM_X86__SYNC_BITOPS_H
3 3
4/* 4/*
5 * Copyright 1992, Linus Torvalds. 5 * Copyright 1992, Linus Torvalds.
@@ -127,4 +127,4 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
127 127
128#undef ADDR 128#undef ADDR
129 129
130#endif /* _I386_SYNC_BITOPS_H */ 130#endif /* ASM_X86__SYNC_BITOPS_H */
diff --git a/include/asm-x86/syscall.h b/include/asm-x86/syscall.h
new file mode 100644
index 000000000000..04c47dc5597c
--- /dev/null
+++ b/include/asm-x86/syscall.h
@@ -0,0 +1,211 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_SYSCALL_H
14#define _ASM_SYSCALL_H 1
15
16#include <linux/sched.h>
17#include <linux/err.h>
18
19static inline long syscall_get_nr(struct task_struct *task,
20 struct pt_regs *regs)
21{
22 /*
23 * We always sign-extend a -1 value being set here,
24 * so this is always either -1L or a syscall number.
25 */
26 return regs->orig_ax;
27}
28
29static inline void syscall_rollback(struct task_struct *task,
30 struct pt_regs *regs)
31{
32 regs->ax = regs->orig_ax;
33}
34
35static inline long syscall_get_error(struct task_struct *task,
36 struct pt_regs *regs)
37{
38 unsigned long error = regs->ax;
39#ifdef CONFIG_IA32_EMULATION
40 /*
41 * TS_COMPAT is set for 32-bit syscall entries and then
42 * remains set until we return to user mode.
43 */
44 if (task_thread_info(task)->status & TS_COMPAT)
45 /*
46 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
47 * and will match correctly in comparisons.
48 */
49 error = (long) (int) error;
50#endif
51 return IS_ERR_VALUE(error) ? error : 0;
52}
53
54static inline long syscall_get_return_value(struct task_struct *task,
55 struct pt_regs *regs)
56{
57 return regs->ax;
58}
59
60static inline void syscall_set_return_value(struct task_struct *task,
61 struct pt_regs *regs,
62 int error, long val)
63{
64 regs->ax = (long) error ?: val;
65}
66
67#ifdef CONFIG_X86_32
68
69static inline void syscall_get_arguments(struct task_struct *task,
70 struct pt_regs *regs,
71 unsigned int i, unsigned int n,
72 unsigned long *args)
73{
74 BUG_ON(i + n > 6);
75 memcpy(args, &regs->bx + i, n * sizeof(args[0]));
76}
77
78static inline void syscall_set_arguments(struct task_struct *task,
79 struct pt_regs *regs,
80 unsigned int i, unsigned int n,
81 const unsigned long *args)
82{
83 BUG_ON(i + n > 6);
84 memcpy(&regs->bx + i, args, n * sizeof(args[0]));
85}
86
87#else /* CONFIG_X86_64 */
88
89static inline void syscall_get_arguments(struct task_struct *task,
90 struct pt_regs *regs,
91 unsigned int i, unsigned int n,
92 unsigned long *args)
93{
94# ifdef CONFIG_IA32_EMULATION
95 if (task_thread_info(task)->status & TS_COMPAT)
96 switch (i + n) {
97 case 6:
98 if (!n--) break;
99 *args++ = regs->bp;
100 case 5:
101 if (!n--) break;
102 *args++ = regs->di;
103 case 4:
104 if (!n--) break;
105 *args++ = regs->si;
106 case 3:
107 if (!n--) break;
108 *args++ = regs->dx;
109 case 2:
110 if (!n--) break;
111 *args++ = regs->cx;
112 case 1:
113 if (!n--) break;
114 *args++ = regs->bx;
115 case 0:
116 if (!n--) break;
117 default:
118 BUG();
119 break;
120 }
121 else
122# endif
123 switch (i + n) {
124 case 6:
125 if (!n--) break;
126 *args++ = regs->r9;
127 case 5:
128 if (!n--) break;
129 *args++ = regs->r8;
130 case 4:
131 if (!n--) break;
132 *args++ = regs->r10;
133 case 3:
134 if (!n--) break;
135 *args++ = regs->dx;
136 case 2:
137 if (!n--) break;
138 *args++ = regs->si;
139 case 1:
140 if (!n--) break;
141 *args++ = regs->di;
142 case 0:
143 if (!n--) break;
144 default:
145 BUG();
146 break;
147 }
148}
149
150static inline void syscall_set_arguments(struct task_struct *task,
151 struct pt_regs *regs,
152 unsigned int i, unsigned int n,
153 const unsigned long *args)
154{
155# ifdef CONFIG_IA32_EMULATION
156 if (task_thread_info(task)->status & TS_COMPAT)
157 switch (i + n) {
158 case 6:
159 if (!n--) break;
160 regs->bp = *args++;
161 case 5:
162 if (!n--) break;
163 regs->di = *args++;
164 case 4:
165 if (!n--) break;
166 regs->si = *args++;
167 case 3:
168 if (!n--) break;
169 regs->dx = *args++;
170 case 2:
171 if (!n--) break;
172 regs->cx = *args++;
173 case 1:
174 if (!n--) break;
175 regs->bx = *args++;
176 case 0:
177 if (!n--) break;
178 default:
179 BUG();
180 }
181 else
182# endif
183 switch (i + n) {
184 case 6:
185 if (!n--) break;
186 regs->r9 = *args++;
187 case 5:
188 if (!n--) break;
189 regs->r8 = *args++;
190 case 4:
191 if (!n--) break;
192 regs->r10 = *args++;
193 case 3:
194 if (!n--) break;
195 regs->dx = *args++;
196 case 2:
197 if (!n--) break;
198 regs->si = *args++;
199 case 1:
200 if (!n--) break;
201 regs->di = *args++;
202 case 0:
203 if (!n--) break;
204 default:
205 BUG();
206 }
207}
208
209#endif /* CONFIG_X86_32 */
210
211#endif /* _ASM_SYSCALL_H */
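
The switch statements in syscall_get_arguments()/syscall_set_arguments()
above rely on deliberate fall-through: entering at case (i + n) and
decrementing n at each label copies exactly n consecutive registers,
highest-numbered argument first. The same control flow over a plain array,
as a stand-alone sketch (the BUG() guard is omitted):

#include <stdio.h>

static void copy_args(const long regs[6], unsigned int i,
                      unsigned int n, long *args)
{
        switch (i + n) {        /* jump in at the highest wanted slot */
        case 6: if (!n--) break; *args++ = regs[5]; /* fall through */
        case 5: if (!n--) break; *args++ = regs[4]; /* fall through */
        case 4: if (!n--) break; *args++ = regs[3]; /* fall through */
        case 3: if (!n--) break; *args++ = regs[2]; /* fall through */
        case 2: if (!n--) break; *args++ = regs[1]; /* fall through */
        case 1: if (!n--) break; *args++ = regs[0]; /* fall through */
        case 0: break;
        }
}

int main(void)
{
        long regs[6] = { 10, 11, 12, 13, 14, 15 };
        long out[3];

        copy_args(regs, 1, 3, out);  /* copies regs[3], regs[2], regs[1] */
        printf("%ld %ld %ld\n", out[0], out[1], out[2]);
        return 0;
}
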
diff --git a/include/asm-x86/syscalls.h b/include/asm-x86/syscalls.h
new file mode 100644
index 000000000000..87803da44010
--- /dev/null
+++ b/include/asm-x86/syscalls.h
@@ -0,0 +1,93 @@
1/*
2 * syscalls.h - Linux syscall interfaces (arch-specific)
3 *
4 * Copyright (c) 2008 Jaswinder Singh
5 *
6 * This file is released under the GPLv2.
7 * See the file COPYING for more details.
8 */
9
10#ifndef _ASM_X86_SYSCALLS_H
11#define _ASM_X86_SYSCALLS_H
12
13#include <linux/compiler.h>
14#include <linux/linkage.h>
15#include <linux/types.h>
16#include <linux/signal.h>
17
18/* Common in X86_32 and X86_64 */
19/* kernel/ioport.c */
20asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
21
22/* X86_32 only */
23#ifdef CONFIG_X86_32
24/* kernel/process_32.c */
25asmlinkage int sys_fork(struct pt_regs);
26asmlinkage int sys_clone(struct pt_regs);
27asmlinkage int sys_vfork(struct pt_regs);
28asmlinkage int sys_execve(struct pt_regs);
29
30/* kernel/signal_32.c */
31asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
32asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
33 struct old_sigaction __user *);
34asmlinkage int sys_sigaltstack(unsigned long);
35asmlinkage unsigned long sys_sigreturn(unsigned long);
36asmlinkage int sys_rt_sigreturn(unsigned long);
37
38/* kernel/ioport.c */
39asmlinkage long sys_iopl(unsigned long);
40
41/* kernel/ldt.c */
42asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
43
44/* kernel/sys_i386_32.c */
45asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
46 unsigned long, unsigned long, unsigned long);
47struct mmap_arg_struct;
48asmlinkage int old_mmap(struct mmap_arg_struct __user *);
49struct sel_arg_struct;
50asmlinkage int old_select(struct sel_arg_struct __user *);
51asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
52struct old_utsname;
53asmlinkage int sys_uname(struct old_utsname __user *);
54struct oldold_utsname;
55asmlinkage int sys_olduname(struct oldold_utsname __user *);
56
57/* kernel/tls.c */
58asmlinkage int sys_set_thread_area(struct user_desc __user *);
59asmlinkage int sys_get_thread_area(struct user_desc __user *);
60
61/* kernel/vm86_32.c */
62asmlinkage int sys_vm86old(struct pt_regs);
63asmlinkage int sys_vm86(struct pt_regs);
64
65#else /* CONFIG_X86_32 */
66
67/* X86_64 only */
68/* kernel/process_64.c */
69asmlinkage long sys_fork(struct pt_regs *);
70asmlinkage long sys_clone(unsigned long, unsigned long,
71 void __user *, void __user *,
72 struct pt_regs *);
73asmlinkage long sys_vfork(struct pt_regs *);
74asmlinkage long sys_execve(char __user *, char __user * __user *,
75 char __user * __user *,
76 struct pt_regs *);
77
78/* kernel/ioport.c */
79asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
80
81/* kernel/signal_64.c */
82asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
83 struct pt_regs *);
84asmlinkage long sys_rt_sigreturn(struct pt_regs *);
85
86/* kernel/sys_x86_64.c */
87asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
88 unsigned long, unsigned long, unsigned long);
89struct new_utsname;
90asmlinkage long sys_uname(struct new_utsname __user *);
91
92#endif /* CONFIG_X86_32 */
93#endif /* _ASM_X86_SYSCALLS_H */
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index a2f04cd79b29..34505dd7b24d 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SYSTEM_H_ 1#ifndef ASM_X86__SYSTEM_H
2#define _ASM_X86_SYSTEM_H_ 2#define ASM_X86__SYSTEM_H
3 3
4#include <asm/asm.h> 4#include <asm/asm.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
@@ -136,7 +136,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
136#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base)) 136#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
137#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1)) 137#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
138 138
139extern void load_gs_index(unsigned); 139extern void native_load_gs_index(unsigned);
140 140
141/* 141/*
142 * Load a segment. Fall back on loading the zero 142 * Load a segment. Fall back on loading the zero
@@ -153,14 +153,14 @@ extern void load_gs_index(unsigned);
153 "jmp 2b\n" \ 153 "jmp 2b\n" \
154 ".previous\n" \ 154 ".previous\n" \
155 _ASM_EXTABLE(1b,3b) \ 155 _ASM_EXTABLE(1b,3b) \
156 : :"r" (value), "r" (0)) 156 : :"r" (value), "r" (0) : "memory")
157 157
158 158
159/* 159/*
160 * Save a segment register away 160 * Save a segment register away
161 */ 161 */
162#define savesegment(seg, value) \ 162#define savesegment(seg, value) \
163 asm volatile("mov %%" #seg ",%0":"=rm" (value)) 163 asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
164 164
165static inline unsigned long get_limit(unsigned long segment) 165static inline unsigned long get_limit(unsigned long segment)
166{ 166{
@@ -282,6 +282,7 @@ static inline void native_wbinvd(void)
282#ifdef CONFIG_X86_64 282#ifdef CONFIG_X86_64
283#define read_cr8() (native_read_cr8()) 283#define read_cr8() (native_read_cr8())
284#define write_cr8(x) (native_write_cr8(x)) 284#define write_cr8(x) (native_write_cr8(x))
285#define load_gs_index native_load_gs_index
285#endif 286#endif
286 287
287/* Clear the 'TS' bit */ 288/* Clear the 'TS' bit */
@@ -289,7 +290,7 @@ static inline void native_wbinvd(void)
289 290
290#endif/* CONFIG_PARAVIRT */ 291#endif/* CONFIG_PARAVIRT */
291 292
292#define stts() write_cr0(8 | read_cr0()) 293#define stts() write_cr0(read_cr0() | X86_CR0_TS)
293 294
294#endif /* __KERNEL__ */ 295#endif /* __KERNEL__ */
295 296
@@ -303,7 +304,6 @@ static inline void clflush(volatile void *__p)
303void disable_hlt(void); 304void disable_hlt(void);
304void enable_hlt(void); 305void enable_hlt(void);
305 306
306extern int es7000_plat;
307void cpu_idle_wait(void); 307void cpu_idle_wait(void);
308 308
309extern unsigned long arch_align_stack(unsigned long sp); 309extern unsigned long arch_align_stack(unsigned long sp);
@@ -419,4 +419,4 @@ static inline void rdtsc_barrier(void)
419 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); 419 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
420} 420}
421 421
422#endif 422#endif /* ASM_X86__SYSTEM_H */
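
The stts() change above replaces a bare magic number with the named CR0 bit;
X86_CR0_TS is bit 3 (0x8), the "task switched" flag, so the generated code
is identical and only the intent becomes readable. A trivial demonstration
of the bit arithmetic on a made-up CR0 value:

#include <stdio.h>

#define X86_CR0_TS 0x00000008UL  /* CR0.TS, bit 3: "task switched" */

int main(void)
{
        unsigned long cr0 = 0x80000011UL;  /* hypothetical CR0 contents */

        cr0 |= X86_CR0_TS;                 /* what stts() does */
        printf("%08lx\n", cr0);            /* prints 80000019 */
        return 0;
}
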
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 97fa251ccb2b..5aedb8bffc5a 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
+#ifndef ASM_X86__SYSTEM_64_H
+#define ASM_X86__SYSTEM_64_H
 
 #include <asm/segment.h>
 #include <asm/cmpxchg.h>
@@ -19,4 +19,4 @@ static inline void write_cr8(unsigned long val)
 
 #include <linux/irqflags.h>
 
-#endif
+#endif /* ASM_X86__SYSTEM_64_H */
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h
index b1a4ea00df78..e7932d7fbbab 100644
--- a/include/asm-x86/tce.h
+++ b/include/asm-x86/tce.h
@@ -21,8 +21,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#ifndef _ASM_X86_64_TCE_H
-#define _ASM_X86_64_TCE_H
+#ifndef ASM_X86__TCE_H
+#define ASM_X86__TCE_H
 
 extern unsigned int specified_table_size;
 struct iommu_table;
@@ -45,4 +45,4 @@ extern void * __init alloc_tce_table(void);
 extern void __init free_tce_table(void *tbl);
 extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
 
-#endif /* _ASM_X86_64_TCE_H */
+#endif /* ASM_X86__TCE_H */
diff --git a/include/asm-x86/termbits.h b/include/asm-x86/termbits.h
index af1b70ea440f..3d00dc5e0c71 100644
--- a/include/asm-x86/termbits.h
+++ b/include/asm-x86/termbits.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TERMBITS_H
-#define _ASM_X86_TERMBITS_H
+#ifndef ASM_X86__TERMBITS_H
+#define ASM_X86__TERMBITS_H
 
 #include <linux/posix_types.h>
 
@@ -195,4 +195,4 @@ struct ktermios {
 #define TCSADRAIN	1
 #define TCSAFLUSH	2
 
-#endif /* _ASM_X86_TERMBITS_H */
+#endif /* ASM_X86__TERMBITS_H */
diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h
index f72956331c49..e235db248071 100644
--- a/include/asm-x86/termios.h
+++ b/include/asm-x86/termios.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TERMIOS_H
-#define _ASM_X86_TERMIOS_H
+#ifndef ASM_X86__TERMIOS_H
+#define ASM_X86__TERMIOS_H
 
 #include <asm/termbits.h>
 #include <asm/ioctls.h>
@@ -110,4 +110,4 @@ static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
 
 #endif /* __KERNEL__ */
 
-#endif /* _ASM_X86_TERMIOS_H */
+#endif /* ASM_X86__TERMIOS_H */
diff --git a/include/asm-x86/therm_throt.h b/include/asm-x86/therm_throt.h
index 399bf6026b16..1c7f57b6b66e 100644
--- a/include/asm-x86/therm_throt.h
+++ b/include/asm-x86/therm_throt.h
@@ -1,9 +1,9 @@
-#ifndef __ASM_I386_THERM_THROT_H__
-#define __ASM_I386_THERM_THROT_H__ 1
+#ifndef ASM_X86__THERM_THROT_H
+#define ASM_X86__THERM_THROT_H
 
 #include <asm/atomic.h>
 
 extern atomic_t therm_throt_en;
 int therm_throt_process(int curr);
 
-#endif /* __ASM_I386_THERM_THROT_H__ */
+#endif /* ASM_X86__THERM_THROT_H */
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 77244f17993f..3f4e52bb77f5 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -1,14 +1,264 @@
-#ifndef _ASM_X86_THREAD_INFO_H
+/* thread_info.h: low-level thread information
+ *
+ * Copyright (C) 2002  David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef ASM_X86__THREAD_INFO_H
+#define ASM_X86__THREAD_INFO_H
+
+#include <linux/compiler.h>
+#include <asm/page.h>
+#include <asm/types.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ */
+#ifndef __ASSEMBLY__
+struct task_struct;
+struct exec_domain;
+#include <asm/processor.h>
+
+struct thread_info {
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	unsigned long		flags;		/* low level flags */
+	__u32			status;		/* thread synchronous flags */
+	__u32			cpu;		/* current CPU */
+	int			preempt_count;	/* 0 => preemptable,
+						   <0 => BUG */
+	mm_segment_t		addr_limit;
+	struct restart_block    restart_block;
+	void __user		*sysenter_return;
 #ifdef CONFIG_X86_32
-# include "thread_info_32.h"
+	unsigned long           previous_esp;   /* ESP of the previous stack in
+						   case of nested (IRQ) stacks
+						*/
+	__u8			supervisor_stack[0];
+#endif
+};
+
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.exec_domain	= &default_exec_domain,	\
+	.flags		= 0,			\
+	.cpu		= 0,			\
+	.preempt_count	= 1,			\
+	.addr_limit	= KERNEL_DS,		\
+	.restart_block = {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+
+#endif
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files
+ *   may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ * Warning: layout of LSW is hardcoded in entry.S
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
+#define TIF_IRET		5	/* force IRET */
+#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
+#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
+#define TIF_SECCOMP		8	/* secure computing */
+#define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
+#define TIF_NOTSC		16	/* TSC is not accessible in userland */
+#define TIF_IA32		17	/* 32bit process */
+#define TIF_FORK		18	/* ret_from_fork */
+#define TIF_ABI_PENDING		19
+#define TIF_MEMDIE		20
+#define TIF_DEBUG		21	/* uses debug registers */
+#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
+#define TIF_FREEZE		23	/* is freezing for suspend */
+#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
+#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
+#define TIF_DS_AREA_MSR		26      /* uses thread_struct.ds_area_msr */
+#define TIF_BTS_TRACE_TS	27      /* record scheduling event timestamps */
+
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_IRET		(1 << TIF_IRET)
+#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
+#define _TIF_NOTSC		(1 << TIF_NOTSC)
+#define _TIF_IA32		(1 << TIF_IA32)
+#define _TIF_FORK		(1 << TIF_FORK)
+#define _TIF_ABI_PENDING	(1 << TIF_ABI_PENDING)
+#define _TIF_DEBUG		(1 << TIF_DEBUG)
+#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
+#define _TIF_FREEZE		(1 << TIF_FREEZE)
+#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
+#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
+#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
+#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
+
+/* work to do in syscall_trace_enter() */
+#define _TIF_WORK_SYSCALL_ENTRY	\
+	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
+	 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)
+
+/* work to do in syscall_trace_leave() */
+#define _TIF_WORK_SYSCALL_EXIT	\
+	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK							\
+	(0x0000FFFF &							\
+	 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|			\
+	   _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
+
+/* work to do on any return to user space */
+#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
+
+/* Only used for 64 bit */
+#define _TIF_DO_NOTIFY_MASK \
+	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
+
+/* flags to check in __switch_to() */
+#define _TIF_WORK_CTXSW							\
+	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \
+	 _TIF_NOTSC)
+
+#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
+#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
+
+#define PREEMPT_ACTIVE		0x10000000
+
+/* thread information allocation */
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
 #else
-# include "thread_info_64.h"
+#define THREAD_FLAGS GFP_KERNEL
 #endif
 
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+#define alloc_thread_info(tsk)						\
+	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+
+#ifdef CONFIG_X86_32
+
+#define STACK_WARN	(THREAD_SIZE/8)
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+
+
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("esp") __used;
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+	return (struct thread_info *)
+		(current_stack_pointer & ~(THREAD_SIZE - 1));
+}
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	 \
+	movl $-THREAD_SIZE, reg; \
+	andl %esp, reg
+
+/* use this one if reg already contains %esp */
+#define GET_THREAD_INFO_WITH_ESP(reg) \
+	andl $-THREAD_SIZE, reg
+
+#endif
+
+#else /* X86_32 */
+
+#include <asm/pda.h>
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+static inline struct thread_info *current_thread_info(void)
+{
+	struct thread_info *ti;
+	ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
+	return ti;
+}
+
+/* do not use in interrupt context */
+static inline struct thread_info *stack_thread_info(void)
+{
+	struct thread_info *ti;
+	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+	return ti;
+}
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg) \
+	movq %gs:pda_kernelstack,reg ; \
+	subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+
+#endif
+
+#endif /* !X86_32 */
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU		0x0001	/* FPU was used by this task
+					   this quantum (SMP) */
+#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
+#define TS_POLLING		0x0004	/* true if in idle loop
+					   and not sleeping */
+#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
+#define TS_XSAVE		0x0010	/* Use xsave/xrstor */
+
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK	1
+static inline void set_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	ti->status |= TS_RESTORE_SIGMASK;
+	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+}
+#endif	/* !__ASSEMBLY__ */
+
 #ifndef __ASSEMBLY__
 extern void arch_task_cache_init(void);
 extern void free_thread_info(struct thread_info *ti);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define arch_task_cache_init arch_task_cache_init
 #endif
-#endif /* _ASM_X86_THREAD_INFO_H */
+#endif /* ASM_X86__THREAD_INFO_H */
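The 32-bit current_thread_info() in the unified header above relies on the kernel stack being THREAD_SIZE-aligned with the thread_info at its base, so rounding any in-stack address down to a THREAD_SIZE boundary finds it. A standalone sketch of that arithmetic (THREAD_SIZE fixed at 8192 here, the old non-4KSTACKS default; the addresses are made up):

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed; matches the old 8K-stack default */

static unsigned long thread_info_base(unsigned long sp)
{
	/* same operation as sp & ~(THREAD_SIZE - 1) in the header above */
	return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
	unsigned long stack = 0xc1234000UL;	/* hypothetical, 8K-aligned */
	unsigned long sp = stack + 0x1e40;	/* somewhere inside the stack */

	printf("sp %#lx -> thread_info %#lx\n", sp, thread_info_base(sp));
	return 0;
}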
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
deleted file mode 100644
index b6338829d1a8..000000000000
--- a/include/asm-x86/thread_info_32.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/* thread_info.h: i386 low-level thread information
- *
- * Copyright (C) 2002  David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds and Dave Miller
- */
-
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-#include <asm/page.h>
-
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
-/*
- * low level task data that entry.S needs immediate access to
- * - this struct should fit entirely inside of one cache line
- * - this struct shares the supervisor stack pages
- * - if the contents of this structure are changed,
- *   the assembly constants must also be changed
- */
-#ifndef __ASSEMBLY__
-
-struct thread_info {
-	struct task_struct	*task;		/* main task structure */
-	struct exec_domain	*exec_domain;	/* execution domain */
-	unsigned long		flags;		/* low level flags */
-	unsigned long		status;		/* thread-synchronous flags */
-	__u32			cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable,
-						   <0 => BUG */
-	mm_segment_t		addr_limit;	/* thread address space:
-						   0-0xBFFFFFFF user-thread
-						   0-0xFFFFFFFF kernel-thread
-						*/
-	void			*sysenter_return;
-	struct restart_block    restart_block;
-	unsigned long           previous_esp;   /* ESP of the previous stack in
-						   case of nested (IRQ) stacks
-						*/
-	__u8			supervisor_stack[0];
-};
-
-#else /* !__ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-
-#endif
-
-#define PREEMPT_ACTIVE		0x10000000
-#ifdef CONFIG_4KSTACKS
-#define THREAD_SIZE            (4096)
-#else
-#define THREAD_SIZE		(8192)
-#endif
-
-#define STACK_WARN             (THREAD_SIZE/8)
-/*
- * macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
- */
-#ifndef __ASSEMBLY__
-
-#define INIT_THREAD_INFO(tsk)			\
-{						\
-	.task		= &tsk,			\
-	.exec_domain	= &default_exec_domain,	\
-	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= 1,			\
-	.addr_limit	= KERNEL_DS,		\
-	.restart_block = {			\
-		.fn = do_no_restart_syscall,	\
-	},					\
-}
-
-#define init_thread_info	(init_thread_union.thread_info)
-#define init_stack		(init_thread_union.stack)
-
-
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __used;
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	return (struct thread_info *)
-		(current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) ((struct thread_info *)			\
-	__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE)))
-#else
-#define alloc_thread_info(tsk) ((struct thread_info *)			\
-	__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
-#endif
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg)	 \
-	movl $-THREAD_SIZE, reg; \
-	andl %esp, reg
-
-/* use this one if reg already contains %esp */
-#define GET_THREAD_INFO_WITH_ESP(reg) \
-	andl $-THREAD_SIZE, reg
-
-#endif
-
-/*
- * thread information flags
- * - these are process state flags that various
- *   assembly files may need to access
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
- */
-#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
-#define TIF_SIGPENDING		1	/* signal pending */
-#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_SINGLESTEP		3	/* restore singlestep on return to
-					   user mode */
-#define TIF_IRET		4	/* return with iret */
-#define TIF_SYSCALL_EMU		5	/* syscall emulation active */
-#define TIF_SYSCALL_AUDIT	6	/* syscall auditing active */
-#define TIF_SECCOMP		7	/* secure computing */
-#define TIF_HRTICK_RESCHED	9	/* reprogram hrtick timer */
-#define TIF_MEMDIE		16
-#define TIF_DEBUG		17	/* uses debug registers */
-#define TIF_IO_BITMAP		18	/* uses I/O bitmap */
-#define TIF_FREEZE		19	/* is freezing for suspend */
-#define TIF_NOTSC		20	/* TSC is not accessible in userland */
-#define TIF_FORCED_TF		21	/* true if TF in eflags artificially */
-#define TIF_DEBUGCTLMSR		22	/* uses thread_struct.debugctlmsr */
-#define TIF_DS_AREA_MSR		23      /* uses thread_struct.ds_area_msr */
-#define TIF_BTS_TRACE_TS	24      /* record scheduling event timestamps */
-
-#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
-#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
-#define _TIF_IRET		(1 << TIF_IRET)
-#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
-#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
-#define _TIF_HRTICK_RESCHED	(1 << TIF_HRTICK_RESCHED)
-#define _TIF_DEBUG		(1 << TIF_DEBUG)
-#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
-#define _TIF_NOTSC		(1 << TIF_NOTSC)
-#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
-#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
-
-/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK							\
-	(0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
-			_TIF_SECCOMP | _TIF_SYSCALL_EMU))
-/* work to do on any return to u-space */
-#define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)
-
-/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW						\
-	(_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR |	\
-	 _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
-#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
-#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)
-
-
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task
-					   this quantum (SMP) */
-#define TS_POLLING		0x0002	/* True if in idle loop
-					   and not sleeping */
-#define TS_RESTORE_SIGMASK	0x0004	/* restore signal mask in do_signal() */
-
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, &ti->flags);
-}
-#endif	/* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
deleted file mode 100644
index cb69f70abba1..000000000000
--- a/include/asm-x86/thread_info_64.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* thread_info.h: x86_64 low-level thread information
- *
- * Copyright (C) 2002  David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds and Dave Miller
- */
-
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
-
-#ifdef __KERNEL__
-
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/pda.h>
-
-/*
- * low level task data that entry.S needs immediate access to
- * - this struct should fit entirely inside of one cache line
- * - this struct shares the supervisor stack pages
- */
-#ifndef __ASSEMBLY__
-struct task_struct;
-struct exec_domain;
-#include <asm/processor.h>
-
-struct thread_info {
-	struct task_struct	*task;		/* main task structure */
-	struct exec_domain	*exec_domain;	/* execution domain */
-	__u32			flags;		/* low level flags */
-	__u32			status;		/* thread synchronous flags */
-	__u32			cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable,
-						   <0 => BUG */
-	mm_segment_t		addr_limit;
-	struct restart_block    restart_block;
-#ifdef CONFIG_IA32_EMULATION
-	void __user		*sysenter_return;
-#endif
-};
-#endif
-
-/*
- * macros/functions for gaining access to the thread information structure
- * preempt_count needs to be 1 initially, until the scheduler is functional.
- */
-#ifndef __ASSEMBLY__
-#define INIT_THREAD_INFO(tsk)			\
-{						\
-	.task		= &tsk,			\
-	.exec_domain	= &default_exec_domain,	\
-	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= 1,			\
-	.addr_limit	= KERNEL_DS,		\
-	.restart_block = {			\
-		.fn = do_no_restart_syscall,	\
-	},					\
-}
-
-#define init_thread_info	(init_thread_union.thread_info)
-#define init_stack		(init_thread_union.stack)
-
-static inline struct thread_info *current_thread_info(void)
-{
-	struct thread_info *ti;
-	ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
-	return ti;
-}
-
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
-{
-	struct thread_info *ti;
-	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
-	return ti;
-}
-
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
-#else
-#define THREAD_FLAGS GFP_KERNEL
-#endif
-
-#define alloc_thread_info(tsk)						\
-	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
-	movq %gs:pda_kernelstack,reg ; \
-	subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
-
-#endif
-
-/*
- * thread information flags
- * - these are process state flags that various assembly files
- *   may need to access
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
- * Warning: layout of LSW is hardcoded in entry.S
- */
-#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
-#define TIF_SIGPENDING		2	/* signal pending */
-#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
-#define TIF_IRET		5	/* force IRET */
-#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
-#define TIF_SECCOMP		8	/* secure computing */
-#define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
-#define TIF_HRTICK_RESCHED	11	/* reprogram hrtick timer */
-/* 16 free */
-#define TIF_IA32		17	/* 32bit process */
-#define TIF_FORK		18	/* ret_from_fork */
-#define TIF_ABI_PENDING		19
-#define TIF_MEMDIE		20
-#define TIF_DEBUG		21	/* uses debug registers */
-#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
-#define TIF_FREEZE		23	/* is freezing for suspend */
-#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
-#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
-#define TIF_DS_AREA_MSR		26      /* uses thread_struct.ds_area_msr */
-#define TIF_BTS_TRACE_TS	27      /* record scheduling event timestamps */
-#define TIF_NOTSC		28	/* TSC is not accessible in userland */
-
-#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
-#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
-#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
-#define _TIF_IRET		(1 << TIF_IRET)
-#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
-#define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
-#define _TIF_HRTICK_RESCHED	(1 << TIF_HRTICK_RESCHED)
-#define _TIF_IA32		(1 << TIF_IA32)
-#define _TIF_FORK		(1 << TIF_FORK)
-#define _TIF_ABI_PENDING	(1 << TIF_ABI_PENDING)
-#define _TIF_DEBUG		(1 << TIF_DEBUG)
-#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
-#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
-#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
-#define _TIF_NOTSC		(1 << TIF_NOTSC)
-
-/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK							\
-	(0x0000FFFF &							\
-	 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
-/* work to do on any return to user space */
-#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
-
-#define _TIF_DO_NOTIFY_MASK \
-	(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)
-
-/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
-	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|_TIF_NOTSC)
-#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
-#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
-
-#define PREEMPT_ACTIVE	0x10000000
-
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task
-					   this quantum (SMP) */
-#define TS_COMPAT		0x0002	/* 32bit syscall active */
-#define TS_POLLING		0x0004	/* true if in idle loop
-					   and not sleeping */
-#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
-
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, &ti->flags);
-}
-#endif	/* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h
index bce72d7a958c..3e724eef7ac4 100644
--- a/include/asm-x86/time.h
+++ b/include/asm-x86/time.h
@@ -1,5 +1,5 @@
-#ifndef _ASMX86_TIME_H
-#define _ASMX86_TIME_H
+#ifndef ASM_X86__TIME_H
+#define ASM_X86__TIME_H
 
 extern void hpet_time_init(void);
 
@@ -46,6 +46,8 @@ static inline int native_set_wallclock(unsigned long nowtime)
 
 #endif
 
+extern void time_init(void);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else /* !CONFIG_PARAVIRT */
@@ -56,4 +58,6 @@ static inline int native_set_wallclock(unsigned long nowtime)
 
 #endif /* CONFIG_PARAVIRT */
 
-#endif
+extern unsigned long __init calibrate_cpu(void);
+
+#endif /* ASM_X86__TIME_H */
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h
index 4f6fcb050c11..d0babce4b47a 100644
--- a/include/asm-x86/timer.h
+++ b/include/asm-x86/timer.h
@@ -1,5 +1,5 @@
-#ifndef _ASMi386_TIMER_H
-#define _ASMi386_TIMER_H
+#ifndef ASM_X86__TIMER_H
+#define ASM_X86__TIMER_H
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/percpu.h>
@@ -7,14 +7,17 @@
 #define TICK_SIZE (tick_nsec / 1000)
 
 unsigned long long native_sched_clock(void);
-unsigned long native_calculate_cpu_khz(void);
+unsigned long native_calibrate_tsc(void);
 
+#ifdef CONFIG_X86_32
 extern int timer_ack;
-extern int no_timer_check;
 extern int recalibrate_cpu_khz(void);
+#endif /* CONFIG_X86_32 */
+
+extern int no_timer_check;
 
 #ifndef CONFIG_PARAVIRT
-#define calculate_cpu_khz() native_calculate_cpu_khz()
+#define calibrate_tsc() native_calibrate_tsc()
 #endif
 
 /* Accelerators for sched_clock()
@@ -60,4 +63,4 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
	return ns;
 }
 
-#endif
+#endif /* ASM_X86__TIMER_H */
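For context, the "Accelerators for sched_clock()" block whose tail is visible above converts TSC cycles to nanoseconds with a fixed-point multiply and shift. A hedged standalone sketch of that scaling (a CYC2NS_SCALE_FACTOR of 10 matches the kernel of this era; the variable names are local stand-ins for the kernel's per-CPU state):

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* assumed: 2^10 fixed-point scale */

static unsigned long long cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	set_cyc2ns_scale(2000000);	/* 2 GHz TSC, i.e. 2000000 kHz */
	/* 2e9 cycles at 2 GHz is one second: expect ~1000000000 ns */
	printf("%llu ns\n", cycles_2_ns(2000000000ULL));
	return 0;
}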
diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h
index 43e5a78500c5..d1ce2416a5da 100644
--- a/include/asm-x86/timex.h
+++ b/include/asm-x86/timex.h
@@ -1,6 +1,6 @@
 /* x86 architecture timex specifications */
-#ifndef _ASM_X86_TIMEX_H
-#define _ASM_X86_TIMEX_H
+#ifndef ASM_X86__TIMEX_H
+#define ASM_X86__TIMEX_H
 
 #include <asm/processor.h>
 #include <asm/tsc.h>
@@ -16,4 +16,4 @@
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
-#endif
+#endif /* ASM_X86__TIMEX_H */
diff --git a/include/asm-x86/tlb.h b/include/asm-x86/tlb.h
index e4e9e2d07a93..db36e9e89e87 100644
--- a/include/asm-x86/tlb.h
+++ b/include/asm-x86/tlb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TLB_H
-#define _ASM_X86_TLB_H
+#ifndef ASM_X86__TLB_H
+#define ASM_X86__TLB_H
 
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
@@ -8,4 +8,4 @@
 
 #include <asm-generic/tlb.h>
 
-#endif
+#endif /* ASM_X86__TLB_H */
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 35c76ceb9f40..3cdd08b5bdb7 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TLBFLUSH_H
-#define _ASM_X86_TLBFLUSH_H
+#ifndef ASM_X86__TLBFLUSH_H
+#define ASM_X86__TLBFLUSH_H
 
 #include <linux/mm.h>
 #include <linux/sched.h>
@@ -119,6 +119,10 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,
 {
 }
 
+static inline void reset_lazy_tlbstate(void)
+{
+}
+
 #else  /* SMP */
 
 #include <asm/smp.h>
@@ -151,6 +155,12 @@ struct tlb_state {
	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
+void reset_lazy_tlbstate(void);
+#else
+static inline void reset_lazy_tlbstate(void)
+{
+}
 #endif
 
 #endif	/* SMP */
@@ -165,4 +175,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
	flush_tlb_all();
 }
 
-#endif /* _ASM_X86_TLBFLUSH_H */
+#endif /* ASM_X86__TLBFLUSH_H */
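The reset_lazy_tlbstate() additions above follow a standard kernel idiom: declare the real function where the feature exists and provide an empty static inline otherwise, so call sites stay #ifdef-free. A toy illustration of the same shape (CONFIG_SMP here is just a compile-time define standing in for the real Kconfig switch):

#include <stdio.h>

#ifdef CONFIG_SMP
void reset_lazy_tlbstate(void);		/* real definition lives in a .c file */
#else
static inline void reset_lazy_tlbstate(void)
{
	/* no per-cpu lazy TLB state to reset on UP: deliberate no-op */
}
#endif

int main(void)
{
	reset_lazy_tlbstate();	/* compiles unconditionally, no #ifdef here */
	puts("ok");
	return 0;
}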
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index dcf3f8131d6b..7eca9bc022b2 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -22,8 +22,8 @@
  *
  * Send feedback to <colpatch@us.ibm.com>
  */
-#ifndef _ASM_X86_TOPOLOGY_H
-#define _ASM_X86_TOPOLOGY_H
+#ifndef ASM_X86__TOPOLOGY_H
+#define ASM_X86__TOPOLOGY_H
 
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
@@ -35,79 +35,93 @@
 # endif
 #endif
 
+/* Node not present */
+#define NUMA_NO_NODE	(-1)
+
 #ifdef CONFIG_NUMA
 #include <linux/cpumask.h>
 #include <asm/mpspec.h>
 
-/* Mappings between logical cpu number and node number */
 #ifdef CONFIG_X86_32
-extern int cpu_to_node_map[];
-#else
-/* Returns the number of the current Node. */
-#define numa_node_id()	(early_cpu_to_node(raw_smp_processor_id()))
-#endif
-
-DECLARE_PER_CPU(int, x86_cpu_to_node_map);
-
-#ifdef CONFIG_SMP
-extern int x86_cpu_to_node_map_init[];
-extern void *x86_cpu_to_node_map_early_ptr;
-#else
-#define x86_cpu_to_node_map_early_ptr NULL
-#endif
 
+/* Mappings between node number and cpus on that node. */
 extern cpumask_t node_to_cpumask_map[];
 
-#define NUMA_NO_NODE	(-1)
+/* Mappings between logical cpu number and node number */
+extern int cpu_to_node_map[];
 
 /* Returns the number of the node containing CPU 'cpu' */
-#ifdef CONFIG_X86_32
-#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
 static inline int cpu_to_node(int cpu)
 {
	return cpu_to_node_map[cpu];
 }
+#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
 
-#else /* CONFIG_X86_64 */
-
-#ifdef CONFIG_SMP
-static inline int early_cpu_to_node(int cpu)
+/* Returns a bitmask of CPUs on Node 'node'.
+ *
+ * Side note: this function creates the returned cpumask on the stack
+ * so with a high NR_CPUS count, excessive stack space is used.  The
+ * node_to_cpumask_ptr function should be used whenever possible.
+ */
+static inline cpumask_t node_to_cpumask(int node)
 {
-	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
-
-	if (cpu_to_node_map)
-		return cpu_to_node_map[cpu];
-	else if (per_cpu_offset(cpu))
-		return per_cpu(x86_cpu_to_node_map, cpu);
-	else
-		return NUMA_NO_NODE;
+	return node_to_cpumask_map[node];
 }
-#else
-#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
-#endif
 
+#else /* CONFIG_X86_64 */
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_t *node_to_cpumask_map;
+
+/* Mappings between logical cpu number and node number */
+DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
+
+/* Returns the number of the current Node. */
+#define numa_node_id()	read_pda(nodenumber)
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+extern int cpu_to_node(int cpu);
+extern int early_cpu_to_node(int cpu);
+extern const cpumask_t *_node_to_cpumask_ptr(int node);
+extern cpumask_t node_to_cpumask(int node);
+
+#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+/* Returns the number of the node containing CPU 'cpu' */
 static inline int cpu_to_node(int cpu)
 {
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-	if (x86_cpu_to_node_map_early_ptr) {
-		printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n",
-		       (int)cpu);
-		dump_stack();
-		return ((int *)x86_cpu_to_node_map_early_ptr)[cpu];
-	}
-#endif
	return per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-#ifdef CONFIG_NUMA
+/* Same function but used if called before per_cpu areas are setup */
+static inline int early_cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map))
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+{
+	return &node_to_cpumask_map[node];
+}
+
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline cpumask_t node_to_cpumask(int node)
+{
+	return node_to_cpumask_map[node];
+}
+
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+/* Replace default node_to_cpumask_ptr with optimized version */
 #define node_to_cpumask_ptr(v, node)		\
-		cpumask_t *v = &(node_to_cpumask_map[node])
+		const cpumask_t *v = _node_to_cpumask_ptr(node)
 
 #define node_to_cpumask_ptr_next(v, node)	\
-		   v = &(node_to_cpumask_map[node])
-#endif
+			 v = _node_to_cpumask_ptr(node)
 
 #endif /* CONFIG_X86_64 */
 
@@ -117,20 +131,6 @@ static inline int cpu_to_node(int cpu)
  */
 #define parent_node(node) (node)
 
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
-/* Returns the number of the first CPU on Node 'node'. */
-static inline int node_to_first_cpu(int node)
-{
-	cpumask_t mask = node_to_cpumask(node);
-
-	return first_cpu(mask);
-}
-
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
 #define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
 
@@ -180,12 +180,44 @@ extern int __node_distance(int, int);
 #define node_distance(a, b) __node_distance(a, b)
 #endif
 
-#else /* CONFIG_NUMA */
+#else /* !CONFIG_NUMA */
 
+#define numa_node_id()		0
+#define	cpu_to_node(cpu)	0
+#define	early_cpu_to_node(cpu)	0
+
+static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+{
+	return &cpu_online_map;
+}
+static inline cpumask_t node_to_cpumask(int node)
+{
+	return cpu_online_map;
+}
+static inline int node_to_first_cpu(int node)
+{
+	return first_cpu(cpu_online_map);
+}
+
+/* Replace default node_to_cpumask_ptr with optimized version */
+#define node_to_cpumask_ptr(v, node)		\
+		const cpumask_t *v = _node_to_cpumask_ptr(node)
+
+#define node_to_cpumask_ptr_next(v, node)	\
+			 v = _node_to_cpumask_ptr(node)
 #endif
 
 #include <asm-generic/topology.h>
 
+#ifdef CONFIG_NUMA
+/* Returns the number of the first CPU on Node 'node'. */
+static inline int node_to_first_cpu(int node)
+{
+	node_to_cpumask_ptr(mask, node);
+	return first_cpu(*mask);
+}
+#endif
+
 extern cpumask_t cpu_coregroup_map(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
@@ -193,6 +225,9 @@ extern cpumask_t cpu_coregroup_map(int cpu);
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 #define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+
+/* indicates that pointers to the topology cpumask_t maps are valid */
+#define arch_provides_topology_pointers		yes
 #endif
 
 static inline void arch_fix_phys_package_id(int num, u32 slot)
@@ -220,4 +255,4 @@ static inline void set_mp_bus_to_node(int busnum, int node)
 }
 #endif
 
-#endif
+#endif /* ASM_X86__TOPOLOGY_H */
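The side note carried into topology.h above is worth making concrete: returning a cpumask_t by value copies the whole bitmap onto the caller's stack, while the new _node_to_cpumask_ptr() shape costs one pointer. A plain-C illustration (not kernel code; NR_CPUS and the function names are local stand-ins):

#include <stdio.h>

#define NR_CPUS 4096	/* a large build; makes the cost concrete */

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

static cpumask_t node_masks[2];

/* by value: copies the whole mask onto the caller's stack */
static cpumask_t node_to_cpumask(int node)
{
	return node_masks[node];
}

/* by pointer: one pointer, no copy - the _node_to_cpumask_ptr() shape */
static const cpumask_t *node_to_cpumask_ptr_example(int node)
{
	return &node_masks[node];
}

int main(void)
{
	cpumask_t copy = node_to_cpumask(0);
	const cpumask_t *ptr = node_to_cpumask_ptr_example(1);

	/* with NR_CPUS=4096 each by-value call moves 512 bytes of stack */
	printf("sizeof(cpumask_t) = %zu, ptr = %p, first word = %lu\n",
	       sizeof(copy), (void *)ptr, copy.bits[0]);
	return 0;
}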
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h
index b156b08d0131..0406bbd898a9 100644
--- a/include/asm-x86/trampoline.h
+++ b/include/asm-x86/trampoline.h
@@ -1,5 +1,5 @@
-#ifndef __TRAMPOLINE_HEADER
-#define __TRAMPOLINE_HEADER
+#ifndef ASM_X86__TRAMPOLINE_H
+#define ASM_X86__TRAMPOLINE_H
 
 #ifndef __ASSEMBLY__
 
@@ -18,4 +18,4 @@ extern unsigned long setup_trampoline(void);
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* __TRAMPOLINE_HEADER */
+#endif /* ASM_X86__TRAMPOLINE_H */
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h
new file mode 100644
index 000000000000..7a692baa51ae
--- /dev/null
+++ b/include/asm-x86/traps.h
@@ -0,0 +1,82 @@
+#ifndef ASM_X86__TRAPS_H
+#define ASM_X86__TRAPS_H
+
+#include <asm/debugreg.h>
+
+/* Common in X86_32 and X86_64 */
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void nmi(void);
+asmlinkage void int3(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void simd_coprocessor_error(void);
+asmlinkage void alignment_check(void);
+asmlinkage void spurious_interrupt_bug(void);
+#ifdef CONFIG_X86_MCE
+asmlinkage void machine_check(void);
+#endif /* CONFIG_X86_MCE */
+
+void do_divide_error(struct pt_regs *, long);
+void do_overflow(struct pt_regs *, long);
+void do_bounds(struct pt_regs *, long);
+void do_coprocessor_segment_overrun(struct pt_regs *, long);
+void do_invalid_TSS(struct pt_regs *, long);
+void do_segment_not_present(struct pt_regs *, long);
+void do_stack_segment(struct pt_regs *, long);
+void do_alignment_check(struct pt_regs *, long);
+void do_invalid_op(struct pt_regs *, long);
+void do_general_protection(struct pt_regs *, long);
+void do_nmi(struct pt_regs *, long);
+
+static inline int get_si_code(unsigned long condition)
+{
+	if (condition & DR_STEP)
+		return TRAP_TRACE;
+	else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3))
+		return TRAP_HWBKPT;
+	else
+		return TRAP_BRKPT;
+}
+
+extern int panic_on_unrecovered_nmi;
+extern int kstack_depth_to_print;
+
+#ifdef CONFIG_X86_32
+
+void do_iret_error(struct pt_regs *, long);
+void do_int3(struct pt_regs *, long);
+void do_debug(struct pt_regs *, long);
+void math_error(void __user *);
+void do_coprocessor_error(struct pt_regs *, long);
+void do_simd_coprocessor_error(struct pt_regs *, long);
+void do_spurious_interrupt_bug(struct pt_regs *, long);
+unsigned long patch_espfix_desc(unsigned long, unsigned long);
+asmlinkage void math_emulate(long);
+
+void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+
+#else /* CONFIG_X86_32 */
+
+asmlinkage void double_fault(void);
+
+asmlinkage void do_int3(struct pt_regs *, long);
+asmlinkage void do_stack_segment(struct pt_regs *, long);
+asmlinkage void do_debug(struct pt_regs *, unsigned long);
+asmlinkage void do_coprocessor_error(struct pt_regs *);
+asmlinkage void do_simd_coprocessor_error(struct pt_regs *);
+asmlinkage void do_spurious_interrupt_bug(struct pt_regs *);
+
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+
+#endif /* CONFIG_X86_32 */
+#endif /* ASM_X86__TRAPS_H */
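The get_si_code() helper added in the new traps.h maps DR6 debug-status bits to a siginfo trap code, with single-step taking priority over hardware breakpoints. A standalone sketch, with local stand-ins for the <asm/debugreg.h> and siginfo constants (the values match the kernel's, but treat them as illustrative):

#include <assert.h>

#define DR_TRAP0 0x1      /* breakpoint register 0 hit */
#define DR_TRAP1 0x2
#define DR_TRAP2 0x4
#define DR_TRAP3 0x8
#define DR_STEP  0x4000   /* single-step trap */

#define TRAP_BRKPT  1     /* process breakpoint */
#define TRAP_TRACE  2     /* process trace trap */
#define TRAP_HWBKPT 4     /* hardware breakpoint/watchpoint */

static int get_si_code(unsigned long condition)
{
	if (condition & DR_STEP)
		return TRAP_TRACE;
	else if (condition & (DR_TRAP0 | DR_TRAP1 | DR_TRAP2 | DR_TRAP3))
		return TRAP_HWBKPT;
	else
		return TRAP_BRKPT;
}

int main(void)
{
	assert(get_si_code(DR_STEP) == TRAP_TRACE);		/* step wins */
	assert(get_si_code(DR_STEP | DR_TRAP1) == TRAP_TRACE);	/* priority */
	assert(get_si_code(DR_TRAP2) == TRAP_HWBKPT);		/* watchpoint */
	assert(get_si_code(0) == TRAP_BRKPT);			/* plain int3 */
	return 0;
}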
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 548873ab5fc1..ad0f5c41e78c 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -1,8 +1,8 @@
 /*
  * x86 TSC related functions
  */
-#ifndef _ASM_X86_TSC_H
-#define _ASM_X86_TSC_H
+#ifndef ASM_X86__TSC_H
+#define ASM_X86__TSC_H
 
 #include <asm/processor.h>
 
@@ -48,7 +48,6 @@ static __always_inline cycles_t vget_cycles(void)
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
-extern void init_tsc_clocksource(void);
 int check_tsc_unstable(void);
 
 /*
@@ -58,7 +57,6 @@ int check_tsc_unstable(void);
 extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
-extern void tsc_calibrate(void);
 extern int notsc_setup(char *);
 
-#endif
+#endif /* ASM_X86__TSC_H */
diff --git a/include/asm-x86/types.h b/include/asm-x86/types.h
index 1ac80cd9acf8..e78b52e17444 100644
--- a/include/asm-x86/types.h
+++ b/include/asm-x86/types.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TYPES_H
-#define _ASM_X86_TYPES_H
+#ifndef ASM_X86__TYPES_H
+#define ASM_X86__TYPES_H
 
 #include <asm-generic/int-ll64.h>
 
@@ -33,4 +33,4 @@ typedef u32 dma_addr_t;
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 
-#endif
+#endif /* ASM_X86__TYPES_H */
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 9fefd2947e78..48ebc0ad40ec 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -1,5 +1,454 @@
1#ifndef ASM_X86__UACCESS_H
2#define ASM_X86__UACCESS_H
3/*
4 * User space memory access functions
5 */
6#include <linux/errno.h>
7#include <linux/compiler.h>
8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h>
11#include <asm/asm.h>
12#include <asm/page.h>
13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27#define KERNEL_DS MAKE_MM_SEG(-1UL)
28#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
29
30#define get_ds() (KERNEL_DS)
31#define get_fs() (current_thread_info()->addr_limit)
32#define set_fs(x) (current_thread_info()->addr_limit = (x))
33
34#define segment_eq(a, b) ((a).seg == (b).seg)
35
36#define __addr_ok(addr) \
37 ((unsigned long __force)(addr) < \
38 (current_thread_info()->addr_limit.seg))
39
40/*
41 * Test whether a block of memory is a valid user space address.
42 * Returns 0 if the range is valid, nonzero otherwise.
43 *
44 * This is equivalent to the following test:
45 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
46 *
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */
49
50#define __range_not_ok(addr, size) \
51({ \
52 unsigned long flag, roksum; \
53 __chk_user_ptr(addr); \
54 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
55 : "=&r" (flag), "=r" (roksum) \
56 : "1" (addr), "g" ((long)(size)), \
57 "rm" (current_thread_info()->addr_limit.seg)); \
58 flag; \
59})
60
61/**
62 * access_ok: - Checks if a user space pointer is valid
63 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
64 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
65 * to write to a block, it is always safe to read from it.
66 * @addr: User space pointer to start of block to check
67 * @size: Size of block to check
68 *
69 * Context: User context only. This function may sleep.
70 *
71 * Checks if a pointer to a block of memory in user space is valid.
72 *
73 * Returns true (nonzero) if the memory block may be valid, false (zero)
74 * if it is definitely invalid.
75 *
76 * Note that, depending on architecture, this function probably just
77 * checks that the pointer is in the user space range - after calling
78 * this function, memory access functions may still return -EFAULT.
79 */
80#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
81
82/*
83 * The exception table consists of pairs of addresses: the first is the
84 * address of an instruction that is allowed to fault, and the second is
85 * the address at which the program should continue. No registers are
86 * modified, so it is entirely up to the continuation code to figure out
87 * what to do.
88 *
89 * All the routines below use bits of fixup code that are out of line
90 * with the main instruction path. This means when everything is well,
91 * we don't even have to jump over them. Further, they do not intrude
92 * on our cache or tlb entries.
93 */
94
95struct exception_table_entry {
96 unsigned long insn, fixup;
97};
98
99extern int fixup_exception(struct pt_regs *regs);
100
101/*
102 * These are the main single-value transfer routines. They automatically
103 * use the right size if we just have the right pointer type.
104 *
105 * This gets kind of ugly. We want to return _two_ values in "get_user()"
106 * and yet we don't want to do any pointers, because that is too much
107 * of a performance impact. Thus we have a few rather ugly macros here,
108 * and hide all the ugliness from the user.
109 *
110 * The "__xxx" versions of the user access functions are versions that
111 * do not verify the address space, that must have been done previously
112 * with a separate "access_ok()" call (this is used when we do multiple
113 * accesses to the same area of user memory).
114 */
115
116extern int __get_user_1(void);
117extern int __get_user_2(void);
118extern int __get_user_4(void);
119extern int __get_user_8(void);
120extern int __get_user_bad(void);
121
122#define __get_user_x(size, ret, x, ptr) \
123 asm volatile("call __get_user_" #size \
124 : "=a" (ret),"=d" (x) \
125 : "0" (ptr)) \
126
127/* Careful: we have to cast the result to the type of the pointer
128 * for sign reasons */
129
130/**
131 * get_user: - Get a simple variable from user space.
132 * @x: Variable to store result.
133 * @ptr: Source address, in user space.
134 *
135 * Context: User context only. This function may sleep.
136 *
137 * This macro copies a single simple variable from user space to kernel
138 * space. It supports simple types like char and int, but not larger
139 * data types like structures or arrays.
140 *
141 * @ptr must have pointer-to-simple-variable type, and the result of
142 * dereferencing @ptr must be assignable to @x without a cast.
143 *
144 * Returns zero on success, or -EFAULT on error.
145 * On error, the variable @x is set to zero.
146 */
147#ifdef CONFIG_X86_32
148#define __get_user_8(__ret_gu, __val_gu, ptr) \
149 __get_user_x(X, __ret_gu, __val_gu, ptr)
150#else
151#define __get_user_8(__ret_gu, __val_gu, ptr) \
152 __get_user_x(8, __ret_gu, __val_gu, ptr)
153#endif
154
155#define get_user(x, ptr) \
156({ \
157 int __ret_gu; \
158 unsigned long __val_gu; \
159 __chk_user_ptr(ptr); \
160 switch (sizeof(*(ptr))) { \
161 case 1: \
162 __get_user_x(1, __ret_gu, __val_gu, ptr); \
163 break; \
164 case 2: \
165 __get_user_x(2, __ret_gu, __val_gu, ptr); \
166 break; \
167 case 4: \
168 __get_user_x(4, __ret_gu, __val_gu, ptr); \
169 break; \
170 case 8: \
171 __get_user_8(__ret_gu, __val_gu, ptr); \
172 break; \
173 default: \
174 __get_user_x(X, __ret_gu, __val_gu, ptr); \
175 break; \
176 } \
177 (x) = (__typeof__(*(ptr)))__val_gu; \
178 __ret_gu; \
179})
180
181#define __put_user_x(size, x, ptr, __ret_pu) \
182 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
183 :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
184
185
186
187#ifdef CONFIG_X86_32
188#define __put_user_u64(x, addr, err) \
189 asm volatile("1: movl %%eax,0(%2)\n" \
190 "2: movl %%edx,4(%2)\n" \
191 "3:\n" \
192 ".section .fixup,\"ax\"\n" \
193 "4: movl %3,%0\n" \
194 " jmp 3b\n" \
195 ".previous\n" \
196 _ASM_EXTABLE(1b, 4b) \
197 _ASM_EXTABLE(2b, 4b) \
198 : "=r" (err) \
199 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
200
201#define __put_user_x8(x, ptr, __ret_pu) \
202 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
203 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
204#else
205#define __put_user_u64(x, ptr, retval) \
206 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
207#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
208#endif
209
210extern void __put_user_bad(void);
211
212/*
213 * Strange magic calling convention: pointer in %ecx,
214 * value in %eax(:%edx), return value in %eax. clobbers %rbx
215 */
216extern void __put_user_1(void);
217extern void __put_user_2(void);
218extern void __put_user_4(void);
219extern void __put_user_8(void);
220
221#ifdef CONFIG_X86_WP_WORKS_OK
222
223/**
224 * put_user: - Write a simple value into user space.
225 * @x: Value to copy to user space.
226 * @ptr: Destination address, in user space.
227 *
228 * Context: User context only. This function may sleep.
229 *
230 * This macro copies a single simple value from kernel space to user
231 * space. It supports simple types like char and int, but not larger
232 * data types like structures or arrays.
233 *
234 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
235 * to the result of dereferencing @ptr.
236 *
237 * Returns zero on success, or -EFAULT on error.
238 */
239#define put_user(x, ptr) \
240({ \
241 int __ret_pu; \
242 __typeof__(*(ptr)) __pu_val; \
243 __chk_user_ptr(ptr); \
244 __pu_val = x; \
245 switch (sizeof(*(ptr))) { \
246 case 1: \
247 __put_user_x(1, __pu_val, ptr, __ret_pu); \
248 break; \
249 case 2: \
250 __put_user_x(2, __pu_val, ptr, __ret_pu); \
251 break; \
252 case 4: \
253 __put_user_x(4, __pu_val, ptr, __ret_pu); \
254 break; \
255 case 8: \
256 __put_user_x8(__pu_val, ptr, __ret_pu); \
257 break; \
258 default: \
259 __put_user_x(X, __pu_val, ptr, __ret_pu); \
260 break; \
261 } \
262 __ret_pu; \
263})
264
265#define __put_user_size(x, ptr, size, retval, errret) \
266do { \
267 retval = 0; \
268 __chk_user_ptr(ptr); \
269 switch (size) { \
270 case 1: \
271 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
272 break; \
273 case 2: \
274 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
275 break; \
276 case 4: \
277 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
278 break; \
279 case 8: \
280 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
281 break; \
282 default: \
283 __put_user_bad(); \
284 } \
285} while (0)
286
287#else
288
289#define __put_user_size(x, ptr, size, retval, errret) \
290do { \
291 __typeof__(*(ptr))__pus_tmp = x; \
292 retval = 0; \
293 \
294 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
295 retval = errret; \
296} while (0)
297
298#define put_user(x, ptr) \
299({ \
300 int __ret_pu; \
301 __typeof__(*(ptr))__pus_tmp = x; \
302 __ret_pu = 0; \
303 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
304 sizeof(*(ptr))) != 0)) \
305 __ret_pu = -EFAULT; \
306 __ret_pu; \
307})
308#endif
309
310#ifdef CONFIG_X86_32
311#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
312#else
313#define __get_user_asm_u64(x, ptr, retval, errret) \
314 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
315#endif
316
317#define __get_user_size(x, ptr, size, retval, errret) \
318do { \
319 retval = 0; \
320 __chk_user_ptr(ptr); \
321 switch (size) { \
322 case 1: \
323 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
324 break; \
325 case 2: \
326 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
327 break; \
328 case 4: \
329 __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
330 break; \
331 case 8: \
332 __get_user_asm_u64(x, ptr, retval, errret); \
333 break; \
334 default: \
335 (x) = __get_user_bad(); \
336 } \
337} while (0)
338
339#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
340 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
341 "2:\n" \
342 ".section .fixup,\"ax\"\n" \
343 "3: mov %3,%0\n" \
344 " xor"itype" %"rtype"1,%"rtype"1\n" \
345 " jmp 2b\n" \
346 ".previous\n" \
347 _ASM_EXTABLE(1b, 3b) \
348 : "=r" (err), ltype(x) \
349 : "m" (__m(addr)), "i" (errret), "0" (err))
350
351#define __put_user_nocheck(x, ptr, size) \
352({ \
353 long __pu_err; \
354 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
355 __pu_err; \
356})
357
358#define __get_user_nocheck(x, ptr, size) \
359({ \
360 long __gu_err; \
361 unsigned long __gu_val; \
362 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
363 (x) = (__force __typeof__(*(ptr)))__gu_val; \
364 __gu_err; \
365})
366
367/* FIXME: this hack is definitely wrong -AK */
368struct __large_struct { unsigned long buf[100]; };
369#define __m(x) (*(struct __large_struct __user *)(x))
370
371/*
372 * Tell gcc we read from memory instead of writing: this is because
373 * we do not write to any memory gcc knows about, so there are no
374 * aliasing issues.
375 */
376#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
377 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
378 "2:\n" \
379 ".section .fixup,\"ax\"\n" \
380 "3: mov %3,%0\n" \
381 " jmp 2b\n" \
382 ".previous\n" \
383 _ASM_EXTABLE(1b, 3b) \
384 : "=r"(err) \
385 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
386/**
387 * __get_user: - Get a simple variable from user space, with less checking.
388 * @x: Variable to store result.
389 * @ptr: Source address, in user space.
390 *
391 * Context: User context only. This function may sleep.
392 *
393 * This macro copies a single simple variable from user space to kernel
394 * space. It supports simple types like char and int, but not larger
395 * data types like structures or arrays.
396 *
397 * @ptr must have pointer-to-simple-variable type, and the result of
398 * dereferencing @ptr must be assignable to @x without a cast.
399 *
400 * Caller must check the pointer with access_ok() before calling this
401 * function.
402 *
403 * Returns zero on success, or -EFAULT on error.
404 * On error, the variable @x is set to zero.
405 */
406
407#define __get_user(x, ptr) \
408 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
409/**
410 * __put_user: - Write a simple value into user space, with less checking.
411 * @x: Value to copy to user space.
412 * @ptr: Destination address, in user space.
413 *
414 * Context: User context only. This function may sleep.
415 *
416 * This macro copies a single simple value from kernel space to user
417 * space. It supports simple types like char and int, but not larger
418 * data types like structures or arrays.
419 *
420 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
421 * to the result of dereferencing @ptr.
422 *
423 * Caller must check the pointer with access_ok() before calling this
424 * function.
425 *
426 * Returns zero on success, or -EFAULT on error.
427 */
428
429#define __put_user(x, ptr) \
430 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
431
432#define __get_user_unaligned __get_user
433#define __put_user_unaligned __put_user
434
435/*
436 * movsl can be slow when source and dest are not both 8-byte aligned
437 */
438#ifdef CONFIG_X86_INTEL_USERCOPY
439extern struct movsl_mask {
440 int mask;
441} ____cacheline_aligned_in_smp movsl_mask;
442#endif
443
444#define ARCH_HAS_NOCACHE_UACCESS 1
445
1#ifdef CONFIG_X86_32 446#ifdef CONFIG_X86_32
2# include "uaccess_32.h" 447# include "uaccess_32.h"
3#else 448#else
449# define ARCH_HAS_SEARCH_EXTABLE
4# include "uaccess_64.h" 450# include "uaccess_64.h"
5#endif 451#endif
452
453#endif /* ASM_X86__UACCESS_H */
454
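The __get_user_size()/__put_user_size() macros above pick a width-specific mov by switching on sizeof(*(ptr)); unsupported widths fall through to __get_user_bad()/__put_user_bad(), which are deliberately left undefined so a bad size fails at link time rather than at run time. A minimal user-space sketch of the same dispatch pattern, assuming GCC statement expressions (demo_get is a hypothetical name, not the kernel API):

#include <stdio.h>

#define demo_get(x, ptr)                                        \
({                                                              \
        int __err = 0;                                          \
        switch (sizeof(*(ptr))) {                               \
        case 1: (x) = *(unsigned char *)(ptr); break;           \
        case 2: (x) = *(unsigned short *)(ptr); break;          \
        case 4: (x) = *(unsigned int *)(ptr); break;            \
        case 8: (x) = *(unsigned long long *)(ptr); break;      \
        default: __err = -1; break; /* runtime stand-in for __get_user_bad */ \
        }                                                       \
        __err;                                                  \
})

int main(void)
{
        unsigned short s = 0xbeef;
        unsigned long long v = 0;

        if (demo_get(v, &s) == 0)       /* takes the 2-byte case */
                printf("read 0x%llx\n", v);
        return 0;
}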
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 8e7595c1f34e..6b5b57d9c6d1 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -1,5 +1,5 @@
1#ifndef __i386_UACCESS_H 1#ifndef ASM_X86__UACCESS_32_H
2#define __i386_UACCESS_H 2#define ASM_X86__UACCESS_32_H
3 3
4/* 4/*
5 * User space memory access functions 5 * User space memory access functions
@@ -11,426 +11,6 @@
11#include <asm/asm.h> 11#include <asm/asm.h>
12#include <asm/page.h> 12#include <asm/page.h>
13 13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27
28#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
29#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
30
31#define get_ds() (KERNEL_DS)
32#define get_fs() (current_thread_info()->addr_limit)
33#define set_fs(x) (current_thread_info()->addr_limit = (x))
34
35#define segment_eq(a, b) ((a).seg == (b).seg)
36
37/*
38 * movsl can be slow when source and dest are not both 8-byte aligned
39 */
40#ifdef CONFIG_X86_INTEL_USERCOPY
41extern struct movsl_mask {
42 int mask;
43} ____cacheline_aligned_in_smp movsl_mask;
44#endif
45
46#define __addr_ok(addr) \
47 ((unsigned long __force)(addr) < \
48 (current_thread_info()->addr_limit.seg))
49
50/*
51 * Test whether a block of memory is a valid user space address.
52 * Returns 0 if the range is valid, nonzero otherwise.
53 *
54 * This is equivalent to the following test:
55 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
56 *
57 * This needs 33-bit arithmetic. We have a carry...
58 */
59#define __range_ok(addr, size) \
60({ \
61 unsigned long flag, roksum; \
62 __chk_user_ptr(addr); \
63 asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
64 :"=&r" (flag), "=r" (roksum) \
65 :"1" (addr), "g" ((int)(size)), \
66 "rm" (current_thread_info()->addr_limit.seg)); \
67 flag; \
68})
69
70/**
71 * access_ok: - Checks if a user space pointer is valid
72 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
73 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
74 * to write to a block, it is always safe to read from it.
75 * @addr: User space pointer to start of block to check
76 * @size: Size of block to check
77 *
78 * Context: User context only. This function may sleep.
79 *
80 * Checks if a pointer to a block of memory in user space is valid.
81 *
82 * Returns true (nonzero) if the memory block may be valid, false (zero)
83 * if it is definitely invalid.
84 *
85 * Note that, depending on architecture, this function probably just
86 * checks that the pointer is in the user space range - after calling
87 * this function, memory access functions may still return -EFAULT.
88 */
89#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
90
91/*
92 * The exception table consists of pairs of addresses: the first is the
93 * address of an instruction that is allowed to fault, and the second is
94 * the address at which the program should continue. No registers are
95 * modified, so it is entirely up to the continuation code to figure out
96 * what to do.
97 *
98 * All the routines below use bits of fixup code that are out of line
99 * with the main instruction path. This means when everything is well,
100 * we don't even have to jump over them. Further, they do not intrude
101 * on our cache or tlb entries.
102 */
103
104struct exception_table_entry {
105 unsigned long insn, fixup;
106};
107
108extern int fixup_exception(struct pt_regs *regs);
109
110/*
111 * These are the main single-value transfer routines. They automatically
112 * use the right size if we just have the right pointer type.
113 *
114 * This gets kind of ugly. We want to return _two_ values in "get_user()"
115 * and yet we don't want to do any pointers, because that is too much
116 * of a performance impact. Thus we have a few rather ugly macros here,
117 * and hide all the ugliness from the user.
118 *
119 * The "__xxx" versions of the user access functions are versions that
120 * do not verify the address space, that must have been done previously
121 * with a separate "access_ok()" call (this is used when we do multiple
122 * accesses to the same area of user memory).
123 */
124
125extern void __get_user_1(void);
126extern void __get_user_2(void);
127extern void __get_user_4(void);
128
129#define __get_user_x(size, ret, x, ptr) \
130 asm volatile("call __get_user_" #size \
131 :"=a" (ret),"=d" (x) \
132 :"0" (ptr))
133
134
135/* Careful: we have to cast the result to the type of the pointer
136 * for sign reasons */
137
138/**
139 * get_user: - Get a simple variable from user space.
140 * @x: Variable to store result.
141 * @ptr: Source address, in user space.
142 *
143 * Context: User context only. This function may sleep.
144 *
145 * This macro copies a single simple variable from user space to kernel
146 * space. It supports simple types like char and int, but not larger
147 * data types like structures or arrays.
148 *
149 * @ptr must have pointer-to-simple-variable type, and the result of
150 * dereferencing @ptr must be assignable to @x without a cast.
151 *
152 * Returns zero on success, or -EFAULT on error.
153 * On error, the variable @x is set to zero.
154 */
155#define get_user(x, ptr) \
156({ \
157 int __ret_gu; \
158 unsigned long __val_gu; \
159 __chk_user_ptr(ptr); \
160 switch (sizeof(*(ptr))) { \
161 case 1: \
162 __get_user_x(1, __ret_gu, __val_gu, ptr); \
163 break; \
164 case 2: \
165 __get_user_x(2, __ret_gu, __val_gu, ptr); \
166 break; \
167 case 4: \
168 __get_user_x(4, __ret_gu, __val_gu, ptr); \
169 break; \
170 default: \
171 __get_user_x(X, __ret_gu, __val_gu, ptr); \
172 break; \
173 } \
174 (x) = (__typeof__(*(ptr)))__val_gu; \
175 __ret_gu; \
176})
177
178extern void __put_user_bad(void);
179
180/*
181 * Strange magic calling convention: pointer in %ecx,
182 * value in %eax(:%edx), return value in %eax, no clobbers.
183 */
184extern void __put_user_1(void);
185extern void __put_user_2(void);
186extern void __put_user_4(void);
187extern void __put_user_8(void);
188
189#define __put_user_1(x, ptr) \
190 asm volatile("call __put_user_1" : "=a" (__ret_pu) \
191 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
192
193#define __put_user_2(x, ptr) \
194 asm volatile("call __put_user_2" : "=a" (__ret_pu) \
195 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
196
197#define __put_user_4(x, ptr) \
198 asm volatile("call __put_user_4" : "=a" (__ret_pu) \
199 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
200
201#define __put_user_8(x, ptr) \
202 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
203 : "A" ((typeof(*(ptr)))(x)), "c" (ptr))
204
205#define __put_user_X(x, ptr) \
206 asm volatile("call __put_user_X" : "=a" (__ret_pu) \
207 : "c" (ptr))
208
209/**
210 * put_user: - Write a simple value into user space.
211 * @x: Value to copy to user space.
212 * @ptr: Destination address, in user space.
213 *
214 * Context: User context only. This function may sleep.
215 *
216 * This macro copies a single simple value from kernel space to user
217 * space. It supports simple types like char and int, but not larger
218 * data types like structures or arrays.
219 *
220 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
221 * to the result of dereferencing @ptr.
222 *
223 * Returns zero on success, or -EFAULT on error.
224 */
225#ifdef CONFIG_X86_WP_WORKS_OK
226
227#define put_user(x, ptr) \
228({ \
229 int __ret_pu; \
230 __typeof__(*(ptr)) __pu_val; \
231 __chk_user_ptr(ptr); \
232 __pu_val = x; \
233 switch (sizeof(*(ptr))) { \
234 case 1: \
235 __put_user_1(__pu_val, ptr); \
236 break; \
237 case 2: \
238 __put_user_2(__pu_val, ptr); \
239 break; \
240 case 4: \
241 __put_user_4(__pu_val, ptr); \
242 break; \
243 case 8: \
244 __put_user_8(__pu_val, ptr); \
245 break; \
246 default: \
247 __put_user_X(__pu_val, ptr); \
248 break; \
249 } \
250 __ret_pu; \
251})
252
253#else
254#define put_user(x, ptr) \
255({ \
256 int __ret_pu; \
257 __typeof__(*(ptr))__pus_tmp = x; \
258 __ret_pu = 0; \
259 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
260 sizeof(*(ptr))) != 0)) \
261 __ret_pu = -EFAULT; \
262 __ret_pu; \
263})
264
265
266#endif
267
268/**
269 * __get_user: - Get a simple variable from user space, with less checking.
270 * @x: Variable to store result.
271 * @ptr: Source address, in user space.
272 *
273 * Context: User context only. This function may sleep.
274 *
275 * This macro copies a single simple variable from user space to kernel
276 * space. It supports simple types like char and int, but not larger
277 * data types like structures or arrays.
278 *
279 * @ptr must have pointer-to-simple-variable type, and the result of
280 * dereferencing @ptr must be assignable to @x without a cast.
281 *
282 * Caller must check the pointer with access_ok() before calling this
283 * function.
284 *
285 * Returns zero on success, or -EFAULT on error.
286 * On error, the variable @x is set to zero.
287 */
288#define __get_user(x, ptr) \
289 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
290
291
292/**
293 * __put_user: - Write a simple value into user space, with less checking.
294 * @x: Value to copy to user space.
295 * @ptr: Destination address, in user space.
296 *
297 * Context: User context only. This function may sleep.
298 *
299 * This macro copies a single simple value from kernel space to user
300 * space. It supports simple types like char and int, but not larger
301 * data types like structures or arrays.
302 *
303 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
304 * to the result of dereferencing @ptr.
305 *
306 * Caller must check the pointer with access_ok() before calling this
307 * function.
308 *
309 * Returns zero on success, or -EFAULT on error.
310 */
311#define __put_user(x, ptr) \
312 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
313
314#define __put_user_nocheck(x, ptr, size) \
315({ \
316 long __pu_err; \
317 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
318 __pu_err; \
319})
320
321
322#define __put_user_u64(x, addr, err) \
323 asm volatile("1: movl %%eax,0(%2)\n" \
324 "2: movl %%edx,4(%2)\n" \
325 "3:\n" \
326 ".section .fixup,\"ax\"\n" \
327 "4: movl %3,%0\n" \
328 " jmp 3b\n" \
329 ".previous\n" \
330 _ASM_EXTABLE(1b, 4b) \
331 _ASM_EXTABLE(2b, 4b) \
332 : "=r" (err) \
333 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
334
335#ifdef CONFIG_X86_WP_WORKS_OK
336
337#define __put_user_size(x, ptr, size, retval, errret) \
338do { \
339 retval = 0; \
340 __chk_user_ptr(ptr); \
341 switch (size) { \
342 case 1: \
343 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
344 break; \
345 case 2: \
346 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
347 break; \
348 case 4: \
349 __put_user_asm(x, ptr, retval, "l", "", "ir", errret); \
350 break; \
351 case 8: \
352 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
353 break; \
354 default: \
355 __put_user_bad(); \
356 } \
357} while (0)
358
359#else
360
361#define __put_user_size(x, ptr, size, retval, errret) \
362do { \
363 __typeof__(*(ptr))__pus_tmp = x; \
364 retval = 0; \
365 \
366 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
367 retval = errret; \
368} while (0)
369
370#endif
371struct __large_struct { unsigned long buf[100]; };
372#define __m(x) (*(struct __large_struct __user *)(x))
373
374/*
375 * Tell gcc we read from memory instead of writing: this is because
376 * we do not write to any memory gcc knows about, so there are no
377 * aliasing issues.
378 */
379#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
380 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
381 "2:\n" \
382 ".section .fixup,\"ax\"\n" \
383 "3: movl %3,%0\n" \
384 " jmp 2b\n" \
385 ".previous\n" \
386 _ASM_EXTABLE(1b, 3b) \
387 : "=r"(err) \
388 : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
389
390
391#define __get_user_nocheck(x, ptr, size) \
392({ \
393 long __gu_err; \
394 unsigned long __gu_val; \
395 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
396 (x) = (__typeof__(*(ptr)))__gu_val; \
397 __gu_err; \
398})
399
400extern long __get_user_bad(void);
401
402#define __get_user_size(x, ptr, size, retval, errret) \
403do { \
404 retval = 0; \
405 __chk_user_ptr(ptr); \
406 switch (size) { \
407 case 1: \
408 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
409 break; \
410 case 2: \
411 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
412 break; \
413 case 4: \
414 __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \
415 break; \
416 default: \
417 (x) = __get_user_bad(); \
418 } \
419} while (0)
420
421#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
422 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
423 "2:\n" \
424 ".section .fixup,\"ax\"\n" \
425 "3: movl %3,%0\n" \
426 " xor"itype" %"rtype"1,%"rtype"1\n" \
427 " jmp 2b\n" \
428 ".previous\n" \
429 _ASM_EXTABLE(1b, 3b) \
430 : "=r" (err), ltype (x) \
431 : "m" (__m(addr)), "i" (errret), "0" (err))
432
433
434unsigned long __must_check __copy_to_user_ll 14unsigned long __must_check __copy_to_user_ll
435 (void __user *to, const void *from, unsigned long n); 15 (void __user *to, const void *from, unsigned long n);
436unsigned long __must_check __copy_from_user_ll 16unsigned long __must_check __copy_from_user_ll
@@ -576,8 +156,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
576 return __copy_from_user_ll(to, from, n); 156 return __copy_from_user_ll(to, from, n);
577} 157}
578 158
579#define ARCH_HAS_NOCACHE_UACCESS
580
581static __always_inline unsigned long __copy_from_user_nocache(void *to, 159static __always_inline unsigned long __copy_from_user_nocache(void *to,
582 const void __user *from, unsigned long n) 160 const void __user *from, unsigned long n)
583{ 161{
@@ -637,4 +215,4 @@ long strnlen_user(const char __user *str, long n);
637unsigned long __must_check clear_user(void __user *mem, unsigned long len); 215unsigned long __must_check clear_user(void __user *mem, unsigned long len);
638unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 216unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
639 217
640#endif /* __i386_UACCESS_H */ 218#endif /* ASM_X86__UACCESS_32_H */
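The addl/sbbl sequence in __range_ok above is the "33-bit arithmetic" the comment refers to: a block is valid only if addr + size neither wraps past 2^32 nor exceeds the segment limit. A portable sketch of the same test (range_ok here is a hypothetical stand-in; note the kernel macro's sense is inverted, yielding 0 on success):

#include <stdint.h>
#include <stdio.h>

static int range_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
        uint32_t sum = addr + size;

        if (sum < addr)         /* carry out of bit 31: the 33rd bit */
                return 0;
        return sum <= limit;    /* within the addr_limit segment */
}

int main(void)
{
        uint32_t limit = 0xc0000000u;   /* i386 PAGE_OFFSET, i.e. USER_DS */

        printf("%d\n", range_ok(0x1000, 0x100, limit));         /* 1 */
        printf("%d\n", range_ok(0xffffff00u, 0x200, limit));    /* 0: wraps */
        return 0;
}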
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index b8a2f4339903..c96c1f5d07a2 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -1,5 +1,5 @@
1#ifndef __X86_64_UACCESS_H 1#ifndef ASM_X86__UACCESS_64_H
2#define __X86_64_UACCESS_H 2#define ASM_X86__UACCESS_64_H
3 3
4/* 4/*
5 * User space memory access functions 5 * User space memory access functions
@@ -7,267 +7,9 @@
7#include <linux/compiler.h> 7#include <linux/compiler.h>
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/prefetch.h> 9#include <linux/prefetch.h>
10#include <linux/lockdep.h>
10#include <asm/page.h> 11#include <asm/page.h>
11 12
12#define VERIFY_READ 0
13#define VERIFY_WRITE 1
14
15/*
16 * The fs value determines whether argument validity checking should be
17 * performed or not. If get_fs() == USER_DS, checking is performed, with
18 * get_fs() == KERNEL_DS, checking is bypassed.
19 *
20 * For historical reasons, these macros are grossly misnamed.
21 */
22
23#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
24
25#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
26#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
27
28#define get_ds() (KERNEL_DS)
29#define get_fs() (current_thread_info()->addr_limit)
30#define set_fs(x) (current_thread_info()->addr_limit = (x))
31
32#define segment_eq(a, b) ((a).seg == (b).seg)
33
34#define __addr_ok(addr) (!((unsigned long)(addr) & \
35 (current_thread_info()->addr_limit.seg)))
36
37/*
38 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
39 */
40#define __range_not_ok(addr, size) \
41({ \
42 unsigned long flag, roksum; \
43 __chk_user_ptr(addr); \
44 asm("# range_ok\n\r" \
45 "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
46 : "=&r" (flag), "=r" (roksum) \
47 : "1" (addr), "g" ((long)(size)), \
48 "g" (current_thread_info()->addr_limit.seg)); \
49 flag; \
50})
51
52#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
53
54/*
55 * The exception table consists of pairs of addresses: the first is the
56 * address of an instruction that is allowed to fault, and the second is
57 * the address at which the program should continue. No registers are
58 * modified, so it is entirely up to the continuation code to figure out
59 * what to do.
60 *
61 * All the routines below use bits of fixup code that are out of line
62 * with the main instruction path. This means when everything is well,
63 * we don't even have to jump over them. Further, they do not intrude
64 * on our cache or tlb entries.
65 */
66
67struct exception_table_entry {
68 unsigned long insn, fixup;
69};
70
71extern int fixup_exception(struct pt_regs *regs);
72
73#define ARCH_HAS_SEARCH_EXTABLE
74
75/*
76 * These are the main single-value transfer routines. They automatically
77 * use the right size if we just have the right pointer type.
78 *
79 * This gets kind of ugly. We want to return _two_ values in "get_user()"
80 * and yet we don't want to do any pointers, because that is too much
81 * of a performance impact. Thus we have a few rather ugly macros here,
82 * and hide all the ugliness from the user.
83 *
84 * The "__xxx" versions of the user access functions are versions that
85 * do not verify the address space, that must have been done previously
86 * with a separate "access_ok()" call (this is used when we do multiple
87 * accesses to the same area of user memory).
88 */
89
90#define __get_user_x(size, ret, x, ptr) \
91 asm volatile("call __get_user_" #size \
92 : "=a" (ret),"=d" (x) \
93 : "c" (ptr) \
94 : "r8")
95
96/* Careful: we have to cast the result to the type of the pointer
97 * for sign reasons */
98
99#define get_user(x, ptr) \
100({ \
101 unsigned long __val_gu; \
102 int __ret_gu; \
103 __chk_user_ptr(ptr); \
104 switch (sizeof(*(ptr))) { \
105 case 1: \
106 __get_user_x(1, __ret_gu, __val_gu, ptr); \
107 break; \
108 case 2: \
109 __get_user_x(2, __ret_gu, __val_gu, ptr); \
110 break; \
111 case 4: \
112 __get_user_x(4, __ret_gu, __val_gu, ptr); \
113 break; \
114 case 8: \
115 __get_user_x(8, __ret_gu, __val_gu, ptr); \
116 break; \
117 default: \
118 __get_user_bad(); \
119 break; \
120 } \
121 (x) = (__force typeof(*(ptr)))__val_gu; \
122 __ret_gu; \
123})
124
125extern void __put_user_1(void);
126extern void __put_user_2(void);
127extern void __put_user_4(void);
128extern void __put_user_8(void);
129extern void __put_user_bad(void);
130
131#define __put_user_x(size, ret, x, ptr) \
132 asm volatile("call __put_user_" #size \
133 :"=a" (ret) \
134 :"c" (ptr),"d" (x) \
135 :"r8")
136
137#define put_user(x, ptr) \
138 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
139
140#define __get_user(x, ptr) \
141 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
142#define __put_user(x, ptr) \
143 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
144
145#define __get_user_unaligned __get_user
146#define __put_user_unaligned __put_user
147
148#define __put_user_nocheck(x, ptr, size) \
149({ \
150 int __pu_err; \
151 __put_user_size((x), (ptr), (size), __pu_err); \
152 __pu_err; \
153})
154
155
156#define __put_user_check(x, ptr, size) \
157({ \
158 int __pu_err; \
159 typeof(*(ptr)) __user *__pu_addr = (ptr); \
160 switch (size) { \
161 case 1: \
162 __put_user_x(1, __pu_err, x, __pu_addr); \
163 break; \
164 case 2: \
165 __put_user_x(2, __pu_err, x, __pu_addr); \
166 break; \
167 case 4: \
168 __put_user_x(4, __pu_err, x, __pu_addr); \
169 break; \
170 case 8: \
171 __put_user_x(8, __pu_err, x, __pu_addr); \
172 break; \
173 default: \
174 __put_user_bad(); \
175 } \
176 __pu_err; \
177})
178
179#define __put_user_size(x, ptr, size, retval) \
180do { \
181 retval = 0; \
182 __chk_user_ptr(ptr); \
183 switch (size) { \
184 case 1: \
185 __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
186 break; \
187 case 2: \
188 __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
189 break; \
190 case 4: \
191 __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
192 break; \
193 case 8: \
194 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
195 break; \
196 default: \
197 __put_user_bad(); \
198 } \
199} while (0)
200
201/* FIXME: this hack is definitely wrong -AK */
202struct __large_struct { unsigned long buf[100]; };
203#define __m(x) (*(struct __large_struct __user *)(x))
204
205/*
206 * Tell gcc we read from memory instead of writing: this is because
207 * we do not write to any memory gcc knows about, so there are no
208 * aliasing issues.
209 */
210#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
211 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
212 "2:\n" \
213 ".section .fixup, \"ax\"\n" \
214 "3: mov %3,%0\n" \
215 " jmp 2b\n" \
216 ".previous\n" \
217 _ASM_EXTABLE(1b, 3b) \
218 : "=r"(err) \
219 : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
220
221
222#define __get_user_nocheck(x, ptr, size) \
223({ \
224 int __gu_err; \
225 unsigned long __gu_val; \
226 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
227 (x) = (__force typeof(*(ptr)))__gu_val; \
228 __gu_err; \
229})
230
231extern int __get_user_1(void);
232extern int __get_user_2(void);
233extern int __get_user_4(void);
234extern int __get_user_8(void);
235extern int __get_user_bad(void);
236
237#define __get_user_size(x, ptr, size, retval) \
238do { \
239 retval = 0; \
240 __chk_user_ptr(ptr); \
241 switch (size) { \
242 case 1: \
243 __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
244 break; \
245 case 2: \
246 __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
247 break; \
248 case 4: \
249 __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
250 break; \
251 case 8: \
252 __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
253 break; \
254 default: \
255 (x) = __get_user_bad(); \
256 } \
257} while (0)
258
259#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
260 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
261 "2:\n" \
262 ".section .fixup, \"ax\"\n" \
263 "3: mov %3,%0\n" \
264 " xor"itype" %"rtype"1,%"rtype"1\n" \
265 " jmp 2b\n" \
266 ".previous\n" \
267 _ASM_EXTABLE(1b, 3b) \
268 : "=r" (err), ltype (x) \
269 : "m" (__m(addr)), "i"(errno), "0"(err))
270
271/* 13/*
272 * Copy To/From Userspace 14 * Copy To/From Userspace
273 */ 15 */
@@ -437,7 +179,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
437 return copy_user_generic((__force void *)dst, src, size); 179 return copy_user_generic((__force void *)dst, src, size);
438} 180}
439 181
440#define ARCH_HAS_NOCACHE_UACCESS 1
441extern long __copy_user_nocache(void *dst, const void __user *src, 182extern long __copy_user_nocache(void *dst, const void __user *src,
442 unsigned size, int zerorest); 183 unsigned size, int zerorest);
443 184
@@ -455,4 +196,7 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
455 return __copy_user_nocache(dst, src, size, 0); 196 return __copy_user_nocache(dst, src, size, 0);
456} 197}
457 198
458#endif /* __X86_64_UACCESS_H */ 199unsigned long
200copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
201
202#endif /* ASM_X86__UACCESS_64_H */
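Both flavours describe the exception table as pairs of addresses: an instruction allowed to fault, and the address where execution resumes after the fixup. A sketch of that lookup over the struct above; the linear walk is purely illustrative (the real table is sorted and binary-searched, and ARCH_HAS_SEARCH_EXTABLE, now defined in the common header for 64-bit, lets an architecture override the generic search):

struct exception_table_entry {
        unsigned long insn, fixup;
};

static const struct exception_table_entry *
demo_search_extable(const struct exception_table_entry *first,
                    const struct exception_table_entry *last,
                    unsigned long faulting_ip)
{
        const struct exception_table_entry *e;

        for (e = first; e <= last; e++)
                if (e->insn == faulting_ip)
                        return e;       /* resume at e->fixup */
        return 0;                       /* no fixup: a genuine fault */
}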
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
index 50a79f7fcde9..89eaa5456a7e 100644
--- a/include/asm-x86/ucontext.h
+++ b/include/asm-x86/ucontext.h
@@ -1,5 +1,11 @@
1#ifndef _ASM_X86_UCONTEXT_H 1#ifndef ASM_X86__UCONTEXT_H
2#define _ASM_X86_UCONTEXT_H 2#define ASM_X86__UCONTEXT_H
3
4#define UC_FP_XSTATE 0x1 /* indicates the presence of extended state
5 * information in the memory layout pointed
6 * by the fpstate pointer in the ucontext's
7 * sigcontext struct (uc_mcontext).
8 */
3 9
4struct ucontext { 10struct ucontext {
5 unsigned long uc_flags; 11 unsigned long uc_flags;
@@ -9,4 +15,4 @@ struct ucontext {
9 sigset_t uc_sigmask; /* mask last for extensibility */ 15 sigset_t uc_sigmask; /* mask last for extensibility */
10}; 16};
11 17
12#endif /* _ASM_X86_UCONTEXT_H */ 18#endif /* ASM_X86__UCONTEXT_H */
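The new UC_FP_XSTATE bit lets a signal handler test whether extended state sits behind the fpstate pointer in uc_mcontext before touching it. A hypothetical user-space sketch, assuming glibc's ucontext_t exposes uc_flags as in the struct above:

#include <signal.h>
#include <ucontext.h>
#include <string.h>
#include <unistd.h>

#ifndef UC_FP_XSTATE
#define UC_FP_XSTATE 0x1        /* value from the hunk above */
#endif

static void demo_handler(int sig, siginfo_t *info, void *ctx)
{
        ucontext_t *uc = ctx;

        (void)sig; (void)info;
        if (uc->uc_flags & UC_FP_XSTATE)
                write(2, "extended FP state present\n", 26);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = demo_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGUSR1, &sa, 0);
        raise(SIGUSR1);
        return 0;
}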
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index a7bd416b4763..59dcdec37160 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_UNALIGNED_H 1#ifndef ASM_X86__UNALIGNED_H
2#define _ASM_X86_UNALIGNED_H 2#define ASM_X86__UNALIGNED_H
3 3
4/* 4/*
5 * The x86 can do unaligned accesses itself. 5 * The x86 can do unaligned accesses itself.
@@ -11,4 +11,4 @@
11#define get_unaligned __get_unaligned_le 11#define get_unaligned __get_unaligned_le
12#define put_unaligned __put_unaligned_le 12#define put_unaligned __put_unaligned_le
13 13
14#endif /* _ASM_X86_UNALIGNED_H */ 14#endif /* ASM_X86__UNALIGNED_H */
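get_unaligned/put_unaligned can map straight to the little-endian accessors because x86 handles misaligned loads and stores in hardware. A sketch of the portable equivalent such an accessor hides (demo_get_u32 is hypothetical, not the <asm/unaligned.h> API):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t demo_get_u32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* legal at any alignment */
        return v;
}

int main(void)
{
        unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

        /* deliberately misaligned source address */
        printf("0x%x\n", demo_get_u32(buf + 1));        /* 0x12345678 on LE */
        return 0;
}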
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index 8317d94771d3..017f4a87c913 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_I386_UNISTD_H_ 1#ifndef ASM_X86__UNISTD_32_H
2#define _ASM_I386_UNISTD_H_ 2#define ASM_X86__UNISTD_32_H
3 3
4/* 4/*
5 * This file contains the system call numbers. 5 * This file contains the system call numbers.
@@ -332,6 +332,12 @@
332#define __NR_fallocate 324 332#define __NR_fallocate 324
333#define __NR_timerfd_settime 325 333#define __NR_timerfd_settime 325
334#define __NR_timerfd_gettime 326 334#define __NR_timerfd_gettime 326
335#define __NR_signalfd4 327
336#define __NR_eventfd2 328
337#define __NR_epoll_create1 329
338#define __NR_dup3 330
339#define __NR_pipe2 331
340#define __NR_inotify_init1 332
335 341
336#ifdef __KERNEL__ 342#ifdef __KERNEL__
337 343
@@ -370,4 +376,4 @@
370#endif 376#endif
371 377
372#endif /* __KERNEL__ */ 378#endif /* __KERNEL__ */
373#endif /* _ASM_I386_UNISTD_H_ */ 379#endif /* ASM_X86__UNISTD_32_H */
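For illustration, one of the numbers added above as seen from user space on a 32-bit build: pipe2 is syscall 331 here. The literal only mirrors the hunk; <sys/syscall.h> would normally supply __NR_pipe2:

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fds[2];
        long ret = syscall(331 /* __NR_pipe2 on i386 */, fds, O_CLOEXEC);

        if (ret == 0)
                printf("pipe2: read fd %d, write fd %d\n", fds[0], fds[1]);
        return ret ? 1 : 0;
}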
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index fe26e36d0f51..ace83f1f6787 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_UNISTD_H_ 1#ifndef ASM_X86__UNISTD_64_H
2#define _ASM_X86_64_UNISTD_H_ 2#define ASM_X86__UNISTD_64_H
3 3
4#ifndef __SYSCALL 4#ifndef __SYSCALL
5#define __SYSCALL(a, b) 5#define __SYSCALL(a, b)
@@ -290,7 +290,7 @@ __SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
290#define __NR_rt_sigqueueinfo 129 290#define __NR_rt_sigqueueinfo 129
291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
292#define __NR_rt_sigsuspend 130 292#define __NR_rt_sigsuspend 130
293__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend) 293__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend)
294#define __NR_sigaltstack 131 294#define __NR_sigaltstack 131
295__SYSCALL(__NR_sigaltstack, stub_sigaltstack) 295__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
296#define __NR_utime 132 296#define __NR_utime 132
@@ -639,6 +639,20 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
640#define __NR_timerfd_gettime 287 640#define __NR_timerfd_gettime 287
641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
642#define __NR_paccept 288
643__SYSCALL(__NR_paccept, sys_paccept)
644#define __NR_signalfd4 289
645__SYSCALL(__NR_signalfd4, sys_signalfd4)
646#define __NR_eventfd2 290
647__SYSCALL(__NR_eventfd2, sys_eventfd2)
648#define __NR_epoll_create1 291
649__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
650#define __NR_dup3 292
651__SYSCALL(__NR_dup3, sys_dup3)
652#define __NR_pipe2 293
653__SYSCALL(__NR_pipe2, sys_pipe2)
654#define __NR_inotify_init1 294
655__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
642 656
643 657
644#ifndef __NO_STUBS 658#ifndef __NO_STUBS
@@ -676,4 +690,4 @@ __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
676#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 690#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
677#endif /* __KERNEL__ */ 691#endif /* __KERNEL__ */
678 692
679#endif /* _ASM_X86_64_UNISTD_H_ */ 693#endif /* ASM_X86__UNISTD_64_H */
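The cond_syscall() definition above relies on .weak/.set: a syscall that is configured out never gets a strong definition, so the weak alias resolves calls to sys_ni_syscall at link time. A small user-space sketch of the same trick using GCC's attribute spelling of that asm pair (all names hypothetical):

#include <stdio.h>

int fallback(void) { return -1; }       /* stands in for sys_ni_syscall */

/* equivalent to: asm(".weak maybe_feature\n\t.set maybe_feature,fallback") */
int maybe_feature(void) __attribute__((weak, alias("fallback")));

int main(void)
{
        /* no strong maybe_feature is linked in, so the alias wins */
        printf("maybe_feature() = %d\n", maybe_feature());
        return 0;
}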
diff --git a/include/asm-x86/unwind.h b/include/asm-x86/unwind.h
index 8b064bd9c553..a2151567db44 100644
--- a/include/asm-x86/unwind.h
+++ b/include/asm-x86/unwind.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_UNWIND_H 1#ifndef ASM_X86__UNWIND_H
2#define _ASM_X86_UNWIND_H 2#define ASM_X86__UNWIND_H
3 3
4#define UNW_PC(frame) ((void)(frame), 0UL) 4#define UNW_PC(frame) ((void)(frame), 0UL)
5#define UNW_SP(frame) ((void)(frame), 0UL) 5#define UNW_SP(frame) ((void)(frame), 0UL)
@@ -10,4 +10,4 @@ static inline int arch_unw_user_mode(const void *info)
10 return 0; 10 return 0;
11} 11}
12 12
13#endif /* _ASM_X86_UNWIND_H */ 13#endif /* ASM_X86__UNWIND_H */
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h
index a3d910047879..aa66c1857f06 100644
--- a/include/asm-x86/user32.h
+++ b/include/asm-x86/user32.h
@@ -1,5 +1,5 @@
1#ifndef USER32_H 1#ifndef ASM_X86__USER32_H
2#define USER32_H 1 2#define ASM_X86__USER32_H
3 3
4/* IA32 compatible user structures for ptrace. 4/* IA32 compatible user structures for ptrace.
5 * These should be used for 32bit coredumps too. */ 5 * These should be used for 32bit coredumps too. */
@@ -67,4 +67,4 @@ struct user32 {
67}; 67};
68 68
69 69
70#endif 70#endif /* ASM_X86__USER32_H */
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h
index d6e51edc259d..e0fe2f55f1a6 100644
--- a/include/asm-x86/user_32.h
+++ b/include/asm-x86/user_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_USER_H 1#ifndef ASM_X86__USER_32_H
2#define _I386_USER_H 2#define ASM_X86__USER_32_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5/* Core file format: The core file is written in such a way that gdb 5/* Core file format: The core file is written in such a way that gdb
@@ -128,4 +128,4 @@ struct user{
128#define HOST_TEXT_START_ADDR (u.start_code) 128#define HOST_TEXT_START_ADDR (u.start_code)
129#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) 129#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
130 130
131#endif /* _I386_USER_H */ 131#endif /* ASM_X86__USER_32_H */
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h
index 6037b634c77f..38b5799863b4 100644
--- a/include/asm-x86/user_64.h
+++ b/include/asm-x86/user_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_USER_H 1#ifndef ASM_X86__USER_64_H
2#define _X86_64_USER_H 2#define ASM_X86__USER_64_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <asm/page.h> 5#include <asm/page.h>
@@ -134,4 +134,4 @@ struct user {
134#define HOST_TEXT_START_ADDR (u.start_code) 134#define HOST_TEXT_START_ADDR (u.start_code)
135#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) 135#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
136 136
137#endif /* _X86_64_USER_H */ 137#endif /* ASM_X86__USER_64_H */
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h
new file mode 100644
index 000000000000..7cd6d7ec1308
--- /dev/null
+++ b/include/asm-x86/uv/bios.h
@@ -0,0 +1,68 @@
1#ifndef ASM_X86__UV__BIOS_H
2#define ASM_X86__UV__BIOS_H
3
4/*
5 * BIOS layer definitions.
6 *
7 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/rtc.h>
25
26#define BIOS_FREQ_BASE 0x01000001
27
28enum {
29 BIOS_FREQ_BASE_PLATFORM = 0,
30 BIOS_FREQ_BASE_INTERVAL_TIMER = 1,
31 BIOS_FREQ_BASE_REALTIME_CLOCK = 2
32};
33
34# define BIOS_CALL(result, a0, a1, a2, a3, a4, a5, a6, a7) \
35 do { \
36 /* XXX - the real call goes here */ \
37 result.status = BIOS_STATUS_UNIMPLEMENTED; \
 38 result.v0 = 0; \
 39 result.v1 = 0; \
40 } while (0)
41
42enum {
43 BIOS_STATUS_SUCCESS = 0,
44 BIOS_STATUS_UNIMPLEMENTED = -1,
45 BIOS_STATUS_EINVAL = -2,
46 BIOS_STATUS_ERROR = -3
47};
48
49struct uv_bios_retval {
50 /*
51 * A zero status value indicates call completed without error.
52 * A negative status value indicates reason of call failure.
53 * A positive status value indicates success but an
54 * informational value should be printed (e.g., "reboot for
55 * change to take effect").
56 */
57 s64 status;
58 u64 v0;
59 u64 v1;
60 u64 v2;
61};
62
63extern long
64x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
65 unsigned long *drift_info);
66extern const char *x86_bios_strerror(long status);
67
68#endif /* ASM_X86__UV__BIOS_H */
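A sketch of a caller for the interface declared above, following the BIOS_STATUS_* convention that zero means success and negative values are errors; demo_report_rtc_freq is hypothetical:

#include <linux/kernel.h>
#include <asm/uv/bios.h>

static int demo_report_rtc_freq(void)
{
        unsigned long ticks_per_second, drift_info;
        long status = x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
                                         &ticks_per_second, &drift_info);

        if (status != BIOS_STATUS_SUCCESS) {
                printk(KERN_WARNING "uv: freq_base failed: %s\n",
                       x86_bios_strerror(status));
                return -1;
        }
        printk(KERN_INFO "uv: RTC base is %lu ticks/s\n", ticks_per_second);
        return 0;
}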
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h
new file mode 100644
index 000000000000..77153fb18f5e
--- /dev/null
+++ b/include/asm-x86/uv/uv_bau.h
@@ -0,0 +1,332 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI UV Broadcast Assist Unit definitions
7 *
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#ifndef ASM_X86__UV__UV_BAU_H
12#define ASM_X86__UV__UV_BAU_H
13
14#include <linux/bitmap.h>
15#define BITSPERBYTE 8
16
17/*
18 * Broadcast Assist Unit messaging structures
19 *
20 * Selective Broadcast activations are induced by software action
21 * specifying a particular 8-descriptor "set" via a 6-bit index written
22 * to an MMR.
23 * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
24 * each 6-bit index value. These descriptor sets are mapped in sequence
25 * starting with set 0 located at the address specified in the
26 * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
27 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
28 *
29 * We will use 31 sets, one for sending BAU messages from each of the 32
30 * cpu's on the node.
31 *
32 * TLB shootdown will use the first of the 8 descriptors of each set.
33 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
34 */
35
36#define UV_ITEMS_PER_DESCRIPTOR 8
37#define UV_CPUS_PER_ACT_STATUS 32
38#define UV_ACT_STATUS_MASK 0x3
39#define UV_ACT_STATUS_SIZE 2
40#define UV_ACTIVATION_DESCRIPTOR_SIZE 32
41#define UV_DISTRIBUTION_SIZE 256
42#define UV_SW_ACK_NPENDING 8
43#define UV_NET_ENDPOINT_INTD 0x38
44#define UV_DESC_BASE_PNODE_SHIFT 49
45#define UV_PAYLOADQ_PNODE_SHIFT 49
46#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
47#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
48
49/*
50 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
51 */
52#define DESC_STATUS_IDLE 0
53#define DESC_STATUS_ACTIVE 1
54#define DESC_STATUS_DESTINATION_TIMEOUT 2
55#define DESC_STATUS_SOURCE_TIMEOUT 3
56
57/*
 58 * source side thresholds at which message retries print a warning
59 */
60#define SOURCE_TIMEOUT_LIMIT 20
61#define DESTINATION_TIMEOUT_LIMIT 20
62
63/*
64 * number of entries in the destination side payload queue
65 */
66#define DEST_Q_SIZE 17
67/*
68 * number of destination side software ack resources
69 */
70#define DEST_NUM_RESOURCES 8
71#define MAX_CPUS_PER_NODE 32
72/*
73 * completion statuses for sending a TLB flush message
74 */
75#define FLUSH_RETRY 1
76#define FLUSH_GIVEUP 2
77#define FLUSH_COMPLETE 3
78
79/*
80 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
81 * If the 'multilevel' flag in the header portion of the descriptor
82 * has been set to 0, then endpoint multi-unicast mode is selected.
83 * The distribution specification (32 bytes) is interpreted as a 256-bit
84 * distribution vector. Adjacent bits correspond to consecutive even numbered
85 * nodeIDs. The result of adding the index of a given bit to the 15-bit
86 * 'base_dest_nodeid' field of the header corresponds to the
87 * destination nodeID associated with that specified bit.
88 */
89struct bau_target_nodemask {
90 unsigned long bits[BITS_TO_LONGS(256)];
91};
92
93/*
94 * mask of cpu's on a node
95 * (during initialization we need to check that unsigned long has
96 * enough bits for max. cpu's per node)
97 */
98struct bau_local_cpumask {
99 unsigned long bits;
100};
101
102/*
103 * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
104 * only 12 bytes (96 bits) of the payload area are usable.
105 * An additional 3 bytes (bits 27:4) of the header address are carried
106 * to the next bytes of the destination payload queue.
107 * And an additional 2 bytes of the header Suppl_A field are also
108 * carried to the destination payload queue.
109 * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
110 * of the destination payload queue, which is written by the hardware
111 * with the s/w ack resource bit vector.
112 * [ effective message contents (16 bytes (128 bits) maximum), not counting
113 * the s/w ack bit vector ]
114 */
115
116/*
117 * The payload is software-defined for INTD transactions
118 */
119struct bau_msg_payload {
120 unsigned long address; /* signifies a page or all TLB's
121 of the cpu */
122 /* 64 bits */
123 unsigned short sending_cpu; /* filled in by sender */
124 /* 16 bits */
125 unsigned short acknowledge_count;/* filled in by destination */
126 /* 16 bits */
127 unsigned int reserved1:32; /* not usable */
128};
129
130
131/*
132 * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
 133 * see table 4.2.3.0.1 in broadcast_assist spec.
134 */
135struct bau_msg_header {
136 int dest_subnodeid:6; /* must be zero */
137 /* bits 5:0 */
138 int base_dest_nodeid:15; /* nasid>>1 (pnode) of first bit in node_map */
139 /* bits 20:6 */
140 int command:8; /* message type */
141 /* bits 28:21 */
142 /* 0x38: SN3net EndPoint Message */
143 int rsvd_1:3; /* must be zero */
144 /* bits 31:29 */
145 /* int will align on 32 bits */
146 int rsvd_2:9; /* must be zero */
147 /* bits 40:32 */
148 /* Suppl_A is 56-41 */
149 int payload_2a:8; /* becomes byte 16 of msg */
150 /* bits 48:41 */ /* not currently using */
151 int payload_2b:8; /* becomes byte 17 of msg */
152 /* bits 56:49 */ /* not currently using */
153 /* Address field (96:57) is never used as an
154 address (these are address bits 42:3) */
155 int rsvd_3:1; /* must be zero */
156 /* bit 57 */
157 /* address bits 27:4 are payload */
158 /* these 24 bits become bytes 12-14 of msg */
159 int replied_to:1; /* sent as 0 by the source to byte 12 */
160 /* bit 58 */
161
162 int payload_1a:5; /* not currently used */
163 /* bits 63:59 */
164 int payload_1b:8; /* not currently used */
165 /* bits 71:64 */
166 int payload_1c:8; /* not currently used */
167 /* bits 79:72 */
168 int payload_1d:2; /* not currently used */
169 /* bits 81:80 */
170
171 int rsvd_4:7; /* must be zero */
172 /* bits 88:82 */
173 int sw_ack_flag:1; /* software acknowledge flag */
174 /* bit 89 */
 175 /* INTD transactions at destination are to
176 wait for software acknowledge */
177 int rsvd_5:6; /* must be zero */
178 /* bits 95:90 */
179 int rsvd_6:5; /* must be zero */
180 /* bits 100:96 */
181 int int_both:1; /* if 1, interrupt both sockets on the blade */
182 /* bit 101*/
183 int fairness:3; /* usually zero */
184 /* bits 104:102 */
185 int multilevel:1; /* multi-level multicast format */
186 /* bit 105 */
187 /* 0 for TLB: endpoint multi-unicast messages */
188 int chaining:1; /* next descriptor is part of this activation*/
189 /* bit 106 */
190 int rsvd_7:21; /* must be zero */
191 /* bits 127:107 */
192};
193
194/*
195 * The activation descriptor:
196 * The format of the message to send, plus all accompanying control
197 * Should be 64 bytes
198 */
199struct bau_desc {
200 struct bau_target_nodemask distribution;
201 /*
202 * message template, consisting of header and payload:
203 */
204 struct bau_msg_header header;
205 struct bau_msg_payload payload;
206};
207/*
208 * -payload-- ---------header------
209 * bytes 0-11 bits 41-56 bits 58-81
210 * A B (2) C (3)
211 *
212 * A/B/C are moved to:
213 * A C B
214 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
215 * ------------payload queue-----------
216 */
217
218/*
219 * The payload queue on the destination side is an array of these.
220 * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
221 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
222 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
223 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
224 * sw_ack_vector and payload_2)
225 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
226 * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
227 * operation."
228 */
229struct bau_payload_queue_entry {
230 unsigned long address; /* signifies a page or all TLB's
231 of the cpu */
232 /* 64 bits, bytes 0-7 */
233
234 unsigned short sending_cpu; /* cpu that sent the message */
235 /* 16 bits, bytes 8-9 */
236
237 unsigned short acknowledge_count; /* filled in by destination */
238 /* 16 bits, bytes 10-11 */
239
240 unsigned short replied_to:1; /* sent as 0 by the source */
241 /* 1 bit */
242 unsigned short unused1:7; /* not currently using */
 243 /* 7 bits (byte 12) */
244
245 unsigned char unused2[2]; /* not currently using */
246 /* bytes 13-14 */
247
248 unsigned char sw_ack_vector; /* filled in by the hardware */
249 /* byte 15 (bits 127:120) */
250
251 unsigned char unused4[3]; /* not currently using bytes 17-19 */
252 /* bytes 17-19 */
253
254 int number_of_cpus; /* filled in at destination */
255 /* 32 bits, bytes 20-23 (aligned) */
256
257 unsigned char unused5[8]; /* not using */
258 /* bytes 24-31 */
259};
260
261/*
262 * one for every slot in the destination payload queue
263 */
264struct bau_msg_status {
265 struct bau_local_cpumask seen_by; /* map of cpu's */
266};
267
268/*
269 * one for every slot in the destination software ack resources
270 */
271struct bau_sw_ack_status {
272 struct bau_payload_queue_entry *msg; /* associated message */
273 int watcher; /* cpu monitoring, or -1 */
274};
275
276/*
277 * one on every node and per-cpu; to locate the software tables
278 */
279struct bau_control {
280 struct bau_desc *descriptor_base;
281 struct bau_payload_queue_entry *bau_msg_head;
282 struct bau_payload_queue_entry *va_queue_first;
283 struct bau_payload_queue_entry *va_queue_last;
284 struct bau_msg_status *msg_statuses;
285 int *watching; /* pointer to array */
286};
287
288/*
289 * This structure is allocated per_cpu for UV TLB shootdown statistics.
290 */
291struct ptc_stats {
292 unsigned long ptc_i; /* number of IPI-style flushes */
293 unsigned long requestor; /* number of nodes this cpu sent to */
294 unsigned long requestee; /* times cpu was remotely requested */
295 unsigned long alltlb; /* times all tlb's on this cpu were flushed */
296 unsigned long onetlb; /* times just one tlb on this cpu was flushed */
297 unsigned long s_retry; /* retries on source side timeouts */
298 unsigned long d_retry; /* retries on destination side timeouts */
299 unsigned long sflush; /* cycles spent in uv_flush_tlb_others */
300 unsigned long dflush; /* cycles spent on destination side */
301 unsigned long retriesok; /* successes on retries */
302 unsigned long nomsg; /* interrupts with no message */
303 unsigned long multmsg; /* interrupts with multiple messages */
304 unsigned long ntargeted;/* nodes targeted */
305};
306
307static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp)
308{
309 return constant_test_bit(node, &dstp->bits[0]);
310}
311static inline void bau_node_set(int node, struct bau_target_nodemask *dstp)
312{
313 __set_bit(node, &dstp->bits[0]);
314}
315static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits)
316{
317 bitmap_zero(&dstp->bits[0], nbits);
318}
319
320static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
321{
322 bitmap_zero(&dstp->bits, nbits);
323}
324
325#define cpubit_isset(cpu, bau_local_cpumask) \
326 test_bit((cpu), (bau_local_cpumask).bits)
327
328extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
329extern void uv_bau_message_intr1(void);
330extern void uv_bau_timeout_intr1(void);
331
332#endif /* ASM_X86__UV__UV_BAU_H */
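A sketch of preparing a distribution mask with the inline helpers above; the pnode values are invented, and passing UV_DISTRIBUTION_SIZE as the bit count follows the 256-bit vector the comments describe:

#include <linux/kernel.h>
#include <asm/uv/uv_bau.h>

static void demo_target_two_pnodes(struct bau_target_nodemask *dist)
{
        bau_nodes_clear(dist, UV_DISTRIBUTION_SIZE);    /* all 256 bits off */
        bau_node_set(3, dist);                          /* invented pnodes */
        bau_node_set(17, dist);
        BUG_ON(!bau_node_isset(3, dist));
}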
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h
index 26b9240d1e23..bdb5b01afbf5 100644
--- a/include/asm-x86/uv/uv_hub.h
+++ b/include/asm-x86/uv/uv_hub.h
@@ -5,11 +5,11 @@
5 * 5 *
6 * SGI UV architectural definitions 6 * SGI UV architectural definitions
7 * 7 *
8 * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef __ASM_X86_UV_HUB_H__ 11#ifndef ASM_X86__UV__UV_HUB_H
12#define __ASM_X86_UV_HUB_H__ 12#define ASM_X86__UV__UV_HUB_H
13 13
14#include <linux/numa.h> 14#include <linux/numa.h>
15#include <linux/percpu.h> 15#include <linux/percpu.h>
@@ -20,26 +20,49 @@
20/* 20/*
21 * Addressing Terminology 21 * Addressing Terminology
22 * 22 *
23 * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of 23 * M - The low M bits of a physical address represent the offset
24 * routers always have low bit of 1, C/MBricks have low bit 24 * into the blade local memory. RAM memory on a blade is physically
25 * equal to 0. Most addressing macros that target UV hub chips 25 * contiguous (although various IO spaces may punch holes in
26 * right shift the NASID by 1 to exclude the always-zero bit. 26 * it)..
27 * 27 *
28 * SNASID - NASID right shifted by 1 bit. 28 * N - Number of bits in the node portion of a socket physical
29 * address.
30 *
31 * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
32 * routers always have low bit of 1, C/MBricks have low bit
33 * equal to 0. Most addressing macros that target UV hub chips
34 * right shift the NASID by 1 to exclude the always-zero bit.
35 * NASIDs contain up to 15 bits.
36 *
37 * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
38 * of nasids.
39 *
40 * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
41 * of the nasid for socket usage.
42 *
43 *
44 * NumaLink Global Physical Address Format:
45 * +--------------------------------+---------------------+
46 * |00..000| GNODE | NodeOffset |
47 * +--------------------------------+---------------------+
48 * |<-------53 - M bits --->|<--------M bits ----->
49 *
50 * M - number of node offset bits (35 .. 40)
29 * 51 *
30 * 52 *
31 * Memory/UV-HUB Processor Socket Address Format: 53 * Memory/UV-HUB Processor Socket Address Format:
32 * +--------+---------------+---------------------+ 54 * +----------------+---------------+---------------------+
33 * |00..0000| SNASID | NodeOffset | 55 * |00..000000000000| PNODE | NodeOffset |
34 * +--------+---------------+---------------------+ 56 * +----------------+---------------+---------------------+
35 * <--- N bits --->|<--------M bits -----> 57 * <--- N bits --->|<--------M bits ----->
36 * 58 *
37 * M number of node offset bits (35 .. 40) 59 * M - number of node offset bits (35 .. 40)
38 * N number of SNASID bits (0 .. 10) 60 * N - number of PNODE bits (0 .. 10)
39 * 61 *
40 * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). 62 * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
41 * The actual values are configuration dependent and are set at 63 * The actual values are configuration dependent and are set at
42 * boot time 64 * boot time. M & N values are set by the hardware/BIOS at boot.
65 *
43 * 66 *
44 * APICID format 67 * APICID format
45 * NOTE!!!!!! This is the current format of the APICID. However, code 68 * NOTE!!!!!! This is the current format of the APICID. However, code
@@ -48,14 +71,14 @@
48 * 71 *
49 * 1111110000000000 72 * 1111110000000000
50 * 5432109876543210 73 * 5432109876543210
51 * nnnnnnnnnnlc0cch 74 * pppppppppplc0cch
52 * sssssssssss 75 * sssssssssss
53 * 76 *
54 * n = snasid bits 77 * p = pnode bits
55 * l = socket number on board 78 * l = socket number on board
56 * c = core 79 * c = core
57 * h = hyperthread 80 * h = hyperthread
58 * s = bits that are in the socket CSR 81 * s = bits that are in the SOCKET_ID CSR
59 * 82 *
60 * Note: Processor only supports 12 bits in the APICID register. The ACPI 83 * Note: Processor only supports 12 bits in the APICID register. The ACPI
61 * tables hold all 16 bits. Software needs to be aware of this. 84 * tables hold all 16 bits. Software needs to be aware of this.
@@ -74,7 +97,7 @@
74 * This value is also the value of the maximum number of non-router NASIDs 97 * This value is also the value of the maximum number of non-router NASIDs
75 * in the numalink fabric. 98 * in the numalink fabric.
76 * 99 *
77 * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused. 100 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
78 */ 101 */
79#define UV_MAX_NUMALINK_BLADES 16384 102#define UV_MAX_NUMALINK_BLADES 16384
80 103
@@ -96,8 +119,12 @@
96 */ 119 */
97struct uv_hub_info_s { 120struct uv_hub_info_s {
98 unsigned long global_mmr_base; 121 unsigned long global_mmr_base;
99 unsigned short local_nasid; 122 unsigned long gpa_mask;
100 unsigned short gnode_upper; 123 unsigned long gnode_upper;
124 unsigned long lowmem_remap_top;
125 unsigned long lowmem_remap_base;
126 unsigned short pnode;
127 unsigned short pnode_mask;
101 unsigned short coherency_domain_number; 128 unsigned short coherency_domain_number;
102 unsigned short numa_blade_id; 129 unsigned short numa_blade_id;
103 unsigned char blade_processor_id; 130 unsigned char blade_processor_id;
@@ -112,83 +139,126 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
112 * Local & Global MMR space macros. 139 * Local & Global MMR space macros.
113 * Note: macros are intended to be used ONLY by inline functions 140 * Note: macros are intended to be used ONLY by inline functions
114 * in this file - not by other kernel code. 141 * in this file - not by other kernel code.
142 * n - NASID (full 15-bit global nasid)
143 * g - GNODE (full 15-bit global nasid, right shifted 1)
144 * p - PNODE (local part of nsids, right shifted 1)
115 */ 145 */
116#define UV_SNASID(n) ((n) >> 1) 146#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
117#define UV_NASID(n) ((n) << 1) 147#define UV_PNODE_TO_NASID(p) (((p) << 1) | uv_hub_info->gnode_upper)
118 148
119#define UV_LOCAL_MMR_BASE 0xf4000000UL 149#define UV_LOCAL_MMR_BASE 0xf4000000UL
120#define UV_GLOBAL_MMR32_BASE 0xf8000000UL 150#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
121#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) 151#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
152#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
153#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
122 154
123#define UV_GLOBAL_MMR32_SNASID_MASK 0x3ff 155#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
124#define UV_GLOBAL_MMR32_SNASID_SHIFT 15 156#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
125#define UV_GLOBAL_MMR64_SNASID_SHIFT 26
126 157
127#define UV_GLOBAL_MMR32_NASID_BITS(n) \ 158#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
128 (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) << \
129 (UV_GLOBAL_MMR32_SNASID_SHIFT))
130 159
131#define UV_GLOBAL_MMR64_NASID_BITS(n) \ 160#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
132 ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT) 161 ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
162
163#define UV_APIC_PNODE_SHIFT 6
164
165/*
166 * Macros for converting between kernel virtual addresses, socket local physical
167 * addresses, and UV global physical addresses.
168 * Note: use the standard __pa() & __va() macros for converting
169 * between socket virtual and socket physical addresses.
170 */
171
172/* socket phys RAM --> UV global physical address */
173static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
174{
175 if (paddr < uv_hub_info->lowmem_remap_top)
176 paddr += uv_hub_info->lowmem_remap_base;
177 return paddr | uv_hub_info->gnode_upper;
178}
179
180
181/* socket virtual --> UV global physical address */
182static inline unsigned long uv_gpa(void *v)
183{
184 return __pa(v) | uv_hub_info->gnode_upper;
185}
186
187/* socket virtual --> UV global physical address */
188static inline void *uv_vgpa(void *v)
189{
190 return (void *)uv_gpa(v);
191}
192
193/* UV global physical address --> socket virtual */
194static inline void *uv_va(unsigned long gpa)
195{
196 return __va(gpa & uv_hub_info->gpa_mask);
197}
198
199/* pnode, offset --> socket virtual */
200static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
201{
202 return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
203}
133 204
134#define UV_APIC_NASID_SHIFT 6
135 205
136/* 206/*
137 * Extract a NASID from an APICID (full apicid, not processor subset) 207 * Extract a PNODE from an APICID (full apicid, not processor subset)
138 */ 208 */
139static inline int uv_apicid_to_nasid(int apicid) 209static inline int uv_apicid_to_pnode(int apicid)
140{ 210{
141 return (UV_NASID(apicid >> UV_APIC_NASID_SHIFT)); 211 return (apicid >> UV_APIC_PNODE_SHIFT);
142} 212}
 
 /*
  * Access global MMRs using the low memory MMR32 space. This region supports
  * faster MMR access but not all MMRs are accessible in this space.
  */
-static inline unsigned long *uv_global_mmr32_address(int nasid,
+static inline unsigned long *uv_global_mmr32_address(int pnode,
 						unsigned long offset)
 {
 	return __va(UV_GLOBAL_MMR32_BASE |
-		UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset);
+		UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
 }
 
-static inline void uv_write_global_mmr32(int nasid, unsigned long offset,
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
 				 unsigned long val)
 {
-	*uv_global_mmr32_address(nasid, offset) = val;
+	*uv_global_mmr32_address(pnode, offset) = val;
 }
 
-static inline unsigned long uv_read_global_mmr32(int nasid,
+static inline unsigned long uv_read_global_mmr32(int pnode,
 						 unsigned long offset)
 {
-	return *uv_global_mmr32_address(nasid, offset);
+	return *uv_global_mmr32_address(pnode, offset);
 }
 
 /*
  * Access Global MMR space using the MMR space located at the top of physical
  * memory.
  */
-static inline unsigned long *uv_global_mmr64_address(int nasid,
+static inline unsigned long *uv_global_mmr64_address(int pnode,
 						unsigned long offset)
 {
 	return __va(UV_GLOBAL_MMR64_BASE |
-		UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset);
+		UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
 }
 
-static inline void uv_write_global_mmr64(int nasid, unsigned long offset,
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
 				 unsigned long val)
 {
-	*uv_global_mmr64_address(nasid, offset) = val;
+	*uv_global_mmr64_address(pnode, offset) = val;
 }
 
-static inline unsigned long uv_read_global_mmr64(int nasid,
+static inline unsigned long uv_read_global_mmr64(int pnode,
 						 unsigned long offset)
 {
-	return *uv_global_mmr64_address(nasid, offset);
+	return *uv_global_mmr64_address(pnode, offset);
 }
 
 /*
- * Access node local MMRs. Faster than using global space but only local MMRs
+ * Access hub local MMRs. Faster than using global space but only local MMRs
  * are accessible.
  */
 static inline unsigned long *uv_local_mmr_address(unsigned long offset)
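A global MMR address is just base | pnode-bits | register-offset. The sketch below composes an MMR32-style address; MMR32_BASE is a placeholder constant, not the real UV_GLOBAL_MMR32_BASE (which is defined elsewhere in uv_hub.h), and the offset is invented.

/* Illustration only: composing a global MMR32 address from its parts. */
#include <stdio.h>

#define MMR32_BASE		0xf8000000UL	/* placeholder, not the real base */
#define MMR32_PNODE_SHIFT	15
#define MMR32_PNODE_BITS(p)	((unsigned long)(p) << MMR32_PNODE_SHIFT)

int main(void)
{
	int pnode = 3;
	unsigned long offset = 0x500;	/* made-up MMR offset */

	printf("mmr32 addr = 0x%lx\n",
	       MMR32_BASE | MMR32_PNODE_BITS(pnode) | offset);
	return 0;
}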
@@ -207,15 +277,15 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 }
 
 /*
- * Structures and definitions for converting between cpu, node, and blade
+ * Structures and definitions for converting between cpu, node, pnode, and blade
  * numbers.
  */
 struct uv_blade_info {
-	unsigned short	nr_posible_cpus;
+	unsigned short	nr_possible_cpus;
 	unsigned short	nr_online_cpus;
-	unsigned short	nasid;
+	unsigned short	pnode;
 };
-struct uv_blade_info *uv_blade_info;
+extern struct uv_blade_info *uv_blade_info;
 extern short *uv_node_to_blade;
 extern short *uv_cpu_to_blade;
 extern short uv_possible_blades;
@@ -244,16 +314,16 @@ static inline int uv_node_to_blade_id(int nid)
 	return uv_node_to_blade[nid];
 }
 
-/* Convert a blade id to the NASID of the blade */
-static inline int uv_blade_to_nasid(int bid)
+/* Convert a blade id to the PNODE of the blade */
+static inline int uv_blade_to_pnode(int bid)
 {
-	return uv_blade_info[bid].nasid;
+	return uv_blade_info[bid].pnode;
 }
 
 /* Determine the number of possible cpus on a blade */
 static inline int uv_blade_nr_possible_cpus(int bid)
 {
-	return uv_blade_info[bid].nr_posible_cpus;
+	return uv_blade_info[bid].nr_possible_cpus;
 }
 
 /* Determine the number of online cpus on a blade */
@@ -262,16 +332,16 @@ static inline int uv_blade_nr_online_cpus(int bid)
 	return uv_blade_info[bid].nr_online_cpus;
 }
 
-/* Convert a cpu id to the NASID of the blade containing the cpu */
-static inline int uv_cpu_to_nasid(int cpu)
+/* Convert a cpu id to the PNODE of the blade containing the cpu */
+static inline int uv_cpu_to_pnode(int cpu)
 {
-	return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid;
+	return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
 }
 
-/* Convert a node number to the NASID of the blade */
-static inline int uv_node_to_nasid(int nid)
+/* Convert a linux node number to the PNODE of the blade */
+static inline int uv_node_to_pnode(int nid)
 {
-	return uv_blade_info[uv_node_to_blade_id(nid)].nasid;
+	return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
 }
 
 /* Maximum possible number of blades */
@@ -280,5 +350,5 @@ static inline int uv_num_possible_blades(void)
 	return uv_possible_blades;
 }
 
-#endif /* __ASM_X86_UV_HUB__ */
+#endif /* ASM_X86__UV__UV_HUB_H */
 
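The tables above give a cpu -> blade -> pnode lookup chain. A minimal sketch with invented data for a two-blade system (array contents are made up; in the kernel they are filled in at boot):

/* Illustration only: the lookup chain implied by uv_blade_info. */
#include <stdio.h>

struct uv_blade_info {
	unsigned short nr_possible_cpus;
	unsigned short nr_online_cpus;
	unsigned short pnode;
};

static struct uv_blade_info blades[] = {
	{ .nr_possible_cpus = 8, .nr_online_cpus = 8, .pnode = 0 },
	{ .nr_possible_cpus = 8, .nr_online_cpus = 4, .pnode = 2 },
};
static short cpu_to_blade[] = { 0, 0, 0, 0, 1, 1, 1, 1 };

int main(void)
{
	int cpu = 5;
	short bid = cpu_to_blade[cpu];

	printf("cpu %d -> blade %d -> pnode %d\n", cpu, bid, blades[bid].pnode);
	return 0;
}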
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h
index 3b69fe6b6376..8b03d89d2459 100644
--- a/include/asm-x86/uv/uv_mmrs.h
+++ b/include/asm-x86/uv/uv_mmrs.h
@@ -8,20 +8,293 @@
  * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-#ifndef __ASM_X86_UV_MMRS__
-#define __ASM_X86_UV_MMRS__
+#ifndef ASM_X86__UV__UV_MMRS_H
+#define ASM_X86__UV__UV_MMRS_H
 
-/*
- * AUTO GENERATED - Do not edit
- */
+#define UV_MMR_ENABLE		(1UL << 63)
+
+/* ========================================================================= */
17/* UVH_BAU_DATA_CONFIG */
18/* ========================================================================= */
19#define UVH_BAU_DATA_CONFIG 0x61680UL
20#define UVH_BAU_DATA_CONFIG_32 0x0438
21
22#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
23#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
24#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
25#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
26#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
27#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
28#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
29#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
30#define UVH_BAU_DATA_CONFIG_P_SHFT 13
31#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
32#define UVH_BAU_DATA_CONFIG_T_SHFT 15
33#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
34#define UVH_BAU_DATA_CONFIG_M_SHFT 16
35#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
36#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
37#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
38
39union uvh_bau_data_config_u {
40 unsigned long v;
41 struct uvh_bau_data_config_s {
42 unsigned long vector_ : 8; /* RW */
43 unsigned long dm : 3; /* RW */
44 unsigned long destmode : 1; /* RW */
45 unsigned long status : 1; /* RO */
46 unsigned long p : 1; /* RO */
47 unsigned long rsvd_14 : 1; /* */
48 unsigned long t : 1; /* RO */
49 unsigned long m : 1; /* RW */
50 unsigned long rsvd_17_31: 15; /* */
51 unsigned long apic_id : 32; /* RW */
52 } s;
53};
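These register descriptions all follow one idiom: a union of a raw 64-bit value and a bitfield view, with matching _SHFT/_MASK macros for code that prefers explicit shifts. A standalone sketch of the idiom, using a minimal subset of uvh_bau_data_config_u (the vector value is invented; the two views agree on little-endian x86, where this header is used):

/* Illustration only: the union-of-bitfields idiom used throughout this file. */
#include <stdio.h>

#define VECTOR_SHFT	0
#define VECTOR_MASK	0x00000000000000ffUL

union bau_data_config {
	unsigned long v;
	struct {
		unsigned long vector_ : 8;
		unsigned long dm      : 3;
		unsigned long rest    : 53;
	} s;
};

int main(void)
{
	union bau_data_config cfg = { .v = 0 };

	cfg.s.vector_ = 0xd4;	/* made-up interrupt vector */
	cfg.s.dm = 0;		/* fixed delivery mode */
	/* cross-check the bitfield view against the mask/shift view */
	printf("v = 0x%lx, vector via mask = 0x%lx\n",
	       cfg.v, (cfg.v & VECTOR_MASK) >> VECTOR_SHFT);
	return 0;
}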
54
55/* ========================================================================= */
56/* UVH_EVENT_OCCURRED0 */
57/* ========================================================================= */
58#define UVH_EVENT_OCCURRED0 0x70000UL
59#define UVH_EVENT_OCCURRED0_32 0x005e8
60
61#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
62#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
63#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
64#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
65#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
66#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
67#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
68#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
69#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
70#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
71#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
72#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
73#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
74#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
75#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
76#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
77#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
78#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
79#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
80#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
81#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
82#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
83#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
84#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
85#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
86#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
87#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
88#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
89#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
90#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
91#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
92#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
93#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
94#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
95#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
96#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
97#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
98#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
99#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
100#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
101#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
102#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
103#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
104#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
105#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
106#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
107#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
108#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
109#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
110#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
111#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
112#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
113#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
114#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
115#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
116#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
117#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
118#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
119#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
120#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
121#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
122#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
123#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
124#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
125#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
126#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
127#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
128#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
129#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
130#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
131#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
132#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
133#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
134#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
135#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
136#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
137#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
138#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
139#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
140#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
141#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
142#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
143#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
144#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
145#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
146#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
147#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
148#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
149#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
150#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
151#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
152#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
153#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
154#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
155#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
156#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
157#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
158#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
159#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
160#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
161#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
162#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
163#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
164#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
165#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
166#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
167#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
168#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
169#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
170#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
171#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
172#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
173#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
174#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
175union uvh_event_occurred0_u {
176 unsigned long v;
177 struct uvh_event_occurred0_s {
178 unsigned long lb_hcerr : 1; /* RW, W1C */
179 unsigned long gr0_hcerr : 1; /* RW, W1C */
180 unsigned long gr1_hcerr : 1; /* RW, W1C */
181 unsigned long lh_hcerr : 1; /* RW, W1C */
182 unsigned long rh_hcerr : 1; /* RW, W1C */
183 unsigned long xn_hcerr : 1; /* RW, W1C */
184 unsigned long si_hcerr : 1; /* RW, W1C */
185 unsigned long lb_aoerr0 : 1; /* RW, W1C */
186 unsigned long gr0_aoerr0 : 1; /* RW, W1C */
187 unsigned long gr1_aoerr0 : 1; /* RW, W1C */
188 unsigned long lh_aoerr0 : 1; /* RW, W1C */
189 unsigned long rh_aoerr0 : 1; /* RW, W1C */
190 unsigned long xn_aoerr0 : 1; /* RW, W1C */
191 unsigned long si_aoerr0 : 1; /* RW, W1C */
192 unsigned long lb_aoerr1 : 1; /* RW, W1C */
193 unsigned long gr0_aoerr1 : 1; /* RW, W1C */
194 unsigned long gr1_aoerr1 : 1; /* RW, W1C */
195 unsigned long lh_aoerr1 : 1; /* RW, W1C */
196 unsigned long rh_aoerr1 : 1; /* RW, W1C */
197 unsigned long xn_aoerr1 : 1; /* RW, W1C */
198 unsigned long si_aoerr1 : 1; /* RW, W1C */
199 unsigned long rh_vpi_int : 1; /* RW, W1C */
200 unsigned long system_shutdown_int : 1; /* RW, W1C */
201 unsigned long lb_irq_int_0 : 1; /* RW, W1C */
202 unsigned long lb_irq_int_1 : 1; /* RW, W1C */
203 unsigned long lb_irq_int_2 : 1; /* RW, W1C */
204 unsigned long lb_irq_int_3 : 1; /* RW, W1C */
205 unsigned long lb_irq_int_4 : 1; /* RW, W1C */
206 unsigned long lb_irq_int_5 : 1; /* RW, W1C */
207 unsigned long lb_irq_int_6 : 1; /* RW, W1C */
208 unsigned long lb_irq_int_7 : 1; /* RW, W1C */
209 unsigned long lb_irq_int_8 : 1; /* RW, W1C */
210 unsigned long lb_irq_int_9 : 1; /* RW, W1C */
211 unsigned long lb_irq_int_10 : 1; /* RW, W1C */
212 unsigned long lb_irq_int_11 : 1; /* RW, W1C */
213 unsigned long lb_irq_int_12 : 1; /* RW, W1C */
214 unsigned long lb_irq_int_13 : 1; /* RW, W1C */
215 unsigned long lb_irq_int_14 : 1; /* RW, W1C */
216 unsigned long lb_irq_int_15 : 1; /* RW, W1C */
217 unsigned long l1_nmi_int : 1; /* RW, W1C */
218 unsigned long stop_clock : 1; /* RW, W1C */
219 unsigned long asic_to_l1 : 1; /* RW, W1C */
220 unsigned long l1_to_asic : 1; /* RW, W1C */
221 unsigned long ltc_int : 1; /* RW, W1C */
222 unsigned long la_seq_trigger : 1; /* RW, W1C */
223 unsigned long ipi_int : 1; /* RW, W1C */
224 unsigned long extio_int0 : 1; /* RW, W1C */
225 unsigned long extio_int1 : 1; /* RW, W1C */
226 unsigned long extio_int2 : 1; /* RW, W1C */
227 unsigned long extio_int3 : 1; /* RW, W1C */
228 unsigned long profile_int : 1; /* RW, W1C */
229 unsigned long rtc0 : 1; /* RW, W1C */
230 unsigned long rtc1 : 1; /* RW, W1C */
231 unsigned long rtc2 : 1; /* RW, W1C */
232 unsigned long rtc3 : 1; /* RW, W1C */
233 unsigned long bau_data : 1; /* RW, W1C */
234 unsigned long power_management_req : 1; /* RW, W1C */
235 unsigned long rsvd_57_63 : 7; /* */
236 } s;
237};
238
239/* ========================================================================= */
240/* UVH_EVENT_OCCURRED0_ALIAS */
241/* ========================================================================= */
242#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
243#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
244
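The event bits above are marked W1C (write one to clear). A hedged sketch of clearing one of them; that the _ALIAS address is the write-one-to-clear path is an assumption of this sketch, and the mapping function is a mock standing in for the kernel's real MMR mapping:

/* Illustration only: clearing a W1C event bit through the alias address. */
#include <stdio.h>

#define UVH_EVENT_OCCURRED0_ALIAS	 0x0000000000070008UL
#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL

static unsigned long fake_mmr;	/* stands in for the mapped register */

static volatile unsigned long *mock_mmr_address(int pnode, unsigned long offset)
{
	(void)pnode; (void)offset;
	return &fake_mmr;	/* real code would map a global MMR address */
}

int main(void)
{
	/* writing a 1 to a W1C bit clears it; other bits are unaffected */
	*mock_mmr_address(0, UVH_EVENT_OCCURRED0_ALIAS) =
		UVH_EVENT_OCCURRED0_IPI_INT_MASK;
	printf("wrote 0x%lx to the W1C alias\n", fake_mmr);
	return 0;
}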
245/* ========================================================================= */
246/* UVH_INT_CMPB */
247/* ========================================================================= */
248#define UVH_INT_CMPB 0x22080UL
249
250#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
251#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
252
253union uvh_int_cmpb_u {
254 unsigned long v;
255 struct uvh_int_cmpb_s {
256 unsigned long real_time_cmpb : 56; /* RW */
257 unsigned long rsvd_56_63 : 8; /* */
258 } s;
259};
260
261/* ========================================================================= */
262/* UVH_INT_CMPC */
263/* ========================================================================= */
264#define UVH_INT_CMPC 0x22100UL
265
266#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
267#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
268
269union uvh_int_cmpc_u {
270 unsigned long v;
271 struct uvh_int_cmpc_s {
272 unsigned long real_time_cmpc : 56; /* RW */
273 unsigned long rsvd_56_63 : 8; /* */
274 } s;
275};
276
277/* ========================================================================= */
278/* UVH_INT_CMPD */
279/* ========================================================================= */
280#define UVH_INT_CMPD 0x22180UL
 
-#define UV_MMR_ENABLE		(1UL << 63)
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
283#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
284
285union uvh_int_cmpd_u {
286 unsigned long v;
287 struct uvh_int_cmpd_s {
288 unsigned long real_time_cmpd : 56; /* RW */
289 unsigned long rsvd_56_63 : 8; /* */
290 } s;
291};
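The INT_CMPB/C/D comparators hold a 56-bit compare value, so a target time must be clamped to the field width before it is written. A trivial sketch (the deadline value is invented):

/* Illustration only: clamping a compare value to the 56-bit field. */
#include <stdio.h>

#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL

int main(void)
{
	unsigned long deadline = 0xDEADBEEFDEADBEEFUL;	/* made-up RTC target */
	unsigned long cmpb = deadline & UVH_INT_CMPB_REAL_TIME_CMPB_MASK;

	printf("programmed compare value = 0x%lx\n", cmpb);	/* top byte dropped */
	return 0;
}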
 
 /* ========================================================================= */
 /* UVH_IPI_INT */
 /* ========================================================================= */
 #define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x0360
+#define UVH_IPI_INT_32 0x0348
 
 #define UVH_IPI_INT_VECTOR_SHFT 0
 #define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
@@ -51,7 +324,7 @@ union uvh_ipi_int_u {
 /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
 /* ========================================================================= */
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009f0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009c0
 
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -73,7 +346,7 @@ union uvh_lb_bau_intd_payload_queue_first_u {
 /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
 /* ========================================================================= */
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009f8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009c8
 
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -91,7 +364,7 @@ union uvh_lb_bau_intd_payload_queue_last_u {
 /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
 /* ========================================================================= */
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x00a00
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x009d0
 
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -109,6 +382,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
 /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
 /* ========================================================================= */
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0a68
 
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
@@ -169,12 +443,13 @@ union uvh_lb_bau_intd_software_acknowledge_u {
 /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
 /* ========================================================================= */
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
 
 /* ========================================================================= */
 /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
 /* ========================================================================= */
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009d8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009a8
 
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
@@ -197,7 +472,7 @@ union uvh_lb_bau_sb_activation_control_u {
 /* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
 /* ========================================================================= */
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009e0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009b0
 
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -213,7 +488,7 @@ union uvh_lb_bau_sb_activation_status_0_u {
 /* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
 /* ========================================================================= */
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009e8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009b8
 
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -229,7 +504,7 @@ union uvh_lb_bau_sb_activation_status_1_u {
 /* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
 /* ========================================================================= */
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009d0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009a0
 
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
@@ -248,6 +523,334 @@ union uvh_lb_bau_sb_descriptor_base_u {
 };
 
 /* ========================================================================= */
526/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
527/* ========================================================================= */
528#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
529
530#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
531#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
532#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
533#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
534#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
535#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
536#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
537#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
538#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
539#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
540#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
541#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
542#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
543#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
544#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
545#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
546#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
547#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
548#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
549#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
550#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
551#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
552#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
553#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
554#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
555#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
556#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
557#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
558#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
559#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
560#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
561#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
562#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
563#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
564#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
565#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
566#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
567#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
568#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
569#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
570#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
571#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
572#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
573#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
574#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
575#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
576#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
577#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
578#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
579#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
580#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
581#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
582#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
583#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
584#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
585#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
586#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
587#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
588#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
589#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
590#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
591#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
592#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
593#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
594#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
595#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
596#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
597#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
598#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
599#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
600#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
601#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
602#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
603#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
604#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
605#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
606#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
607#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
608#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
609#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
610#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
611#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
612#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
613#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
614#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
615#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
616
617union uvh_lb_mcast_aoerr0_rpt_enable_u {
618 unsigned long v;
619 struct uvh_lb_mcast_aoerr0_rpt_enable_s {
620 unsigned long mcast_obese_msg : 1; /* RW */
621 unsigned long mcast_data_sb_err : 1; /* RW */
622 unsigned long mcast_nack_buff_parity : 1; /* RW */
623 unsigned long mcast_timeout : 1; /* RW */
624 unsigned long mcast_inactive_reply : 1; /* RW */
625 unsigned long mcast_upgrade_error : 1; /* RW */
626 unsigned long mcast_reg_count_underflow : 1; /* RW */
627 unsigned long mcast_rep_obese_msg : 1; /* RW */
628 unsigned long ucache_req_runt_msg : 1; /* RW */
629 unsigned long ucache_req_obese_msg : 1; /* RW */
630 unsigned long ucache_req_data_sb_err : 1; /* RW */
631 unsigned long ucache_rep_runt_msg : 1; /* RW */
632 unsigned long ucache_rep_obese_msg : 1; /* RW */
633 unsigned long ucache_rep_data_sb_err : 1; /* RW */
634 unsigned long ucache_rep_command_err : 1; /* RW */
635 unsigned long ucache_pend_timeout : 1; /* RW */
636 unsigned long macc_req_runt_msg : 1; /* RW */
637 unsigned long macc_req_obese_msg : 1; /* RW */
638 unsigned long macc_req_data_sb_err : 1; /* RW */
639 unsigned long macc_rep_runt_msg : 1; /* RW */
640 unsigned long macc_rep_obese_msg : 1; /* RW */
641 unsigned long macc_rep_data_sb_err : 1; /* RW */
642 unsigned long macc_amo_timeout : 1; /* RW */
643 unsigned long macc_put_timeout : 1; /* RW */
644 unsigned long macc_spurious_event : 1; /* RW */
645 unsigned long ioh_destination_table_parity : 1; /* RW */
646 unsigned long get_had_error_reply : 1; /* RW */
647 unsigned long get_timeout : 1; /* RW */
648 unsigned long lock_manager_had_error_reply : 1; /* RW */
649 unsigned long put_had_error_reply : 1; /* RW */
650 unsigned long put_timeout : 1; /* RW */
651 unsigned long sb_activation_overrun : 1; /* RW */
652 unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
653 unsigned long completed_gb_activation_timeout : 1; /* RW */
654 unsigned long descriptor_buffer_0_parity : 1; /* RW */
655 unsigned long descriptor_buffer_1_parity : 1; /* RW */
656 unsigned long socket_destination_table_parity : 1; /* RW */
657 unsigned long bau_reply_payload_corruption : 1; /* RW */
658 unsigned long io_port_destination_table_parity : 1; /* RW */
659 unsigned long intd_soft_ack_timeout : 1; /* RW */
660 unsigned long int_rep_obese_msg : 1; /* RW */
661 unsigned long int_rep_command_err : 1; /* RW */
662 unsigned long int_timeout : 1; /* RW */
663 unsigned long rsvd_43_63 : 21; /* */
664 } s;
665};
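For registers like this one, an enable value is usually built by OR-ing the per-error _MASK macros rather than touching each bitfield. A sketch selecting three of the conditions above (the selection itself is arbitrary, for illustration):

/* Illustration only: composing a report-enable value from _MASK macros. */
#include <stdio.h>

#define MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
#define MCAST_TIMEOUT_MASK          0x0000000000000008UL
#define UCACHE_PEND_TIMEOUT_MASK    0x0000000000008000UL

int main(void)
{
	unsigned long enable = MCAST_NACK_BUFF_PARITY_MASK |
			       MCAST_TIMEOUT_MASK |
			       UCACHE_PEND_TIMEOUT_MASK;

	printf("rpt_enable = 0x%lx\n", enable);	/* -> 0x800c */
	return 0;
}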
666
667/* ========================================================================= */
668/* UVH_LOCAL_INT0_CONFIG */
669/* ========================================================================= */
670#define UVH_LOCAL_INT0_CONFIG 0x61000UL
671
672#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
673#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
674#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
675#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
676#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
677#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
678#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
679#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
680#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
681#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
682#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
683#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
684#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
685#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
686#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
687#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
688
689union uvh_local_int0_config_u {
690 unsigned long v;
691 struct uvh_local_int0_config_s {
692 unsigned long vector_ : 8; /* RW */
693 unsigned long dm : 3; /* RW */
694 unsigned long destmode : 1; /* RW */
695 unsigned long status : 1; /* RO */
696 unsigned long p : 1; /* RO */
697 unsigned long rsvd_14 : 1; /* */
698 unsigned long t : 1; /* RO */
699 unsigned long m : 1; /* RW */
700 unsigned long rsvd_17_31: 15; /* */
701 unsigned long apic_id : 32; /* RW */
702 } s;
703};
704
705/* ========================================================================= */
706/* UVH_LOCAL_INT0_ENABLE */
707/* ========================================================================= */
708#define UVH_LOCAL_INT0_ENABLE 0x65000UL
709
710#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
711#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
712#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
713#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
714#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
715#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
716#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
717#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
718#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
719#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
720#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
721#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
722#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
723#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
724#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
725#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
726#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
727#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
728#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
729#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
730#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
731#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
732#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
733#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
734#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
735#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
736#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
737#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
738#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
739#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
740#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
741#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
742#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
743#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
744#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
745#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
746#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
747#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
748#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
749#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
750#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
751#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
752#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
753#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
754#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
755#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
756#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
757#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
758#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
759#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
760#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
761#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
762#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
763#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
764#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
765#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
766#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
767#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
768#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
769#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
770#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
771#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
772#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
773#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
774#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
775#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
776#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
777#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
778#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
779#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
780#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
781#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
782#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
783#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
784#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
785#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
786#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
787#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
788#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
789#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
790#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
791#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
792#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
793#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
794#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
795#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
796#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
797#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
798#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
799#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
800
801union uvh_local_int0_enable_u {
802 unsigned long v;
803 struct uvh_local_int0_enable_s {
804 unsigned long lb_hcerr : 1; /* RW */
805 unsigned long gr0_hcerr : 1; /* RW */
806 unsigned long gr1_hcerr : 1; /* RW */
807 unsigned long lh_hcerr : 1; /* RW */
808 unsigned long rh_hcerr : 1; /* RW */
809 unsigned long xn_hcerr : 1; /* RW */
810 unsigned long si_hcerr : 1; /* RW */
811 unsigned long lb_aoerr0 : 1; /* RW */
812 unsigned long gr0_aoerr0 : 1; /* RW */
813 unsigned long gr1_aoerr0 : 1; /* RW */
814 unsigned long lh_aoerr0 : 1; /* RW */
815 unsigned long rh_aoerr0 : 1; /* RW */
816 unsigned long xn_aoerr0 : 1; /* RW */
817 unsigned long si_aoerr0 : 1; /* RW */
818 unsigned long lb_aoerr1 : 1; /* RW */
819 unsigned long gr0_aoerr1 : 1; /* RW */
820 unsigned long gr1_aoerr1 : 1; /* RW */
821 unsigned long lh_aoerr1 : 1; /* RW */
822 unsigned long rh_aoerr1 : 1; /* RW */
823 unsigned long xn_aoerr1 : 1; /* RW */
824 unsigned long si_aoerr1 : 1; /* RW */
825 unsigned long rh_vpi_int : 1; /* RW */
826 unsigned long system_shutdown_int : 1; /* RW */
827 unsigned long lb_irq_int_0 : 1; /* RW */
828 unsigned long lb_irq_int_1 : 1; /* RW */
829 unsigned long lb_irq_int_2 : 1; /* RW */
830 unsigned long lb_irq_int_3 : 1; /* RW */
831 unsigned long lb_irq_int_4 : 1; /* RW */
832 unsigned long lb_irq_int_5 : 1; /* RW */
833 unsigned long lb_irq_int_6 : 1; /* RW */
834 unsigned long lb_irq_int_7 : 1; /* RW */
835 unsigned long lb_irq_int_8 : 1; /* RW */
836 unsigned long lb_irq_int_9 : 1; /* RW */
837 unsigned long lb_irq_int_10 : 1; /* RW */
838 unsigned long lb_irq_int_11 : 1; /* RW */
839 unsigned long lb_irq_int_12 : 1; /* RW */
840 unsigned long lb_irq_int_13 : 1; /* RW */
841 unsigned long lb_irq_int_14 : 1; /* RW */
842 unsigned long lb_irq_int_15 : 1; /* RW */
843 unsigned long l1_nmi_int : 1; /* RW */
844 unsigned long stop_clock : 1; /* RW */
845 unsigned long asic_to_l1 : 1; /* RW */
846 unsigned long l1_to_asic : 1; /* RW */
847 unsigned long ltc_int : 1; /* RW */
848 unsigned long la_seq_trigger : 1; /* RW */
849 unsigned long rsvd_45_63 : 19; /* */
850 } s;
851};
852
+/* ========================================================================= */
 /* UVH_NODE_ID */
 /* ========================================================================= */
 #define UVH_NODE_ID 0x0UL
@@ -284,14 +887,101 @@ union uvh_node_id_u {
 };
 
 /* ========================================================================= */
890/* UVH_NODE_PRESENT_TABLE */
891/* ========================================================================= */
892#define UVH_NODE_PRESENT_TABLE 0x1400UL
893#define UVH_NODE_PRESENT_TABLE_DEPTH 16
894
895#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
896#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
897
898union uvh_node_present_table_u {
899 unsigned long v;
900 struct uvh_node_present_table_s {
901 unsigned long nodes : 64; /* RW */
902 } s;
903};
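UVH_NODE_PRESENT_TABLE is an array of UVH_NODE_PRESENT_TABLE_DEPTH 64-bit words, one bit per node, so counting present nodes is a popcount over the words. A sketch with an invented table (in the kernel the words would be read via the MMR accessors):

/* Illustration only: counting present nodes in the node present table. */
#include <stdio.h>

#define UVH_NODE_PRESENT_TABLE_DEPTH 16

int main(void)
{
	unsigned long table[UVH_NODE_PRESENT_TABLE_DEPTH] = { 0x5UL, 0x1UL };
	int i, nodes = 0;

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		nodes += __builtin_popcountl(table[i]);	/* gcc builtin */
	printf("present nodes = %d\n", nodes);		/* -> 3 */
	return 0;
}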
904
905/* ========================================================================= */
906/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
907/* ========================================================================= */
908#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
909
910#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
911#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
912
913union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
914 unsigned long v;
915 struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
916 unsigned long rsvd_0_23 : 24; /* */
917 unsigned long dest_base : 22; /* RW */
918 unsigned long rsvd_46_63: 18; /* */
919 } s;
920};
921
922/* ========================================================================= */
923/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
924/* ========================================================================= */
925#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
926
927#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
928#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
929
930union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
931 unsigned long v;
932 struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
933 unsigned long rsvd_0_23 : 24; /* */
934 unsigned long dest_base : 22; /* RW */
935 unsigned long rsvd_46_63: 18; /* */
936 } s;
937};
938
939/* ========================================================================= */
940/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
941/* ========================================================================= */
942#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
943
944#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
945#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
946
947union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
948 unsigned long v;
949 struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
950 unsigned long rsvd_0_23 : 24; /* */
951 unsigned long dest_base : 22; /* RW */
952 unsigned long rsvd_46_63: 18; /* */
953 } s;
954};
955
956/* ========================================================================= */
957/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
958/* ========================================================================= */
959#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
960
961#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
962#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
963#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
964#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
965
966union uvh_rh_gam_cfg_overlay_config_mmr_u {
967 unsigned long v;
968 struct uvh_rh_gam_cfg_overlay_config_mmr_s {
969 unsigned long rsvd_0_25: 26; /* */
970 unsigned long base : 20; /* RW */
971 unsigned long rsvd_46_62: 17; /* */
972 unsigned long enable : 1; /* RW */
973 } s;
974};
975
+/* ========================================================================= */
 /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
 /* ========================================================================= */
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
 
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -302,8 +992,9 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
 	struct uvh_rh_gam_gru_overlay_config_mmr_s {
 		unsigned long	rsvd_0_27: 28;  /*    */
 		unsigned long	base   : 18;  /* RW */
+		unsigned long	rsvd_46_47: 2;  /*    */
 		unsigned long	gr4    : 1;  /* RW */
-		unsigned long	rsvd_47_51: 5;  /*    */
+		unsigned long	rsvd_49_51: 3;  /*    */
 		unsigned long	n_gru  : 4;  /* RW */
 		unsigned long	rsvd_56_62: 7;  /*    */
 		unsigned long	enable : 1;  /* RW */
@@ -311,6 +1002,32 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
 };
 
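Decoding such a register from its raw value uses the _SHFT/_MASK pairs directly, with gr4 now at bit 48 after the change above. A sketch over an invented register image (not read from hardware):

/* Illustration only: decoding a raw GRU overlay config value. */
#include <stdio.h>

#define BASE_SHFT   28
#define BASE_MASK   0x00003ffff0000000UL
#define GR4_SHFT    48
#define GR4_MASK    0x0001000000000000UL
#define N_GRU_SHFT  52
#define N_GRU_MASK  0x00f0000000000000UL
#define ENABLE_MASK 0x8000000000000000UL

int main(void)
{
	unsigned long v = 0x80310000f0000000UL;	/* invented register image */

	printf("enable=%d gr4=%lu n_gru=%lu base=0x%lx\n",
	       !!(v & ENABLE_MASK),
	       (v & GR4_MASK) >> GR4_SHFT,
	       (v & N_GRU_MASK) >> N_GRU_SHFT,
	       (v & BASE_MASK) >> BASE_SHFT);
	return 0;
}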
 /* ========================================================================= */
1005/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
1006/* ========================================================================= */
1007#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
1008
1009#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
1010#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
1011#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
1012#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1013#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1014#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1015#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1016#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1017
1018union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1019 unsigned long v;
1020 struct uvh_rh_gam_mmioh_overlay_config_mmr_s {
1021 unsigned long rsvd_0_29: 30; /* */
1022 unsigned long base : 16; /* RW */
1023 unsigned long m_io : 6; /* RW */
1024 unsigned long n_io : 4; /* RW */
1025 unsigned long rsvd_56_62: 7; /* */
1026 unsigned long enable : 1; /* RW */
1027 } s;
1028};
1029
+/* ========================================================================= */
 /* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
 /* ========================================================================= */
 #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
@@ -336,7 +1053,7 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
 /* ========================================================================= */
 /* UVH_RTC */
 /* ========================================================================= */
-#define UVH_RTC 0x28000UL
+#define UVH_RTC 0x340000UL
 
 #define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
 #define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -350,6 +1067,139 @@ union uvh_rtc_u {
 };
 
 /* ========================================================================= */
1070/* UVH_RTC1_INT_CONFIG */
1071/* ========================================================================= */
1072#define UVH_RTC1_INT_CONFIG 0x615c0UL
1073
1074#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
1075#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1076#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
1077#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
1078#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
1079#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1080#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
1081#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1082#define UVH_RTC1_INT_CONFIG_P_SHFT 13
1083#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
1084#define UVH_RTC1_INT_CONFIG_T_SHFT 15
1085#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
1086#define UVH_RTC1_INT_CONFIG_M_SHFT 16
1087#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
1088#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
1089#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1090
1091union uvh_rtc1_int_config_u {
1092 unsigned long v;
1093 struct uvh_rtc1_int_config_s {
1094 unsigned long vector_ : 8; /* RW */
1095 unsigned long dm : 3; /* RW */
1096 unsigned long destmode : 1; /* RW */
1097 unsigned long status : 1; /* RO */
1098 unsigned long p : 1; /* RO */
1099 unsigned long rsvd_14 : 1; /* */
1100 unsigned long t : 1; /* RO */
1101 unsigned long m : 1; /* RW */
1102 unsigned long rsvd_17_31: 15; /* */
1103 unsigned long apic_id : 32; /* RW */
1104 } s;
1105};
1106
1107/* ========================================================================= */
1108/* UVH_RTC2_INT_CONFIG */
1109/* ========================================================================= */
1110#define UVH_RTC2_INT_CONFIG 0x61600UL
1111
1112#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
1113#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1114#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
1115#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
1116#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
1117#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1118#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
1119#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1120#define UVH_RTC2_INT_CONFIG_P_SHFT 13
1121#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
1122#define UVH_RTC2_INT_CONFIG_T_SHFT 15
1123#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
1124#define UVH_RTC2_INT_CONFIG_M_SHFT 16
1125#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
1126#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
1127#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1128
1129union uvh_rtc2_int_config_u {
1130 unsigned long v;
1131 struct uvh_rtc2_int_config_s {
1132 unsigned long vector_ : 8; /* RW */
1133 unsigned long dm : 3; /* RW */
1134 unsigned long destmode : 1; /* RW */
1135 unsigned long status : 1; /* RO */
1136 unsigned long p : 1; /* RO */
1137 unsigned long rsvd_14 : 1; /* */
1138 unsigned long t : 1; /* RO */
1139 unsigned long m : 1; /* RW */
1140 unsigned long rsvd_17_31: 15; /* */
1141 unsigned long apic_id : 32; /* RW */
1142 } s;
1143};
1144
1145/* ========================================================================= */
1146/* UVH_RTC3_INT_CONFIG */
1147/* ========================================================================= */
1148#define UVH_RTC3_INT_CONFIG 0x61640UL
1149
1150#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
1151#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1152#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
1153#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
1154#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
1155#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1156#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
1157#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1158#define UVH_RTC3_INT_CONFIG_P_SHFT 13
1159#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
1160#define UVH_RTC3_INT_CONFIG_T_SHFT 15
1161#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
1162#define UVH_RTC3_INT_CONFIG_M_SHFT 16
1163#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
1164#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
1165#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1166
1167union uvh_rtc3_int_config_u {
1168 unsigned long v;
1169 struct uvh_rtc3_int_config_s {
1170 unsigned long vector_ : 8; /* RW */
1171 unsigned long dm : 3; /* RW */
1172 unsigned long destmode : 1; /* RW */
1173 unsigned long status : 1; /* RO */
1174 unsigned long p : 1; /* RO */
1175 unsigned long rsvd_14 : 1; /* */
1176 unsigned long t : 1; /* RO */
1177 unsigned long m : 1; /* RW */
1178 unsigned long rsvd_17_31: 15; /* */
1179 unsigned long apic_id : 32; /* RW */
1180 } s;
1181};
1182
1183/* ========================================================================= */
1184/* UVH_RTC_INC_RATIO */
1185/* ========================================================================= */
1186#define UVH_RTC_INC_RATIO 0x350000UL
1187
1188#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
1189#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
1190#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
1191#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
1192
1193union uvh_rtc_inc_ratio_u {
1194 unsigned long v;
1195 struct uvh_rtc_inc_ratio_s {
1196 unsigned long fraction : 20; /* RW */
1197 unsigned long ratio : 3; /* RW */
1198 unsigned long rsvd_23_63: 41; /* */
1199 } s;
1200};
1201
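Reads go the other way: load .v, then pick fields either through the
bitfields or with the SHFT/MASK macros; the two views are equivalent by
construction. A hedged sketch, again assuming a uv_read_local_mmr()
accessor:

	static unsigned long uv_rtc_fraction(void)
	{
		union uvh_rtc_inc_ratio_u r;

		r.v = uv_read_local_mmr(UVH_RTC_INC_RATIO);
		/* identical to: (r.v & UVH_RTC_INC_RATIO_FRACTION_MASK)
		 *               >> UVH_RTC_INC_RATIO_FRACTION_SHFT */
		return r.s.fraction;
	}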
1202/* ========================================================================= */
353/* UVH_SI_ADDR_MAP_CONFIG */ 1203/* UVH_SI_ADDR_MAP_CONFIG */
354/* ========================================================================= */ 1204/* ========================================================================= */
355#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL 1205#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
@@ -369,5 +1219,77 @@ union uvh_si_addr_map_config_u {
369 } s; 1219 } s;
370}; 1220};
371 1221
1222/* ========================================================================= */
1223/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
1224/* ========================================================================= */
1225#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
1226
1227#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
1228#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1229#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1230#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1231#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
1232#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1233
1234union uvh_si_alias0_overlay_config_u {
1235 unsigned long v;
1236 struct uvh_si_alias0_overlay_config_s {
1237 unsigned long rsvd_0_23: 24; /* */
1238 unsigned long base : 8; /* RW */
1239 unsigned long rsvd_32_47: 16; /* */
1240 unsigned long m_alias : 5; /* RW */
1241 unsigned long rsvd_53_62: 10; /* */
1242 unsigned long enable : 1; /* RW */
1243 } s;
1244};
1245
1246/* ========================================================================= */
1247/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
1248/* ========================================================================= */
1249#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
1250
1251#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
1252#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1253#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1254#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1255#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
1256#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1257
1258union uvh_si_alias1_overlay_config_u {
1259 unsigned long v;
1260 struct uvh_si_alias1_overlay_config_s {
1261 unsigned long rsvd_0_23: 24; /* */
1262 unsigned long base : 8; /* RW */
1263 unsigned long rsvd_32_47: 16; /* */
1264 unsigned long m_alias : 5; /* RW */
1265 unsigned long rsvd_53_62: 10; /* */
1266 unsigned long enable : 1; /* RW */
1267 } s;
1268};
1269
1270/* ========================================================================= */
1271/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
1272/* ========================================================================= */
1273#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
1274
1275#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
1276#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1277#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1278#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1279#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
1280#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1281
1282union uvh_si_alias2_overlay_config_u {
1283 unsigned long v;
1284 struct uvh_si_alias2_overlay_config_s {
1285 unsigned long rsvd_0_23: 24; /* */
1286 unsigned long base : 8; /* RW */
1287 unsigned long rsvd_32_47: 16; /* */
1288 unsigned long m_alias : 5; /* RW */
1289 unsigned long rsvd_53_62: 10; /* */
1290 unsigned long enable : 1; /* RW */
1291 } s;
1292};
1293
372 1294
373#endif /* __ASM_X86_UV_MMRS__ */ 1295#endif /* ASM_X86__UV__UV_MMRS_H */
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h
index 86e085e003d2..4ab320913ea3 100644
--- a/include/asm-x86/vdso.h
+++ b/include/asm-x86/vdso.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_VDSO_H 1#ifndef ASM_X86__VDSO_H
2#define _ASM_X86_VDSO_H 1 2#define ASM_X86__VDSO_H
3 3
4#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
5extern const char VDSO64_PRELINK[]; 5extern const char VDSO64_PRELINK[];
@@ -36,4 +36,12 @@ extern const char VDSO32_PRELINK[];
36extern void __user __kernel_sigreturn; 36extern void __user __kernel_sigreturn;
37extern void __user __kernel_rt_sigreturn; 37extern void __user __kernel_rt_sigreturn;
38 38
39#endif /* asm-x86/vdso.h */ 39/*
40 * These symbols are defined by vdso32.S to mark the bounds
41 * of the ELF DSO images included therein.
42 */
43extern const char vdso32_int80_start, vdso32_int80_end;
44extern const char vdso32_syscall_start, vdso32_syscall_end;
45extern const char vdso32_sysenter_start, vdso32_sysenter_end;
46
47#endif /* ASM_X86__VDSO_H */
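The start/end pairs added above let setup code treat each embedded DSO as a
plain byte range. A hypothetical consumer (the destination buffer is an
assumption made for illustration):

	/* Pick the int $0x80 image when SYSENTER/SYSCALL are unusable. */
	const void *blob = &vdso32_int80_start;
	size_t len = &vdso32_int80_end - &vdso32_int80_start;

	memcpy(vdso32_dest_page, blob, len);    /* vdso32_dest_page: assumed */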
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h
index 0ccf804377e6..b9e493d07d07 100644
--- a/include/asm-x86/vga.h
+++ b/include/asm-x86/vga.h
@@ -4,8 +4,8 @@
4 * (c) 1998 Martin Mares <mj@ucw.cz> 4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */ 5 */
6 6
7#ifndef _LINUX_ASM_VGA_H_ 7#ifndef ASM_X86__VGA_H
8#define _LINUX_ASM_VGA_H_ 8#define ASM_X86__VGA_H
9 9
10/* 10/*
11 * On the PC, we can just recalculate addresses and then 11 * On the PC, we can just recalculate addresses and then
@@ -17,4 +17,4 @@
17#define vga_readb(x) (*(x)) 17#define vga_readb(x) (*(x))
18#define vga_writeb(x, y) (*(y) = (x)) 18#define vga_writeb(x, y) (*(y) = (x))
19 19
20#endif 20#endif /* ASM_X86__VGA_H */
diff --git a/include/asm-x86/vgtod.h b/include/asm-x86/vgtod.h
index 3301f0929342..38fd13364021 100644
--- a/include/asm-x86/vgtod.h
+++ b/include/asm-x86/vgtod.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_VGTOD_H 1#ifndef ASM_X86__VGTOD_H
2#define _ASM_VGTOD_H 1 2#define ASM_X86__VGTOD_H
3 3
4#include <asm/vsyscall.h> 4#include <asm/vsyscall.h>
5#include <linux/clocksource.h> 5#include <linux/clocksource.h>
@@ -26,4 +26,4 @@ extern struct vsyscall_gtod_data __vsyscall_gtod_data
26__section_vsyscall_gtod_data; 26__section_vsyscall_gtod_data;
27extern struct vsyscall_gtod_data vsyscall_gtod_data; 27extern struct vsyscall_gtod_data vsyscall_gtod_data;
28 28
29#endif 29#endif /* ASM_X86__VGTOD_H */
diff --git a/include/asm-x86/mach-visws/cobalt.h b/include/asm-x86/visws/cobalt.h
index 995258831b7f..9627a8fe84e9 100644
--- a/include/asm-x86/mach-visws/cobalt.h
+++ b/include/asm-x86/visws/cobalt.h
@@ -1,5 +1,5 @@
1#ifndef __I386_SGI_COBALT_H 1#ifndef ASM_X86__VISWS__COBALT_H
2#define __I386_SGI_COBALT_H 2#define ASM_X86__VISWS__COBALT_H
3 3
4#include <asm/fixmap.h> 4#include <asm/fixmap.h>
5 5
@@ -122,4 +122,4 @@ extern char visws_board_type;
122 122
123extern char visws_board_rev; 123extern char visws_board_rev;
124 124
125#endif /* __I386_SGI_COBALT_H */ 125#endif /* ASM_X86__VISWS__COBALT_H */
diff --git a/include/asm-x86/mach-visws/lithium.h b/include/asm-x86/visws/lithium.h
index dfcd4f07ab85..b36d3b378c63 100644
--- a/include/asm-x86/mach-visws/lithium.h
+++ b/include/asm-x86/visws/lithium.h
@@ -1,5 +1,5 @@
1#ifndef __I386_SGI_LITHIUM_H 1#ifndef ASM_X86__VISWS__LITHIUM_H
2#define __I386_SGI_LITHIUM_H 2#define ASM_X86__VISWS__LITHIUM_H
3 3
4#include <asm/fixmap.h> 4#include <asm/fixmap.h>
5 5
@@ -49,5 +49,5 @@ static inline unsigned short li_pcib_read16(unsigned long reg)
49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); 49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
50} 50}
51 51
52#endif 52#endif /* ASM_X86__VISWS__LITHIUM_H */
53 53
diff --git a/include/asm-x86/mach-visws/piix4.h b/include/asm-x86/visws/piix4.h
index 83ea4f46e419..61c938045ec9 100644
--- a/include/asm-x86/mach-visws/piix4.h
+++ b/include/asm-x86/visws/piix4.h
@@ -1,5 +1,5 @@
1#ifndef __I386_SGI_PIIX_H 1#ifndef ASM_X86__VISWS__PIIX4_H
2#define __I386_SGI_PIIX_H 2#define ASM_X86__VISWS__PIIX4_H
3 3
4/* 4/*
5 * PIIX4 as used on SGI Visual Workstations 5 * PIIX4 as used on SGI Visual Workstations
@@ -104,4 +104,4 @@
104 */ 104 */
105#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in 105#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in
106 106
107#endif 107#endif /* ASM_X86__VISWS__PIIX4_H */
diff --git a/include/asm-x86/visws/sgivw.h b/include/asm-x86/visws/sgivw.h
new file mode 100644
index 000000000000..5fbf63e1003c
--- /dev/null
+++ b/include/asm-x86/visws/sgivw.h
@@ -0,0 +1,5 @@
1/*
2 * Frame buffer position and size:
3 */
4extern unsigned long sgivwfb_mem_phys;
5extern unsigned long sgivwfb_mem_size;
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index 074b357146df..998bd18eb737 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -1,5 +1,5 @@
1#ifndef _LINUX_VM86_H 1#ifndef ASM_X86__VM86_H
2#define _LINUX_VM86_H 2#define ASM_X86__VM86_H
3 3
4/* 4/*
5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how 5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how
@@ -14,12 +14,6 @@
14 14
15#include <asm/processor-flags.h> 15#include <asm/processor-flags.h>
16 16
17#ifdef CONFIG_VM86
18#define X86_VM_MASK X86_EFLAGS_VM
19#else
20#define X86_VM_MASK 0 /* No VM86 support */
21#endif
22
23#define BIOSSEG 0x0f000 17#define BIOSSEG 0x0f000
24 18
25#define CPU_086 0 19#define CPU_086 0
@@ -121,7 +115,6 @@ struct vm86plus_info_struct {
121 unsigned long is_vm86pus:1; /* for vm86 internal use */ 115 unsigned long is_vm86pus:1; /* for vm86 internal use */
122 unsigned char vm86dbg_intxxtab[32]; /* for debugger */ 116 unsigned char vm86dbg_intxxtab[32]; /* for debugger */
123}; 117};
124
125struct vm86plus_struct { 118struct vm86plus_struct {
126 struct vm86_regs regs; 119 struct vm86_regs regs;
127 unsigned long flags; 120 unsigned long flags;
@@ -133,6 +126,9 @@ struct vm86plus_struct {
133}; 126};
134 127
135#ifdef __KERNEL__ 128#ifdef __KERNEL__
129
130#include <asm/ptrace.h>
131
136/* 132/*
137 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 133 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
138 * mode - the main change is that the old segment descriptors aren't 134 * mode - the main change is that the old segment descriptors aren't
@@ -141,7 +137,6 @@ struct vm86plus_struct {
141 * at the end of the structure. Look at ptrace.h to see the "normal" 137 * at the end of the structure. Look at ptrace.h to see the "normal"
142 * setup. For user space layout see 'struct vm86_regs' above. 138 * setup. For user space layout see 'struct vm86_regs' above.
143 */ 139 */
144#include <asm/ptrace.h>
145 140
146struct kernel_vm86_regs { 141struct kernel_vm86_regs {
147/* 142/*
@@ -210,4 +205,4 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
210 205
211#endif /* __KERNEL__ */ 206#endif /* __KERNEL__ */
212 207
213#endif 208#endif /* ASM_X86__VM86_H */
diff --git a/include/asm-x86/vmi_time.h b/include/asm-x86/vmi_time.h
index 478188130328..b2d39e6a08b7 100644
--- a/include/asm-x86/vmi_time.h
+++ b/include/asm-x86/vmi_time.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __VMI_TIME_H 25#ifndef ASM_X86__VMI_TIME_H
26#define __VMI_TIME_H 26#define ASM_X86__VMI_TIME_H
27 27
28/* 28/*
29 * Raw VMI call indices for timer functions 29 * Raw VMI call indices for timer functions
@@ -50,7 +50,7 @@ extern void __init vmi_time_init(void);
50extern unsigned long vmi_get_wallclock(void); 50extern unsigned long vmi_get_wallclock(void);
51extern int vmi_set_wallclock(unsigned long now); 51extern int vmi_set_wallclock(unsigned long now);
52extern unsigned long long vmi_sched_clock(void); 52extern unsigned long long vmi_sched_clock(void);
53extern unsigned long vmi_cpu_khz(void); 53extern unsigned long vmi_tsc_khz(void);
54 54
55#ifdef CONFIG_X86_LOCAL_APIC 55#ifdef CONFIG_X86_LOCAL_APIC
56extern void __devinit vmi_time_bsp_init(void); 56extern void __devinit vmi_time_bsp_init(void);
@@ -95,4 +95,4 @@ extern void __devinit vmi_time_ap_init(void);
95 95
96#define CONFIG_VMI_ALARM_HZ 100 96#define CONFIG_VMI_ALARM_HZ 100
97 97
98#endif 98#endif /* ASM_X86__VMI_TIME_H */
diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h
index 17b3700949bf..dcd4682413de 100644
--- a/include/asm-x86/vsyscall.h
+++ b/include/asm-x86/vsyscall.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_VSYSCALL_H_ 1#ifndef ASM_X86__VSYSCALL_H
2#define _ASM_X86_64_VSYSCALL_H_ 2#define ASM_X86__VSYSCALL_H
3 3
4enum vsyscall_num { 4enum vsyscall_num {
5 __NR_vgettimeofday, 5 __NR_vgettimeofday,
@@ -24,7 +24,8 @@ enum vsyscall_num {
24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16))) 24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
25#define __section_vsyscall_clock __attribute__ \ 25#define __section_vsyscall_clock __attribute__ \
26 ((unused, __section__ (".vsyscall_clock"),aligned(16))) 26 ((unused, __section__ (".vsyscall_clock"),aligned(16)))
27#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn"))) 27#define __vsyscall_fn \
28 __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
28 29
29#define VGETCPU_RDTSCP 1 30#define VGETCPU_RDTSCP 1
30#define VGETCPU_LSL 2 31#define VGETCPU_LSL 2
@@ -40,4 +41,4 @@ extern void map_vsyscall(void);
40 41
41#endif /* __KERNEL__ */ 42#endif /* __KERNEL__ */
42 43
43#endif /* _ASM_X86_64_VSYSCALL_H_ */ 44#endif /* ASM_X86__VSYSCALL_H */
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h
new file mode 100644
index 000000000000..f2cba4e79a23
--- /dev/null
+++ b/include/asm-x86/xcr.h
@@ -0,0 +1,49 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2008 rPath, Inc. - All Rights Reserved
4 *
5 * This file is part of the Linux kernel, and is made available under
6 * the terms of the GNU General Public License version 2 or (at your
7 * option) any later version; incorporated herein by reference.
8 *
9 * ----------------------------------------------------------------------- */
10
11/*
12 * asm-x86/xcr.h
13 *
14 * Definitions for the eXtended Control Register instructions
15 */
16
17#ifndef _ASM_X86_XCR_H
18#define _ASM_X86_XCR_H
19
20#define XCR_XFEATURE_ENABLED_MASK 0x00000000
21
22#ifdef __KERNEL__
23# ifndef __ASSEMBLY__
24
25#include <linux/types.h>
26
27static inline u64 xgetbv(u32 index)
28{
29 u32 eax, edx;
30
31 asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
32 : "=a" (eax), "=d" (edx)
33 : "c" (index));
34 return eax + ((u64)edx << 32);
35}
36
37static inline void xsetbv(u32 index, u64 value)
38{
39 u32 eax = value;
40 u32 edx = value >> 32;
41
42 asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
43 : : "a" (eax), "d" (edx), "c" (index));
44}
45
46# endif /* __ASSEMBLY__ */
47#endif /* __KERNEL__ */
48
49#endif /* _ASM_X86_XCR_H */
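A quick usage sketch: index 0 is XCR0, the xfeature-enabled mask named by
XCR_XFEATURE_ENABLED_MASK above. These raw opcodes fault unless the CPU
supports XSAVE and CR4.OSXSAVE is set, so real callers gate on that first:

	u64 xcr0;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);       /* read XCR0      */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | 0x3);  /* x87 | SSE bits */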
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 596312a7bfc9..8151f5b8b6cb 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -1,9 +1,11 @@
1#ifndef __XEN_EVENTS_H 1#ifndef ASM_X86__XEN__EVENTS_H
2#define __XEN_EVENTS_H 2#define ASM_X86__XEN__EVENTS_H
3 3
4enum ipi_vector { 4enum ipi_vector {
5 XEN_RESCHEDULE_VECTOR, 5 XEN_RESCHEDULE_VECTOR,
6 XEN_CALL_FUNCTION_VECTOR, 6 XEN_CALL_FUNCTION_VECTOR,
7 XEN_CALL_FUNCTION_SINGLE_VECTOR,
8 XEN_SPIN_UNLOCK_VECTOR,
7 9
8 XEN_NR_IPIS, 10 XEN_NR_IPIS,
9}; 11};
@@ -19,4 +21,4 @@ static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
19 do_IRQ(regs); 21 do_IRQ(regs);
20} 22}
21 23
22#endif /* __XEN_EVENTS_H */ 24#endif /* ASM_X86__XEN__EVENTS_H */
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h
index 2444d4593a3b..c4baab4d2b68 100644
--- a/include/asm-x86/xen/grant_table.h
+++ b/include/asm-x86/xen/grant_table.h
@@ -1,7 +1,7 @@
1#ifndef __XEN_GRANT_TABLE_H 1#ifndef ASM_X86__XEN__GRANT_TABLE_H
2#define __XEN_GRANT_TABLE_H 2#define ASM_X86__XEN__GRANT_TABLE_H
3 3
4#define xen_alloc_vm_area(size) alloc_vm_area(size) 4#define xen_alloc_vm_area(size) alloc_vm_area(size)
5#define xen_free_vm_area(area) free_vm_area(area) 5#define xen_free_vm_area(area) free_vm_area(area)
6 6
7#endif /* __XEN_GRANT_TABLE_H */ 7#endif /* ASM_X86__XEN__GRANT_TABLE_H */
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index c2ccd997ed35..44f4259bee3f 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -30,8 +30,8 @@
30 * IN THE SOFTWARE. 30 * IN THE SOFTWARE.
31 */ 31 */
32 32
33#ifndef __HYPERCALL_H__ 33#ifndef ASM_X86__XEN__HYPERCALL_H
34#define __HYPERCALL_H__ 34#define ASM_X86__XEN__HYPERCALL_H
35 35
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/string.h> 37#include <linux/string.h>
@@ -40,83 +40,157 @@
40#include <xen/interface/sched.h> 40#include <xen/interface/sched.h>
41#include <xen/interface/physdev.h> 41#include <xen/interface/physdev.h>
42 42
43/*
44 * The hypercall asms have to meet several constraints:
45 * - Work on 32- and 64-bit.
46 * The two architectures put their arguments in different sets of
47 * registers.
48 *
49 * - Work around asm syntax quirks
50 * It isn't possible to specify one of the rNN registers in a
51 * constraint, so we use explicit register variables to get the
52 * args into the right place.
53 *
54 * - Mark all registers as potentially clobbered
55 * Even unused parameters can be clobbered by the hypervisor, so we
56 * need to make sure gcc knows it.
57 *
58 * - Avoid compiler bugs.
59 * This is the tricky part. Because x86_32 has such a constrained
60 * register set, gcc versions below 4.3 have trouble generating
61 * code when all the arg registers and memory are trashed by the
62 * asm. There are syntactically simpler ways of achieving the
63 * semantics below, but they cause the compiler to crash.
64 *
65 * The only combination I found which works is:
66 * - assign the __argX variables first
67 * - list all actually used parameters as "+r" (__argX)
68 * - clobber the rest
69 *
70 * The result certainly isn't pretty, and it really shows up cpp's
71 * weakness as as macro language. Sorry. (But let's just give thanks
72 * there aren't more than 5 arguments...)
73 */
74
43extern struct { char _entry[32]; } hypercall_page[]; 75extern struct { char _entry[32]; } hypercall_page[];
44 76
77#define __HYPERCALL "call hypercall_page+%c[offset]"
78#define __HYPERCALL_ENTRY(x) \
79 [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
80
81#ifdef CONFIG_X86_32
82#define __HYPERCALL_RETREG "eax"
83#define __HYPERCALL_ARG1REG "ebx"
84#define __HYPERCALL_ARG2REG "ecx"
85#define __HYPERCALL_ARG3REG "edx"
86#define __HYPERCALL_ARG4REG "esi"
87#define __HYPERCALL_ARG5REG "edi"
88#else
89#define __HYPERCALL_RETREG "rax"
90#define __HYPERCALL_ARG1REG "rdi"
91#define __HYPERCALL_ARG2REG "rsi"
92#define __HYPERCALL_ARG3REG "rdx"
93#define __HYPERCALL_ARG4REG "r10"
94#define __HYPERCALL_ARG5REG "r8"
95#endif
96
97#define __HYPERCALL_DECLS \
98 register unsigned long __res asm(__HYPERCALL_RETREG); \
99 register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
100 register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
101 register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
102 register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
103 register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
104
105#define __HYPERCALL_0PARAM "=r" (__res)
106#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
107#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
108#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
109#define __HYPERCALL_4PARAM __HYPERCALL_3PARAM, "+r" (__arg4)
110#define __HYPERCALL_5PARAM __HYPERCALL_4PARAM, "+r" (__arg5)
111
112#define __HYPERCALL_0ARG()
113#define __HYPERCALL_1ARG(a1) \
114 __HYPERCALL_0ARG() __arg1 = (unsigned long)(a1);
115#define __HYPERCALL_2ARG(a1,a2) \
116 __HYPERCALL_1ARG(a1) __arg2 = (unsigned long)(a2);
117#define __HYPERCALL_3ARG(a1,a2,a3) \
118 __HYPERCALL_2ARG(a1,a2) __arg3 = (unsigned long)(a3);
119#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
120 __HYPERCALL_3ARG(a1,a2,a3) __arg4 = (unsigned long)(a4);
121#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
122 __HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5);
123
124#define __HYPERCALL_CLOBBER5 "memory"
125#define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
126#define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
127#define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
128#define __HYPERCALL_CLOBBER1 __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
129#define __HYPERCALL_CLOBBER0 __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
130
45#define _hypercall0(type, name) \ 131#define _hypercall0(type, name) \
46({ \ 132({ \
47 long __res; \ 133 __HYPERCALL_DECLS; \
48 asm volatile ( \ 134 __HYPERCALL_0ARG(); \
49 "call %[call]" \ 135 asm volatile (__HYPERCALL \
50 : "=a" (__res) \ 136 : __HYPERCALL_0PARAM \
51 : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ 137 : __HYPERCALL_ENTRY(name) \
52 : "memory" ); \ 138 : __HYPERCALL_CLOBBER0); \
53 (type)__res; \ 139 (type)__res; \
54}) 140})
55 141
56#define _hypercall1(type, name, a1) \ 142#define _hypercall1(type, name, a1) \
57({ \ 143({ \
58 long __res, __ign1; \ 144 __HYPERCALL_DECLS; \
59 asm volatile ( \ 145 __HYPERCALL_1ARG(a1); \
60 "call %[call]" \ 146 asm volatile (__HYPERCALL \
61 : "=a" (__res), "=b" (__ign1) \ 147 : __HYPERCALL_1PARAM \
62 : "1" ((long)(a1)), \ 148 : __HYPERCALL_ENTRY(name) \
63 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ 149 : __HYPERCALL_CLOBBER1); \
64 : "memory" ); \
65 (type)__res; \ 150 (type)__res; \
66}) 151})
67 152
68#define _hypercall2(type, name, a1, a2) \ 153#define _hypercall2(type, name, a1, a2) \
69({ \ 154({ \
70 long __res, __ign1, __ign2; \ 155 __HYPERCALL_DECLS; \
71 asm volatile ( \ 156 __HYPERCALL_2ARG(a1, a2); \
72 "call %[call]" \ 157 asm volatile (__HYPERCALL \
73 : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ 158 : __HYPERCALL_2PARAM \
74 : "1" ((long)(a1)), "2" ((long)(a2)), \ 159 : __HYPERCALL_ENTRY(name) \
75 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ 160 : __HYPERCALL_CLOBBER2); \
76 : "memory" ); \
77 (type)__res; \ 161 (type)__res; \
78}) 162})
79 163
80#define _hypercall3(type, name, a1, a2, a3) \ 164#define _hypercall3(type, name, a1, a2, a3) \
81({ \ 165({ \
82 long __res, __ign1, __ign2, __ign3; \ 166 __HYPERCALL_DECLS; \
83 asm volatile ( \ 167 __HYPERCALL_3ARG(a1, a2, a3); \
84 "call %[call]" \ 168 asm volatile (__HYPERCALL \
85 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ 169 : __HYPERCALL_3PARAM \
86 "=d" (__ign3) \ 170 : __HYPERCALL_ENTRY(name) \
87 : "1" ((long)(a1)), "2" ((long)(a2)), \ 171 : __HYPERCALL_CLOBBER3); \
88 "3" ((long)(a3)), \
89 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
90 : "memory" ); \
91 (type)__res; \ 172 (type)__res; \
92}) 173})
93 174
94#define _hypercall4(type, name, a1, a2, a3, a4) \ 175#define _hypercall4(type, name, a1, a2, a3, a4) \
95({ \ 176({ \
96 long __res, __ign1, __ign2, __ign3, __ign4; \ 177 __HYPERCALL_DECLS; \
97 asm volatile ( \ 178 __HYPERCALL_4ARG(a1, a2, a3, a4); \
98 "call %[call]" \ 179 asm volatile (__HYPERCALL \
99 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ 180 : __HYPERCALL_4PARAM \
100 "=d" (__ign3), "=S" (__ign4) \ 181 : __HYPERCALL_ENTRY(name) \
101 : "1" ((long)(a1)), "2" ((long)(a2)), \ 182 : __HYPERCALL_CLOBBER4); \
102 "3" ((long)(a3)), "4" ((long)(a4)), \
103 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
104 : "memory" ); \
105 (type)__res; \ 183 (type)__res; \
106}) 184})
107 185
108#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ 186#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
109({ \ 187({ \
110 long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ 188 __HYPERCALL_DECLS; \
111 asm volatile ( \ 189 __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
112 "call %[call]" \ 190 asm volatile (__HYPERCALL \
113 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ 191 : __HYPERCALL_5PARAM \
114 "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ 192 : __HYPERCALL_ENTRY(name) \
115 : "1" ((long)(a1)), "2" ((long)(a2)), \ 193 : __HYPERCALL_CLOBBER5); \
116 "3" ((long)(a3)), "4" ((long)(a4)), \
117 "5" ((long)(a5)), \
118 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
119 : "memory" ); \
120 (type)__res; \ 194 (type)__res; \
121}) 195})
122 196
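To make the register plumbing concrete, this is roughly what
_hypercall2(int, stack_switch, ss, esp) becomes on x86_64 once the macros
above expand; an illustrative paraphrase, not verbatim preprocessor output
(the unused __arg3..__arg5 declarations are omitted):

	({
		register unsigned long __res  asm("rax");
		register unsigned long __arg1 asm("rdi") = (unsigned long)(ss);
		register unsigned long __arg2 asm("rsi") = (unsigned long)(esp);
		asm volatile("call hypercall_page+%c[offset]"
			     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
			     : [offset] "i" (__HYPERVISOR_stack_switch *
					     sizeof(hypercall_page[0]))
			     : "memory", "r8", "r10", "rdx");
		(int)__res;
	})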
@@ -152,6 +226,7 @@ HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
152 return _hypercall2(int, stack_switch, ss, esp); 226 return _hypercall2(int, stack_switch, ss, esp);
153} 227}
154 228
229#ifdef CONFIG_X86_32
155static inline int 230static inline int
156HYPERVISOR_set_callbacks(unsigned long event_selector, 231HYPERVISOR_set_callbacks(unsigned long event_selector,
157 unsigned long event_address, 232 unsigned long event_address,
@@ -162,6 +237,17 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
162 event_selector, event_address, 237 event_selector, event_address,
163 failsafe_selector, failsafe_address); 238 failsafe_selector, failsafe_address);
164} 239}
240#else /* CONFIG_X86_64 */
241static inline int
242HYPERVISOR_set_callbacks(unsigned long event_address,
243 unsigned long failsafe_address,
244 unsigned long syscall_address)
245{
246 return _hypercall3(int, set_callbacks,
247 event_address, failsafe_address,
248 syscall_address);
249}
250#endif /* CONFIG_X86_{32,64} */
165 251
166static inline int 252static inline int
167HYPERVISOR_callback_op(int cmd, void *arg) 253HYPERVISOR_callback_op(int cmd, void *arg)
@@ -176,9 +262,9 @@ HYPERVISOR_fpu_taskswitch(int set)
176} 262}
177 263
178static inline int 264static inline int
179HYPERVISOR_sched_op(int cmd, unsigned long arg) 265HYPERVISOR_sched_op(int cmd, void *arg)
180{ 266{
181 return _hypercall2(int, sched_op, cmd, arg); 267 return _hypercall2(int, sched_op_new, cmd, arg);
182} 268}
183 269
184static inline long 270static inline long
@@ -223,12 +309,12 @@ static inline int
223HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, 309HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
224 unsigned long flags) 310 unsigned long flags)
225{ 311{
226 unsigned long pte_hi = 0; 312 if (sizeof(new_val) == sizeof(long))
227#ifdef CONFIG_X86_PAE 313 return _hypercall3(int, update_va_mapping, va,
228 pte_hi = new_val.pte_high; 314 new_val.pte, flags);
229#endif 315 else
230 return _hypercall4(int, update_va_mapping, va, 316 return _hypercall4(int, update_va_mapping, va,
231 new_val.pte_low, pte_hi, flags); 317 new_val.pte, new_val.pte >> 32, flags);
232} 318}
233 319
234static inline int 320static inline int
@@ -281,12 +367,13 @@ static inline int
281HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val, 367HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
282 unsigned long flags, domid_t domid) 368 unsigned long flags, domid_t domid)
283{ 369{
284 unsigned long pte_hi = 0; 370 if (sizeof(new_val) == sizeof(long))
285#ifdef CONFIG_X86_PAE 371 return _hypercall4(int, update_va_mapping_otherdomain, va,
286 pte_hi = new_val.pte_high; 372 new_val.pte, flags, domid);
287#endif 373 else
288 return _hypercall5(int, update_va_mapping_otherdomain, va, 374 return _hypercall5(int, update_va_mapping_otherdomain, va,
289 new_val.pte_low, pte_hi, flags, domid); 375 new_val.pte, new_val.pte >> 32,
376 flags, domid);
290} 377}
291 378
292static inline int 379static inline int
@@ -301,6 +388,14 @@ HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
301 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); 388 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
302} 389}
303 390
391#ifdef CONFIG_X86_64
392static inline int
393HYPERVISOR_set_segment_base(int reg, unsigned long value)
394{
395 return _hypercall2(int, set_segment_base, reg, value);
396}
397#endif
398
304static inline int 399static inline int
305HYPERVISOR_suspend(unsigned long srec) 400HYPERVISOR_suspend(unsigned long srec)
306{ 401{
@@ -315,19 +410,26 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
315} 410}
316 411
317static inline void 412static inline void
413MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
414{
415 mcl->op = __HYPERVISOR_fpu_taskswitch;
416 mcl->args[0] = set;
417}
418
419static inline void
318MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, 420MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
319 pte_t new_val, unsigned long flags) 421 pte_t new_val, unsigned long flags)
320{ 422{
321 mcl->op = __HYPERVISOR_update_va_mapping; 423 mcl->op = __HYPERVISOR_update_va_mapping;
322 mcl->args[0] = va; 424 mcl->args[0] = va;
323#ifdef CONFIG_X86_PAE 425 if (sizeof(new_val) == sizeof(long)) {
324 mcl->args[1] = new_val.pte_low; 426 mcl->args[1] = new_val.pte;
325 mcl->args[2] = new_val.pte_high; 427 mcl->args[2] = flags;
326#else 428 } else {
327 mcl->args[1] = new_val.pte_low; 429 mcl->args[1] = new_val.pte;
328 mcl->args[2] = 0; 430 mcl->args[2] = new_val.pte >> 32;
329#endif 431 mcl->args[3] = flags;
330 mcl->args[3] = flags; 432 }
331} 433}
332 434
333static inline void 435static inline void
@@ -347,15 +449,16 @@ MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long v
347{ 449{
348 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; 450 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
349 mcl->args[0] = va; 451 mcl->args[0] = va;
350#ifdef CONFIG_X86_PAE 452 if (sizeof(new_val) == sizeof(long)) {
351 mcl->args[1] = new_val.pte_low; 453 mcl->args[1] = new_val.pte;
352 mcl->args[2] = new_val.pte_high; 454 mcl->args[2] = flags;
353#else 455 mcl->args[3] = domid;
354 mcl->args[1] = new_val.pte_low; 456 } else {
355 mcl->args[2] = 0; 457 mcl->args[1] = new_val.pte;
356#endif 458 mcl->args[2] = new_val.pte >> 32;
357 mcl->args[3] = flags; 459 mcl->args[3] = flags;
358 mcl->args[4] = domid; 460 mcl->args[4] = domid;
461 }
359} 462}
360 463
361static inline void 464static inline void
@@ -363,10 +466,15 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
363 struct desc_struct desc) 466 struct desc_struct desc)
364{ 467{
365 mcl->op = __HYPERVISOR_update_descriptor; 468 mcl->op = __HYPERVISOR_update_descriptor;
366 mcl->args[0] = maddr; 469 if (sizeof(maddr) == sizeof(long)) {
367 mcl->args[1] = maddr >> 32; 470 mcl->args[0] = maddr;
368 mcl->args[2] = desc.a; 471 mcl->args[1] = *(unsigned long *)&desc;
369 mcl->args[3] = desc.b; 472 } else {
473 mcl->args[0] = maddr;
474 mcl->args[1] = maddr >> 32;
475 mcl->args[2] = desc.a;
476 mcl->args[3] = desc.b;
477 }
370} 478}
371 479
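The MULTI_* helpers only fill in a struct multicall_entry; nothing reaches
the hypervisor until the array is handed to HYPERVISOR_multicall(), declared
elsewhere in this header. A minimal batch, as a sketch (the selector and
stack-pointer arguments are illustrative values, not taken from this file):

	struct multicall_entry mcl[2];

	MULTI_fpu_taskswitch(&mcl[0], 1);               /* set CR0.TS */
	MULTI_stack_switch(&mcl[1], __KERNEL_DS, sp0);  /* sp0: hypothetical */
	if (HYPERVISOR_multicall(mcl, 2) != 0)
		BUG();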
372static inline void 480static inline void
@@ -416,4 +524,4 @@ MULTI_stack_switch(struct multicall_entry *mcl,
416 mcl->args[1] = esp; 524 mcl->args[1] = esp;
417} 525}
418 526
419#endif /* __HYPERCALL_H__ */ 527#endif /* ASM_X86__XEN__HYPERCALL_H */
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h
index 8e15dd28c91f..445a24759560 100644
--- a/include/asm-x86/xen/hypervisor.h
+++ b/include/asm-x86/xen/hypervisor.h
@@ -30,12 +30,11 @@
30 * IN THE SOFTWARE. 30 * IN THE SOFTWARE.
31 */ 31 */
32 32
33#ifndef __HYPERVISOR_H__ 33#ifndef ASM_X86__XEN__HYPERVISOR_H
34#define __HYPERVISOR_H__ 34#define ASM_X86__XEN__HYPERVISOR_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/version.h>
39 38
40#include <xen/interface/xen.h> 39#include <xen/interface/xen.h>
41#include <xen/interface/version.h> 40#include <xen/interface/version.h>
@@ -55,7 +54,6 @@
55/* arch/i386/kernel/setup.c */ 54/* arch/i386/kernel/setup.c */
56extern struct shared_info *HYPERVISOR_shared_info; 55extern struct shared_info *HYPERVISOR_shared_info;
57extern struct start_info *xen_start_info; 56extern struct start_info *xen_start_info;
58#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
59 57
60/* arch/i386/mach-xen/evtchn.c */ 58/* arch/i386/mach-xen/evtchn.c */
61/* Force a proper event-channel callback from Xen. */ 59/* Force a proper event-channel callback from Xen. */
@@ -68,6 +66,17 @@ u64 jiffies_to_st(unsigned long jiffies);
68#define MULTI_UVMFLAGS_INDEX 3 66#define MULTI_UVMFLAGS_INDEX 3
69#define MULTI_UVMDOMID_INDEX 4 67#define MULTI_UVMDOMID_INDEX 4
70 68
71#define is_running_on_xen() (xen_start_info ? 1 : 0) 69enum xen_domain_type {
70 XEN_NATIVE,
71 XEN_PV_DOMAIN,
72 XEN_HVM_DOMAIN,
73};
72 74
73#endif /* __HYPERVISOR_H__ */ 75extern enum xen_domain_type xen_domain_type;
76
77#define xen_domain() (xen_domain_type != XEN_NATIVE)
78#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
79#define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
80#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
81
82#endif /* ASM_X86__XEN__HYPERVISOR_H */
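Callers now test the domain type through these predicates rather than the
removed is_running_on_xen()/is_initial_xendomain(). A hedged sketch of the
usual guard in initialization code:

	if (!xen_pv_domain())           /* bare metal or HVM: nothing to do */
		return;
	if (xen_initial_domain())
		printk(KERN_INFO "Xen: running as dom0\n");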
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h
index 6227000a1e84..d077bba96da9 100644
--- a/include/asm-x86/xen/interface.h
+++ b/include/asm-x86/xen/interface.h
@@ -1,13 +1,13 @@
1/****************************************************************************** 1/******************************************************************************
2 * arch-x86_32.h 2 * arch-x86_32.h
3 * 3 *
4 * Guest OS interface to x86 32-bit Xen. 4 * Guest OS interface to x86 Xen.
5 * 5 *
6 * Copyright (c) 2004, K A Fraser 6 * Copyright (c) 2004, K A Fraser
7 */ 7 */
8 8
9#ifndef __XEN_PUBLIC_ARCH_X86_32_H__ 9#ifndef ASM_X86__XEN__INTERFACE_H
10#define __XEN_PUBLIC_ARCH_X86_32_H__ 10#define ASM_X86__XEN__INTERFACE_H
11 11
12#ifdef __XEN__ 12#ifdef __XEN__
13#define __DEFINE_GUEST_HANDLE(name, type) \ 13#define __DEFINE_GUEST_HANDLE(name, type) \
@@ -57,6 +57,17 @@ DEFINE_GUEST_HANDLE(long);
57DEFINE_GUEST_HANDLE(void); 57DEFINE_GUEST_HANDLE(void);
58#endif 58#endif
59 59
60#ifndef HYPERVISOR_VIRT_START
61#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
62#endif
63
64#ifndef machine_to_phys_mapping
65#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
66#endif
67
68/* Maximum number of virtual CPUs in multi-processor guests. */
69#define MAX_VIRT_CPUS 32
70
60/* 71/*
61 * SEGMENT DESCRIPTOR TABLES 72 * SEGMENT DESCRIPTOR TABLES
62 */ 73 */
@@ -71,58 +82,21 @@ DEFINE_GUEST_HANDLE(void);
71#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) 82#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
72 83
73/* 84/*
74 * These flat segments are in the Xen-private section of every GDT. Since these
75 * are also present in the initial GDT, many OSes will be able to avoid
76 * installing their own GDT.
77 */
78#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
79#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
80#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
81#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
82#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
83#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
84
85#define FLAT_KERNEL_CS FLAT_RING1_CS
86#define FLAT_KERNEL_DS FLAT_RING1_DS
87#define FLAT_KERNEL_SS FLAT_RING1_SS
88#define FLAT_USER_CS FLAT_RING3_CS
89#define FLAT_USER_DS FLAT_RING3_DS
90#define FLAT_USER_SS FLAT_RING3_SS
91
92/* And the trap vector is... */
93#define TRAP_INSTR "int $0x82"
94
95/*
96 * Virtual addresses beyond this are not modifiable by guest OSes. The
97 * machine->physical mapping table starts at this address, read-only.
98 */
99#ifdef CONFIG_X86_PAE
100#define __HYPERVISOR_VIRT_START 0xF5800000
101#else
102#define __HYPERVISOR_VIRT_START 0xFC000000
103#endif
104
105#ifndef HYPERVISOR_VIRT_START
106#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
107#endif
108
109#ifndef machine_to_phys_mapping
110#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
111#endif
112
113/* Maximum number of virtual CPUs in multi-processor guests. */
114#define MAX_VIRT_CPUS 32
115
116#ifndef __ASSEMBLY__
117
118/*
119 * Send an array of these to HYPERVISOR_set_trap_table() 85 * Send an array of these to HYPERVISOR_set_trap_table()
86 * The privilege level specifies which modes may enter a trap via a software
87 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
88 * privilege levels as follows:
 89 * Level == 0: No one may enter
90 * Level == 1: Kernel may enter
91 * Level == 2: Kernel may enter
92 * Level == 3: Everyone may enter
120 */ 93 */
121#define TI_GET_DPL(_ti) ((_ti)->flags & 3) 94#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
122#define TI_GET_IF(_ti) ((_ti)->flags & 4) 95#define TI_GET_IF(_ti) ((_ti)->flags & 4)
123#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl)) 96#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
124#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2)) 97#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
125 98
99#ifndef __ASSEMBLY__
126struct trap_info { 100struct trap_info {
127 uint8_t vector; /* exception vector */ 101 uint8_t vector; /* exception vector */
128 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ 102 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
@@ -131,32 +105,21 @@ struct trap_info {
131}; 105};
132DEFINE_GUEST_HANDLE_STRUCT(trap_info); 106DEFINE_GUEST_HANDLE_STRUCT(trap_info);
133 107
134struct cpu_user_regs { 108struct arch_shared_info {
135 uint32_t ebx; 109 unsigned long max_pfn; /* max pfn that appears in table */
136 uint32_t ecx; 110 /* Frame containing list of mfns containing list of mfns containing p2m. */
137 uint32_t edx; 111 unsigned long pfn_to_mfn_frame_list_list;
138 uint32_t esi; 112 unsigned long nmi_reason;
139 uint32_t edi;
140 uint32_t ebp;
141 uint32_t eax;
142 uint16_t error_code; /* private */
143 uint16_t entry_vector; /* private */
144 uint32_t eip;
145 uint16_t cs;
146 uint8_t saved_upcall_mask;
147 uint8_t _pad0;
148 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
149 uint32_t esp;
150 uint16_t ss, _pad1;
151 uint16_t es, _pad2;
152 uint16_t ds, _pad3;
153 uint16_t fs, _pad4;
154 uint16_t gs, _pad5;
155}; 113};
156DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs); 114#endif /* !__ASSEMBLY__ */
157 115
158typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ 116#ifdef CONFIG_X86_32
117#include "interface_32.h"
118#else
119#include "interface_64.h"
120#endif
159 121
122#ifndef __ASSEMBLY__
160/* 123/*
161 * The following is all CPU context. Note that the fpu_ctxt block is filled 124 * The following is all CPU context. Note that the fpu_ctxt block is filled
162 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 125 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
@@ -173,33 +136,29 @@ struct vcpu_guest_context {
173 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ 136 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
174 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ 137 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
175 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ 138 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
139 /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
176 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ 140 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
177 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ 141 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
142#ifdef __i386__
178 unsigned long event_callback_cs; /* CS:EIP of event callback */ 143 unsigned long event_callback_cs; /* CS:EIP of event callback */
179 unsigned long event_callback_eip; 144 unsigned long event_callback_eip;
180 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ 145 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
181 unsigned long failsafe_callback_eip; 146 unsigned long failsafe_callback_eip;
147#else
148 unsigned long event_callback_eip;
149 unsigned long failsafe_callback_eip;
150 unsigned long syscall_callback_eip;
151#endif
182 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ 152 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
153#ifdef __x86_64__
154 /* Segment base addresses. */
155 uint64_t fs_base;
156 uint64_t gs_base_kernel;
157 uint64_t gs_base_user;
158#endif
183}; 159};
184DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); 160DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
185 161#endif /* !__ASSEMBLY__ */
186struct arch_shared_info {
187 unsigned long max_pfn; /* max pfn that appears in table */
188 /* Frame containing list of mfns containing list of mfns containing p2m. */
189 unsigned long pfn_to_mfn_frame_list_list;
190 unsigned long nmi_reason;
191};
192
193struct arch_vcpu_info {
194 unsigned long cr2;
195 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
196};
197
198struct xen_callback {
199 unsigned long cs;
200 unsigned long eip;
201};
202#endif /* !__ASSEMBLY__ */
203 162
204/* 163/*
205 * Prefix forces emulation of some non-trapping instructions. 164 * Prefix forces emulation of some non-trapping instructions.
@@ -213,4 +172,4 @@ struct xen_callback {
213#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" 172#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
214#endif 173#endif
215 174
216#endif 175#endif /* ASM_X86__XEN__INTERFACE_H */
diff --git a/include/asm-x86/xen/interface_32.h b/include/asm-x86/xen/interface_32.h
new file mode 100644
index 000000000000..08167e19fc66
--- /dev/null
+++ b/include/asm-x86/xen/interface_32.h
@@ -0,0 +1,97 @@
1/******************************************************************************
2 * arch-x86_32.h
3 *
4 * Guest OS interface to x86 32-bit Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9#ifndef ASM_X86__XEN__INTERFACE_32_H
10#define ASM_X86__XEN__INTERFACE_32_H
11
12
13/*
14 * These flat segments are in the Xen-private section of every GDT. Since these
15 * are also present in the initial GDT, many OSes will be able to avoid
16 * installing their own GDT.
17 */
18#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
19#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
20#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
21#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
22#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
23#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
24
25#define FLAT_KERNEL_CS FLAT_RING1_CS
26#define FLAT_KERNEL_DS FLAT_RING1_DS
27#define FLAT_KERNEL_SS FLAT_RING1_SS
28#define FLAT_USER_CS FLAT_RING3_CS
29#define FLAT_USER_DS FLAT_RING3_DS
30#define FLAT_USER_SS FLAT_RING3_SS
31
32/* And the trap vector is... */
33#define TRAP_INSTR "int $0x82"
34
35/*
36 * Virtual addresses beyond this are not modifiable by guest OSes. The
37 * machine->physical mapping table starts at this address, read-only.
38 */
39#define __HYPERVISOR_VIRT_START 0xF5800000
40
41#ifndef __ASSEMBLY__
42
43struct cpu_user_regs {
44 uint32_t ebx;
45 uint32_t ecx;
46 uint32_t edx;
47 uint32_t esi;
48 uint32_t edi;
49 uint32_t ebp;
50 uint32_t eax;
51 uint16_t error_code; /* private */
52 uint16_t entry_vector; /* private */
53 uint32_t eip;
54 uint16_t cs;
55 uint8_t saved_upcall_mask;
56 uint8_t _pad0;
57 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
58 uint32_t esp;
59 uint16_t ss, _pad1;
60 uint16_t es, _pad2;
61 uint16_t ds, _pad3;
62 uint16_t fs, _pad4;
63 uint16_t gs, _pad5;
64};
65DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
66
67typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
68
69struct arch_vcpu_info {
70 unsigned long cr2;
71 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
72};
73
74struct xen_callback {
75 unsigned long cs;
76 unsigned long eip;
77};
78typedef struct xen_callback xen_callback_t;
79
80#define XEN_CALLBACK(__cs, __eip) \
81 ((struct xen_callback){ .cs = (__cs), .eip = (unsigned long)(__eip) })
82#endif /* !__ASSEMBLY__ */
83
84
85/*
86 * Page-directory addresses above 4GB do not fit into architectural %cr3.
87 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
88 * must use the following accessor macros to pack/unpack valid MFNs.
89 *
90 * Note that Xen is using the fact that the pagetable base is always
91 * page-aligned, and putting the 12 MSB of the address into the 12 LSB
92 * of cr3.
93 */
94#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
95#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
96
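A worked example of the packing, with an assumed frame number whose address
sits above the 4GB boundary (all arithmetic in 32-bit unsigned, so the
shifts truncate):

	/* pfn = 0x123456 (hypothetical):
	 * xen_pfn_to_cr3(0x123456)   = (0x123456 << 12) | (0x123456 >> 20)
	 *                            = 0x23456000 | 0x1 = 0x23456001
	 * xen_cr3_to_pfn(0x23456001) = (0x23456001 >> 12) | (0x23456001 << 20)
	 *                            = 0x00023456 | 0x00100000 = 0x00123456
	 */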
97#endif /* ASM_X86__XEN__INTERFACE_32_H */
diff --git a/include/asm-x86/xen/interface_64.h b/include/asm-x86/xen/interface_64.h
new file mode 100644
index 000000000000..046c0f1e01d4
--- /dev/null
+++ b/include/asm-x86/xen/interface_64.h
@@ -0,0 +1,159 @@
1#ifndef ASM_X86__XEN__INTERFACE_64_H
2#define ASM_X86__XEN__INTERFACE_64_H
3
4/*
5 * 64-bit segment selectors
6 * These flat segments are in the Xen-private section of every GDT. Since these
7 * are also present in the initial GDT, many OSes will be able to avoid
8 * installing their own GDT.
9 */
10
11#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
12#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
13#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
14#define FLAT_RING3_DS64 0x0000 /* NULL selector */
15#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
16#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
17
18#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
19#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
20#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
21#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
22#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
23#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
24#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
25#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
26#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
27
28#define FLAT_USER_DS64 FLAT_RING3_DS64
29#define FLAT_USER_DS32 FLAT_RING3_DS32
30#define FLAT_USER_DS FLAT_USER_DS64
31#define FLAT_USER_CS64 FLAT_RING3_CS64
32#define FLAT_USER_CS32 FLAT_RING3_CS32
33#define FLAT_USER_CS FLAT_USER_CS64
34#define FLAT_USER_SS64 FLAT_RING3_SS64
35#define FLAT_USER_SS32 FLAT_RING3_SS32
36#define FLAT_USER_SS FLAT_USER_SS64
37
38#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
39#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
40#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
41#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
42
43#ifndef HYPERVISOR_VIRT_START
44#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
45#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
46#endif
47
48#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
49#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
50#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
51#ifndef machine_to_phys_mapping
52#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
53#endif
54
55/*
56 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
57 * @which == SEGBASE_* ; @base == 64-bit base address
58 * Returns 0 on success.
59 */
60#define SEGBASE_FS 0
61#define SEGBASE_GS_USER 1
62#define SEGBASE_GS_KERNEL 2
63#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
64
65/*
66 * int HYPERVISOR_iret(void)
67 * All arguments are on the kernel stack, in the following format.
68 * Never returns if successful. Current kernel context is lost.
69 * The saved CS is mapped as follows:
70 * RING0 -> RING3 kernel mode.
71 * RING1 -> RING3 kernel mode.
72 * RING2 -> RING3 kernel mode.
73 * RING3 -> RING3 user mode.
 74 * However RING0 indicates that the guest kernel should return to itself
75 * directly with
76 * orb $3,1*8(%rsp)
77 * iretq
78 * If flags contains VGCF_in_syscall:
79 * Restore RAX, RIP, RFLAGS, RSP.
80 * Discard R11, RCX, CS, SS.
81 * Otherwise:
82 * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
83 * All other registers are saved on hypercall entry and restored to user.
84 */
85/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
86#define _VGCF_in_syscall 8
87#define VGCF_in_syscall (1<<_VGCF_in_syscall)
88#define VGCF_IN_SYSCALL VGCF_in_syscall
89
90#ifndef __ASSEMBLY__
91
92struct iret_context {
93 /* Top of stack (%rsp at point of hypercall). */
94 uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
95 /* Bottom of iret stack frame. */
96};
97
98#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
99/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
100#define __DECL_REG(name) union { \
101 uint64_t r ## name, e ## name; \
102 uint32_t _e ## name; \
103}
104#else
105/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
106#define __DECL_REG(name) uint64_t r ## name
107#endif
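Note that in the gcc form both spellings alias the full 64-bit slot: rax and
eax name the same uint64_t member, and only _eax is the 32-bit view. A
sketch against the struct defined just below (regs is a hypothetical
pointer):

	struct cpu_user_regs *regs;     /* hypothetical */

	regs->eax = 0;          /* writes the same 64-bit field ... */
	regs->rax = 0;          /* ... as the r-prefixed name       */
	regs->_eax = 0;         /* 32-bit view of the low half      */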
108
109struct cpu_user_regs {
110 uint64_t r15;
111 uint64_t r14;
112 uint64_t r13;
113 uint64_t r12;
114 __DECL_REG(bp);
115 __DECL_REG(bx);
116 uint64_t r11;
117 uint64_t r10;
118 uint64_t r9;
119 uint64_t r8;
120 __DECL_REG(ax);
121 __DECL_REG(cx);
122 __DECL_REG(dx);
123 __DECL_REG(si);
124 __DECL_REG(di);
125 uint32_t error_code; /* private */
126 uint32_t entry_vector; /* private */
127 __DECL_REG(ip);
128 uint16_t cs, _pad0[1];
129 uint8_t saved_upcall_mask;
130 uint8_t _pad1[3];
131 __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
132 __DECL_REG(sp);
133 uint16_t ss, _pad2[3];
134 uint16_t es, _pad3[3];
135 uint16_t ds, _pad4[3];
136 uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
137 uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
138};
139DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
140
141#undef __DECL_REG
142
143#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
144#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
145
146struct arch_vcpu_info {
147 unsigned long cr2;
148 unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
149};
150
151typedef unsigned long xen_callback_t;
152
153#define XEN_CALLBACK(__cs, __rip) \
154 ((unsigned long)(__rip))
155
156#endif /* !__ASSEMBLY__ */
157
158
159#endif /* ASM_X86__XEN__INTERFACE_64_H */
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index baf3a4dce28c..c50185dccec1 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -1,5 +1,5 @@
@@ -1,5 +1,5 @@
-#ifndef __XEN_PAGE_H
-#define __XEN_PAGE_H
+#ifndef ASM_X86__XEN__PAGE_H
+#define ASM_X86__XEN__PAGE_H
 
 #include <linux/pfn.h>
 
@@ -26,15 +26,20 @@ typedef struct xpaddr {
 #define FOREIGN_FRAME_BIT	(1UL<<31)
 #define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
 
-extern unsigned long *phys_to_machine_mapping;
+/* Maximum amount of memory we can handle in a domain in pages */
+#define MAX_DOMAIN_PAGES						\
+    ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
+
+
+extern unsigned long get_phys_to_machine(unsigned long pfn);
+extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return pfn;
 
-	return phys_to_machine_mapping[(unsigned int)(pfn)] &
-		~FOREIGN_FRAME_BIT;
+	return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
 }
 
 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -42,7 +47,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return 1;
 
-	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
+	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
@@ -106,20 +111,12 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 	unsigned long pfn = mfn_to_pfn(mfn);
 	if ((pfn < max_mapnr)
 	    && !xen_feature(XENFEAT_auto_translated_physmap)
-	    && (phys_to_machine_mapping[pfn] != mfn))
+	    && (get_phys_to_machine(pfn) != mfn))
 		return max_mapnr; /* force !pfn_valid() */
+	/* XXX fixme; not true with sparsemem */
 	return pfn;
 }
 
-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-		return;
-	}
-	phys_to_machine_mapping[pfn] = mfn;
-}
-
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
 #define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
@@ -127,7 +124,7 @@ static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
 static inline unsigned long pte_mfn(pte_t pte)
 {
-	return (pte.pte & PTE_MASK) >> PAGE_SHIFT;
+	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
 static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
@@ -150,19 +147,19 @@ static inline pte_t __pte_ma(pteval_t x)
 	return (pte_t) { .pte = x };
 }
 
-#ifdef CONFIG_X86_PAE
 #define pmd_val_ma(v) ((v).pmd)
+#ifdef __PAGETABLE_PUD_FOLDED
 #define pud_val_ma(v) ((v).pgd.pgd)
+#else
+#define pud_val_ma(v) ((v).pud)
+#endif
 #define __pmd_ma(x)	((pmd_t) { (x) } )
-#else  /* !X86_PAE */
-#define pmd_val_ma(v)	((v).pud.pgd.pgd)
-#endif	/* CONFIG_X86_PAE */
 
 #define pgd_val_ma(x)	((x).pgd)
 
 
-xmaddr_t arbitrary_virt_to_machine(unsigned long address);
+xmaddr_t arbitrary_virt_to_machine(void *address);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
-#endif /* __XEN_PAGE_H */
+#endif /* ASM_X86__XEN__PAGE_H */
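
Note: the substantive change above replaces the exported phys_to_machine_mapping array with get/set accessors, so callers such as pfn_to_mfn() no longer bake in a flat-table layout. For scale, MAX_DOMAIN_PAGES with CONFIG_XEN_MAX_DOMAIN_MEMORY=8 (GB) and 4 KiB pages works out to 8 * 2^30 / 2^12 = 2^21 = 2,097,152 entries. A minimal user-space sketch of the accessor pattern over an assumed flat table (p2m_table and p2m_entries are illustrative names, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL << 31)

/* Illustrative flat table; the real backing store can later become
 * multi-level without touching callers.  Real code would initialize
 * every entry to INVALID_P2M_ENTRY first. */
static unsigned long p2m_table[1 << 20];
static unsigned long p2m_entries = 1 << 20;

unsigned long get_phys_to_machine(unsigned long pfn)
{
	if (pfn >= p2m_entries)
		return INVALID_P2M_ENTRY;	/* out of range: unmapped */
	return p2m_table[pfn];
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	assert(pfn < p2m_entries);
	p2m_table[pfn] = mfn;
}

int main(void)
{
	set_phys_to_machine(42, 0x1234 | FOREIGN_FRAME_BIT);
	/* Callers mask off FOREIGN_FRAME_BIT, as pfn_to_mfn() does above. */
	printf("mfn = %lx\n", get_phys_to_machine(42) & ~FOREIGN_FRAME_BIT);
	return 0;
}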
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1835a3..921b45840449 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do { \
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) \
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
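
Note: the visible tail of this header, XOR_SELECT_TEMPLATE, is the runtime-dispatch idiom: probe a CPU feature once, then hand back a pointer to the best checksumming template. A self-contained sketch of that pattern (fields and names trimmed down; the real struct xor_block_template carries do_2 through do_5 plus benchmark bookkeeping, and cpu_has_xmm comes from a CPUID probe):

#include <stdio.h>

struct xor_block_template {
	const char *name;
	void (*do_2)(unsigned long bytes, unsigned long *p1, unsigned long *p2);
};

static void xor_2_generic(unsigned long bytes, unsigned long *p1,
			  unsigned long *p2)
{
	unsigned long i;
	for (i = 0; i < bytes / sizeof(unsigned long); i++)
		p1[i] ^= p2[i];	/* plain C fallback */
}

static struct xor_block_template xor_block_generic = {
	.name = "generic", .do_2 = xor_2_generic,
};

/* Stand-in for the SSE-backed template; reuses the fallback body here. */
static struct xor_block_template xor_block_pIII_sse = {
	.name = "pIII_sse", .do_2 = xor_2_generic,
};

static int cpu_has_xmm = 1;	/* assumed result of a feature probe */

/* Mirrors the header: prefer SSE when present, else the benchmarked best. */
#define XOR_SELECT_TEMPLATE(FASTEST) \
	(cpu_has_xmm ? &xor_block_pIII_sse : (FASTEST))

int main(void)
{
	struct xor_block_template *t = XOR_SELECT_TEMPLATE(&xor_block_generic);
	unsigned long a[4] = { 1, 2, 3, 4 }, b[4] = { 4, 3, 2, 1 };

	t->do_2(sizeof(a), a, b);
	printf("selected %s; a[0]=%lu\n", t->name, a[0]);	/* a[0]=5 */
	return 0;
}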
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e39ac8a..2d3a18de295b 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do { \
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h
new file mode 100644
index 000000000000..08e9a1ac07a9
--- /dev/null
+++ b/include/asm-x86/xsave.h
@@ -0,0 +1,118 @@
+#ifndef __ASM_X86_XSAVE_H
+#define __ASM_X86_XSAVE_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+
+#define XSTATE_FP	0x1
+#define XSTATE_SSE	0x2
+
+#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
+
+#define FXSAVE_SIZE	512
+
+/*
+ * These are the features that the OS can handle currently.
+ */
+#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
+
+#ifdef CONFIG_X86_64
+#define REX_PREFIX	"0x48, "
+#else
+#define REX_PREFIX
+#endif
+
+extern unsigned int xstate_size;
+extern u64 pcntxt_mask;
+extern struct xsave_struct *init_xstate_buf;
+
+extern void xsave_cntxt_init(void);
+extern void xsave_init(void);
+extern int init_fpu(struct task_struct *child);
+extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
+			    void __user *fpstate,
+			    struct _fpx_sw_bytes *sw);
+
+static inline int xrstor_checking(struct xsave_struct *fx)
+{
+	int err;
+
+	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3: movl $-1,%[err]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
+		     : "memory");
+
+	return err;
+}
+
+static inline int xsave_user(struct xsave_struct __user *buf)
+{
+	int err;
+	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "2:\n"
+			     ".section .fixup,\"ax\"\n"
+			     "3: movl $-1,%[err]\n"
+			     "   jmp 2b\n"
+			     ".previous\n"
+			     ".section __ex_table,\"a\"\n"
+			     _ASM_ALIGN "\n"
+			     _ASM_PTR "1b,3b\n"
+			     ".previous"
+			     : [err] "=r" (err)
+			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
+			     : "memory");
+	if (unlikely(err) && __clear_user(buf, xstate_size))
+		err = -EFAULT;
+	/* No need to clear here because the caller clears USED_MATH */
+	return err;
+}
+
+static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+{
+	int err;
+	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "2:\n"
+			     ".section .fixup,\"ax\"\n"
+			     "3: movl $-1,%[err]\n"
+			     "   jmp 2b\n"
+			     ".previous\n"
+			     ".section __ex_table,\"a\"\n"
+			     _ASM_ALIGN "\n"
+			     _ASM_PTR "1b,3b\n"
+			     ".previous"
+			     : [err] "=r" (err)
+			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
+			     : "memory");	/* memory required? */
+	return err;
+}
+
+static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+		     : "memory");
+}
+
+static inline void xsave(struct task_struct *tsk)
+{
+	/* This, however, we can work around by forcing the compiler to select
+	   an addressing mode that doesn't require extended registers. */
+	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
+			     : : "D" (&(tsk->thread.xstate->xsave)),
+				 "a" (-1), "d" (-1) : "memory");
+}
+#endif
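
Note: the .byte sequences in this new header hand-assemble xsave/xrstor for toolchains whose assembler predates the mnemonics. 0x0f,0xae selects the instruction group, the optional REX prefix (0x48 on 64-bit) widens it, and the ModRM byte encodes both the operation (/4 = xsave, /5 = xrstor) and the memory operand ((%rdi), matching the "D" constraint); the 64-bit feature mask is passed split across EDX:EAX, which is why the helpers compute lmask/hmask. A small user-space sketch (my own decoding helper, not kernel code) that checks this reading:

#include <stdint.h>
#include <stdio.h>

/* Decode the hand-assembled ModRM bytes used in the header above: for
 * the 0x0f,0xae opcode group, ModRM.reg is an opcode extension and
 * ModRM.rm names the memory operand. */
static void decode(uint8_t modrm)
{
	unsigned mod = modrm >> 6;		/* 0 = memory, no displacement */
	unsigned reg = (modrm >> 3) & 7;	/* opcode extension /digit */
	unsigned rm  = modrm & 7;		/* 7 = (%rdi): the "D" operand */

	printf("modrm=%#04x mod=%u /%u rm=%u -> %s\n", modrm, mod, reg, rm,
	       reg == 4 ? "xsave (%rdi)" :
	       reg == 5 ? "xrstor (%rdi)" : "other");
}

int main(void)
{
	uint64_t mask = 0x3;			/* XSTATE_FP | XSTATE_SSE */
	uint32_t lmask = (uint32_t)mask;	/* goes in EAX ("a") */
	uint32_t hmask = (uint32_t)(mask >> 32);/* goes in EDX ("d") */

	decode(0x27);	/* xsave_user / xsave */
	decode(0x2f);	/* xrstor_checking / xrestore_user / xrstor_state */
	printf("mask=%#llx -> eax=%#x edx=%#x\n",
	       (unsigned long long)mask, lmask, hmask);
	return 0;
}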