author    Linus Torvalds <torvalds@linux-foundation.org> 2008-10-23 13:22:01 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2008-10-23 13:22:01 -0400
commit    5b34653963de7a6d0d8c783527457d68fddc60fb (patch)
tree      1a234741e1823a54cd0514616f783b4cf503a528 /include
parent    765426e8ee4c0ab2bc9d44951f4865b8494cdbd0 (diff)
parent    5e1b00758b5a8bee9d42515bffdaf305a32f1b04 (diff)
Merge branch 'x86/um-header' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86/um-header' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  x86: canonicalize remaining header guards
  x86: drop double underscores from header guards
  x86: Fix ASM_X86__ header guards
  x86, um: get rid of uml-config.h
  x86, um: get rid of arch/um/Kconfig.arch
  x86, um: get rid of arch/um/os symlink
  x86, um: get rid of excessive includes of uml-config.h
  x86, um: get rid of header symlinks
  x86, um: merge Kconfig.i386 and Kconfig.x86_64
  x86, um: get rid of sysdep symlink
  x86, um: trim the junk from uml ptrace-*.h
  x86, um: take vm-flags.h to sysdep
  x86, um: get rid of uml asm/arch
  x86, um: get rid of uml highmem.h
  x86, um: get rid of uml unistd.h
  x86, um: get rid of system.h -> system.h include
  x86, um: uml atomic.h is not needed anymore
  x86, um: untangle uml ldt.h
  x86, um: get rid of more uml asm/arch uses
  x86, um: remove dead header (uml module-generic.h; never used these days)
  ...
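(For context, the guard-canonicalization commits in that list are mechanical renames of include-guard macros. The hunk below is an illustrative sketch only — the file and exact macro names are assumed for the example and are not taken from this merge:)

--- a/include/asm-x86/boot.h
+++ b/include/asm-x86/boot.h
@@ -1,2 +1,2 @@
-#ifndef ASM_X86__BOOT_H
-#define ASM_X86__BOOT_H
+#ifndef _ASM_X86_BOOT_H
+#define _ASM_X86_BOOT_H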
Diffstat (limited to 'include')
-rw-r--r-- include/asm-um/a.out-core.h | 27
-rw-r--r-- include/asm-um/a.out.h | 11
-rw-r--r-- include/asm-um/alternative-asm.h | 6
-rw-r--r-- include/asm-um/alternative.h | 6
-rw-r--r-- include/asm-um/apic.h | 4
-rw-r--r-- include/asm-um/archparam-i386.h | 26
-rw-r--r-- include/asm-um/archparam-ppc.h | 8
-rw-r--r-- include/asm-um/archparam-x86_64.h | 26
-rw-r--r-- include/asm-um/asm.h | 6
-rw-r--r-- include/asm-um/atomic.h | 11
-rw-r--r-- include/asm-um/auxvec.h | 4
-rw-r--r-- include/asm-um/bitops.h | 10
-rw-r--r-- include/asm-um/boot.h | 6
-rw-r--r-- include/asm-um/bug.h | 6
-rw-r--r-- include/asm-um/bugs.h | 6
-rw-r--r-- include/asm-um/byteorder.h | 6
-rw-r--r-- include/asm-um/cache.h | 17
-rw-r--r-- include/asm-um/cacheflush.h | 6
-rw-r--r-- include/asm-um/calling.h | 9
-rw-r--r-- include/asm-um/checksum.h | 6
-rw-r--r-- include/asm-um/cmpxchg.h | 6
-rw-r--r-- include/asm-um/cobalt.h | 6
-rw-r--r-- include/asm-um/common.lds.S | 130
-rw-r--r-- include/asm-um/cpufeature.h | 6
-rw-r--r-- include/asm-um/cputime.h | 6
-rw-r--r-- include/asm-um/current.h | 13
-rw-r--r-- include/asm-um/delay.h | 20
-rw-r--r-- include/asm-um/desc.h | 16
-rw-r--r-- include/asm-um/device.h | 7
-rw-r--r-- include/asm-um/div64.h | 6
-rw-r--r-- include/asm-um/dma-mapping.h | 128
-rw-r--r-- include/asm-um/dma.h | 10
-rw-r--r-- include/asm-um/dwarf2.h | 11
-rw-r--r-- include/asm-um/elf-i386.h | 163
-rw-r--r-- include/asm-um/elf-ppc.h | 53
-rw-r--r-- include/asm-um/elf-x86_64.h | 119
-rw-r--r-- include/asm-um/emergency-restart.h | 6
-rw-r--r-- include/asm-um/errno.h | 6
-rw-r--r-- include/asm-um/fcntl.h | 6
-rw-r--r-- include/asm-um/fixmap.h | 98
-rw-r--r-- include/asm-um/floppy.h | 6
-rw-r--r-- include/asm-um/frame.h | 6
-rw-r--r-- include/asm-um/futex.h | 6
-rw-r--r-- include/asm-um/hardirq.h | 25
-rw-r--r-- include/asm-um/highmem.h | 12
-rw-r--r-- include/asm-um/host_ldt-i386.h | 34
-rw-r--r-- include/asm-um/host_ldt-x86_64.h | 38
-rw-r--r-- include/asm-um/hw_irq.h | 7
-rw-r--r-- include/asm-um/ide.h | 6
-rw-r--r-- include/asm-um/io.h | 57
-rw-r--r-- include/asm-um/ioctl.h | 6
-rw-r--r-- include/asm-um/ioctls.h | 6
-rw-r--r-- include/asm-um/ipcbuf.h | 6
-rw-r--r-- include/asm-um/irq.h | 23
-rw-r--r-- include/asm-um/irq_regs.h | 1
-rw-r--r-- include/asm-um/irq_vectors.h | 20
-rw-r--r-- include/asm-um/irqflags.h | 6
-rw-r--r-- include/asm-um/kdebug.h | 1
-rw-r--r-- include/asm-um/kmap_types.h | 29
-rw-r--r-- include/asm-um/ldt.h | 37
-rw-r--r-- include/asm-um/linkage.h | 6
-rw-r--r-- include/asm-um/local.h | 6
-rw-r--r-- include/asm-um/locks.h | 6
-rw-r--r-- include/asm-um/mca_dma.h | 6
-rw-r--r-- include/asm-um/mman.h | 6
-rw-r--r-- include/asm-um/mmu.h | 22
-rw-r--r-- include/asm-um/mmu_context.h | 54
-rw-r--r-- include/asm-um/module-generic.h | 6
-rw-r--r-- include/asm-um/module-i386.h | 13
-rw-r--r-- include/asm-um/module-x86_64.h | 30
-rw-r--r-- include/asm-um/msgbuf.h | 6
-rw-r--r-- include/asm-um/mtrr.h | 6
-rw-r--r-- include/asm-um/mutex.h | 9
-rw-r--r-- include/asm-um/nops.h | 6
-rw-r--r-- include/asm-um/page.h | 122
-rw-r--r-- include/asm-um/page_offset.h | 1
-rw-r--r-- include/asm-um/param.h | 20
-rw-r--r-- include/asm-um/paravirt.h | 6
-rw-r--r-- include/asm-um/pci.h | 7
-rw-r--r-- include/asm-um/pda.h | 31
-rw-r--r-- include/asm-um/percpu.h | 6
-rw-r--r-- include/asm-um/pgalloc.h | 72
-rw-r--r-- include/asm-um/pgtable-2level.h | 53
-rw-r--r-- include/asm-um/pgtable-3level.h | 146
-rw-r--r-- include/asm-um/pgtable.h | 358
-rw-r--r-- include/asm-um/poll.h | 6
-rw-r--r-- include/asm-um/posix_types.h | 6
-rw-r--r-- include/asm-um/prctl.h | 6
-rw-r--r-- include/asm-um/processor-generic.h | 136
-rw-r--r-- include/asm-um/processor-i386.h | 78
-rw-r--r-- include/asm-um/processor-ppc.h | 15
-rw-r--r-- include/asm-um/processor-x86_64.h | 56
-rw-r--r-- include/asm-um/ptrace-generic.h | 55
-rw-r--r-- include/asm-um/ptrace-i386.h | 60
-rw-r--r-- include/asm-um/ptrace-x86_64.h | 81
-rw-r--r-- include/asm-um/required-features.h | 9
-rw-r--r-- include/asm-um/resource.h | 6
-rw-r--r-- include/asm-um/rwlock.h | 6
-rw-r--r-- include/asm-um/rwsem.h | 6
-rw-r--r-- include/asm-um/scatterlist.h | 6
-rw-r--r-- include/asm-um/sections.h | 7
-rw-r--r-- include/asm-um/segment.h | 10
-rw-r--r-- include/asm-um/sembuf.h | 6
-rw-r--r-- include/asm-um/serial.h | 6
-rw-r--r-- include/asm-um/setup.h | 10
-rw-r--r-- include/asm-um/shmbuf.h | 6
-rw-r--r-- include/asm-um/shmparam.h | 6
-rw-r--r-- include/asm-um/sigcontext-generic.h | 6
-rw-r--r-- include/asm-um/sigcontext-i386.h | 6
-rw-r--r-- include/asm-um/sigcontext-ppc.h | 10
-rw-r--r-- include/asm-um/sigcontext-x86_64.h | 22
-rw-r--r-- include/asm-um/siginfo.h | 6
-rw-r--r-- include/asm-um/signal.h | 29
-rw-r--r-- include/asm-um/smp.h | 33
-rw-r--r-- include/asm-um/socket.h | 6
-rw-r--r-- include/asm-um/sockios.h | 6
-rw-r--r-- include/asm-um/spinlock.h | 6
-rw-r--r-- include/asm-um/spinlock_types.h | 6
-rw-r--r-- include/asm-um/stat.h | 6
-rw-r--r-- include/asm-um/statfs.h | 6
-rw-r--r-- include/asm-um/string.h | 7
-rw-r--r-- include/asm-um/suspend.h | 4
-rw-r--r-- include/asm-um/system-generic.h | 47
-rw-r--r-- include/asm-um/system-i386.h | 6
-rw-r--r-- include/asm-um/system-ppc.h | 12
-rw-r--r-- include/asm-um/system-x86_64.h | 23
-rw-r--r-- include/asm-um/termbits.h | 6
-rw-r--r-- include/asm-um/termios.h | 6
-rw-r--r-- include/asm-um/thread_info.h | 83
-rw-r--r-- include/asm-um/timex.h | 13
-rw-r--r-- include/asm-um/tlb.h | 127
-rw-r--r-- include/asm-um/tlbflush.h | 31
-rw-r--r-- include/asm-um/topology.h | 6
-rw-r--r-- include/asm-um/types.h | 6
-rw-r--r-- include/asm-um/uaccess.h | 99
-rw-r--r-- include/asm-um/ucontext.h | 6
-rw-r--r-- include/asm-um/unaligned.h | 6
-rw-r--r-- include/asm-um/unistd.h | 41
-rw-r--r-- include/asm-um/user.h | 6
-rw-r--r-- include/asm-um/vga.h | 6
-rw-r--r-- include/asm-um/vm-flags-i386.h | 14
-rw-r--r-- include/asm-um/vm-flags-x86_64.h | 33
-rw-r--r-- include/asm-um/vm86.h | 6
-rw-r--r-- include/asm-um/xor.h | 6
-rw-r--r-- include/asm-x86/Kbuild | 24
-rw-r--r-- include/asm-x86/a.out-core.h | 73
-rw-r--r-- include/asm-x86/a.out.h | 20
-rw-r--r-- include/asm-x86/acpi.h | 178
-rw-r--r-- include/asm-x86/agp.h | 35
-rw-r--r-- include/asm-x86/alternative-asm.h | 22
-rw-r--r-- include/asm-x86/alternative.h | 183
-rw-r--r-- include/asm-x86/amd_iommu.h | 35
-rw-r--r-- include/asm-x86/amd_iommu_types.h | 404
-rw-r--r-- include/asm-x86/apic.h | 199
-rw-r--r-- include/asm-x86/apicdef.h | 417
-rw-r--r-- include/asm-x86/arch_hooks.h | 26
-rw-r--r-- include/asm-x86/asm.h | 47
-rw-r--r-- include/asm-x86/atomic.h | 5
-rw-r--r-- include/asm-x86/atomic_32.h | 259
-rw-r--r-- include/asm-x86/atomic_64.h | 473
-rw-r--r-- include/asm-x86/auxvec.h | 12
-rw-r--r-- include/asm-x86/bigsmp/apic.h | 139
-rw-r--r-- include/asm-x86/bigsmp/apicdef.h | 13
-rw-r--r-- include/asm-x86/bigsmp/ipi.h | 25
-rw-r--r-- include/asm-x86/bios_ebda.h | 36
-rw-r--r-- include/asm-x86/bitops.h | 451
-rw-r--r-- include/asm-x86/boot.h | 26
-rw-r--r-- include/asm-x86/bootparam.h | 111
-rw-r--r-- include/asm-x86/bug.h | 39
-rw-r--r-- include/asm-x86/bugs.h | 12
-rw-r--r-- include/asm-x86/byteorder.h | 81
-rw-r--r-- include/asm-x86/cache.h | 20
-rw-r--r-- include/asm-x86/cacheflush.h | 118
-rw-r--r-- include/asm-x86/calgary.h | 72
-rw-r--r-- include/asm-x86/calling.h | 170
-rw-r--r-- include/asm-x86/checksum.h | 5
-rw-r--r-- include/asm-x86/checksum_32.h | 189
-rw-r--r-- include/asm-x86/checksum_64.h | 191
-rw-r--r-- include/asm-x86/cmpxchg.h | 5
-rw-r--r-- include/asm-x86/cmpxchg_32.h | 344
-rw-r--r-- include/asm-x86/cmpxchg_64.h | 185
-rw-r--r-- include/asm-x86/compat.h | 218
-rw-r--r-- include/asm-x86/cpu.h | 20
-rw-r--r-- include/asm-x86/cpufeature.h | 271
-rw-r--r-- include/asm-x86/cputime.h | 1
-rw-r--r-- include/asm-x86/current.h | 39
-rw-r--r-- include/asm-x86/debugreg.h | 70
-rw-r--r-- include/asm-x86/delay.h | 31
-rw-r--r-- include/asm-x86/desc.h | 409
-rw-r--r-- include/asm-x86/desc_defs.h | 95
-rw-r--r-- include/asm-x86/device.h | 16
-rw-r--r-- include/asm-x86/div64.h | 60
-rw-r--r-- include/asm-x86/dma-mapping.h | 308
-rw-r--r-- include/asm-x86/dma.h | 318
-rw-r--r-- include/asm-x86/dmi.h | 26
-rw-r--r-- include/asm-x86/ds.h | 238
-rw-r--r-- include/asm-x86/dwarf2.h | 61
-rw-r--r-- include/asm-x86/e820.h | 146
-rw-r--r-- include/asm-x86/edac.h | 18
-rw-r--r-- include/asm-x86/efi.h | 110
-rw-r--r-- include/asm-x86/elf.h | 336
-rw-r--r-- include/asm-x86/emergency-restart.h | 18
-rw-r--r-- include/asm-x86/errno.h | 1
-rw-r--r-- include/asm-x86/es7000/apic.h | 193
-rw-r--r-- include/asm-x86/es7000/apicdef.h | 13
-rw-r--r-- include/asm-x86/es7000/ipi.h | 24
-rw-r--r-- include/asm-x86/es7000/mpparse.h | 30
-rw-r--r-- include/asm-x86/es7000/wakecpu.h | 59
-rw-r--r-- include/asm-x86/fb.h | 21
-rw-r--r-- include/asm-x86/fcntl.h | 1
-rw-r--r-- include/asm-x86/fixmap.h | 68
-rw-r--r-- include/asm-x86/fixmap_32.h | 123
-rw-r--r-- include/asm-x86/fixmap_64.h | 83
-rw-r--r-- include/asm-x86/floppy.h | 281
-rw-r--r-- include/asm-x86/frame.h | 27
-rw-r--r-- include/asm-x86/ftrace.h | 24
-rw-r--r-- include/asm-x86/futex.h | 140
-rw-r--r-- include/asm-x86/gart.h | 73
-rw-r--r-- include/asm-x86/genapic.h | 5
-rw-r--r-- include/asm-x86/genapic_32.h | 126
-rw-r--r-- include/asm-x86/genapic_64.h | 58
-rw-r--r-- include/asm-x86/geode.h | 253
-rw-r--r-- include/asm-x86/gpio.h | 56
-rw-r--r-- include/asm-x86/hardirq.h | 11
-rw-r--r-- include/asm-x86/hardirq_32.h | 28
-rw-r--r-- include/asm-x86/hardirq_64.h | 23
-rw-r--r-- include/asm-x86/highmem.h | 82
-rw-r--r-- include/asm-x86/hpet.h | 114
-rw-r--r-- include/asm-x86/hugetlb.h | 93
-rw-r--r-- include/asm-x86/hw_irq.h | 131
-rw-r--r-- include/asm-x86/hypertransport.h | 45
-rw-r--r-- include/asm-x86/i387.h | 400
-rw-r--r-- include/asm-x86/i8253.h | 18
-rw-r--r-- include/asm-x86/i8259.h | 63
-rw-r--r-- include/asm-x86/ia32.h | 170
-rw-r--r-- include/asm-x86/ia32_unistd.h | 18
-rw-r--r-- include/asm-x86/idle.h | 16
-rw-r--r-- include/asm-x86/intel_arch_perfmon.h | 31
-rw-r--r-- include/asm-x86/io.h | 91
-rw-r--r-- include/asm-x86/io_32.h | 284
-rw-r--r-- include/asm-x86/io_64.h | 244
-rw-r--r-- include/asm-x86/io_apic.h | 204
-rw-r--r-- include/asm-x86/ioctl.h | 1
-rw-r--r-- include/asm-x86/ioctls.h | 94
-rw-r--r-- include/asm-x86/iommu.h | 50
-rw-r--r-- include/asm-x86/ipcbuf.h | 28
-rw-r--r-- include/asm-x86/ipi.h | 138
-rw-r--r-- include/asm-x86/irq.h | 50
-rw-r--r-- include/asm-x86/irq_regs.h | 5
-rw-r--r-- include/asm-x86/irq_regs_32.h | 29
-rw-r--r-- include/asm-x86/irq_regs_64.h | 1
-rw-r--r-- include/asm-x86/irq_remapping.h | 8
-rw-r--r-- include/asm-x86/irq_vectors.h | 164
-rw-r--r-- include/asm-x86/irqflags.h | 211
-rw-r--r-- include/asm-x86/ist.h | 34
-rw-r--r-- include/asm-x86/k8.h | 15
-rw-r--r-- include/asm-x86/kdebug.h | 37
-rw-r--r-- include/asm-x86/kexec.h | 175
-rw-r--r-- include/asm-x86/kgdb.h | 79
-rw-r--r-- include/asm-x86/kmap_types.h | 29
-rw-r--r-- include/asm-x86/kprobes.h | 88
-rw-r--r-- include/asm-x86/kvm.h | 211
-rw-r--r-- include/asm-x86/kvm_host.h | 752
-rw-r--r-- include/asm-x86/kvm_para.h | 147
-rw-r--r-- include/asm-x86/kvm_x86_emulate.h | 184
-rw-r--r-- include/asm-x86/ldt.h | 40
-rw-r--r-- include/asm-x86/lguest.h | 94
-rw-r--r-- include/asm-x86/lguest_hcall.h | 71
-rw-r--r-- include/asm-x86/linkage.h | 61
-rw-r--r-- include/asm-x86/local.h | 235
-rw-r--r-- include/asm-x86/mach-default/apm.h | 73
-rw-r--r-- include/asm-x86/mach-default/do_timer.h | 16
-rw-r--r-- include/asm-x86/mach-default/entry_arch.h | 36
-rw-r--r-- include/asm-x86/mach-default/mach_apic.h | 156
-rw-r--r-- include/asm-x86/mach-default/mach_apicdef.h | 24
-rw-r--r-- include/asm-x86/mach-default/mach_ipi.h | 64
-rw-r--r-- include/asm-x86/mach-default/mach_mpparse.h | 17
-rw-r--r-- include/asm-x86/mach-default/mach_mpspec.h | 12
-rw-r--r-- include/asm-x86/mach-default/mach_timer.h | 48
-rw-r--r-- include/asm-x86/mach-default/mach_traps.h | 33
-rw-r--r-- include/asm-x86/mach-default/mach_wakecpu.h | 42
-rw-r--r-- include/asm-x86/mach-default/pci-functions.h | 19
-rw-r--r-- include/asm-x86/mach-default/setup_arch.h | 3
-rw-r--r-- include/asm-x86/mach-default/smpboot_hooks.h | 59
-rw-r--r-- include/asm-x86/mach-generic/gpio.h | 15
-rw-r--r-- include/asm-x86/mach-generic/mach_apic.h | 33
-rw-r--r-- include/asm-x86/mach-generic/mach_apicdef.h | 11
-rw-r--r-- include/asm-x86/mach-generic/mach_ipi.h | 10
-rw-r--r-- include/asm-x86/mach-generic/mach_mpparse.h | 10
-rw-r--r-- include/asm-x86/mach-generic/mach_mpspec.h | 12
-rw-r--r-- include/asm-x86/mach-rdc321x/gpio.h | 60
-rw-r--r-- include/asm-x86/mach-rdc321x/rdc321x_defs.h | 12
-rw-r--r-- include/asm-x86/mach-voyager/do_timer.h | 17
-rw-r--r-- include/asm-x86/mach-voyager/entry_arch.h | 26
-rw-r--r-- include/asm-x86/mach-voyager/setup_arch.h | 12
-rw-r--r-- include/asm-x86/math_emu.h | 31
-rw-r--r-- include/asm-x86/mc146818rtc.h | 104
-rw-r--r-- include/asm-x86/mca.h | 43
-rw-r--r-- include/asm-x86/mca_dma.h | 201
-rw-r--r-- include/asm-x86/mce.h | 130
-rw-r--r-- include/asm-x86/microcode.h | 47
-rw-r--r-- include/asm-x86/mman.h | 20
-rw-r--r-- include/asm-x86/mmconfig.h | 12
-rw-r--r-- include/asm-x86/mmu.h | 26
-rw-r--r-- include/asm-x86/mmu_context.h | 37
-rw-r--r-- include/asm-x86/mmu_context_32.h | 56
-rw-r--r-- include/asm-x86/mmu_context_64.h | 54
-rw-r--r-- include/asm-x86/mmx.h | 14
-rw-r--r-- include/asm-x86/mmzone.h | 5
-rw-r--r-- include/asm-x86/mmzone_32.h | 134
-rw-r--r-- include/asm-x86/mmzone_64.h | 51
-rw-r--r-- include/asm-x86/module.h | 80
-rw-r--r-- include/asm-x86/mpspec.h | 145
-rw-r--r-- include/asm-x86/mpspec_def.h | 180
-rw-r--r-- include/asm-x86/msgbuf.h | 39
-rw-r--r-- include/asm-x86/msidef.h | 55
-rw-r--r-- include/asm-x86/msr-index.h | 332
-rw-r--r-- include/asm-x86/msr.h | 247
-rw-r--r-- include/asm-x86/mtrr.h | 173
-rw-r--r-- include/asm-x86/mutex.h | 5
-rw-r--r-- include/asm-x86/mutex_32.h | 125
-rw-r--r-- include/asm-x86/mutex_64.h | 100
-rw-r--r-- include/asm-x86/nmi.h | 81
-rw-r--r-- include/asm-x86/nops.h | 118
-rw-r--r-- include/asm-x86/numa.h | 5
-rw-r--r-- include/asm-x86/numa_32.h | 11
-rw-r--r-- include/asm-x86/numa_64.h | 43
-rw-r--r-- include/asm-x86/numaq.h | 169
-rw-r--r-- include/asm-x86/numaq/apic.h | 136
-rw-r--r-- include/asm-x86/numaq/apicdef.h | 14
-rw-r--r-- include/asm-x86/numaq/ipi.h | 25
-rw-r--r-- include/asm-x86/numaq/mpparse.h | 7
-rw-r--r-- include/asm-x86/numaq/wakecpu.h | 43
-rw-r--r-- include/asm-x86/olpc.h | 132
-rw-r--r-- include/asm-x86/page.h | 209
-rw-r--r-- include/asm-x86/page_32.h | 136
-rw-r--r-- include/asm-x86/page_64.h | 105
-rw-r--r-- include/asm-x86/param.h | 22
-rw-r--r-- include/asm-x86/paravirt.h | 1650
-rw-r--r-- include/asm-x86/parport.h | 10
-rw-r--r-- include/asm-x86/pat.h | 22
-rw-r--r-- include/asm-x86/pci-direct.h | 21
-rw-r--r-- include/asm-x86/pci.h | 114
-rw-r--r-- include/asm-x86/pci_32.h | 34
-rw-r--r-- include/asm-x86/pci_64.h | 66
-rw-r--r-- include/asm-x86/pda.h | 137
-rw-r--r-- include/asm-x86/percpu.h | 218
-rw-r--r-- include/asm-x86/pgalloc.h | 114
-rw-r--r-- include/asm-x86/pgtable-2level-defs.h | 20
-rw-r--r-- include/asm-x86/pgtable-2level.h | 79
-rw-r--r-- include/asm-x86/pgtable-3level-defs.h | 28
-rw-r--r-- include/asm-x86/pgtable-3level.h | 175
-rw-r--r-- include/asm-x86/pgtable.h | 561
-rw-r--r-- include/asm-x86/pgtable_32.h | 191
-rw-r--r-- include/asm-x86/pgtable_64.h | 285
-rw-r--r-- include/asm-x86/poll.h | 1
-rw-r--r-- include/asm-x86/posix_types.h | 13
-rw-r--r-- include/asm-x86/posix_types_32.h | 85
-rw-r--r-- include/asm-x86/posix_types_64.h | 119
-rw-r--r-- include/asm-x86/prctl.h | 10
-rw-r--r-- include/asm-x86/processor-cyrix.h | 38
-rw-r--r-- include/asm-x86/processor-flags.h | 100
-rw-r--r-- include/asm-x86/processor.h | 936
-rw-r--r-- include/asm-x86/proto.h | 32
-rw-r--r-- include/asm-x86/ptrace-abi.h | 145
-rw-r--r-- include/asm-x86/ptrace.h | 280
-rw-r--r-- include/asm-x86/pvclock-abi.h | 42
-rw-r--r-- include/asm-x86/pvclock.h | 14
-rw-r--r-- include/asm-x86/reboot.h | 21
-rw-r--r-- include/asm-x86/reboot_fixups.h | 6
-rw-r--r-- include/asm-x86/required-features.h | 82
-rw-r--r-- include/asm-x86/resource.h | 1
-rw-r--r-- include/asm-x86/resume-trace.h | 21
-rw-r--r-- include/asm-x86/rio.h | 63
-rw-r--r-- include/asm-x86/rtc.h | 1
-rw-r--r-- include/asm-x86/rwlock.h | 8
-rw-r--r-- include/asm-x86/rwsem.h | 265
-rw-r--r-- include/asm-x86/scatterlist.h | 33
-rw-r--r-- include/asm-x86/seccomp.h | 5
-rw-r--r-- include/asm-x86/seccomp_32.h | 17
-rw-r--r-- include/asm-x86/seccomp_64.h | 25
-rw-r--r-- include/asm-x86/sections.h | 1
-rw-r--r-- include/asm-x86/segment.h | 209
-rw-r--r-- include/asm-x86/sembuf.h | 24
-rw-r--r-- include/asm-x86/serial.h | 29
-rw-r--r-- include/asm-x86/setup.h | 105
-rw-r--r-- include/asm-x86/shmbuf.h | 51
-rw-r--r-- include/asm-x86/shmparam.h | 6
-rw-r--r-- include/asm-x86/sigcontext.h | 284
-rw-r--r-- include/asm-x86/sigcontext32.h | 75
-rw-r--r-- include/asm-x86/siginfo.h | 10
-rw-r--r-- include/asm-x86/signal.h | 262
-rw-r--r-- include/asm-x86/smp.h | 229
-rw-r--r-- include/asm-x86/socket.h | 57
-rw-r--r-- include/asm-x86/sockios.h | 13
-rw-r--r-- include/asm-x86/sparsemem.h | 34
-rw-r--r-- include/asm-x86/spinlock.h | 364
-rw-r--r-- include/asm-x86/spinlock_types.h | 20
-rw-r--r-- include/asm-x86/srat.h | 39
-rw-r--r-- include/asm-x86/stacktrace.h | 21
-rw-r--r-- include/asm-x86/stat.h | 114
-rw-r--r-- include/asm-x86/statfs.h | 12
-rw-r--r-- include/asm-x86/string.h | 5
-rw-r--r-- include/asm-x86/string_32.h | 326
-rw-r--r-- include/asm-x86/string_64.h | 60
-rw-r--r-- include/asm-x86/summit/apic.h | 184
-rw-r--r-- include/asm-x86/summit/apicdef.h | 13
-rw-r--r-- include/asm-x86/summit/ipi.h | 25
-rw-r--r-- include/asm-x86/summit/mpparse.h | 109
-rw-r--r-- include/asm-x86/suspend.h | 5
-rw-r--r-- include/asm-x86/suspend_32.h | 51
-rw-r--r-- include/asm-x86/suspend_64.h | 52
-rw-r--r-- include/asm-x86/swiotlb.h | 58
-rw-r--r-- include/asm-x86/sync_bitops.h | 130
-rw-r--r-- include/asm-x86/syscall.h | 211
-rw-r--r-- include/asm-x86/syscalls.h | 93
-rw-r--r-- include/asm-x86/system.h | 425
-rw-r--r-- include/asm-x86/system_64.h | 22
-rw-r--r-- include/asm-x86/tce.h | 48
-rw-r--r-- include/asm-x86/termbits.h | 198
-rw-r--r-- include/asm-x86/termios.h | 113
-rw-r--r-- include/asm-x86/therm_throt.h | 9
-rw-r--r-- include/asm-x86/thread_info.h | 264
-rw-r--r-- include/asm-x86/time.h | 63
-rw-r--r-- include/asm-x86/timer.h | 66
-rw-r--r-- include/asm-x86/timex.h | 19
-rw-r--r-- include/asm-x86/tlb.h | 11
-rw-r--r-- include/asm-x86/tlbflush.h | 178
-rw-r--r-- include/asm-x86/topology.h | 258
-rw-r--r-- include/asm-x86/trampoline.h | 21
-rw-r--r-- include/asm-x86/traps.h | 81
-rw-r--r-- include/asm-x86/tsc.h | 62
-rw-r--r-- include/asm-x86/types.h | 36
-rw-r--r-- include/asm-x86/uaccess.h | 454
-rw-r--r-- include/asm-x86/uaccess_32.h | 218
-rw-r--r-- include/asm-x86/uaccess_64.h | 202
-rw-r--r-- include/asm-x86/ucontext.h | 18
-rw-r--r-- include/asm-x86/unaligned.h | 14
-rw-r--r-- include/asm-x86/unistd.h | 13
-rw-r--r-- include/asm-x86/unistd_32.h | 379
-rw-r--r-- include/asm-x86/unistd_64.h | 693
-rw-r--r-- include/asm-x86/unwind.h | 13
-rw-r--r-- include/asm-x86/user.h | 5
-rw-r--r-- include/asm-x86/user32.h | 70
-rw-r--r-- include/asm-x86/user_32.h | 131
-rw-r--r-- include/asm-x86/user_64.h | 137
-rw-r--r-- include/asm-x86/uv/bios.h | 94
-rw-r--r-- include/asm-x86/uv/uv_bau.h | 332
-rw-r--r-- include/asm-x86/uv/uv_hub.h | 354
-rw-r--r-- include/asm-x86/uv/uv_irq.h | 36
-rw-r--r-- include/asm-x86/uv/uv_mmrs.h | 1295
-rw-r--r-- include/asm-x86/vdso.h | 47
-rw-r--r-- include/asm-x86/vga.h | 20
-rw-r--r-- include/asm-x86/vgtod.h | 29
-rw-r--r-- include/asm-x86/vic.h | 61
-rw-r--r-- include/asm-x86/visws/cobalt.h | 125
-rw-r--r-- include/asm-x86/visws/lithium.h | 53
-rw-r--r-- include/asm-x86/visws/piix4.h | 107
-rw-r--r-- include/asm-x86/visws/sgivw.h | 5
-rw-r--r-- include/asm-x86/vm86.h | 208
-rw-r--r-- include/asm-x86/vmi.h | 263
-rw-r--r-- include/asm-x86/vmi_time.h | 98
-rw-r--r-- include/asm-x86/voyager.h | 528
-rw-r--r-- include/asm-x86/vsyscall.h | 44
-rw-r--r-- include/asm-x86/xcr.h | 49
-rw-r--r-- include/asm-x86/xen/events.h | 24
-rw-r--r-- include/asm-x86/xen/grant_table.h | 7
-rw-r--r-- include/asm-x86/xen/hypercall.h | 527
-rw-r--r-- include/asm-x86/xen/hypervisor.h | 82
-rw-r--r-- include/asm-x86/xen/interface.h | 175
-rw-r--r-- include/asm-x86/xen/interface_32.h | 97
-rw-r--r-- include/asm-x86/xen/interface_64.h | 159
-rw-r--r-- include/asm-x86/xen/page.h | 165
-rw-r--r-- include/asm-x86/xor.h | 5
-rw-r--r-- include/asm-x86/xor_32.h | 888
-rw-r--r-- include/asm-x86/xor_64.h | 361
-rw-r--r-- include/asm-x86/xsave.h | 118
477 files changed, 0 insertions, 42367 deletions
diff --git a/include/asm-um/a.out-core.h b/include/asm-um/a.out-core.h
deleted file mode 100644
index 995643b18309..000000000000
--- a/include/asm-um/a.out-core.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef __UM_A_OUT_CORE_H
-#define __UM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
-{
-}
-
-#endif /* __KERNEL__ */
-#endif /* __UM_A_OUT_CORE_H */
diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h
deleted file mode 100644
index 754181ee8683..000000000000
--- a/include/asm-um/a.out.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_A_OUT_H
-#define __UM_A_OUT_H
-
-#include "asm/arch/a.out.h"
-
-#endif
diff --git a/include/asm-um/alternative-asm.h b/include/asm-um/alternative-asm.h
deleted file mode 100644
index 9aa9fa2402a4..000000000000
--- a/include/asm-um/alternative-asm.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_ALTERNATIVE_ASM_I
-#define __UM_ALTERNATIVE_ASM_I
-
-#include "asm/arch/alternative-asm.h"
-
-#endif
diff --git a/include/asm-um/alternative.h b/include/asm-um/alternative.h
deleted file mode 100644
index b6434396bd42..000000000000
--- a/include/asm-um/alternative.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_ALTERNATIVE_H
-#define __UM_ALTERNATIVE_H
-
-#include "asm/arch/alternative.h"
-
-#endif
diff --git a/include/asm-um/apic.h b/include/asm-um/apic.h
deleted file mode 100644
index 876dee84ab11..000000000000
--- a/include/asm-um/apic.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __UM_APIC_H
-#define __UM_APIC_H
-
-#endif
diff --git a/include/asm-um/archparam-i386.h b/include/asm-um/archparam-i386.h
deleted file mode 100644
index 49e89b8d7e58..000000000000
--- a/include/asm-um/archparam-i386.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_ARCHPARAM_I386_H
-#define __UM_ARCHPARAM_I386_H
-
-/********* Nothing for asm-um/hardirq.h **********/
-
-/********* Nothing for asm-um/hw_irq.h **********/
-
-/********* Nothing for asm-um/string.h **********/
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/archparam-ppc.h b/include/asm-um/archparam-ppc.h
deleted file mode 100644
index 4269d8a37b4f..000000000000
--- a/include/asm-um/archparam-ppc.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __UM_ARCHPARAM_PPC_H
-#define __UM_ARCHPARAM_PPC_H
-
-/********* Bits for asm-um/string.h **********/
-
-#define __HAVE_ARCH_STRRCHR
-
-#endif
diff --git a/include/asm-um/archparam-x86_64.h b/include/asm-um/archparam-x86_64.h
deleted file mode 100644
index 270ed9586b68..000000000000
--- a/include/asm-um/archparam-x86_64.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#ifndef __UM_ARCHPARAM_X86_64_H
-#define __UM_ARCHPARAM_X86_64_H
-
-
-/* No user-accessible fixmap addresses, i.e. vsyscall */
-#define FIXADDR_USER_START 0
-#define FIXADDR_USER_END 0
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/asm.h b/include/asm-um/asm.h
deleted file mode 100644
index af1269a1e9eb..000000000000
--- a/include/asm-um/asm.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_ASM_H
-#define __UM_ASM_H
-
-#include "asm/arch/asm.h"
-
-#endif
diff --git a/include/asm-um/atomic.h b/include/asm-um/atomic.h
deleted file mode 100644
index b683f1034d1e..000000000000
--- a/include/asm-um/atomic.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __UM_ATOMIC_H
-#define __UM_ATOMIC_H
-
-/* The i386 atomic.h calls printk, but doesn't include kernel.h, so we
- * include it here.
- */
-#include "linux/kernel.h"
-
-#include "asm/arch/atomic.h"
-
-#endif
diff --git a/include/asm-um/auxvec.h b/include/asm-um/auxvec.h
deleted file mode 100644
index 1e5e1c2fc9b1..000000000000
--- a/include/asm-um/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __UM_AUXVEC_H
-#define __UM_AUXVEC_H
-
-#endif
diff --git a/include/asm-um/bitops.h b/include/asm-um/bitops.h
deleted file mode 100644
index e4d38d437b97..000000000000
--- a/include/asm-um/bitops.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __UM_BITOPS_H
-#define __UM_BITOPS_H
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include "asm/arch/bitops.h"
-
-#endif
diff --git a/include/asm-um/boot.h b/include/asm-um/boot.h
deleted file mode 100644
index 09548c3e784e..000000000000
--- a/include/asm-um/boot.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_BOOT_H
-#define __UM_BOOT_H
-
-#include "asm/arch/boot.h"
-
-#endif
diff --git a/include/asm-um/bug.h b/include/asm-um/bug.h
deleted file mode 100644
index 9e33b864c359..000000000000
--- a/include/asm-um/bug.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_BUG_H
-#define __UM_BUG_H
-
-#include <asm-generic/bug.h>
-
-#endif
diff --git a/include/asm-um/bugs.h b/include/asm-um/bugs.h
deleted file mode 100644
index 6a72e240d5fc..000000000000
--- a/include/asm-um/bugs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_BUGS_H
-#define __UM_BUGS_H
-
-void check_bugs(void);
-
-#endif
diff --git a/include/asm-um/byteorder.h b/include/asm-um/byteorder.h
deleted file mode 100644
index eee0a834f447..000000000000
--- a/include/asm-um/byteorder.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_BYTEORDER_H
-#define __UM_BYTEORDER_H
-
-#include "asm/arch/byteorder.h"
-
-#endif
diff --git a/include/asm-um/cache.h b/include/asm-um/cache.h
deleted file mode 100644
index 19e1bdd67416..000000000000
--- a/include/asm-um/cache.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __UM_CACHE_H
-#define __UM_CACHE_H
-
-
-#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
-# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-#elif defined(CONFIG_UML_X86) /* 64-bit */
-# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
-#else
-/* XXX: this was taken from x86, now it's completely random. Luckily only
- * affects SMP padding. */
-# define L1_CACHE_SHIFT 5
-#endif
-
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#endif
diff --git a/include/asm-um/cacheflush.h b/include/asm-um/cacheflush.h
deleted file mode 100644
index 12e9d4b74c8f..000000000000
--- a/include/asm-um/cacheflush.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_CACHEFLUSH_H
-#define __UM_CACHEFLUSH_H
-
-#include "asm/arch/cacheflush.h"
-
-#endif
diff --git a/include/asm-um/calling.h b/include/asm-um/calling.h
deleted file mode 100644
index 0b2384cc99fd..000000000000
--- a/include/asm-um/calling.h
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2003 - 2004 Pathscale, Inc
-# Released under the GPL
-
-#ifndef __UM_CALLING_H /* XXX x86_64 */
-#define __UM_CALLING_H
-
-#include "asm/arch/calling.h"
-
-#endif
diff --git a/include/asm-um/checksum.h b/include/asm-um/checksum.h
deleted file mode 100644
index 5b501361e361..000000000000
--- a/include/asm-um/checksum.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_CHECKSUM_H
-#define __UM_CHECKSUM_H
-
-#include "sysdep/checksum.h"
-
-#endif
diff --git a/include/asm-um/cmpxchg.h b/include/asm-um/cmpxchg.h
deleted file mode 100644
index 529376a99885..000000000000
--- a/include/asm-um/cmpxchg.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_CMPXCHG_H
-#define __UM_CMPXCHG_H
-
-#include "asm/arch/cmpxchg.h"
-
-#endif
diff --git a/include/asm-um/cobalt.h b/include/asm-um/cobalt.h
deleted file mode 100644
index f813a684be98..000000000000
--- a/include/asm-um/cobalt.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_COBALT_H
-#define __UM_COBALT_H
-
-#include "asm/arch/cobalt.h"
-
-#endif
diff --git a/include/asm-um/common.lds.S b/include/asm-um/common.lds.S
deleted file mode 100644
index cb0248616d49..000000000000
--- a/include/asm-um/common.lds.S
+++ /dev/null
@@ -1,130 +0,0 @@
-#include <asm-generic/vmlinux.lds.h>
-
-  .fini : { *(.fini) } =0x9090
-  _etext = .;
-  PROVIDE (etext = .);
-
-  . = ALIGN(4096);
-  _sdata = .;
-  PROVIDE (sdata = .);
-
-  RODATA
-
-  .unprotected : { *(.unprotected) }
-  . = ALIGN(4096);
-  PROVIDE (_unprotected_end = .);
-
-  . = ALIGN(4096);
-  .note : { *(.note.*) }
-  __ex_table : {
-    __start___ex_table = .;
-    *(__ex_table)
-    __stop___ex_table = .;
-  }
-
-  BUG_TABLE
-
-  .uml.setup.init : {
-    __uml_setup_start = .;
-    *(.uml.setup.init)
-    __uml_setup_end = .;
-  }
-
-  .uml.help.init : {
-    __uml_help_start = .;
-    *(.uml.help.init)
-    __uml_help_end = .;
-  }
-
-  .uml.postsetup.init : {
-    __uml_postsetup_start = .;
-    *(.uml.postsetup.init)
-    __uml_postsetup_end = .;
-  }
-
-  .init.setup : {
-    __setup_start = .;
-    *(.init.setup)
-    __setup_end = .;
-  }
-
-  . = ALIGN(32);
-  .data.percpu : {
-    __per_cpu_start = . ;
-    *(.data.percpu)
-    __per_cpu_end = . ;
-  }
-
-  .initcall.init : {
-    __initcall_start = .;
-    INITCALLS
-    __initcall_end = .;
-  }
-
-  .con_initcall.init : {
-    __con_initcall_start = .;
-    *(.con_initcall.init)
-    __con_initcall_end = .;
-  }
-
-  .uml.initcall.init : {
-    __uml_initcall_start = .;
-    *(.uml.initcall.init)
-    __uml_initcall_end = .;
-  }
-  __init_end = .;
-
-  SECURITY_INIT
-
-  .exitcall : {
-    __exitcall_begin = .;
-    *(.exitcall.exit)
-    __exitcall_end = .;
-  }
-
-  .uml.exitcall : {
-    __uml_exitcall_begin = .;
-    *(.uml.exitcall.exit)
-    __uml_exitcall_end = .;
-  }
-
-  . = ALIGN(4);
-  .altinstructions : {
-    __alt_instructions = .;
-    *(.altinstructions)
-    __alt_instructions_end = .;
-  }
-  .altinstr_replacement : { *(.altinstr_replacement) }
-  /* .exit.text is discard at runtime, not link time, to deal with references
-     from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
-  .exit.data : { *(.exit.data) }
-
-  .preinit_array : {
-    __preinit_array_start = .;
-    *(.preinit_array)
-    __preinit_array_end = .;
-  }
-  .init_array : {
-    __init_array_start = .;
-    *(.init_array)
-    __init_array_end = .;
-  }
-  .fini_array : {
-    __fini_array_start = .;
-    *(.fini_array)
-    __fini_array_end = .;
-  }
-
-  . = ALIGN(4096);
-  .init.ramfs : {
-    __initramfs_start = .;
-    *(.init.ramfs)
-    __initramfs_end = .;
-  }
-
-  /* Sections to be discarded */
-  /DISCARD/ : {
-    *(.exitcall.exit)
-  }
-
diff --git a/include/asm-um/cpufeature.h b/include/asm-um/cpufeature.h
deleted file mode 100644
index fb7bd42a4d96..000000000000
--- a/include/asm-um/cpufeature.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_CPUFEATURE_H
-#define __UM_CPUFEATURE_H
-
-#include "asm/arch/cpufeature.h"
-
-#endif
diff --git a/include/asm-um/cputime.h b/include/asm-um/cputime.h
deleted file mode 100644
index c84acbadfa2f..000000000000
--- a/include/asm-um/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_CPUTIME_H
-#define __UM_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __UM_CPUTIME_H */
diff --git a/include/asm-um/current.h b/include/asm-um/current.h
deleted file mode 100644
index c2191d9aa03d..000000000000
--- a/include/asm-um/current.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_CURRENT_H
-#define __UM_CURRENT_H
-
-#include "linux/thread_info.h"
-
-#define current (current_thread_info()->task)
-
-#endif
diff --git a/include/asm-um/delay.h b/include/asm-um/delay.h
deleted file mode 100644
index c71e32b6741e..000000000000
--- a/include/asm-um/delay.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __UM_DELAY_H
-#define __UM_DELAY_H
-
-#define MILLION 1000000
-
-/* Undefined on purpose */
-extern void __bad_udelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) ((__builtin_constant_p(n) && (n) > 20000) ? \
-	__bad_udelay() : __udelay(n))
-
-/* It appears that ndelay is not used at all for UML, and has never been
- * implemented. */
-extern void __unimplemented_ndelay(void);
-#define ndelay(n) __unimplemented_ndelay()
-
-#endif
diff --git a/include/asm-um/desc.h b/include/asm-um/desc.h
deleted file mode 100644
index 4ec34a51b62c..000000000000
--- a/include/asm-um/desc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __UM_DESC_H
-#define __UM_DESC_H
-
-/* Taken from asm-i386/desc.h, it's the only thing we need. The rest wouldn't
- * compile, and has never been used. */
-#define LDT_empty(info) (\
-	(info)->base_addr == 0 && \
-	(info)->limit == 0 && \
-	(info)->contents == 0 && \
-	(info)->read_exec_only == 1 && \
-	(info)->seg_32bit == 0 && \
-	(info)->limit_in_pages == 0 && \
-	(info)->seg_not_present == 1 && \
-	(info)->useable == 0 )
-
-#endif
diff --git a/include/asm-um/device.h b/include/asm-um/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/include/asm-um/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/include/asm-um/div64.h b/include/asm-um/div64.h
deleted file mode 100644
index 1e17f7409cab..000000000000
--- a/include/asm-um/div64.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _UM_DIV64_H
-#define _UM_DIV64_H
-
-#include "asm/arch/div64.h"
-
-#endif
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
deleted file mode 100644
index 90fc708b320e..000000000000
--- a/include/asm-um/dma-mapping.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <asm/scatterlist.h>
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return(0);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return(0);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG();
-	return((void *) 0);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG();
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-	return(0);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG();
-	return(0);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG();
-	return(0);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
-	    enum dma_data_direction direction)
-{
-	BUG();
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	BUG();
-	return(0);
-}
-
-static inline void
-dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
-		      unsigned long offset, size_t size,
-		      enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
-{
-	BUG();
-	return 0;
-}
-
-#endif
diff --git a/include/asm-um/dma.h b/include/asm-um/dma.h
deleted file mode 100644
index 9f6139a8a525..000000000000
--- a/include/asm-um/dma.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __UM_DMA_H
-#define __UM_DMA_H
-
-#include "asm/io.h"
-
-extern unsigned long uml_physmem;
-
-#define MAX_DMA_ADDRESS (uml_physmem)
-
-#endif
diff --git a/include/asm-um/dwarf2.h b/include/asm-um/dwarf2.h
deleted file mode 100644
index d1a02e762931..000000000000
--- a/include/asm-um/dwarf2.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright 2003 - 2004 Pathscale, Inc
- * Released under the GPL
- */
-
-/* Needed on x86_64 by thunk.S */
-#ifndef __UM_DWARF2_H
-#define __UM_DWARF2_H
-
-#include "asm/arch/dwarf2.h"
-
-#endif
diff --git a/include/asm-um/elf-i386.h b/include/asm-um/elf-i386.h
deleted file mode 100644
index d0da9d7c5371..000000000000
--- a/include/asm-um/elf-i386.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-#ifndef __UM_ELF_I386_H
-#define __UM_ELF_I386_H
-
-#include <asm/user.h>
-#include "skas.h"
-
-#define R_386_NONE 0
-#define R_386_32 1
-#define R_386_PC32 2
-#define R_386_GOT32 3
-#define R_386_PLT32 4
-#define R_386_COPY 5
-#define R_386_GLOB_DAT 6
-#define R_386_JMP_SLOT 7
-#define R_386_RELATIVE 8
-#define R_386_GOTOFF 9
-#define R_386_GOTPC 10
-#define R_386_NUM 11
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct user_i387_struct elf_fpregset_t;
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
-	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
-
-#define ELF_CLASS ELFCLASS32
-#define ELF_DATA ELFDATA2LSB
-#define ELF_ARCH EM_386
-
-#define ELF_PLAT_INIT(regs, load_addr) do { \
-	PT_REGS_EBX(regs) = 0; \
-	PT_REGS_ECX(regs) = 0; \
-	PT_REGS_EDX(regs) = 0; \
-	PT_REGS_ESI(regs) = 0; \
-	PT_REGS_EDI(regs) = 0; \
-	PT_REGS_EBP(regs) = 0; \
-	PT_REGS_EAX(regs) = 0; \
-} while (0)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE 4096
-
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-
-/* Shamelessly stolen from include/asm-i386/elf.h */
-
-#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
-	pr_reg[0] = PT_REGS_EBX(regs); \
-	pr_reg[1] = PT_REGS_ECX(regs); \
-	pr_reg[2] = PT_REGS_EDX(regs); \
-	pr_reg[3] = PT_REGS_ESI(regs); \
-	pr_reg[4] = PT_REGS_EDI(regs); \
-	pr_reg[5] = PT_REGS_EBP(regs); \
-	pr_reg[6] = PT_REGS_EAX(regs); \
-	pr_reg[7] = PT_REGS_DS(regs); \
-	pr_reg[8] = PT_REGS_ES(regs); \
-	/* fake once used fs and gs selectors? */ \
-	pr_reg[9] = PT_REGS_DS(regs); \
-	pr_reg[10] = PT_REGS_DS(regs); \
-	pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \
-	pr_reg[12] = PT_REGS_IP(regs); \
-	pr_reg[13] = PT_REGS_CS(regs); \
-	pr_reg[14] = PT_REGS_EFLAGS(regs); \
-	pr_reg[15] = PT_REGS_SP(regs); \
-	pr_reg[16] = PT_REGS_SS(regs); \
-} while (0);
-
-extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
-
-#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
-
-extern long elf_aux_hwcap;
-#define ELF_HWCAP (elf_aux_hwcap)
-
-extern char * elf_aux_platform;
-#define ELF_PLATFORM (elf_aux_platform)
-
-#define SET_PERSONALITY(ex) do { } while (0)
-
-extern unsigned long vsyscall_ehdr;
-extern unsigned long vsyscall_end;
-extern unsigned long __kernel_vsyscall;
-
-#define VSYSCALL_BASE vsyscall_ehdr
-#define VSYSCALL_END vsyscall_end
-
-/*
- * This is the range that is readable by user mode, and things
- * acting like user mode such as get_user_pages.
- */
-#define FIXADDR_USER_START VSYSCALL_BASE
-#define FIXADDR_USER_END VSYSCALL_END
-
-/*
- * Architecture-neutral AT_ values in 0-17, leave some room
- * for more of them, start the x86-specific ones at 32.
- */
-#define AT_SYSINFO 32
-#define AT_SYSINFO_EHDR 33
-
-#define ARCH_DLINFO \
-do { \
-	if ( vsyscall_ehdr ) { \
-		NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \
-	} \
-} while (0)
-
-/*
- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
- * extra segments containing the vsyscall DSO contents. Dumping its
- * contents makes post-mortem fully interpretable later without matching up
- * the same kernel and hardware config to see what PC values meant.
- * Dumping its extra ELF program headers includes all the other information
- * a debugger needs to easily find how the vsyscall DSO was being used.
- */
-#define ELF_CORE_EXTRA_PHDRS \
-	(vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0 )
-
-#define ELF_CORE_WRITE_EXTRA_PHDRS \
-if ( vsyscall_ehdr ) { \
-	const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \
-	const struct elf_phdr *const phdrp = \
-		(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \
-	int i; \
-	Elf32_Off ofs = 0; \
-	for (i = 0; i < ehdrp->e_phnum; ++i) { \
-		struct elf_phdr phdr = phdrp[i]; \
-		if (phdr.p_type == PT_LOAD) { \
-			ofs = phdr.p_offset = offset; \
-			offset += phdr.p_filesz; \
-		} \
-		else \
-			phdr.p_offset += ofs; \
-		phdr.p_paddr = 0; /* match other core phdrs */ \
-		DUMP_WRITE(&phdr, sizeof(phdr)); \
-	} \
-}
-#define ELF_CORE_WRITE_EXTRA_DATA \
-if ( vsyscall_ehdr ) { \
-	const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \
-	const struct elf_phdr *const phdrp = \
-		(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \
-	int i; \
-	for (i = 0; i < ehdrp->e_phnum; ++i) { \
-		if (phdrp[i].p_type == PT_LOAD) \
-			DUMP_WRITE((void *) phdrp[i].p_vaddr, \
-				   phdrp[i].p_filesz); \
-	} \
-}
-
-#endif
diff --git a/include/asm-um/elf-ppc.h b/include/asm-um/elf-ppc.h
deleted file mode 100644
index af9463cd8ce5..000000000000
--- a/include/asm-um/elf-ppc.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __UM_ELF_PPC_H
-#define __UM_ELF_PPC_H
-
-
-extern long elf_aux_hwcap;
-#define ELF_HWCAP (elf_aux_hwcap)
-
-#define SET_PERSONALITY(ex) do ; while(0)
-
-#define ELF_EXEC_PAGESIZE 4096
-
-#define elf_check_arch(x) (1)
-
-#ifdef CONFIG_64BIT
-#define ELF_CLASS ELFCLASS64
-#else
-#define ELF_CLASS ELFCLASS32
-#endif
-
-#define USE_ELF_CORE_DUMP
-
-#define R_386_NONE 0
-#define R_386_32 1
-#define R_386_PC32 2
-#define R_386_GOT32 3
-#define R_386_PLT32 4
-#define R_386_COPY 5
-#define R_386_GLOB_DAT 6
-#define R_386_JMP_SLOT 7
-#define R_386_RELATIVE 8
-#define R_386_GOTOFF 9
-#define R_386_GOTPC 10
-#define R_386_NUM 11
-
-#define ELF_PLATFORM (0)
-
-#define ELF_ET_DYN_BASE (0x08000000)
-
-/* the following stolen from asm-ppc/elf.h */
-#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
-#define ELF_NFPREG 33 /* includes fpscr */
-/* General registers */
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-/* Floating point registers */
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-#define ELF_DATA ELFDATA2MSB
-#define ELF_ARCH EM_PPC
-
-#endif
diff --git a/include/asm-um/elf-x86_64.h b/include/asm-um/elf-x86_64.h
deleted file mode 100644
index 6e8a9195e952..000000000000
--- a/include/asm-um/elf-x86_64.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- *
- * Licensed under the GPL
- */
-#ifndef __UM_ELF_X86_64_H
-#define __UM_ELF_X86_64_H
-
-#include <asm/user.h>
-#include "skas.h"
-
-/* x86-64 relocation types, taken from asm-x86_64/elf.h */
-#define R_X86_64_NONE 0 /* No reloc */
-#define R_X86_64_64 1 /* Direct 64 bit */
-#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
-#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
-#define R_X86_64_PLT32 4 /* 32 bit PLT address */
-#define R_X86_64_COPY 5 /* Copy symbol at runtime */
-#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
-#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
-#define R_X86_64_RELATIVE 8 /* Adjust by program base */
-#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
-			       offset to GOT */
-#define R_X86_64_32 10 /* Direct 32 bit zero extended */
-#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
-#define R_X86_64_16 12 /* Direct 16 bit zero extended */
-#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
-#define R_X86_64_8 14 /* Direct 8 bit sign extended */
-#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-
-#define R_X86_64_NUM 16
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct user_i387_struct elf_fpregset_t;
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
-	((x)->e_machine == EM_X86_64)
-
-#define ELF_CLASS ELFCLASS64
-#define ELF_DATA ELFDATA2LSB
-#define ELF_ARCH EM_X86_64
-
-#define ELF_PLAT_INIT(regs, load_addr) do { \
-	PT_REGS_RBX(regs) = 0; \
-	PT_REGS_RCX(regs) = 0; \
-	PT_REGS_RDX(regs) = 0; \
-	PT_REGS_RSI(regs) = 0; \
-	PT_REGS_RDI(regs) = 0; \
-	PT_REGS_RBP(regs) = 0; \
-	PT_REGS_RAX(regs) = 0; \
-	PT_REGS_R8(regs) = 0; \
-	PT_REGS_R9(regs) = 0; \
-	PT_REGS_R10(regs) = 0; \
-	PT_REGS_R11(regs) = 0; \
-	PT_REGS_R12(regs) = 0; \
-	PT_REGS_R13(regs) = 0; \
-	PT_REGS_R14(regs) = 0; \
-	PT_REGS_R15(regs) = 0; \
-} while (0)
-
-#define ELF_CORE_COPY_REGS(pr_reg, regs) \
-	(pr_reg)[0] = (regs)->regs.gp[0]; \
-	(pr_reg)[1] = (regs)->regs.gp[1]; \
-	(pr_reg)[2] = (regs)->regs.gp[2]; \
-	(pr_reg)[3] = (regs)->regs.gp[3]; \
-	(pr_reg)[4] = (regs)->regs.gp[4]; \
-	(pr_reg)[5] = (regs)->regs.gp[5]; \
-	(pr_reg)[6] = (regs)->regs.gp[6]; \
-	(pr_reg)[7] = (regs)->regs.gp[7]; \
-	(pr_reg)[8] = (regs)->regs.gp[8]; \
-	(pr_reg)[9] = (regs)->regs.gp[9]; \
-	(pr_reg)[10] = (regs)->regs.gp[10]; \
-	(pr_reg)[11] = (regs)->regs.gp[11]; \
-	(pr_reg)[12] = (regs)->regs.gp[12]; \
-	(pr_reg)[13] = (regs)->regs.gp[13]; \
-	(pr_reg)[14] = (regs)->regs.gp[14]; \
-	(pr_reg)[15] = (regs)->regs.gp[15]; \
-	(pr_reg)[16] = (regs)->regs.gp[16]; \
-	(pr_reg)[17] = (regs)->regs.gp[17]; \
-	(pr_reg)[18] = (regs)->regs.gp[18]; \
-	(pr_reg)[19] = (regs)->regs.gp[19]; \
-	(pr_reg)[20] = (regs)->regs.gp[20]; \
-	(pr_reg)[21] = current->thread.arch.fs; \
-	(pr_reg)[22] = 0; \
-	(pr_reg)[23] = 0; \
-	(pr_reg)[24] = 0; \
-	(pr_reg)[25] = 0; \
-	(pr_reg)[26] = 0;
-
-extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
-
-#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
-
-#ifdef TIF_IA32 /* XXX */
-#error XXX, indeed
-	clear_thread_flag(TIF_IA32);
-#endif
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE 4096
-
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-
-extern long elf_aux_hwcap;
-#define ELF_HWCAP (elf_aux_hwcap)
-
-#define ELF_PLATFORM "x86_64"
-
-#define SET_PERSONALITY(ex) do ; while(0)
-
-#endif
diff --git a/include/asm-um/emergency-restart.h b/include/asm-um/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/include/asm-um/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-um/errno.h b/include/asm-um/errno.h
deleted file mode 100644
index b7a9e37fd8d8..000000000000
--- a/include/asm-um/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_ERRNO_H
-#define __UM_ERRNO_H
-
-#include "asm/arch/errno.h"
-
-#endif
diff --git a/include/asm-um/fcntl.h b/include/asm-um/fcntl.h
deleted file mode 100644
index 812a65446d92..000000000000
--- a/include/asm-um/fcntl.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_FCNTL_H
-#define __UM_FCNTL_H
-
-#include "asm/arch/fcntl.h"
-
-#endif
diff --git a/include/asm-um/fixmap.h b/include/asm-um/fixmap.h
deleted file mode 100644
index 9d2be52b8655..000000000000
--- a/include/asm-um/fixmap.h
+++ /dev/null
@@ -1,98 +0,0 @@
-#ifndef __UM_FIXMAP_H
-#define __UM_FIXMAP_H
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/kmap_types.h>
-#include <asm/archparam.h>
-#include <asm/page.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * highger than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanizm,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
-	FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
-	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
-	__end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
-			  unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
-	__set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
-	__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-
-#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
-#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
-	/*
-	 * this branch gets completely eliminated after inlining,
-	 * except when someone tries to use fixaddr indices in an
-	 * illegal way. (such as mixing up address types or using
-	 * out-of-range indices).
-	 *
-	 * If it doesn't get removed, the linker will complain
-	 * loudly with a reasonably clear error message..
-	 */
-	if (idx >= __end_of_fixed_addresses)
-		__this_fixmap_does_not_exist();
-
-	return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
-	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-	return __virt_to_fix(vaddr);
-}
-
-#endif
diff --git a/include/asm-um/floppy.h b/include/asm-um/floppy.h
deleted file mode 100644
index 453e7415fb6f..000000000000
--- a/include/asm-um/floppy.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_FLOPPY_H
-#define __UM_FLOPPY_H
-
-#include "asm/arch/floppy.h"
-
-#endif
diff --git a/include/asm-um/frame.h b/include/asm-um/frame.h
deleted file mode 100644
index 8a8c1cb415b4..000000000000
--- a/include/asm-um/frame.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_FRAME_I
-#define __UM_FRAME_I
-
-#include "asm/arch/frame.h"
-
-#endif
diff --git a/include/asm-um/futex.h b/include/asm-um/futex.h
deleted file mode 100644
index 6a332a9f099c..000000000000
--- a/include/asm-um/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/include/asm-um/hardirq.h b/include/asm-um/hardirq.h
deleted file mode 100644
index 313ebb8a2566..000000000000
--- a/include/asm-um/hardirq.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* (c) 2004 cw@f00f.org, GPLv2 blah blah */
-
-#ifndef __ASM_UM_HARDIRQ_H
-#define __ASM_UM_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-/* NOTE: When SMP works again we might want to make this
- * ____cacheline_aligned or maybe use per_cpu state? --cw */
-typedef struct {
-	unsigned int __softirq_pending;
-} irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>
-
-/* As this would be very strange for UML to get we BUG() after the
- * printk. */
-static inline void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_ERR "unexpected IRQ %02x\n", irq);
-	BUG();
-}
-
-#endif /* __ASM_UM_HARDIRQ_H */
diff --git a/include/asm-um/highmem.h b/include/asm-um/highmem.h
deleted file mode 100644
index 36974cb8abc7..000000000000
--- a/include/asm-um/highmem.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __UM_HIGHMEM_H
-#define __UM_HIGHMEM_H
-
-#include "asm/page.h"
-#include "asm/fixmap.h"
-#include "asm/arch/highmem.h"
-
-#undef PKMAP_BASE
-
-#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
-
-#endif
diff --git a/include/asm-um/host_ldt-i386.h b/include/asm-um/host_ldt-i386.h
deleted file mode 100644
index b27cb0a9dd30..000000000000
--- a/include/asm-um/host_ldt-i386.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ASM_HOST_LDT_I386_H
-#define __ASM_HOST_LDT_I386_H
-
-#include "asm/arch/ldt.h"
-
-/*
- * macros stolen from include/asm-i386/desc.h
- */
-#define LDT_entry_a(info) \
-	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-
-#define LDT_entry_b(info) \
-	(((info)->base_addr & 0xff000000) | \
-	(((info)->base_addr & 0x00ff0000) >> 16) | \
-	((info)->limit & 0xf0000) | \
-	(((info)->read_exec_only ^ 1) << 9) | \
-	((info)->contents << 10) | \
-	(((info)->seg_not_present ^ 1) << 15) | \
-	((info)->seg_32bit << 22) | \
-	((info)->limit_in_pages << 23) | \
-	((info)->useable << 20) | \
-	0x7000)
-
-#define LDT_empty(info) (\
-	(info)->base_addr == 0 && \
-	(info)->limit == 0 && \
-	(info)->contents == 0 && \
-	(info)->read_exec_only == 1 && \
-	(info)->seg_32bit == 0 && \
-	(info)->limit_in_pages == 0 && \
-	(info)->seg_not_present == 1 && \
-	(info)->useable == 0 )
-
-#endif
diff --git a/include/asm-um/host_ldt-x86_64.h b/include/asm-um/host_ldt-x86_64.h
deleted file mode 100644
index 74a63f7d9a90..000000000000
--- a/include/asm-um/host_ldt-x86_64.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __ASM_HOST_LDT_X86_64_H
-#define __ASM_HOST_LDT_X86_64_H
-
-#include "asm/arch/ldt.h"
-
-/*
- * macros stolen from include/asm-x86_64/desc.h
- */
-#define LDT_entry_a(info) \
-	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-
-/* Don't allow setting of the lm bit. It is useless anyways because
- * 64bit system calls require __USER_CS. */
-#define LDT_entry_b(info) \
-	(((info)->base_addr & 0xff000000) | \
-	(((info)->base_addr & 0x00ff0000) >> 16) | \
-	((info)->limit & 0xf0000) | \
-	(((info)->read_exec_only ^ 1) << 9) | \
-	((info)->contents << 10) | \
-	(((info)->seg_not_present ^ 1) << 15) | \
-	((info)->seg_32bit << 22) | \
-	((info)->limit_in_pages << 23) | \
-	((info)->useable << 20) | \
-	/* ((info)->lm << 21) | */ \
-	0x7000)
-
-#define LDT_empty(info) (\
-	(info)->base_addr == 0 && \
-	(info)->limit == 0 && \
-	(info)->contents == 0 && \
-	(info)->read_exec_only == 1 && \
-	(info)->seg_32bit == 0 && \
-	(info)->limit_in_pages == 0 && \
-	(info)->seg_not_present == 1 && \
-	(info)->useable == 0 && \
-	(info)->lm == 0)
-
-#endif
diff --git a/include/asm-um/hw_irq.h b/include/asm-um/hw_irq.h
deleted file mode 100644
index 1cf84cf5f21a..000000000000
--- a/include/asm-um/hw_irq.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_UM_HW_IRQ_H
-#define _ASM_UM_HW_IRQ_H
-
-#include "asm/irq.h"
-#include "asm/archparam.h"
-
-#endif
diff --git a/include/asm-um/ide.h b/include/asm-um/ide.h
deleted file mode 100644
index 3d1ccebcfbaf..000000000000
--- a/include/asm-um/ide.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_IDE_H
-#define __UM_IDE_H
-
-#include "asm/arch/ide.h"
-
-#endif
diff --git a/include/asm-um/io.h b/include/asm-um/io.h
deleted file mode 100644
index 44e8b8c772ae..000000000000
--- a/include/asm-um/io.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __UM_IO_H
-#define __UM_IO_H
-
-#include "asm/page.h"
-
-#define IO_SPACE_LIMIT 0xdeadbeef /* Sure hope nothing uses this */
-
-static inline int inb(unsigned long i) { return(0); }
-static inline void outb(char c, unsigned long i) { }
-
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-        return __pa((void *) address);
-}
-
-static inline void * phys_to_virt(unsigned long address)
-{
-        return __va(address);
-}
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)    __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)   p
-
-static inline void writeb(unsigned char b, volatile void __iomem *addr)
-{
-        *(volatile unsigned char __force *) addr = b;
-}
-static inline void writew(unsigned short b, volatile void __iomem *addr)
-{
-        *(volatile unsigned short __force *) addr = b;
-}
-static inline void writel(unsigned int b, volatile void __iomem *addr)
-{
-        *(volatile unsigned int __force *) addr = b;
-}
-static inline void writeq(unsigned int b, volatile void __iomem *addr)
-{
-        *(volatile unsigned long long __force *) addr = b;
-}
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define __raw_writeq writeq
-
-#endif
diff --git a/include/asm-um/ioctl.h b/include/asm-um/ioctl.h
deleted file mode 100644
index cc22157346db..000000000000
--- a/include/asm-um/ioctl.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_IOCTL_H
-#define __UM_IOCTL_H
-
-#include "asm/arch/ioctl.h"
-
-#endif
diff --git a/include/asm-um/ioctls.h b/include/asm-um/ioctls.h
deleted file mode 100644
index 9a1a017de6a7..000000000000
--- a/include/asm-um/ioctls.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_IOCTLS_H
-#define __UM_IOCTLS_H
-
-#include "asm/arch/ioctls.h"
-
-#endif
diff --git a/include/asm-um/ipcbuf.h b/include/asm-um/ipcbuf.h
deleted file mode 100644
index bb2ad31dc434..000000000000
--- a/include/asm-um/ipcbuf.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_IPCBUF_H
-#define __UM_IPCBUF_H
-
-#include "asm/arch/ipcbuf.h"
-
-#endif
diff --git a/include/asm-um/irq.h b/include/asm-um/irq.h
deleted file mode 100644
index 4a2037f8204b..000000000000
--- a/include/asm-um/irq.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __UM_IRQ_H
-#define __UM_IRQ_H
-
-#define TIMER_IRQ               0
-#define UMN_IRQ                 1
-#define CONSOLE_IRQ             2
-#define CONSOLE_WRITE_IRQ       3
-#define UBD_IRQ                 4
-#define UM_ETH_IRQ              5
-#define SSL_IRQ                 6
-#define SSL_WRITE_IRQ           7
-#define ACCEPT_IRQ              8
-#define MCONSOLE_IRQ            9
-#define WINCH_IRQ               10
-#define SIGIO_WRITE_IRQ         11
-#define TELNETD_IRQ             12
-#define XTERM_IRQ               13
-#define RANDOM_IRQ              14
-
-#define LAST_IRQ RANDOM_IRQ
-#define NR_IRQS (LAST_IRQ + 1)
-
-#endif
diff --git a/include/asm-um/irq_regs.h b/include/asm-um/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/include/asm-um/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/include/asm-um/irq_vectors.h b/include/asm-um/irq_vectors.h
deleted file mode 100644
index 62ddba6fc733..000000000000
--- a/include/asm-um/irq_vectors.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_IRQ_VECTORS_H
-#define __UM_IRQ_VECTORS_H
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/irqflags.h b/include/asm-um/irqflags.h
deleted file mode 100644
index 659b9abdfdba..000000000000
--- a/include/asm-um/irqflags.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_IRQFLAGS_H
-#define __UM_IRQFLAGS_H
-
-/* Empty for now */
-
-#endif
diff --git a/include/asm-um/kdebug.h b/include/asm-um/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/include/asm-um/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/include/asm-um/kmap_types.h b/include/asm-um/kmap_types.h
deleted file mode 100644
index 6c03acdb4405..000000000000
--- a/include/asm-um/kmap_types.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_KMAP_TYPES_H
-#define __UM_KMAP_TYPES_H
-
-/* No more #include "asm/arch/kmap_types.h" ! */
-
-enum km_type {
-        KM_BOUNCE_READ,
-        KM_SKB_SUNRPC_DATA,
-        KM_SKB_DATA_SOFTIRQ,
-        KM_USER0,
-        KM_USER1,
-        KM_UML_USERCOPY,        /* UML specific, for copy_*_user - used in do_op_one_page */
-        KM_BIO_SRC_IRQ,
-        KM_BIO_DST_IRQ,
-        KM_PTE0,
-        KM_PTE1,
-        KM_IRQ0,
-        KM_IRQ1,
-        KM_SOFTIRQ0,
-        KM_SOFTIRQ1,
-        KM_TYPE_NR
-};
-
-#endif
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
deleted file mode 100644
index 52af512f5e7d..000000000000
--- a/include/asm-um/ldt.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
- * Licensed under the GPL
- *
- * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
- */
-
-#ifndef __ASM_LDT_H
-#define __ASM_LDT_H
-
-#include <linux/mutex.h>
-#include "asm/host_ldt.h"
-
-extern void ldt_host_info(void);
-
-#define LDT_PAGES_MAX \
-        ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
-#define LDT_ENTRIES_PER_PAGE \
-        (PAGE_SIZE/LDT_ENTRY_SIZE)
-#define LDT_DIRECT_ENTRIES \
-        ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
-
-struct ldt_entry {
-        __u32 a;
-        __u32 b;
-};
-
-typedef struct uml_ldt {
-        int entry_count;
-        struct mutex lock;
-        union {
-                struct ldt_entry * pages[LDT_PAGES_MAX];
-                struct ldt_entry entries[LDT_DIRECT_ENTRIES];
-        } u;
-} uml_ldt_t;
-
-#endif
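
The sizing macros above are easier to follow with concrete numbers. A small
sketch, assuming the usual x86 values of 8192 LDT entries of 8 bytes each and
4 KiB pages; on a real build these come from asm/ldt.h and asm/page.h:

        #include <stdio.h>

        /* Assumed host values (see asm/ldt.h and asm/page.h). */
        #define LDT_ENTRIES     8192
        #define LDT_ENTRY_SIZE  8
        #define PAGE_SIZE       4096

        int main(void)
        {
                unsigned long pages_max = (LDT_ENTRIES * LDT_ENTRY_SIZE) / PAGE_SIZE;
                unsigned long per_page  = PAGE_SIZE / LDT_ENTRY_SIZE;
                unsigned long direct    = (pages_max * sizeof(void *)) / LDT_ENTRY_SIZE;

                /* 16 pages, 512 entries per page; 16 direct entries on a
                 * 64-bit host (8 on 32-bit), i.e. as many entries as fit in
                 * the storage the pages[] pointer array would occupy. */
                printf("pages_max=%lu per_page=%lu direct=%lu\n",
                       pages_max, per_page, direct);
                return 0;
        }

The union in uml_ldt then keeps a small LDT inline in entries[] and only falls
back to the array of page pointers once the table outgrows that space.
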
diff --git a/include/asm-um/linkage.h b/include/asm-um/linkage.h
deleted file mode 100644
index 7dfce37adc8b..000000000000
--- a/include/asm-um/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_UM_LINKAGE_H
-#define __ASM_UM_LINKAGE_H
-
-#include "asm/arch/linkage.h"
-
-#endif
diff --git a/include/asm-um/local.h b/include/asm-um/local.h
deleted file mode 100644
index 9a280c5bb609..000000000000
--- a/include/asm-um/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_LOCAL_H
-#define __UM_LOCAL_H
-
-#include "asm/arch/local.h"
-
-#endif
diff --git a/include/asm-um/locks.h b/include/asm-um/locks.h
deleted file mode 100644
index f80030a3ef5a..000000000000
--- a/include/asm-um/locks.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_LOCKS_H
-#define __UM_LOCKS_H
-
-#include "asm/arch/locks.h"
-
-#endif
diff --git a/include/asm-um/mca_dma.h b/include/asm-um/mca_dma.h
deleted file mode 100644
index e492e4ec1392..000000000000
--- a/include/asm-um/mca_dma.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef mca___UM_DMA_H
-#define mca___UM_DMA_H
-
-#include "asm/arch/mca_dma.h"
-
-#endif
diff --git a/include/asm-um/mman.h b/include/asm-um/mman.h
deleted file mode 100644
index b09ed523019b..000000000000
--- a/include/asm-um/mman.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_MMAN_H
-#define __UM_MMAN_H
-
-#include "asm/arch/mman.h"
-
-#endif
diff --git a/include/asm-um/mmu.h b/include/asm-um/mmu.h
deleted file mode 100644
index 2cf35c21d694..000000000000
--- a/include/asm-um/mmu.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __MMU_H
-#define __MMU_H
-
-#include "um_mmu.h"
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
deleted file mode 100644
index 54f42e8b0105..000000000000
--- a/include/asm-um/mmu_context.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_MMU_CONTEXT_H
-#define __UM_MMU_CONTEXT_H
-
-#include "linux/sched.h"
-#include "um_mmu.h"
-
-extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-extern void arch_exit_mmap(struct mm_struct *mm);
-
-#define get_mmu_context(task) do ; while(0)
-#define activate_context(tsk) do ; while(0)
-
-#define deactivate_mm(tsk,mm)   do { } while (0)
-
-extern void force_flush_all(void);
-
-static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
-{
-        /*
-         * This is called by fs/exec.c and sys_unshare()
-         * when the new ->mm is used for the first time.
-         */
-        __switch_mm(&new->context.id);
-        arch_dup_mmap(old, new);
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                             struct task_struct *tsk)
-{
-        unsigned cpu = smp_processor_id();
-
-        if(prev != next){
-                cpu_clear(cpu, prev->cpu_vm_mask);
-                cpu_set(cpu, next->cpu_vm_mask);
-                if(next != &init_mm)
-                        __switch_mm(&next->context.id);
-        }
-}
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-                                  struct task_struct *tsk)
-{
-}
-
-extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
-
-extern void destroy_context(struct mm_struct *mm);
-
-#endif
diff --git a/include/asm-um/module-generic.h b/include/asm-um/module-generic.h
deleted file mode 100644
index 5a265f56b174..000000000000
--- a/include/asm-um/module-generic.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_MODULE_GENERIC_H
-#define __UM_MODULE_GENERIC_H
-
-#include "asm/arch/module.h"
-
-#endif
diff --git a/include/asm-um/module-i386.h b/include/asm-um/module-i386.h
deleted file mode 100644
index 5ead4a0b2e35..000000000000
--- a/include/asm-um/module-i386.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __UM_MODULE_I386_H
-#define __UM_MODULE_I386_H
-
-/* UML is simple */
-struct mod_arch_specific
-{
-};
-
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-
-#endif
diff --git a/include/asm-um/module-x86_64.h b/include/asm-um/module-x86_64.h
deleted file mode 100644
index 35b5491d3e96..000000000000
--- a/include/asm-um/module-x86_64.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#ifndef __UM_MODULE_X86_64_H
-#define __UM_MODULE_X86_64_H
-
-/* UML is simple */
-struct mod_arch_specific
-{
-};
-
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/msgbuf.h b/include/asm-um/msgbuf.h
deleted file mode 100644
index 8ce8c30d5377..000000000000
--- a/include/asm-um/msgbuf.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_MSGBUF_H
-#define __UM_MSGBUF_H
-
-#include "asm/arch/msgbuf.h"
-
-#endif
diff --git a/include/asm-um/mtrr.h b/include/asm-um/mtrr.h
deleted file mode 100644
index 5e9cd12c578d..000000000000
--- a/include/asm-um/mtrr.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_MTRR_H
-#define __UM_MTRR_H
-
-#include "asm/arch/mtrr.h"
-
-#endif
diff --git a/include/asm-um/mutex.h b/include/asm-um/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/include/asm-um/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-um/nops.h b/include/asm-um/nops.h
deleted file mode 100644
index 814e9bf5dea6..000000000000
--- a/include/asm-um/nops.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_NOPS_H
-#define __UM_NOPS_H
-
-#include "asm/arch/nops.h"
-
-#endif
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
deleted file mode 100644
index a6df1f13d732..000000000000
--- a/include/asm-um/page.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- * Copyright 2003 PathScale, Inc.
- * Licensed under the GPL
- */
-
-#ifndef __UM_PAGE_H
-#define __UM_PAGE_H
-
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT      12
-#define PAGE_SIZE       (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK       (~(PAGE_SIZE-1))
-
-#ifndef __ASSEMBLY__
-
-struct page;
-
-#include <linux/types.h>
-#include <asm/vm-flags.h>
-
-/*
- * These are used to make use of C type-checking..
- */
-
-#define clear_page(page)        memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from)      memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#define clear_user_page(page, vaddr, pg)        clear_page(page)
-#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
-
-#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
-
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
-
-#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
-#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
-#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
-#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
-                              smp_wmb(); \
-                              (to).pte_low = (from).pte_low; })
-#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
-#define pte_set_val(pte, phys, prot) \
-        ({ (pte).pte_high = (phys) >> 32; \
-           (pte).pte_low = (phys) | pgprot_val(prot); })
-
-#define pmd_val(x)      ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-
-typedef unsigned long long pfn_t;
-typedef unsigned long long phys_t;
-
-#else
-
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x)      ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-#endif
-
-#define pte_val(x)      ((x).pte)
-
-
-#define pte_get_bits(p, bits) ((p).pte & (bits))
-#define pte_set_bits(p, bits) ((p).pte |= (bits))
-#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
-#define pte_copy(to, from) ((to).pte = (from).pte)
-#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
-#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
-
-typedef unsigned long pfn_t;
-typedef unsigned long phys_t;
-
-#endif
-
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-typedef struct page *pgtable_t;
-
-#define pgd_val(x)      ((x).pgd)
-#define pgprot_val(x)   ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
-
-extern unsigned long uml_physmem;
-
-#define PAGE_OFFSET (uml_physmem)
-#define KERNELBASE PAGE_OFFSET
-
-#define __va_space (8*1024*1024)
-
-#include "mem.h"
-
-/* Cast to unsigned long before casting to void * to avoid a warning from
- * mmap_kmem about cutting a long long down to a void *.  Not sure that
- * casting is the right thing, but 32-bit UML can't have 64-bit virtual
- * addresses
- */
-#define __pa(virt) to_phys((void *) (unsigned long) (virt))
-#define __va(phys) to_virt((unsigned long) (phys))
-
-#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
-#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
-
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
-
-#endif  /* __ASSEMBLY__ */
-#endif  /* __UM_PAGE_H */
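
A brief sketch of the two-word PTE packing in the 3-level/32-bit branch above:
the protection bits share the low word with the low 32 address bits, while
pte_high holds physical address bits 32 and up. The unsigned int fields below
stand in for the kernel's 32-bit unsigned long on i386, and the flag value is
illustrative:

        #include <stdio.h>

        typedef unsigned long long phys_t;

        struct pte { unsigned int pte_low, pte_high; };

        /* Mirrors pte_set_val()/pte_val() from the 3-level branch above. */
        static void pte_set_val(struct pte *p, phys_t phys, unsigned int prot)
        {
                p->pte_high = phys >> 32;
                p->pte_low = (unsigned int)(phys | prot); /* keeps low 32 bits */
        }

        static phys_t pte_val(struct pte p)
        {
                return p.pte_low | ((phys_t)p.pte_high << 32);
        }

        int main(void)
        {
                struct pte p;

                /* A 36-bit physical address plus illustrative low flag bits. */
                pte_set_val(&p, 0x123456000ULL, 0x067);
                printf("low=%08x high=%08x val=%llx\n",
                       p.pte_low, p.pte_high, pte_val(&p));
                return 0;
        }
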
diff --git a/include/asm-um/page_offset.h b/include/asm-um/page_offset.h
deleted file mode 100644
index 1c168dfbf359..000000000000
--- a/include/asm-um/page_offset.h
+++ /dev/null
@@ -1 +0,0 @@
-#define PAGE_OFFSET_RAW (uml_physmem)
diff --git a/include/asm-um/param.h b/include/asm-um/param.h
deleted file mode 100644
index e44f4e60d16d..000000000000
--- a/include/asm-um/param.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _UM_PARAM_H
-#define _UM_PARAM_H
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#ifdef __KERNEL__
-#define HZ CONFIG_HZ
-#define USER_HZ 100     /* .. some user interfaces are in "ticks" */
-#define CLOCKS_PER_SEC (USER_HZ)  /* frequency at which times() counts */
-#else
-#define HZ 100
-#endif
-
-#endif
diff --git a/include/asm-um/paravirt.h b/include/asm-um/paravirt.h
deleted file mode 100644
index 9d6aaad80b5f..000000000000
--- a/include/asm-um/paravirt.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_PARAVIRT_H
-#define __UM_PARAVIRT_H
-
-#include "asm/arch/paravirt.h"
-
-#endif
diff --git a/include/asm-um/pci.h b/include/asm-um/pci.h
deleted file mode 100644
index 59923199cdc3..000000000000
--- a/include/asm-um/pci.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __UM_PCI_H
-#define __UM_PCI_H
-
-#define PCI_DMA_BUS_IS_PHYS     (1)
-#define pcibios_scan_all_fns(a, b)      0
-
-#endif
diff --git a/include/asm-um/pda.h b/include/asm-um/pda.h
deleted file mode 100644
index 0d8bf33ffd42..000000000000
--- a/include/asm-um/pda.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#ifndef __UM_PDA_X86_64_H
-#define __UM_PDA_X86_64_H
-
-/* XXX */
-struct foo {
-        unsigned int __softirq_pending;
-        unsigned int __nmi_count;
-};
-
-extern struct foo me;
-
-#define read_pda(me) (&me)
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/percpu.h b/include/asm-um/percpu.h
deleted file mode 100644
index 5723e2aab8e7..000000000000
--- a/include/asm-um/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_PERCPU_H
-#define __UM_PERCPU_H
-
-#include "asm/arch/percpu.h"
-
-#endif
diff --git a/include/asm-um/pgalloc.h b/include/asm-um/pgalloc.h
deleted file mode 100644
index 9062a6e72241..000000000000
--- a/include/asm-um/pgalloc.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGALLOC_H
-#define __UM_PGALLOC_H
-
-#include "linux/mm.h"
-#include "asm/fixmap.h"
-
-#define pmd_populate_kernel(mm, pmd, pte) \
-        set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
-
-#define pmd_populate(mm, pmd, pte) \
-        set_pmd(pmd, __pmd(_PAGE_TABLE + \
-                ((unsigned long long)page_to_pfn(pte) << \
-                        (unsigned long long) PAGE_SHIFT)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-/*
- * Allocate and free page tables.
- */
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-        free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-        pgtable_page_dtor(pte);
-        __free_page(pte);
-}
-
-#define __pte_free_tlb(tlb,pte) \
-do { \
-        pgtable_page_dtor(pte); \
-        tlb_remove_page((tlb),(pte)); \
-} while (0)
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-        free_page((unsigned long)pmd);
-}
-
-#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-#endif
-
-#define check_pgt_cache()       do { } while (0)
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
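
As a concrete illustration of what pmd_populate_kernel() stores: the pmd entry
is just the physical address of the pte page with the _PAGE_TABLE flag bits
OR'd into its low bits. A minimal sketch, using the flag values defined in
asm-um/pgtable.h (shown later in this patch):

        #include <stdio.h>

        /* Flag values from asm-um/pgtable.h (later in this patch). */
        #define _PAGE_PRESENT   0x001
        #define _PAGE_RW        0x020
        #define _PAGE_USER      0x040
        #define _PAGE_ACCESSED  0x080
        #define _PAGE_DIRTY     0x100
        #define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                                 _PAGE_ACCESSED | _PAGE_DIRTY)

        int main(void)
        {
                unsigned long pte_page_phys = 0x1234000UL; /* page-aligned */
                unsigned long pmd_val = _PAGE_TABLE + pte_page_phys;

                /* The page-aligned address keeps the low 12 bits free for
                 * flags, so + and | are interchangeable here: 0x12341e1. */
                printf("pmd=%lx\n", pmd_val);
                return 0;
        }
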
diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h
deleted file mode 100644
index f534b73e753e..000000000000
--- a/include/asm-um/pgtable-2level.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_2LEVEL_H
-#define __UM_PGTABLE_2LEVEL_H
-
-#include <asm-generic/pgtable-nopmd.h>
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-
-#define PGDIR_SHIFT     22
-#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK      (~(PGDIR_SIZE-1))
-
-/*
- * entries per page directory level: the i386 is two-level, so
- * we don't really have any PMD directory physically.
- */
-#define PTRS_PER_PTE    1024
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define PTRS_PER_PGD    1024
-#define FIRST_USER_ADDRESS      0
-
-#define pte_ERROR(e) \
-        printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
-               pte_val(e))
-#define pgd_ERROR(e) \
-        printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
-               pgd_val(e))
-
-static inline int pgd_newpage(pgd_t pgd)      { return 0; }
-static inline void pgd_mkuptodate(pgd_t pgd)  { }
-
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-
-#define pte_pfn(x) phys_to_pfn(pte_val(x))
-#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
-
-/*
- * Bits 0 through 4 are taken
- */
-#define PTE_FILE_MAX_BITS       27
-
-#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
-
-#endif
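
The nonlinear-file encoding at the end of the header packs a page offset above
the five reserved flag bits; with 27 usable offset bits and 4 KiB pages that
covers file offsets up to 2^27 pages, i.e. 512 GiB. A round-trip sketch, using
the _PAGE_FILE value from asm-um/pgtable.h:

        #include <stdio.h>

        #define _PAGE_FILE      0x008   /* from asm-um/pgtable.h */

        /* Mirrors pgoff_to_pte()/pte_to_pgoff() above: the offset lives in
         * bits 5 and up, the low bits carry the flags. */
        static unsigned long pgoff_to_pte(unsigned long off)
        {
                return (off << 5) + _PAGE_FILE;
        }

        static unsigned long pte_to_pgoff(unsigned long pte)
        {
                return pte >> 5;
        }

        int main(void)
        {
                unsigned long off = 0x1234;
                unsigned long pte = pgoff_to_pte(off);

                /* The offset round-trips unchanged. */
                printf("pte=%lx off=%lx\n", pte, pte_to_pgoff(pte));
                return 0;
        }
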
diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h
deleted file mode 100644
index 0446f456b428..000000000000
--- a/include/asm-um/pgtable-3level.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright 2003 PathScale Inc
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_3LEVEL_H
-#define __UM_PGTABLE_3LEVEL_H
-
-#include <asm-generic/pgtable-nopud.h>
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-
-#ifdef CONFIG_64BIT
-#define PGDIR_SHIFT     30
-#else
-#define PGDIR_SHIFT     31
-#endif
-#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK      (~(PGDIR_SIZE-1))
-
-/* PMD_SHIFT determines the size of the area a second-level page table can
- * map
- */
-
-#define PMD_SHIFT       21
-#define PMD_SIZE        (1UL << PMD_SHIFT)
-#define PMD_MASK        (~(PMD_SIZE-1))
-
-/*
- * entries per page directory level
- */
-
-#define PTRS_PER_PTE 512
-#ifdef CONFIG_64BIT
-#define PTRS_PER_PMD 512
-#define PTRS_PER_PGD 512
-#else
-#define PTRS_PER_PMD 1024
-#define PTRS_PER_PGD 1024
-#endif
-
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS      0
-
-#define pte_ERROR(e) \
-        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
-               pte_val(e))
-#define pmd_ERROR(e) \
-        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
-               pmd_val(e))
-#define pgd_ERROR(e) \
-        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
-               pgd_val(e))
-
-#define pud_none(x)     (!(pud_val(x) & ~_PAGE_NEWPAGE))
-#define pud_bad(x)      ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-#define pud_present(x)  (pud_val(x) & _PAGE_PRESENT)
-#define pud_populate(mm, pud, pmd) \
-        set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
-
-#ifdef CONFIG_64BIT
-#define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
-#else
-#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
-#endif
-
-static inline int pgd_newpage(pgd_t pgd)
-{
-        return(pgd_val(pgd) & _PAGE_NEWPAGE);
-}
-
-static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
-
-#ifdef CONFIG_64BIT
-#define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
-#else
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-#endif
-
-struct mm_struct;
-extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
-
-static inline void pud_clear (pud_t *pud)
-{
-        set_pud(pud, __pud(_PAGE_NEWPAGE));
-}
-
-#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
-                        pmd_index(address))
-
-static inline unsigned long pte_pfn(pte_t pte)
-{
-        return phys_to_pfn(pte_val(pte));
-}
-
-static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
-{
-        pte_t pte;
-        phys_t phys = pfn_to_phys(page_nr);
-
-        pte_set_val(pte, phys, pgprot);
-        return pte;
-}
-
-static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
-{
-        return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
-}
-
-/*
- * Bits 0 through 3 are taken in the low part of the pte,
- * put the 32 bits of offset into the high part.
- */
-#define PTE_FILE_MAX_BITS       32
-
-#ifdef CONFIG_64BIT
-
-#define pte_to_pgoff(p) ((p).pte >> 32)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
-
-#else
-
-#define pte_to_pgoff(pte) ((pte).pte_high)
-
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-
-#endif
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
deleted file mode 100644
index 02db81b7b86e..000000000000
--- a/include/asm-um/pgtable.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_H
-#define __UM_PGTABLE_H
-
-#include <asm/fixmap.h>
-
-#define _PAGE_PRESENT   0x001
-#define _PAGE_NEWPAGE   0x002
-#define _PAGE_NEWPROT   0x004
-#define _PAGE_RW        0x020
-#define _PAGE_USER      0x040
-#define _PAGE_ACCESSED  0x080
-#define _PAGE_DIRTY     0x100
-/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE      0x008   /* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
-                                   pte_present gives true */
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-#include "asm/pgtable-3level.h"
-#else
-#include "asm/pgtable-2level.h"
-#endif
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-/* zero page used for uninitialized stuff */
-extern unsigned long *empty_zero_page;
-
-#define pgtable_cache_init() do ; while (0)
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-
-extern unsigned long end_iomem;
-
-#define VMALLOC_OFFSET  (__va_space)
-#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END    (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
-#endif
-
-#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-/*
- * The i386 can't do page protection for execute, and considers that the same
- * are read.
- * Also, write permissions imply read permissions. This is the closest we can
- * get..
- */
-#define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_COPY
-#define __P011  PAGE_COPY
-#define __P100  PAGE_READONLY
-#define __P101  PAGE_READONLY
-#define __P110  PAGE_COPY
-#define __P111  PAGE_COPY
-
-#define __S000  PAGE_NONE
-#define __S001  PAGE_READONLY
-#define __S010  PAGE_SHARED
-#define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY
-#define __S101  PAGE_READONLY
-#define __S110  PAGE_SHARED
-#define __S111  PAGE_SHARED
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-
-#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
-
-#define pmd_none(x)     (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
-#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)   do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
-
-#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
-#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
-
-#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
-#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
-
-#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
-
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-#define pte_present(x)  pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/*
- * =================================
- * Flags checking section.
- * =================================
- */
-
-static inline int pte_none(pte_t pte)
-{
-        return pte_is_zero(pte);
-}
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_read(pte_t pte)
-{
-        return((pte_get_bits(pte, _PAGE_USER)) &&
-               !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-static inline int pte_exec(pte_t pte){
-        return((pte_get_bits(pte, _PAGE_USER)) &&
-               !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-static inline int pte_write(pte_t pte)
-{
-        return((pte_get_bits(pte, _PAGE_RW)) &&
-               !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)
-{
-        return pte_get_bits(pte, _PAGE_FILE);
-}
-
-static inline int pte_dirty(pte_t pte)
-{
-        return pte_get_bits(pte, _PAGE_DIRTY);
-}
-
-static inline int pte_young(pte_t pte)
-{
-        return pte_get_bits(pte, _PAGE_ACCESSED);
-}
-
-static inline int pte_newpage(pte_t pte)
-{
-        return pte_get_bits(pte, _PAGE_NEWPAGE);
-}
-
-static inline int pte_newprot(pte_t pte)
-{
-        return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
-}
-
-static inline int pte_special(pte_t pte)
-{
-        return 0;
-}
-
-/*
- * =================================
- * Flags setting section.
- * =================================
- */
-
-static inline pte_t pte_mknewprot(pte_t pte)
-{
-        pte_set_bits(pte, _PAGE_NEWPROT);
-        return(pte);
-}
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
-        pte_clear_bits(pte, _PAGE_DIRTY);
-        return(pte);
-}
-
-static inline pte_t pte_mkold(pte_t pte)
-{
-        pte_clear_bits(pte, _PAGE_ACCESSED);
-        return(pte);
-}
-
-static inline pte_t pte_wrprotect(pte_t pte)
-{
-        pte_clear_bits(pte, _PAGE_RW);
-        return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkread(pte_t pte)
-{
-        pte_set_bits(pte, _PAGE_USER);
-        return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
-        pte_set_bits(pte, _PAGE_DIRTY);
-        return(pte);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
-        pte_set_bits(pte, _PAGE_ACCESSED);
-        return(pte);
-}
-
-static inline pte_t pte_mkwrite(pte_t pte)
-{
-        pte_set_bits(pte, _PAGE_RW);
-        return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkuptodate(pte_t pte)
-{
-        pte_clear_bits(pte, _PAGE_NEWPAGE);
-        if(pte_present(pte))
-                pte_clear_bits(pte, _PAGE_NEWPROT);
-        return(pte);
-}
-
-static inline pte_t pte_mknewpage(pte_t pte)
-{
-        pte_set_bits(pte, _PAGE_NEWPAGE);
-        return(pte);
-}
-
-static inline pte_t pte_mkspecial(pte_t pte)
-{
-        return(pte);
-}
-
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
-{
-        pte_copy(*pteptr, pteval);
-
-        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
-         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
-         * mapped pages.
-         */
-
-        *pteptr = pte_mknewpage(*pteptr);
-        if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
-#define __virt_to_page(virt) phys_to_page(__pa(virt))
-#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
-#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-
-#define mk_pte(page, pgprot) \
-        ({ pte_t pte;                                   \
-                                                        \
-        pte_set_val(pte, page_to_phys(page), (pgprot)); \
-        if (pte_present(pte))                           \
-                pte_mknewprot(pte_mknewpage(pte));      \
-        pte;})
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
-        return pte;
-}
-
-/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#define pmd_page_vaddr(pmd) \
-        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
-        ((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))
-#define pte_offset_map(dir, address) \
-        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-struct mm_struct;
-extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
-
-#define update_mmu_cache(vma,address,pte) do ; while (0)
-
-/* Encode and de-code a swap entry */
-#define __swp_type(x)                   (((x).val >> 4) & 0x3f)
-#define __swp_offset(x)                 ((x).val >> 11)
-
-#define __swp_entry(type, offset) \
-        ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
-#define __pte_to_swp_entry(pte) \
-        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
-#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
-
-#define kern_addr_valid(addr) (1)
-
-#include <asm-generic/pgtable.h>
-
-#endif
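
Finally, the swap-entry encoding at the bottom of the header: the swap type
occupies six bits starting at bit 4 and the offset starts at bit 11, leaving
the low bits clear so a swap entry can never look present. A round-trip
sketch of the same arithmetic:

        #include <stdio.h>

        /* Mirrors __swp_entry()/__swp_type()/__swp_offset() above. */
        static unsigned long swp_entry(unsigned long type, unsigned long offset)
        {
                return (type << 4) | (offset << 11);
        }

        static unsigned long swp_type(unsigned long val)   { return (val >> 4) & 0x3f; }
        static unsigned long swp_offset(unsigned long val) { return val >> 11; }

        int main(void)
        {
                unsigned long val = swp_entry(5, 0x1000);

                /* Type 5 and offset 0x1000 round-trip; bit 0 (_PAGE_PRESENT)
                 * stays clear, which marks the PTE as not present. */
                printf("val=%lx type=%lu off=%lx\n",
                       val, swp_type(val), swp_offset(val));
                return 0;
        }
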
diff --git a/include/asm-um/poll.h b/include/asm-um/poll.h
deleted file mode 100644
index 1eb4e1bc6383..000000000000
--- a/include/asm-um/poll.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_POLL_H
-#define __UM_POLL_H
-
-#include "asm/arch/poll.h"
-
-#endif
diff --git a/include/asm-um/posix_types.h b/include/asm-um/posix_types.h
deleted file mode 100644
index 32fb4198f644..000000000000
--- a/include/asm-um/posix_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_POSIX_TYPES_H
-#define __UM_POSIX_TYPES_H
-
-#include "asm/arch/posix_types.h"
-
-#endif
diff --git a/include/asm-um/prctl.h b/include/asm-um/prctl.h
deleted file mode 100644
index 64b6d099bdd5..000000000000
--- a/include/asm-um/prctl.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_PRCTL_H
-#define __UM_PRCTL_H
-
-#include "asm/arch/prctl.h"
-
-#endif
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h
deleted file mode 100644
index bed668824b5f..000000000000
--- a/include/asm-um/processor-generic.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PROCESSOR_GENERIC_H
-#define __UM_PROCESSOR_GENERIC_H
-
-struct pt_regs;
-
-struct task_struct;
-
-#include "asm/ptrace.h"
-#include "registers.h"
-#include "sysdep/archsetjmp.h"
-
-struct mm_struct;
-
-struct thread_struct {
-        struct task_struct *saved_task;
-        /*
-         * This flag is set to 1 before calling do_fork (and analyzed in
-         * copy_thread) to mark that we are begin called from userspace (fork /
-         * vfork / clone), and reset to 0 after. It is left to 0 when called
-         * from kernelspace (i.e. kernel_thread() or fork_idle(),
-         * as of 2.6.11).
-         */
-        int forking;
-        struct pt_regs regs;
-        int singlestep_syscall;
-        void *fault_addr;
-        jmp_buf *fault_catcher;
-        struct task_struct *prev_sched;
-        unsigned long temp_stack;
-        jmp_buf *exec_buf;
-        struct arch_thread arch;
-        jmp_buf switch_buf;
-        int mm_count;
-        struct {
-                int op;
-                union {
-                        struct {
-                                int pid;
-                        } fork, exec;
-                        struct {
-                                int (*proc)(void *);
-                                void *arg;
-                        } thread;
-                        struct {
-                                void (*proc)(void *);
-                                void *arg;
-                        } cb;
-                } u;
-        } request;
-};
-
-#define INIT_THREAD \
-{ \
-        .forking                = 0, \
-        .regs                   = EMPTY_REGS, \
-        .fault_addr             = NULL, \
-        .prev_sched             = NULL, \
-        .temp_stack             = 0, \
-        .exec_buf               = NULL, \
-        .arch                   = INIT_ARCH_THREAD, \
-        .request                = { 0 } \
-}
-
-extern struct task_struct *alloc_task_struct(void);
-
-static inline void release_thread(struct task_struct *task)
-{
-}
-
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-static inline void prepare_to_copy(struct task_struct *tsk)
-{
-}
-
-
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
-static inline void mm_copy_segments(struct mm_struct *from_mm,
-                                    struct mm_struct *new_mm)
-{
-}
-
-#define init_stack      (init_thread_union.stack)
-
-/*
- * User space process size: 3GB (default).
- */
-extern unsigned long task_size;
-
-#define TASK_SIZE (task_size)
-
-#undef STACK_TOP
-#undef STACK_TOP_MAX
-
-extern unsigned long stacksizelim;
-
-#define STACK_ROOM      (stacksizelim)
-#define STACK_TOP       (TASK_SIZE - 2 * PAGE_SIZE)
-#define STACK_TOP_MAX   STACK_TOP
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE      (0x40000000)
-
-extern void start_thread(struct pt_regs *regs, unsigned long entry,
-                         unsigned long stack);
-
-struct cpuinfo_um {
-        unsigned long loops_per_jiffy;
-        int ipi_pipe[2];
-};
-
-extern struct cpuinfo_um boot_cpu_data;
-
-#define my_cpu_data             cpu_data[smp_processor_id()]
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_um cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-
-#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-extern unsigned long get_wchan(struct task_struct *p);
-
-#endif
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h
deleted file mode 100644
index a2b7fe13fe1e..000000000000
--- a/include/asm-um/processor-i386.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PROCESSOR_I386_H
-#define __UM_PROCESSOR_I386_H
-
-#include "linux/string.h"
-#include "asm/host_ldt.h"
-#include "asm/segment.h"
-
-extern int host_has_cmov;
-
-/* include faultinfo structure */
-#include "sysdep/faultinfo.h"
-
-struct uml_tls_struct {
-        struct user_desc tls;
-        unsigned flushed:1;
-        unsigned present:1;
-};
-
-struct arch_thread {
-        struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-        unsigned long debugregs[8];
-        int debugregs_seq;
-        struct faultinfo faultinfo;
-};
-
-#define INIT_ARCH_THREAD { \
-        .tls_array              = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
-                                    { .present = 0, .flushed = 0 } }, \
-        .debugregs              = { [ 0 ... 7 ] = 0 }, \
-        .debugregs_seq          = 0, \
-        .faultinfo              = { 0, 0, 0 } \
-}
-
-static inline void arch_flush_thread(struct arch_thread *thread)
-{
-        /* Clear any TLS still hanging */
-        memset(&thread->tls_array, 0, sizeof(thread->tls_array));
-}
-
-static inline void arch_copy_thread(struct arch_thread *from,
-                                    struct arch_thread *to)
-{
-        memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
-}
-
-#include "asm/arch/user.h"
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-        __asm__ __volatile__("rep;nop": : :"memory");
-}
-
-#define cpu_relax()     rep_nop()
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter"). Stolen
- * from asm-i386/processor.h
- */
-#define current_text_addr() \
-        ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-
-#define ARCH_IS_STACKGROW(address) \
-        (address + 32 >= UPT_SP(&current->thread.regs.regs))
-
-#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP)
-#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP)
-#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP)
-
-#include "asm/processor-generic.h"
-
-#endif
diff --git a/include/asm-um/processor-ppc.h b/include/asm-um/processor-ppc.h
deleted file mode 100644
index 959323151229..000000000000
--- a/include/asm-um/processor-ppc.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __UM_PROCESSOR_PPC_H
-#define __UM_PROCESSOR_PPC_H
-
-#if defined(__ASSEMBLY__)
-
-#define CONFIG_PPC_MULTIPLATFORM
-#include "arch/processor.h"
-
-#else
-
-#include "asm/processor-generic.h"
-
-#endif
-
-#endif
diff --git a/include/asm-um/processor-x86_64.h b/include/asm-um/processor-x86_64.h
deleted file mode 100644
index e50933175e91..000000000000
--- a/include/asm-um/processor-x86_64.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#ifndef __UM_PROCESSOR_X86_64_H
-#define __UM_PROCESSOR_X86_64_H
-
-/* include faultinfo structure */
-#include "sysdep/faultinfo.h"
-
-struct arch_thread {
-        unsigned long debugregs[8];
-        int debugregs_seq;
-        unsigned long fs;
-        struct faultinfo faultinfo;
-};
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-        __asm__ __volatile__("rep;nop": : :"memory");
-}
-
-#define cpu_relax()   rep_nop()
-
-#define INIT_ARCH_THREAD { .debugregs           = { [ 0 ... 7 ] = 0 }, \
-                           .debugregs_seq       = 0, \
-                           .fs                  = 0, \
-                           .faultinfo           = { 0, 0, 0 } }
-
-static inline void arch_flush_thread(struct arch_thread *thread)
-{
-}
-
-static inline void arch_copy_thread(struct arch_thread *from,
-                                    struct arch_thread *to)
-{
-        to->fs = from->fs;
-}
-
-#include "asm/arch/user.h"
-
-#define current_text_addr() \
-        ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
-
-#define ARCH_IS_STACKGROW(address) \
-        (address + 128 >= UPT_SP(&current->thread.regs.regs))
-
-#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP)
-#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP)
-
-#include "asm/processor-generic.h"
-
-#endif
diff --git a/include/asm-um/ptrace-generic.h b/include/asm-um/ptrace-generic.h
deleted file mode 100644
index 315749705ea1..000000000000
--- a/include/asm-um/ptrace-generic.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PTRACE_GENERIC_H
-#define __UM_PTRACE_GENERIC_H
-
-#ifndef __ASSEMBLY__
-
-#include "asm/arch/ptrace-abi.h"
-#include <asm/user.h>
-#include "sysdep/ptrace.h"
-
-struct pt_regs {
-        struct uml_pt_regs regs;
-};
-
-#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
-
-#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
-#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
-
-#define PT_REG(r, reg) UPT_REG(&(r)->regs, reg)
-#define PT_REGS_SET(r, reg, val) UPT_SET(&(r)->regs, reg, val)
-
-#define PT_REGS_SET_SYSCALL_RETURN(r, res) \
-        UPT_SET_SYSCALL_RETURN(&(r)->regs, res)
-#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
-
-#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
-
-#define PT_REGS_SC(r) UPT_SC(&(r)->regs)
-
-#define instruction_pointer(regs) PT_REGS_IP(regs)
-
-struct task_struct;
-
-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
-                           long data);
-extern unsigned long getreg(struct task_struct *child, int regno);
-extern int putreg(struct task_struct *child, int regno, unsigned long value);
-extern int get_fpregs(struct user_i387_struct __user *buf,
-                      struct task_struct *child);
-extern int set_fpregs(struct user_i387_struct __user *buf,
-                      struct task_struct *child);
-
-extern void show_regs(struct pt_regs *regs);
-
-extern int arch_copy_tls(struct task_struct *new);
-extern void clear_flushed_tls(struct task_struct *task);
-
-#endif
-
-#endif
diff --git a/include/asm-um/ptrace-i386.h b/include/asm-um/ptrace-i386.h
deleted file mode 100644
index b2d24c5ea2c3..000000000000
--- a/include/asm-um/ptrace-i386.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PTRACE_I386_H
-#define __UM_PTRACE_I386_H
-
-#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
-
-#include "linux/compiler.h"
-#include "asm/ptrace-generic.h"
-#include <asm/user.h>
-#include "sysdep/ptrace.h"
-
-#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
-#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
-#define PT_REGS_ECX(r) UPT_ECX(&(r)->regs)
-#define PT_REGS_EDX(r) UPT_EDX(&(r)->regs)
-#define PT_REGS_ESI(r) UPT_ESI(&(r)->regs)
-#define PT_REGS_EDI(r) UPT_EDI(&(r)->regs)
-#define PT_REGS_EBP(r) UPT_EBP(&(r)->regs)
-
-#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
-#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
-#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
-#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
-#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
-#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
-
-#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
-
-#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r)
-#define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r)
-#define PT_FIX_EXEC_STACK(sp) do ; while(0)
-
-/* Cope with a conditional i386 definition. */
-#undef profile_pc
-#define profile_pc(regs) PT_REGS_IP(regs)
-
-#define user_mode(r) UPT_IS_USER(&(r)->regs)
-
-/*
- * Forward declaration to avoid including sysdep/tls.h, which causes a
- * circular include, and compilation failures.
- */
-struct user_desc;
-
-extern int get_fpxregs(struct user_fxsr_struct __user *buf,
-                       struct task_struct *child);
-extern int set_fpxregs(struct user_fxsr_struct __user *buf,
-                       struct task_struct *tsk);
-
-extern int ptrace_get_thread_area(struct task_struct *child, int idx,
-                                  struct user_desc __user *user_desc);
-
-extern int ptrace_set_thread_area(struct task_struct *child, int idx,
-                                  struct user_desc __user *user_desc);
-
-#endif
diff --git a/include/asm-um/ptrace-x86_64.h b/include/asm-um/ptrace-x86_64.h
deleted file mode 100644
index 4c475350dcf0..000000000000
--- a/include/asm-um/ptrace-x86_64.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#ifndef __UM_PTRACE_X86_64_H
-#define __UM_PTRACE_X86_64_H
-
-#include "linux/compiler.h"
-#include "asm/errno.h"
-#include "asm/host_ldt.h"
-
-#define __FRAME_OFFSETS /* Needed to get the R* macros */
-#include "asm/ptrace-generic.h"
-
-#define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64
-
-/* Also defined in sysdep/ptrace.h, so may already be defined. */
-#ifndef FS_BASE
-#define FS_BASE (21 * sizeof(unsigned long))
-#define GS_BASE (22 * sizeof(unsigned long))
-#define DS (23 * sizeof(unsigned long))
-#define ES (24 * sizeof(unsigned long))
-#define FS (25 * sizeof(unsigned long))
-#define GS (26 * sizeof(unsigned long))
-#endif
-
-#define PT_REGS_RBX(r) UPT_RBX(&(r)->regs)
-#define PT_REGS_RCX(r) UPT_RCX(&(r)->regs)
-#define PT_REGS_RDX(r) UPT_RDX(&(r)->regs)
-#define PT_REGS_RSI(r) UPT_RSI(&(r)->regs)
-#define PT_REGS_RDI(r) UPT_RDI(&(r)->regs)
-#define PT_REGS_RBP(r) UPT_RBP(&(r)->regs)
-#define PT_REGS_RAX(r) UPT_RAX(&(r)->regs)
-#define PT_REGS_R8(r) UPT_R8(&(r)->regs)
-#define PT_REGS_R9(r) UPT_R9(&(r)->regs)
-#define PT_REGS_R10(r) UPT_R10(&(r)->regs)
-#define PT_REGS_R11(r) UPT_R11(&(r)->regs)
-#define PT_REGS_R12(r) UPT_R12(&(r)->regs)
-#define PT_REGS_R13(r) UPT_R13(&(r)->regs)
-#define PT_REGS_R14(r) UPT_R14(&(r)->regs)
-#define PT_REGS_R15(r) UPT_R15(&(r)->regs)
-
-#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
-#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
-#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
-#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
-#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
-#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
-
-#define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs)
-#define PT_REGS_RIP(r) UPT_IP(&(r)->regs)
-#define PT_REGS_RSP(r) UPT_SP(&(r)->regs)
-
-#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
-
-/* XXX */
-#define user_mode(r) UPT_IS_USER(&(r)->regs)
-#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r)
-#define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r)
-
-#define PT_FIX_EXEC_STACK(sp) do ; while(0)
-
-#define profile_pc(regs) PT_REGS_IP(regs)
-
-static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
-                                         struct user_desc __user *user_desc)
-{
-        return -ENOSYS;
-}
-
-static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
-                                         struct user_desc __user *user_desc)
-{
-        return -ENOSYS;
-}
-
-extern long arch_prctl(struct task_struct *task, int code,
-                       unsigned long __user *addr);
-#endif
diff --git a/include/asm-um/required-features.h b/include/asm-um/required-features.h
deleted file mode 100644
index dfb967b2d2f3..000000000000
--- a/include/asm-um/required-features.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __UM_REQUIRED_FEATURES_H
-#define __UM_REQUIRED_FEATURES_H
-
-/*
- * Nothing to see, just need something for the i386 and x86_64 asm
- * headers to include.
- */
-
-#endif
diff --git a/include/asm-um/resource.h b/include/asm-um/resource.h
deleted file mode 100644
index c9b074001252..000000000000
--- a/include/asm-um/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_RESOURCE_H
-#define __UM_RESOURCE_H
-
-#include "asm/arch/resource.h"
-
-#endif
diff --git a/include/asm-um/rwlock.h b/include/asm-um/rwlock.h
deleted file mode 100644
index ff383aafc9fe..000000000000
--- a/include/asm-um/rwlock.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_RWLOCK_H
-#define __UM_RWLOCK_H
-
-#include "asm/arch/rwlock.h"
-
-#endif
diff --git a/include/asm-um/rwsem.h b/include/asm-um/rwsem.h
deleted file mode 100644
index b5fc449dc86b..000000000000
--- a/include/asm-um/rwsem.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_RWSEM_H__
-#define __UM_RWSEM_H__
-
-#include "asm/arch/rwsem.h"
-
-#endif
diff --git a/include/asm-um/scatterlist.h b/include/asm-um/scatterlist.h
deleted file mode 100644
index e92016aa2079..000000000000
--- a/include/asm-um/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_SCATTERLIST_H
-#define __UM_SCATTERLIST_H
-
-#include "asm/arch/scatterlist.h"
-
-#endif
diff --git a/include/asm-um/sections.h b/include/asm-um/sections.h
deleted file mode 100644
index 6b0231eefea8..000000000000
--- a/include/asm-um/sections.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _UM_SECTIONS_H
-#define _UM_SECTIONS_H
-
-/* nothing to see, move along */
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/include/asm-um/segment.h b/include/asm-um/segment.h
deleted file mode 100644
index 45183fcd10b6..000000000000
--- a/include/asm-um/segment.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __UM_SEGMENT_H
-#define __UM_SEGMENT_H
-
-extern int host_gdt_entry_tls_min;
-
-#define GDT_ENTRY_TLS_ENTRIES 3
-#define GDT_ENTRY_TLS_MIN host_gdt_entry_tls_min
-#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-
-#endif
diff --git a/include/asm-um/sembuf.h b/include/asm-um/sembuf.h
deleted file mode 100644
index 1ae82c14ff86..000000000000
--- a/include/asm-um/sembuf.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_SEMBUF_H
-#define __UM_SEMBUF_H
-
-#include "asm/arch/sembuf.h"
-
-#endif
diff --git a/include/asm-um/serial.h b/include/asm-um/serial.h
deleted file mode 100644
index 61ad07cfd2d5..000000000000
--- a/include/asm-um/serial.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_SERIAL_H
-#define __UM_SERIAL_H
-
-#include "asm/arch/serial.h"
-
-#endif
diff --git a/include/asm-um/setup.h b/include/asm-um/setup.h
deleted file mode 100644
index 99f086301f4c..000000000000
--- a/include/asm-um/setup.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef SETUP_H_INCLUDED
-#define SETUP_H_INCLUDED
-
-/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
- * command line, so this choice is ok.
- */
-
-#define COMMAND_LINE_SIZE 4096
-
-#endif          /* SETUP_H_INCLUDED */
diff --git a/include/asm-um/shmbuf.h b/include/asm-um/shmbuf.h
deleted file mode 100644
index 9684d4a284a6..000000000000
--- a/include/asm-um/shmbuf.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SHMBUF_H
2#define __UM_SHMBUF_H
3
4#include "asm/arch/shmbuf.h"
5
6#endif
diff --git a/include/asm-um/shmparam.h b/include/asm-um/shmparam.h
deleted file mode 100644
index 124c00174f6a..000000000000
--- a/include/asm-um/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SHMPARAM_H
2#define __UM_SHMPARAM_H
3
4#include "asm/arch/shmparam.h"
5
6#endif
diff --git a/include/asm-um/sigcontext-generic.h b/include/asm-um/sigcontext-generic.h
deleted file mode 100644
index 164587014c61..000000000000
--- a/include/asm-um/sigcontext-generic.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SIGCONTEXT_GENERIC_H
2#define __UM_SIGCONTEXT_GENERIC_H
3
4#include "asm/arch/sigcontext.h"
5
6#endif
diff --git a/include/asm-um/sigcontext-i386.h b/include/asm-um/sigcontext-i386.h
deleted file mode 100644
index b88333f488bb..000000000000
--- a/include/asm-um/sigcontext-i386.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SIGCONTEXT_I386_H
2#define __UM_SIGCONTEXT_I386_H
3
4#include "asm/sigcontext-generic.h"
5
6#endif
diff --git a/include/asm-um/sigcontext-ppc.h b/include/asm-um/sigcontext-ppc.h
deleted file mode 100644
index 2467f20eda99..000000000000
--- a/include/asm-um/sigcontext-ppc.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __UM_SIGCONTEXT_PPC_H
2#define __UM_SIGCONTEXT_PPC_H
3
4#define pt_regs sys_pt_regs
5
6#include "asm/sigcontext-generic.h"
7
8#undef pt_regs
9
10#endif
diff --git a/include/asm-um/sigcontext-x86_64.h b/include/asm-um/sigcontext-x86_64.h
deleted file mode 100644
index b600e0b01e48..000000000000
--- a/include/asm-um/sigcontext-x86_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/* Copyright 2003 PathScale, Inc.
2 *
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_SIGCONTEXT_X86_64_H
7#define __UM_SIGCONTEXT_X86_64_H
8
9#include "asm/sigcontext-generic.h"
10
11#endif
12
13/*
14 * Overrides for Emacs so that we follow Linus's tabbing style.
15 * Emacs will notice this stuff at the end of the file and automatically
16 * adjust the settings for this buffer only. This must remain at the end
17 * of the file.
18 * ---------------------------------------------------------------------------
19 * Local variables:
20 * c-file-style: "linux"
21 * End:
22 */
diff --git a/include/asm-um/siginfo.h b/include/asm-um/siginfo.h
deleted file mode 100644
index bec6124c36d0..000000000000
--- a/include/asm-um/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SIGINFO_H
2#define __UM_SIGINFO_H
3
4#include "asm/arch/siginfo.h"
5
6#endif
diff --git a/include/asm-um/signal.h b/include/asm-um/signal.h
deleted file mode 100644
index 52ed92cbce4c..000000000000
--- a/include/asm-um/signal.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_SIGNAL_H
7#define __UM_SIGNAL_H
8
9/* Need to kill the do_signal() declaration in the i386 signal.h */
10
11#define do_signal do_signal_renamed
12#include "asm/arch/signal.h"
13#undef do_signal
14#undef ptrace_signal_deliver
15
16#define ptrace_signal_deliver(regs, cookie) do {} while(0)
17
18#endif
19
20/*
21 * Overrides for Emacs so that we follow Linus's tabbing style.
22 * Emacs will notice this stuff at the end of the file and automatically
23 * adjust the settings for this buffer only. This must remain at the end
24 * of the file.
25 * ---------------------------------------------------------------------------
26 * Local variables:
27 * c-file-style: "linux"
28 * End:
29 */
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
deleted file mode 100644
index f27a96313174..000000000000
--- a/include/asm-um/smp.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef __UM_SMP_H
2#define __UM_SMP_H
3
4#ifdef CONFIG_SMP
5
6#include "linux/bitops.h"
7#include "asm/current.h"
8#include "linux/cpumask.h"
9
10#define raw_smp_processor_id() (current_thread->cpu)
11
12#define cpu_logical_map(n) (n)
13#define cpu_number_map(n) (n)
14#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */
15extern int hard_smp_processor_id(void);
16#define NO_PROC_ID -1
17
18extern int ncpus;
19
20
21static inline void smp_cpus_done(unsigned int maxcpus)
22{
23}
24
25extern struct task_struct *idle_threads[NR_CPUS];
26
27#else
28
29#define hard_smp_processor_id() 0
30
31#endif
32
33#endif
diff --git a/include/asm-um/socket.h b/include/asm-um/socket.h
deleted file mode 100644
index 67886e42ef04..000000000000
--- a/include/asm-um/socket.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SOCKET_H
2#define __UM_SOCKET_H
3
4#include "asm/arch/socket.h"
5
6#endif
diff --git a/include/asm-um/sockios.h b/include/asm-um/sockios.h
deleted file mode 100644
index 93ee1c55c4d6..000000000000
--- a/include/asm-um/sockios.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SOCKIOS_H
2#define __UM_SOCKIOS_H
3
4#include "asm/arch/sockios.h"
5
6#endif
diff --git a/include/asm-um/spinlock.h b/include/asm-um/spinlock.h
deleted file mode 100644
index f18c82886992..000000000000
--- a/include/asm-um/spinlock.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SPINLOCK_H
2#define __UM_SPINLOCK_H
3
4#include "asm/arch/spinlock.h"
5
6#endif
diff --git a/include/asm-um/spinlock_types.h b/include/asm-um/spinlock_types.h
deleted file mode 100644
index e5a94294bf82..000000000000
--- a/include/asm-um/spinlock_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SPINLOCK_TYPES_H
2#define __UM_SPINLOCK_TYPES_H
3
4#include "asm/arch/spinlock_types.h"
5
6#endif
diff --git a/include/asm-um/stat.h b/include/asm-um/stat.h
deleted file mode 100644
index 83ed85ad2539..000000000000
--- a/include/asm-um/stat.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_STAT_H
2#define __UM_STAT_H
3
4#include "asm/arch/stat.h"
5
6#endif
diff --git a/include/asm-um/statfs.h b/include/asm-um/statfs.h
deleted file mode 100644
index ba6fb53e7f87..000000000000
--- a/include/asm-um/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _UM_STATFS_H
2#define _UM_STATFS_H
3
4#include "asm/arch/statfs.h"
5
6#endif
diff --git a/include/asm-um/string.h b/include/asm-um/string.h
deleted file mode 100644
index 9a0571f6dd61..000000000000
--- a/include/asm-um/string.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef __UM_STRING_H
2#define __UM_STRING_H
3
4#include "asm/arch/string.h"
5#include "asm/archparam.h"
6
7#endif
diff --git a/include/asm-um/suspend.h b/include/asm-um/suspend.h
deleted file mode 100644
index f4e8e007f468..000000000000
--- a/include/asm-um/suspend.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef __UM_SUSPEND_H
2#define __UM_SUSPEND_H
3
4#endif
diff --git a/include/asm-um/system-generic.h b/include/asm-um/system-generic.h
deleted file mode 100644
index 5bcfa35e7a22..000000000000
--- a/include/asm-um/system-generic.h
+++ /dev/null
@@ -1,47 +0,0 @@
1#ifndef __UM_SYSTEM_GENERIC_H
2#define __UM_SYSTEM_GENERIC_H
3
4#include "asm/arch/system.h"
5
6#undef switch_to
7#undef local_irq_save
8#undef local_irq_restore
9#undef local_irq_disable
10#undef local_irq_enable
11#undef local_save_flags
12#undef local_irq_restore
13#undef local_irq_enable
14#undef local_irq_disable
15#undef local_irq_save
16#undef irqs_disabled
17
18extern void *switch_to(void *prev, void *next, void *last);
19
20extern int get_signals(void);
21extern int set_signals(int enable);
22extern int get_signals(void);
23extern void block_signals(void);
24extern void unblock_signals(void);
25
26#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
27 (flags) = get_signals(); } while(0)
28#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
29 set_signals(flags); } while(0)
30
31#define local_irq_save(flags) do { local_save_flags(flags); \
32 local_irq_disable(); } while(0)
33
34#define local_irq_enable() unblock_signals()
35#define local_irq_disable() block_signals()
36
37#define irqs_disabled() \
38({ \
39 unsigned long flags; \
40 local_save_flags(flags); \
41 (flags == 0); \
42})
43
44extern void *_switch_to(void *prev, void *next, void *last);
45#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
46
47#endif
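
The block above is the heart of UML's interrupt emulation: the host architecture's local_irq_* macros are undefined and rebuilt on top of host signal blocking. A stand-alone model of how a standard critical section expands under those definitions; the stubs replace UML's real host-signal code, and typecheck() is omitted:

    #include <stdio.h>

    static int signals_enabled = 1;     /* stand-in for UML's host-signal state */

    static int get_signals(void) { return signals_enabled; }
    static int set_signals(int enable)
    {
            int old = signals_enabled;
            signals_enabled = enable;
            return old;
    }
    static void block_signals(void) { signals_enabled = 0; }

    #define local_save_flags(flags)  do { (flags) = get_signals(); } while (0)
    #define local_irq_disable()      block_signals()
    #define local_irq_save(flags)    do { local_save_flags(flags); \
                                          local_irq_disable(); } while (0)
    #define local_irq_restore(flags) do { set_signals(flags); } while (0)

    int main(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* "interrupts" (signals) now blocked */
            printf("critical section, enabled=%d\n", get_signals());
            local_irq_restore(flags);       /* previous state comes back */
            printf("after restore, enabled=%d\n", get_signals());
            return 0;
    }
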
diff --git a/include/asm-um/system-i386.h b/include/asm-um/system-i386.h
deleted file mode 100644
index c436263e67ba..000000000000
--- a/include/asm-um/system-i386.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_SYSTEM_I386_H
2#define __UM_SYSTEM_I386_H
3
4#include "asm/system-generic.h"
5
6#endif
diff --git a/include/asm-um/system-ppc.h b/include/asm-um/system-ppc.h
deleted file mode 100644
index 17cde6640bf5..000000000000
--- a/include/asm-um/system-ppc.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __UM_SYSTEM_PPC_H
2#define __UM_SYSTEM_PPC_H
3
4#define _switch_to _ppc_switch_to
5
6#include "asm/arch/system.h"
7
8#undef _switch_to
9
10#include "asm/system-generic.h"
11
12#endif
diff --git a/include/asm-um/system-x86_64.h b/include/asm-um/system-x86_64.h
deleted file mode 100644
index e1b61b580734..000000000000
--- a/include/asm-um/system-x86_64.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_SYSTEM_X86_64_H
8#define __UM_SYSTEM_X86_64_H
9
10#include "asm/system-generic.h"
11
12#endif
13
14/*
15 * Overrides for Emacs so that we follow Linus's tabbing style.
16 * Emacs will notice this stuff at the end of the file and automatically
17 * adjust the settings for this buffer only. This must remain at the end
18 * of the file.
19 * ---------------------------------------------------------------------------
20 * Local variables:
21 * c-file-style: "linux"
22 * End:
23 */
diff --git a/include/asm-um/termbits.h b/include/asm-um/termbits.h
deleted file mode 100644
index 5739c608a2cb..000000000000
--- a/include/asm-um/termbits.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_TERMBITS_H
2#define __UM_TERMBITS_H
3
4#include "asm/arch/termbits.h"
5
6#endif
diff --git a/include/asm-um/termios.h b/include/asm-um/termios.h
deleted file mode 100644
index d9f97b303311..000000000000
--- a/include/asm-um/termios.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_TERMIOS_H
2#define __UM_TERMIOS_H
3
4#include "asm/arch/termios.h"
5
6#endif
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
deleted file mode 100644
index 62274ab9471f..000000000000
--- a/include/asm-um/thread_info.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_THREAD_INFO_H
7#define __UM_THREAD_INFO_H
8
9#ifndef __ASSEMBLY__
10
11#include <asm/types.h>
12#include <asm/page.h>
13#include <asm/uaccess.h>
14
15struct thread_info {
16 struct task_struct *task; /* main task structure */
17 struct exec_domain *exec_domain; /* execution domain */
18 unsigned long flags; /* low level flags */
19 __u32 cpu; /* current CPU */
20 int preempt_count; /* 0 => preemptable,
21 <0 => BUG */
22 mm_segment_t addr_limit; /* thread address space:
23 0-0xBFFFFFFF for user
24 0-0xFFFFFFFF for kernel */
25 struct restart_block restart_block;
26 struct thread_info *real_thread; /* Points to non-IRQ stack */
27};
28
29#define INIT_THREAD_INFO(tsk) \
30{ \
31 .task = &tsk, \
32 .exec_domain = &default_exec_domain, \
33 .flags = 0, \
34 .cpu = 0, \
35 .preempt_count = 1, \
36 .addr_limit = KERNEL_DS, \
37 .restart_block = { \
38 .fn = do_no_restart_syscall, \
39 }, \
40 .real_thread = NULL, \
41}
42
43#define init_thread_info (init_thread_union.thread_info)
44#define init_stack (init_thread_union.stack)
45
46#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
47/* how to get the thread information struct from C */
48static inline struct thread_info *current_thread_info(void)
49{
50 struct thread_info *ti;
51 unsigned long mask = THREAD_SIZE - 1;
52 ti = (struct thread_info *) (((unsigned long) &ti) & ~mask);
53 return ti;
54}
55
56#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
57
58#endif
59
60#define PREEMPT_ACTIVE 0x10000000
61
62#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
63#define TIF_SIGPENDING 1 /* signal pending */
64#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
65#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
66 * TIF_NEED_RESCHED
67 */
68#define TIF_RESTART_BLOCK 4
69#define TIF_MEMDIE 5
70#define TIF_SYSCALL_AUDIT 6
71#define TIF_RESTORE_SIGMASK 7
72#define TIF_FREEZE 16 /* is freezing for suspend */
73
74#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
75#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
76#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
77#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
78#define _TIF_MEMDIE (1 << TIF_MEMDIE)
79#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
80#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
81#define _TIF_FREEZE (1 << TIF_FREEZE)
82
83#endif
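
current_thread_info() above works only because every kernel stack is THREAD_SIZE-aligned, so masking any address on the stack with ~(THREAD_SIZE - 1) lands on the thread_info stored at the stack base. A runnable user-space model of the same mask trick, with THREAD_SIZE fixed at 8 KiB (KERNEL_STACK_ORDER 1 with 4 KiB pages) and the struct trimmed to one field:

    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE (8 * 1024UL)

    struct thread_info { int cpu; };    /* drastically trimmed stand-in */

    int main(void)
    {
            void *stack;

            /* the kernel guarantees this alignment for its stacks */
            if (posix_memalign(&stack, THREAD_SIZE, THREAD_SIZE))
                    return 1;

            struct thread_info *ti = stack; /* thread_info sits at the stack base */
            ti->cpu = 3;

            /* any address inside the stack recovers the base by masking */
            unsigned long in_stack = (unsigned long)stack + 5000;
            struct thread_info *found =
                    (struct thread_info *)(in_stack & ~(THREAD_SIZE - 1));

            printf("base=%p found=%p cpu=%d\n", stack, (void *)found, found->cpu);
            free(stack);
            return 0;
    }
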
diff --git a/include/asm-um/timex.h b/include/asm-um/timex.h
deleted file mode 100644
index 0f4ada08f748..000000000000
--- a/include/asm-um/timex.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __UM_TIMEX_H
2#define __UM_TIMEX_H
3
4typedef unsigned long cycles_t;
5
6static inline cycles_t get_cycles (void)
7{
8 return 0;
9}
10
11#define CLOCK_TICK_RATE (HZ)
12
13#endif
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h
deleted file mode 100644
index 5240fa1c5e08..000000000000
--- a/include/asm-um/tlb.h
+++ /dev/null
@@ -1,127 +0,0 @@
1#ifndef __UM_TLB_H
2#define __UM_TLB_H
3
4#include <linux/pagemap.h>
5#include <linux/swap.h>
6#include <asm/percpu.h>
7#include <asm/pgalloc.h>
8#include <asm/tlbflush.h>
9
10#define tlb_start_vma(tlb, vma) do { } while (0)
11#define tlb_end_vma(tlb, vma) do { } while (0)
12#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
13
14/* struct mmu_gather is an opaque type used by the mm code for passing around
15 * any data needed by arch specific code for tlb_remove_page.
16 */
17struct mmu_gather {
18 struct mm_struct *mm;
19 unsigned int need_flush; /* Really unmapped some ptes? */
20 unsigned long start;
21 unsigned long end;
22 unsigned int fullmm; /* non-zero means full mm flush */
23};
24
25/* Users of the generic TLB shootdown code must declare this storage space. */
26DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
27
28static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
29 unsigned long address)
30{
31 if (tlb->start > address)
32 tlb->start = address;
33 if (tlb->end < address + PAGE_SIZE)
34 tlb->end = address + PAGE_SIZE;
35}
36
37static inline void init_tlb_gather(struct mmu_gather *tlb)
38{
39 tlb->need_flush = 0;
40
41 tlb->start = TASK_SIZE;
42 tlb->end = 0;
43
44 if (tlb->fullmm) {
45 tlb->start = 0;
46 tlb->end = TASK_SIZE;
47 }
48}
49
50/* tlb_gather_mmu
51 * Return a pointer to an initialized struct mmu_gather.
52 */
53static inline struct mmu_gather *
54tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
55{
56 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
57
58 tlb->mm = mm;
59 tlb->fullmm = full_mm_flush;
60
61 init_tlb_gather(tlb);
62
63 return tlb;
64}
65
66extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
67 unsigned long end);
68
69static inline void
70tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
71{
72 if (!tlb->need_flush)
73 return;
74
75 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
76 init_tlb_gather(tlb);
77}
78
79/* tlb_finish_mmu
80 * Called at the end of the shootdown operation to free up any resources
81 * that were required.
82 */
83static inline void
84tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
85{
86 tlb_flush_mmu(tlb, start, end);
87
88 /* keep the page table cache within bounds */
89 check_pgt_cache();
90
91 put_cpu_var(mmu_gathers);
92}
93
94/* tlb_remove_page
95 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
96 * while handling the additional races in SMP caused by other CPUs
97 * caching valid mappings in their TLBs.
98 */
99static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
100{
101 tlb->need_flush = 1;
102 free_page_and_swap_cache(page);
103 return;
104}
105
106/**
107 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
108 *
109 * Record the fact that ptes were really unmapped in ->need_flush, so we can
110 * later optimise away the tlb invalidate. This helps when userspace is
111 * unmapping already-unmapped pages, which happens quite a lot.
112 */
113#define tlb_remove_tlb_entry(tlb, ptep, address) \
114 do { \
115 tlb->need_flush = 1; \
116 __tlb_remove_tlb_entry(tlb, ptep, address); \
117 } while (0)
118
119#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
120
121#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
122
123#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
124
125#define tlb_migrate_finish(mm) do {} while (0)
126
127#endif
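
The mmu_gather above exists to batch TLB work: each removed PTE merely widens a pending [start, end) range, and tlb_flush_mmu() issues a single ranged flush for the whole batch. A self-contained model of that range accumulation, with TASK_SIZE and PAGE_SIZE as stand-in constants:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define TASK_SIZE 0xc0000000UL

    struct mmu_gather { unsigned long start, end; int need_flush; };

    static void init_tlb_gather(struct mmu_gather *tlb)
    {
            tlb->need_flush = 0;
            tlb->start = TASK_SIZE;     /* inverted: the first page shrinks it */
            tlb->end = 0;
    }

    static void remove_tlb_entry(struct mmu_gather *tlb, unsigned long address)
    {
            tlb->need_flush = 1;
            if (tlb->start > address)
                    tlb->start = address;
            if (tlb->end < address + PAGE_SIZE)
                    tlb->end = address + PAGE_SIZE;
    }

    int main(void)
    {
            struct mmu_gather tlb;

            init_tlb_gather(&tlb);
            remove_tlb_entry(&tlb, 0x400000);   /* unmap two non-adjacent pages */
            remove_tlb_entry(&tlb, 0x800000);

            /* one flush of [start, end) instead of one flush per page */
            printf("flush range: %#lx..%#lx\n", tlb.start, tlb.end);
            return 0;
    }
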
diff --git a/include/asm-um/tlbflush.h b/include/asm-um/tlbflush.h
deleted file mode 100644
index 614f2c091178..000000000000
--- a/include/asm-um/tlbflush.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_TLBFLUSH_H
7#define __UM_TLBFLUSH_H
8
9#include <linux/mm.h>
10
11/*
12 * TLB flushing:
13 *
14 * - flush_tlb() flushes the current mm struct TLBs
15 * - flush_tlb_all() flushes all processes' TLBs
16 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
17 * - flush_tlb_page(vma, vmaddr) flushes one page
18 * - flush_tlb_kernel_vm() flushes the kernel vm area
19 * - flush_tlb_range(vma, start, end) flushes a range of pages
20 */
21
22extern void flush_tlb_all(void);
23extern void flush_tlb_mm(struct mm_struct *mm);
24extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
25 unsigned long end);
26extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
27extern void flush_tlb_kernel_vm(void);
28extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
29extern void __flush_tlb_one(unsigned long addr);
30
31#endif
diff --git a/include/asm-um/topology.h b/include/asm-um/topology.h
deleted file mode 100644
index 0905e4f21d42..000000000000
--- a/include/asm-um/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_UM_TOPOLOGY_H
2#define _ASM_UM_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif
diff --git a/include/asm-um/types.h b/include/asm-um/types.h
deleted file mode 100644
index 816e9590fc73..000000000000
--- a/include/asm-um/types.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_TYPES_H
2#define __UM_TYPES_H
3
4#include "asm/arch/types.h"
5
6#endif
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
deleted file mode 100644
index b9a895d6fa1d..000000000000
--- a/include/asm-um/uaccess.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_UACCESS_H
7#define __UM_UACCESS_H
8
9#include <asm/errno.h>
10#include <asm/processor.h>
11
12/* thread_info has a mm_segment_t in it, so put the definition up here */
13typedef struct {
14 unsigned long seg;
15} mm_segment_t;
16
17#include "linux/thread_info.h"
18
19#define VERIFY_READ 0
20#define VERIFY_WRITE 1
21
22/*
23 * The fs value determines whether argument validity checking should be
24 * performed or not. If get_fs() == USER_DS, checking is performed; with
25 * get_fs() == KERNEL_DS, it is bypassed.
26 *
27 * For historical reasons, these macros are grossly misnamed.
28 */
29
30#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
31
32#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
33#define USER_DS MAKE_MM_SEG(TASK_SIZE)
34
35#define get_ds() (KERNEL_DS)
36#define get_fs() (current_thread_info()->addr_limit)
37#define set_fs(x) (current_thread_info()->addr_limit = (x))
38
39#define segment_eq(a, b) ((a).seg == (b).seg)
40
41#include "um_uaccess.h"
42
43#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
44
45#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
46
47#define __copy_to_user_inatomic __copy_to_user
48#define __copy_from_user_inatomic __copy_from_user
49
50#define __get_user(x, ptr) \
51({ \
52 const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
53 __typeof__(x) __private_val; \
54 int __private_ret = -EFAULT; \
55 (x) = (__typeof__(*(__private_ptr)))0; \
56 if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
57 sizeof(*(__private_ptr))) == 0) { \
58 (x) = (__typeof__(*(__private_ptr))) __private_val; \
59 __private_ret = 0; \
60 } \
61 __private_ret; \
62})
63
64#define get_user(x, ptr) \
65({ \
66 const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
67 (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
68 __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
69})
70
71#define __put_user(x, ptr) \
72({ \
73 __typeof__(*(ptr)) __user *__private_ptr = ptr; \
74 __typeof__(*(__private_ptr)) __private_val; \
75 int __private_ret = -EFAULT; \
76 __private_val = (__typeof__(*(__private_ptr))) (x); \
77 if (__copy_to_user((__private_ptr), &__private_val, \
78 sizeof(*(__private_ptr))) == 0) { \
79 __private_ret = 0; \
80 } \
81 __private_ret; \
82})
83
84#define put_user(x, ptr) \
85({ \
86 __typeof__(*(ptr)) __user *private_ptr = (ptr); \
87 (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
88 __put_user(x, private_ptr) : -EFAULT); \
89})
90
91#define strlen_user(str) strnlen_user(str, ~0U >> 1)
92
93struct exception_table_entry
94{
95 unsigned long insn;
96 unsigned long fixup;
97};
98
99#endif
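
Everything in __get_user()/__put_user() above reduces to a sized copy plus a cast; there are no per-width special cases. A user-space model of the pattern, with memcpy() standing in for copy_from_user() (which, unlike memcpy(), can fault and report uncopied bytes):

    #include <stdio.h>
    #include <string.h>

    /* memcpy() stands in for copy_from_user(); the kernel macro yields
     * 0 on success or -EFAULT on a faulting access */
    #define model_get_user(x, ptr)                                  \
    ({                                                              \
            __typeof__(*(ptr)) __val;                               \
            memcpy(&__val, (ptr), sizeof(*(ptr)));                  \
            (x) = __val;                                            \
            0;                                                      \
    })

    int main(void)
    {
            int user_word = 42;     /* pretend this lives in user space */
            int x = 0;

            if (model_get_user(x, &user_word) == 0)
                    printf("fetched %d\n", x);
            return 0;
    }
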
diff --git a/include/asm-um/ucontext.h b/include/asm-um/ucontext.h
deleted file mode 100644
index 5c96c0e607f0..000000000000
--- a/include/asm-um/ucontext.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_UM_UCONTEXT_H
2#define _ASM_UM_UCONTEXT_H
3
4#include "asm/arch/ucontext.h"
5
6#endif
diff --git a/include/asm-um/unaligned.h b/include/asm-um/unaligned.h
deleted file mode 100644
index a47196974e39..000000000000
--- a/include/asm-um/unaligned.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_UM_UNALIGNED_H
2#define _ASM_UM_UNALIGNED_H
3
4#include "asm/arch/unaligned.h"
5
6#endif /* _ASM_UM_UNALIGNED_H */
diff --git a/include/asm-um/unistd.h b/include/asm-um/unistd.h
deleted file mode 100644
index 38bd9d94ee46..000000000000
--- a/include/asm-um/unistd.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright (C) 2000 - 2004 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef _UM_UNISTD_H_
7#define _UM_UNISTD_H_
8
9#include <linux/syscalls.h>
10#include "linux/resource.h"
11#include "asm/uaccess.h"
12
13extern int um_execve(const char *file, char *const argv[], char *const env[]);
14
15#ifdef __KERNEL__
16/* We get __ARCH_WANT_OLD_STAT and __ARCH_WANT_STAT64 from the base arch */
17#define __ARCH_WANT_OLD_READDIR
18#define __ARCH_WANT_SYS_ALARM
19#define __ARCH_WANT_SYS_GETHOSTNAME
20#define __ARCH_WANT_SYS_PAUSE
21#define __ARCH_WANT_SYS_SGETMASK
22#define __ARCH_WANT_SYS_SIGNAL
23#define __ARCH_WANT_SYS_TIME
24#define __ARCH_WANT_SYS_UTIME
25#define __ARCH_WANT_SYS_WAITPID
26#define __ARCH_WANT_SYS_SOCKETCALL
27#define __ARCH_WANT_SYS_FADVISE64
28#define __ARCH_WANT_SYS_GETPGRP
29#define __ARCH_WANT_SYS_LLSEEK
30#define __ARCH_WANT_SYS_NICE
31#define __ARCH_WANT_SYS_OLD_GETRLIMIT
32#define __ARCH_WANT_SYS_OLDUMOUNT
33#define __ARCH_WANT_SYS_SIGPENDING
34#define __ARCH_WANT_SYS_SIGPROCMASK
35#define __ARCH_WANT_SYS_RT_SIGACTION
36#define __ARCH_WANT_SYS_RT_SIGSUSPEND
37#endif
38
39#include "asm/arch/unistd.h"
40
41#endif /* _UM_UNISTD_H_*/
diff --git a/include/asm-um/user.h b/include/asm-um/user.h
deleted file mode 100644
index aae414ee1f5e..000000000000
--- a/include/asm-um/user.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_USER_H
2#define __UM_USER_H
3
4#include "asm/arch/user.h"
5
6#endif
diff --git a/include/asm-um/vga.h b/include/asm-um/vga.h
deleted file mode 100644
index 903a592b00d0..000000000000
--- a/include/asm-um/vga.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_VGA_H
2#define __UM_VGA_H
3
4#include "asm/arch/vga.h"
5
6#endif
diff --git a/include/asm-um/vm-flags-i386.h b/include/asm-um/vm-flags-i386.h
deleted file mode 100644
index e0d24c568dbc..000000000000
--- a/include/asm-um/vm-flags-i386.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __VM_FLAGS_I386_H
7#define __VM_FLAGS_I386_H
8
9#define VM_DATA_DEFAULT_FLAGS \
10 (VM_READ | VM_WRITE | \
11 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
12 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
13
14#endif
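
The computation above is easy to misread: VM_EXEC is granted to data mappings only when the task's personality carries READ_IMPLIES_EXEC. A runnable sketch of the same decision; the flag values mirror the kernel's but should be treated as illustrative:

    #include <stdio.h>

    #define VM_READ                 0x1
    #define VM_WRITE                0x2
    #define VM_EXEC                 0x4
    #define READ_IMPLIES_EXEC       0x0400000   /* as in linux/personality.h */

    static unsigned long data_flags(unsigned long personality)
    {
            return VM_READ | VM_WRITE |
                   ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0);
    }

    int main(void)
    {
            printf("plain=%#lx read-implies-exec=%#lx\n",
                   data_flags(0), data_flags(READ_IMPLIES_EXEC));
            return 0;
    }
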
diff --git a/include/asm-um/vm-flags-x86_64.h b/include/asm-um/vm-flags-x86_64.h
deleted file mode 100644
index 3213edfa7888..000000000000
--- a/include/asm-um/vm-flags-x86_64.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#ifndef __VM_FLAGS_X86_64_H
8#define __VM_FLAGS_X86_64_H
9
10#define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
11 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
12#define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \
13 VM_EXEC | VM_MAYREAD | VM_MAYWRITE | \
14 VM_MAYEXEC)
15
16extern unsigned long vm_stack_flags, vm_stack_flags32;
17extern unsigned long vm_data_default_flags, vm_data_default_flags32;
18extern unsigned long vm_force_exec32;
19
20#ifdef TIF_IA32
21#define VM_DATA_DEFAULT_FLAGS \
22 (test_thread_flag(TIF_IA32) ? vm_data_default_flags32 : \
23 vm_data_default_flags)
24
25#define VM_STACK_DEFAULT_FLAGS \
26 (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)
27#endif
28
29#define VM_DATA_DEFAULT_FLAGS vm_data_default_flags
30
31#define VM_STACK_DEFAULT_FLAGS vm_stack_flags
32
33#endif
diff --git a/include/asm-um/vm86.h b/include/asm-um/vm86.h
deleted file mode 100644
index 7801f82de1f4..000000000000
--- a/include/asm-um/vm86.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_VM86_H
2#define __UM_VM86_H
3
4#include "asm/arch/vm86.h"
5
6#endif
diff --git a/include/asm-um/xor.h b/include/asm-um/xor.h
deleted file mode 100644
index a19db3e17241..000000000000
--- a/include/asm-um/xor.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_XOR_H
2#define __UM_XOR_H
3
4#include "asm-generic/xor.h"
5
6#endif
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
deleted file mode 100644
index 4a8e80cdcfa5..000000000000
--- a/include/asm-x86/Kbuild
+++ /dev/null
@@ -1,24 +0,0 @@
1include include/asm-generic/Kbuild.asm
2
3header-y += boot.h
4header-y += bootparam.h
5header-y += debugreg.h
6header-y += ldt.h
7header-y += msr-index.h
8header-y += prctl.h
9header-y += ptrace-abi.h
10header-y += sigcontext32.h
11header-y += ucontext.h
12header-y += processor-flags.h
13
14unifdef-y += e820.h
15unifdef-y += ist.h
16unifdef-y += mce.h
17unifdef-y += msr.h
18unifdef-y += mtrr.h
19unifdef-y += posix_types_32.h
20unifdef-y += posix_types_64.h
21unifdef-y += unistd_32.h
22unifdef-y += unistd_64.h
23unifdef-y += vm86.h
24unifdef-y += vsyscall.h
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h
deleted file mode 100644
index f5705761a37b..000000000000
--- a/include/asm-x86/a.out-core.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/* a.out coredump register dumper
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef ASM_X86__A_OUT_CORE_H
13#define ASM_X86__A_OUT_CORE_H
14
15#ifdef __KERNEL__
16#ifdef CONFIG_X86_32
17
18#include <linux/user.h>
19#include <linux/elfcore.h>
20
21/*
22 * fill in the user structure for an a.out core dump
23 */
24static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
25{
26 u16 gs;
27
28/* changed the size calculations - should hopefully work better. lbt */
29 dump->magic = CMAGIC;
30 dump->start_code = 0;
31 dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
32 dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
33 dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
34 >> PAGE_SHIFT;
35 dump->u_dsize -= dump->u_tsize;
36 dump->u_ssize = 0;
37 dump->u_debugreg[0] = current->thread.debugreg0;
38 dump->u_debugreg[1] = current->thread.debugreg1;
39 dump->u_debugreg[2] = current->thread.debugreg2;
40 dump->u_debugreg[3] = current->thread.debugreg3;
41 dump->u_debugreg[4] = 0;
42 dump->u_debugreg[5] = 0;
43 dump->u_debugreg[6] = current->thread.debugreg6;
44 dump->u_debugreg[7] = current->thread.debugreg7;
45
46 if (dump->start_stack < TASK_SIZE)
47 dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
48 >> PAGE_SHIFT;
49
50 dump->regs.bx = regs->bx;
51 dump->regs.cx = regs->cx;
52 dump->regs.dx = regs->dx;
53 dump->regs.si = regs->si;
54 dump->regs.di = regs->di;
55 dump->regs.bp = regs->bp;
56 dump->regs.ax = regs->ax;
57 dump->regs.ds = (u16)regs->ds;
58 dump->regs.es = (u16)regs->es;
59 dump->regs.fs = (u16)regs->fs;
60 savesegment(gs, gs);
61 dump->regs.orig_ax = regs->orig_ax;
62 dump->regs.ip = regs->ip;
63 dump->regs.cs = (u16)regs->cs;
64 dump->regs.flags = regs->flags;
65 dump->regs.sp = regs->sp;
66 dump->regs.ss = (u16)regs->ss;
67
68 dump->u_fpvalid = dump_fpu(regs, &dump->i387);
69}
70
71#endif /* CONFIG_X86_32 */
72#endif /* __KERNEL__ */
73#endif /* ASM_X86__A_OUT_CORE_H */
diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h
deleted file mode 100644
index 0948748bc69c..000000000000
--- a/include/asm-x86/a.out.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef ASM_X86__A_OUT_H
2#define ASM_X86__A_OUT_H
3
4struct exec
5{
6 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
7 unsigned a_text; /* length of text, in bytes */
8 unsigned a_data; /* length of data, in bytes */
9 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
10 unsigned a_syms; /* length of symbol table data in file, in bytes */
11 unsigned a_entry; /* start address */
12 unsigned a_trsize; /* length of relocation info for text, in bytes */
13 unsigned a_drsize; /* length of relocation info for data, in bytes */
14};
15
16#define N_TRSIZE(a) ((a).a_trsize)
17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms)
19
20#endif /* ASM_X86__A_OUT_H */
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
deleted file mode 100644
index 392e17336be1..000000000000
--- a/include/asm-x86/acpi.h
+++ /dev/null
@@ -1,178 +0,0 @@
1#ifndef ASM_X86__ACPI_H
2#define ASM_X86__ACPI_H
3
4/*
5 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
7 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 */
26#include <acpi/pdc_intel.h>
27
28#include <asm/numa.h>
29#include <asm/processor.h>
30#include <asm/mmu.h>
31#include <asm/mpspec.h>
32
33#define COMPILER_DEPENDENT_INT64 long long
34#define COMPILER_DEPENDENT_UINT64 unsigned long long
35
36/*
37 * Calling conventions:
38 *
39 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
40 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
41 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
42 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
43 */
44#define ACPI_SYSTEM_XFACE
45#define ACPI_EXTERNAL_XFACE
46#define ACPI_INTERNAL_XFACE
47#define ACPI_INTERNAL_VAR_XFACE
48
49/* Asm macros */
50
51#define ACPI_ASM_MACROS
52#define BREAKPOINT3
53#define ACPI_DISABLE_IRQS() local_irq_disable()
54#define ACPI_ENABLE_IRQS() local_irq_enable()
55#define ACPI_FLUSH_CPU_CACHE() wbinvd()
56
57int __acpi_acquire_global_lock(unsigned int *lock);
58int __acpi_release_global_lock(unsigned int *lock);
59
60#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
61 ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
62
63#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
64 ((Acq) = __acpi_release_global_lock(&facs->global_lock))
65
66/*
67 * Math helper asm macros
68 */
69#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
70 asm("divl %2;" \
71 : "=a"(q32), "=d"(r32) \
72 : "r"(d32), \
73 "0"(n_lo), "1"(n_hi))
74
75
76#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
77 asm("shrl $1,%2 ;" \
78 "rcrl $1,%3;" \
79 : "=r"(n_hi), "=r"(n_lo) \
80 : "0"(n_hi), "1"(n_lo))
81
82#ifdef CONFIG_ACPI
83extern int acpi_lapic;
84extern int acpi_ioapic;
85extern int acpi_noirq;
86extern int acpi_strict;
87extern int acpi_disabled;
88extern int acpi_ht;
89extern int acpi_pci_disabled;
90extern int acpi_skip_timer_override;
91extern int acpi_use_timer_override;
92
93extern u8 acpi_sci_flags;
94extern int acpi_sci_override_gsi;
95void acpi_pic_sci_set_trigger(unsigned int, u16);
96
97static inline void disable_acpi(void)
98{
99 acpi_disabled = 1;
100 acpi_ht = 0;
101 acpi_pci_disabled = 1;
102 acpi_noirq = 1;
103}
104
105/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
106#define FIX_ACPI_PAGES 4
107
108extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
109
110static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
111static inline void acpi_disable_pci(void)
112{
113 acpi_pci_disabled = 1;
114 acpi_noirq_set();
115}
116extern int acpi_irq_balance_set(char *str);
117
118/* routines for saving/restoring kernel state */
119extern int acpi_save_state_mem(void);
120extern void acpi_restore_state_mem(void);
121
122extern unsigned long acpi_wakeup_address;
123
124/* early initialization routine */
125extern void acpi_reserve_bootmem(void);
126
127/*
128 * Check if the CPU can handle C2 and deeper
129 */
130static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
131{
132 /*
133 * Early models (<=5) of AMD Opterons are not supposed to go into
134 * C2 state.
135 *
136 * Steppings 0x0A and later are good
137 */
138 if (boot_cpu_data.x86 == 0x0F &&
139 boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
140 boot_cpu_data.x86_model <= 0x05 &&
141 boot_cpu_data.x86_mask < 0x0A)
142 return 1;
143 else if (boot_cpu_has(X86_FEATURE_AMDC1E))
144 return 1;
145 else
146 return max_cstate;
147}
148
149#else /* !CONFIG_ACPI */
150
151#define acpi_lapic 0
152#define acpi_ioapic 0
153static inline void acpi_noirq_set(void) { }
154static inline void acpi_disable_pci(void) { }
155static inline void disable_acpi(void) { }
156
157#endif /* !CONFIG_ACPI */
158
159#define ARCH_HAS_POWER_INIT 1
160
161struct bootnode;
162
163#ifdef CONFIG_ACPI_NUMA
164extern int acpi_numa;
165extern int acpi_scan_nodes(unsigned long start, unsigned long end);
166#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
167extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
168 int num_nodes);
169#else
170static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
171 int num_nodes)
172{
173}
174#endif
175
176#define acpi_unlazy_tlb(x) leave_mm(x)
177
178#endif /* ASM_X86__ACPI_H */
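
ACPI_DIV_64_BY_32() above feeds divl a 64-bit dividend split across two registers, which is only safe when n_hi < d32, i.e. when the quotient fits in 32 bits. The plain-C equivalent of what the asm computes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t n_hi = 0x1, n_lo = 0x80000000u, d32 = 0x10;
            uint64_t n = ((uint64_t)n_hi << 32) | n_lo;

            uint32_t q32 = (uint32_t)(n / d32);     /* quotient  -> %eax */
            uint32_t r32 = (uint32_t)(n % d32);     /* remainder -> %edx */

            printf("q=%#x r=%#x\n", (unsigned)q32, (unsigned)r32);
            return 0;
    }
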
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
deleted file mode 100644
index 3617fd4fcdf9..000000000000
--- a/include/asm-x86/agp.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef ASM_X86__AGP_H
2#define ASM_X86__AGP_H
3
4#include <asm/pgtable.h>
5#include <asm/cacheflush.h>
6
7/*
8 * Functions to keep the agpgart mappings coherent with the MMU. The
9 * GART gives the CPU a physical alias of pages in memory. The alias
10 * region is mapped uncacheable. Make sure there are no conflicting
11 * mappings with different cachability attributes for the same
12 * page. This avoids data corruption on some CPUs.
13 */
14
15#define map_page_into_agp(page) set_pages_uc(page, 1)
16#define unmap_page_from_agp(page) set_pages_wb(page, 1)
17
18/*
19 * Could use CLFLUSH here if the cpu supports it. But then it would
20 * need to be called for each cacheline of the whole page so it may
21 * not be worth it. Would need a page for it.
22 */
23#define flush_agp_cache() wbinvd()
24
25/* Convert a physical address to an address suitable for the GART. */
26#define phys_to_gart(x) (x)
27#define gart_to_phys(x) (x)
28
29/* GATT allocation. Returns/accepts GATT kernel virtual address. */
30#define alloc_gatt_pages(order) \
31 ((char *)__get_free_pages(GFP_KERNEL, (order)))
32#define free_gatt_pages(table, order) \
33 free_pages((unsigned long)(table), (order))
34
35#endif /* ASM_X86__AGP_H */
diff --git a/include/asm-x86/alternative-asm.h b/include/asm-x86/alternative-asm.h
deleted file mode 100644
index e2077d343c33..000000000000
--- a/include/asm-x86/alternative-asm.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifdef __ASSEMBLY__
2
3#ifdef CONFIG_X86_32
4# define X86_ALIGN .long
5#else
6# define X86_ALIGN .quad
7#endif
8
9#ifdef CONFIG_SMP
10 .macro LOCK_PREFIX
111: lock
12 .section .smp_locks,"a"
13 .align 4
14 X86_ALIGN 1b
15 .previous
16 .endm
17#else
18 .macro LOCK_PREFIX
19 .endm
20#endif
21
22#endif /* __ASSEMBLY__ */
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
deleted file mode 100644
index 22d3c9862bf3..000000000000
--- a/include/asm-x86/alternative.h
+++ /dev/null
@@ -1,183 +0,0 @@
1#ifndef ASM_X86__ALTERNATIVE_H
2#define ASM_X86__ALTERNATIVE_H
3
4#include <linux/types.h>
5#include <linux/stddef.h>
6#include <asm/asm.h>
7
8/*
9 * Alternative inline assembly for SMP.
10 *
11 * The LOCK_PREFIX macro defined here replaces the LOCK and
12 * LOCK_PREFIX macros used everywhere in the source tree.
13 *
14 * SMP alternatives use the same data structures as the other
15 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
16 * UP system running an SMP kernel. The existing apply_alternatives()
17 * works fine for patching an SMP kernel for UP.
18 *
19 * The SMP alternative tables can be kept after boot and contain both
20 * UP and SMP versions of the instructions to allow switching back to
21 * SMP at runtime, when hotplugging in a new CPU, which is especially
22 * useful in virtualized environments.
23 *
24 * The very common lock prefix is handled as a special case in a
25 * separate table which is a pure address list without replacement ptr
26 * and size information. That keeps the table sizes small.
27 */
28
29#ifdef CONFIG_SMP
30#define LOCK_PREFIX \
31 ".section .smp_locks,\"a\"\n" \
32 _ASM_ALIGN "\n" \
33 _ASM_PTR "661f\n" /* address */ \
34 ".previous\n" \
35 "661:\n\tlock; "
36
37#else /* ! CONFIG_SMP */
38#define LOCK_PREFIX ""
39#endif
40
41/* This must be included *after* the definition of LOCK_PREFIX */
42#include <asm/cpufeature.h>
43
44struct alt_instr {
45 u8 *instr; /* original instruction */
46 u8 *replacement;
47 u8 cpuid; /* cpuid bit set for replacement */
48 u8 instrlen; /* length of original instruction */
49 u8 replacementlen; /* length of new instruction, <= instrlen */
50 u8 pad1;
51#ifdef CONFIG_X86_64
52 u32 pad2;
53#endif
54};
55
56extern void alternative_instructions(void);
57extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
58
59struct module;
60
61#ifdef CONFIG_SMP
62extern void alternatives_smp_module_add(struct module *mod, char *name,
63 void *locks, void *locks_end,
64 void *text, void *text_end);
65extern void alternatives_smp_module_del(struct module *mod);
66extern void alternatives_smp_switch(int smp);
67#else
68static inline void alternatives_smp_module_add(struct module *mod, char *name,
69 void *locks, void *locks_end,
70 void *text, void *text_end) {}
71static inline void alternatives_smp_module_del(struct module *mod) {}
72static inline void alternatives_smp_switch(int smp) {}
73#endif /* CONFIG_SMP */
74
75const unsigned char *const *find_nop_table(void);
76
77/*
78 * Alternative instructions for different CPU types or capabilities.
79 *
80 * This allows the use of optimized instructions even on generic binary
81 * kernels.
82 *
83 * The length of oldinstr must be greater than or equal to the length of
84 * newinstr; it can be padded with nops as needed.
85 *
86 * For non-barrier-like inlines please define new variants
87 * without volatile and memory clobber.
88 */
89#define alternative(oldinstr, newinstr, feature) \
90 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
91 ".section .altinstructions,\"a\"\n" \
92 _ASM_ALIGN "\n" \
93 _ASM_PTR "661b\n" /* label */ \
94 _ASM_PTR "663f\n" /* new instruction */ \
95 " .byte %c0\n" /* feature bit */ \
96 " .byte 662b-661b\n" /* sourcelen */ \
97 " .byte 664f-663f\n" /* replacementlen */ \
98 ".previous\n" \
99 ".section .altinstr_replacement,\"ax\"\n" \
100 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
101 ".previous" :: "i" (feature) : "memory")
102
103/*
104 * Alternative inline assembly with input.
105 *
106 * Peculiarities:
107 * No memory clobber here.
108 * Argument numbers start with 1.
109 * It is best to use constraints that are fixed size (like (%1) ... "r").
110 * If you use variable-sized constraints like "m" or "g" in the
111 * replacement, make sure to pad to the worst-case length.
112 */
113#define alternative_input(oldinstr, newinstr, feature, input...) \
114 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
115 ".section .altinstructions,\"a\"\n" \
116 _ASM_ALIGN "\n" \
117 _ASM_PTR "661b\n" /* label */ \
118 _ASM_PTR "663f\n" /* new instruction */ \
119 " .byte %c0\n" /* feature bit */ \
120 " .byte 662b-661b\n" /* sourcelen */ \
121 " .byte 664f-663f\n" /* replacementlen */ \
122 ".previous\n" \
123 ".section .altinstr_replacement,\"ax\"\n" \
124 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
125 ".previous" :: "i" (feature), ##input)
126
127/* Like alternative_input, but with a single output argument */
128#define alternative_io(oldinstr, newinstr, feature, output, input...) \
129 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
130 ".section .altinstructions,\"a\"\n" \
131 _ASM_ALIGN "\n" \
132 _ASM_PTR "661b\n" /* label */ \
133 _ASM_PTR "663f\n" /* new instruction */ \
134 " .byte %c[feat]\n" /* feature bit */ \
135 " .byte 662b-661b\n" /* sourcelen */ \
136 " .byte 664f-663f\n" /* replacementlen */ \
137 ".previous\n" \
138 ".section .altinstr_replacement,\"ax\"\n" \
139 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
140 ".previous" : output : [feat] "i" (feature), ##input)
141
142/*
143 * Use this macro if you need more than one output parameter
144 * in alternative_io.
145 */
146#define ASM_OUTPUT2(a, b) a, b
147
148struct paravirt_patch_site;
149#ifdef CONFIG_PARAVIRT
150void apply_paravirt(struct paravirt_patch_site *start,
151 struct paravirt_patch_site *end);
152#else
153static inline void apply_paravirt(struct paravirt_patch_site *start,
154 struct paravirt_patch_site *end)
155{}
156#define __parainstructions NULL
157#define __parainstructions_end NULL
158#endif
159
160extern void add_nops(void *insns, unsigned int len);
161
162/*
163 * Clear and restore the kernel write-protection flag on the local CPU.
164 * Allows the kernel to edit read-only pages.
165 * Side-effect: any interrupt handler running between save and restore will have
166 * the ability to write to read-only pages.
167 *
168 * Warning:
169 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
170 * no thread can be preempted in the instructions being modified (no iret to an
171 * invalid instruction possible) or if the instructions are changed from a
172 * consistent state to another consistent state atomically.
173 * More care must be taken when modifying code in the SMP case because of
174 * Intel's errata.
175 * On the local CPU you need to be protected against NMI or MCE handlers seeing an
176 * inconsistent instruction while you patch.
177 * The _early version expects the memory to already be RW.
178 */
179
180extern void *text_poke(void *addr, const void *opcode, size_t len);
181extern void *text_poke_early(void *addr, const void *opcode, size_t len);
182
183#endif /* ASM_X86__ALTERNATIVE_H */
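
A call site hands alternative() both instruction variants and a feature bit; at boot, apply_alternatives() rewrites the old instruction in place on CPUs that advertise the feature. A sketch modeled on this era's 32-bit memory-barrier definition (treat the exact instructions as illustrative):

    /* baseline locked add (5 bytes) vs. mfence (3 bytes): the old
     * instruction is the longer one, so replacementlen <= instrlen holds */
    #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)

On a pre-SSE2 CPU the locked add simply runs as emitted; on anything newer the site is patched once at boot, with nop padding making up the length difference.
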
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
deleted file mode 100644
index 041d0db7da27..000000000000
--- a/include/asm-x86/amd_iommu.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef ASM_X86__AMD_IOMMU_H
21#define ASM_X86__AMD_IOMMU_H
22
23#include <linux/irqreturn.h>
24
25#ifdef CONFIG_AMD_IOMMU
26extern int amd_iommu_init(void);
27extern int amd_iommu_init_dma_ops(void);
28extern void amd_iommu_detect(void);
29extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
30#else
31static inline int amd_iommu_init(void) { return -ENODEV; }
32static inline void amd_iommu_detect(void) { }
33#endif
34
35#endif /* ASM_X86__AMD_IOMMU_H */
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
deleted file mode 100644
index b3085869a17b..000000000000
--- a/include/asm-x86/amd_iommu_types.h
+++ /dev/null
@@ -1,404 +0,0 @@
1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef ASM_X86__AMD_IOMMU_TYPES_H
21#define ASM_X86__AMD_IOMMU_TYPES_H
22
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/spinlock.h>
26
27/*
28 * some size calculation constants
29 */
30#define DEV_TABLE_ENTRY_SIZE 32
31#define ALIAS_TABLE_ENTRY_SIZE 2
32#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
33
34/* Length of the MMIO region for the AMD IOMMU */
35#define MMIO_REGION_LENGTH 0x4000
36
37/* Capability offsets used by the driver */
38#define MMIO_CAP_HDR_OFFSET 0x00
39#define MMIO_RANGE_OFFSET 0x0c
40#define MMIO_MISC_OFFSET 0x10
41
42/* Masks, shifts and macros to parse the device range capability */
43#define MMIO_RANGE_LD_MASK 0xff000000
44#define MMIO_RANGE_FD_MASK 0x00ff0000
45#define MMIO_RANGE_BUS_MASK 0x0000ff00
46#define MMIO_RANGE_LD_SHIFT 24
47#define MMIO_RANGE_FD_SHIFT 16
48#define MMIO_RANGE_BUS_SHIFT 8
49#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
50#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
51#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
52#define MMIO_MSI_NUM(x) ((x) & 0x1f)
53
54/* Flag masks for the AMD IOMMU exclusion range */
55#define MMIO_EXCL_ENABLE_MASK 0x01ULL
56#define MMIO_EXCL_ALLOW_MASK 0x02ULL
57
58/* Used offsets into the MMIO space */
59#define MMIO_DEV_TABLE_OFFSET 0x0000
60#define MMIO_CMD_BUF_OFFSET 0x0008
61#define MMIO_EVT_BUF_OFFSET 0x0010
62#define MMIO_CONTROL_OFFSET 0x0018
63#define MMIO_EXCL_BASE_OFFSET 0x0020
64#define MMIO_EXCL_LIMIT_OFFSET 0x0028
65#define MMIO_CMD_HEAD_OFFSET 0x2000
66#define MMIO_CMD_TAIL_OFFSET 0x2008
67#define MMIO_EVT_HEAD_OFFSET 0x2010
68#define MMIO_EVT_TAIL_OFFSET 0x2018
69#define MMIO_STATUS_OFFSET 0x2020
70
71/* MMIO status bits */
72#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
73
74/* event logging constants */
75#define EVENT_ENTRY_SIZE 0x10
76#define EVENT_TYPE_SHIFT 28
77#define EVENT_TYPE_MASK 0xf
78#define EVENT_TYPE_ILL_DEV 0x1
79#define EVENT_TYPE_IO_FAULT 0x2
80#define EVENT_TYPE_DEV_TAB_ERR 0x3
81#define EVENT_TYPE_PAGE_TAB_ERR 0x4
82#define EVENT_TYPE_ILL_CMD 0x5
83#define EVENT_TYPE_CMD_HARD_ERR 0x6
84#define EVENT_TYPE_IOTLB_INV_TO 0x7
85#define EVENT_TYPE_INV_DEV_REQ 0x8
86#define EVENT_DEVID_MASK 0xffff
87#define EVENT_DEVID_SHIFT 0
88#define EVENT_DOMID_MASK 0xffff
89#define EVENT_DOMID_SHIFT 0
90#define EVENT_FLAGS_MASK 0xfff
91#define EVENT_FLAGS_SHIFT 0x10
92
93/* feature control bits */
94#define CONTROL_IOMMU_EN 0x00ULL
95#define CONTROL_HT_TUN_EN 0x01ULL
96#define CONTROL_EVT_LOG_EN 0x02ULL
97#define CONTROL_EVT_INT_EN 0x03ULL
98#define CONTROL_COMWAIT_EN 0x04ULL
99#define CONTROL_PASSPW_EN 0x08ULL
100#define CONTROL_RESPASSPW_EN 0x09ULL
101#define CONTROL_COHERENT_EN 0x0aULL
102#define CONTROL_ISOC_EN 0x0bULL
103#define CONTROL_CMDBUF_EN 0x0cULL
104#define CONTROL_PPFLOG_EN 0x0dULL
105#define CONTROL_PPFINT_EN 0x0eULL
106
107/* command specific defines */
108#define CMD_COMPL_WAIT 0x01
109#define CMD_INV_DEV_ENTRY 0x02
110#define CMD_INV_IOMMU_PAGES 0x03
111
112#define CMD_COMPL_WAIT_STORE_MASK 0x01
113#define CMD_COMPL_WAIT_INT_MASK 0x02
114#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
115#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
116
117#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
118
119/* macros and definitions for device table entries */
120#define DEV_ENTRY_VALID 0x00
121#define DEV_ENTRY_TRANSLATION 0x01
122#define DEV_ENTRY_IR 0x3d
123#define DEV_ENTRY_IW 0x3e
124#define DEV_ENTRY_NO_PAGE_FAULT 0x62
125#define DEV_ENTRY_EX 0x67
126#define DEV_ENTRY_SYSMGT1 0x68
127#define DEV_ENTRY_SYSMGT2 0x69
128#define DEV_ENTRY_INIT_PASS 0xb8
129#define DEV_ENTRY_EINT_PASS 0xb9
130#define DEV_ENTRY_NMI_PASS 0xba
131#define DEV_ENTRY_LINT0_PASS 0xbe
132#define DEV_ENTRY_LINT1_PASS 0xbf
133#define DEV_ENTRY_MODE_MASK 0x07
134#define DEV_ENTRY_MODE_SHIFT 0x09
135
136/* constants to configure the command buffer */
137#define CMD_BUFFER_SIZE 8192
138#define CMD_BUFFER_ENTRIES 512
139#define MMIO_CMD_SIZE_SHIFT 56
140#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
141
142/* constants for event buffer handling */
143#define EVT_BUFFER_SIZE 8192 /* 512 entries */
144#define EVT_LEN_MASK (0x9ULL << 56)
145
146#define PAGE_MODE_1_LEVEL 0x01
147#define PAGE_MODE_2_LEVEL 0x02
148#define PAGE_MODE_3_LEVEL 0x03
149
150#define IOMMU_PDE_NL_0 0x000ULL
151#define IOMMU_PDE_NL_1 0x200ULL
152#define IOMMU_PDE_NL_2 0x400ULL
153#define IOMMU_PDE_NL_3 0x600ULL
154
155#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
156#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
157#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
158
159#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
160#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
161#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
162
163#define IOMMU_PTE_P (1ULL << 0)
164#define IOMMU_PTE_TV (1ULL << 1)
165#define IOMMU_PTE_U (1ULL << 59)
166#define IOMMU_PTE_FC (1ULL << 60)
167#define IOMMU_PTE_IR (1ULL << 61)
168#define IOMMU_PTE_IW (1ULL << 62)
169
170#define IOMMU_L1_PDE(address) \
171 ((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
172#define IOMMU_L2_PDE(address) \
173 ((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
174
175#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
176#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
177#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
178#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
179
180#define IOMMU_PROT_MASK 0x03
181#define IOMMU_PROT_IR 0x01
182#define IOMMU_PROT_IW 0x02
183
184/* IOMMU capabilities */
185#define IOMMU_CAP_IOTLB 24
186#define IOMMU_CAP_NPCACHE 26
187
188#define MAX_DOMAIN_ID 65536
189
190/* FIXME: move this macro to <linux/pci.h> */
191#define PCI_BUS(x) (((x) >> 8) & 0xff)
192
193/*
194 * This structure contains generic data for IOMMU protection domains
195 * independent of their use.
196 */
197struct protection_domain {
198 spinlock_t lock; /* mostly used to lock the page table*/
199 u16 id; /* the domain id written to the device table */
200 int mode; /* paging mode (0-6 levels) */
201 u64 *pt_root; /* page table root pointer */
202 void *priv; /* private data */
203};
204
205/*
206 * Data container for a dma_ops specific protection domain
207 */
208struct dma_ops_domain {
209 struct list_head list;
210
211 /* generic protection domain information */
212 struct protection_domain domain;
213
214 /* size of the aperture for the mappings */
215 unsigned long aperture_size;
216
217 /* address we start to search for free addresses */
218 unsigned long next_bit;
219
220 /* address allocation bitmap */
221 unsigned long *bitmap;
222
223 /*
224 * Array of PTE pages for the aperture. In this array we save all the
225 * leaf pages of the domain page table used for the aperture. This way
226 * we don't need to walk the page table to find a specific PTE. We can
227 * just calculate its address in constant time.
228 */
229 u64 **pte_pages;
230
231 /* This will be set to true when TLB needs to be flushed */
232 bool need_flush;
233
234 /*
235 * if this is a preallocated domain, keep the device for which it was
236 * preallocated in this variable
237 */
238 u16 target_dev;
239};
240
241/*
242 * Structure where we save information about one hardware AMD IOMMU in the
243 * system.
244 */
245struct amd_iommu {
246 struct list_head list;
247
248 /* locks the accesses to the hardware */
249 spinlock_t lock;
250
251 /* Pointer to PCI device of this IOMMU */
252 struct pci_dev *dev;
253
254 /*
255 * Capability pointer. There could be more than one IOMMU per PCI
256 * device function if there is more than one AMD IOMMU capability
257 * pointer.
258 */
259 u16 cap_ptr;
260
261 /* physical address of MMIO space */
262 u64 mmio_phys;
263 /* virtual address of MMIO space */
264 u8 *mmio_base;
265
266 /* capabilities of that IOMMU read from ACPI */
267 u32 cap;
268
269 /* pci domain of this IOMMU */
270 u16 pci_seg;
271
272 /* first device this IOMMU handles. read from PCI */
273 u16 first_device;
274 /* last device this IOMMU handles. read from PCI */
275 u16 last_device;
276
277 /* start of exclusion range of that IOMMU */
278 u64 exclusion_start;
279 /* length of exclusion range of that IOMMU */
280 u64 exclusion_length;
281
282 /* command buffer virtual address */
283 u8 *cmd_buf;
284 /* size of command buffer */
285 u32 cmd_buf_size;
286
287 /* event buffer virtual address */
288 u8 *evt_buf;
289 /* size of event buffer */
290 u32 evt_buf_size;
291 /* MSI number for event interrupt */
292 u16 evt_msi_num;
293
294 /* if one, we need to send a completion wait command */
295 int need_sync;
296
297 /* true if interrupts for this IOMMU are already enabled */
298 bool int_enabled;
299
300 /* default dma_ops domain for that IOMMU */
301 struct dma_ops_domain *default_dom;
302};
303
304/*
305 * List with all IOMMUs in the system. This list is not locked because it is
306 * only written and read at driver initialization or suspend time.
307 */
308extern struct list_head amd_iommu_list;
309
310/*
311 * Structure defining one entry in the device table
312 */
313struct dev_table_entry {
314 u32 data[8];
315};
316
317/*
318 * One entry for unity mappings parsed out of the ACPI table.
319 */
320struct unity_map_entry {
321 struct list_head list;
322
323	/* starting device id this entry is used for (inclusive) */
324	u16 devid_start;
325	/* end device id this entry is used for (inclusive) */
326	u16 devid_end;
327
328	/* start address to unity map (inclusive) */
329	u64 address_start;
330	/* end address to unity map (inclusive) */
331	u64 address_end;
332
333 /* required protection */
334 int prot;
335};
336
337/*
338 * List of all unity mappings. It is not locked because at runtime it is only
339 * read. It is created at ACPI table parsing time.
340 */
341extern struct list_head amd_iommu_unity_map;
342
343/*
344 * Data structures for device handling
345 */
346
347/*
348 * Device table used by hardware. Read and write accesses by software are
349 * locked with the amd_iommu_pd_table lock.
350 */
351extern struct dev_table_entry *amd_iommu_dev_table;
352
353/*
354 * Alias table mapping requestor ids to device ids. Not locked because it
355 * is only read at runtime.
356 */
357extern u16 *amd_iommu_alias_table;
358
359/*
360 * Reverse lookup table to find the IOMMU which translates a specific device.
361 */
362extern struct amd_iommu **amd_iommu_rlookup_table;
363
364/* size of the dma_ops aperture as power of 2 */
365extern unsigned amd_iommu_aperture_order;
366
367/* largest PCI device id we expect translation requests for */
368extern u16 amd_iommu_last_bdf;
369
370/* data structures for protection domain handling */
371extern struct protection_domain **amd_iommu_pd_table;
372
373/* allocation bitmap for domain ids */
374extern unsigned long *amd_iommu_pd_alloc_bitmap;
375
376/* will be 1 if device isolation is enabled */
377extern int amd_iommu_isolate;
378
379/*
380 * If true, addresses are flushed at unmap time rather than when
381 * they are reused
382 */
383extern bool amd_iommu_unmap_flush;
384
385/* takes a PCI device id and prints it out in a readable form */
386static inline void print_devid(u16 devid, int nl)
387{
388 int bus = devid >> 8;
389 int dev = devid >> 3 & 0x1f;
390 int fn = devid & 0x07;
391
392 printk("%02x:%02x.%x", bus, dev, fn);
393 if (nl)
394 printk("\n");
395}
396
397/* takes bus and device/function and returns the device id
398 * FIXME: should that be in generic PCI code? */
399static inline u16 calc_devid(u8 bus, u8 devfn)
400{
401 return (((u16)bus) << 8) | devfn;
402}
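
calc_devid() packs the PCI bus number into the high byte and the devfn byte (5-bit device, 3-bit function) into the low byte, which is exactly what print_devid() unpacks. A worked round trip with hypothetical values:

/* Device 00:18.3: devfn = (0x18 << 3) | 0x3 = 0xc3 */
u16 devid = calc_devid(0x00, (0x18 << 3) | 0x3);	/* devid == 0x00c3 */
/* print_devid(devid, 1) recovers and prints "00:18.3" */
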
403
404#endif /* ASM_X86__AMD_IOMMU_TYPES_H */
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
deleted file mode 100644
index ef1d72dbdfe0..000000000000
--- a/include/asm-x86/apic.h
+++ /dev/null
@@ -1,199 +0,0 @@
1#ifndef ASM_X86__APIC_H
2#define ASM_X86__APIC_H
3
4#include <linux/pm.h>
5#include <linux/delay.h>
6
7#include <asm/alternative.h>
8#include <asm/fixmap.h>
9#include <asm/apicdef.h>
10#include <asm/processor.h>
11#include <asm/system.h>
12#include <asm/cpufeature.h>
13#include <asm/msr.h>
14
15#define ARCH_APICTIMER_STOPS_ON_C3 1
16
17/*
18 * Debugging macros
19 */
20#define APIC_QUIET 0
21#define APIC_VERBOSE 1
22#define APIC_DEBUG 2
23
24/*
25 * Define the default level of output to be very little.
26 * This can be turned up by using apic=verbose for more
27 * information and apic=debug for _lots_ of information.
28 * apic_verbosity is defined in apic.c.
29 */
30#define apic_printk(v, s, a...) do { \
31 if ((v) <= apic_verbosity) \
32 printk(s, ##a); \
33 } while (0)
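
The macro compares each message's level against the runtime apic_verbosity and drops anything above it. A hedged usage sketch (v is a hypothetical local):

apic_printk(APIC_VERBOSE, "enabling APIC mode\n");	/* emitted with apic=verbose or apic=debug */
apic_printk(APIC_DEBUG, "LVT0: %08x\n", v);		/* emitted only with apic=debug */
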
34
35
36extern void generic_apic_probe(void);
37
38#ifdef CONFIG_X86_LOCAL_APIC
39
40extern unsigned int apic_verbosity;
41extern int local_apic_timer_c2_ok;
42
43extern int disable_apic;
44/*
45 * Basic functions accessing APICs.
46 */
47#ifdef CONFIG_PARAVIRT
48#include <asm/paravirt.h>
49#else
50#define setup_boot_clock setup_boot_APIC_clock
51#define setup_secondary_clock setup_secondary_APIC_clock
52#endif
53
54extern int is_vsmp_box(void);
55extern void xapic_wait_icr_idle(void);
56extern u32 safe_xapic_wait_icr_idle(void);
57extern u64 xapic_icr_read(void);
58extern void xapic_icr_write(u32, u32);
59extern int setup_profiling_timer(unsigned int);
60
61static inline void native_apic_mem_write(u32 reg, u32 v)
62{
63 volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
64
65 alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
66 ASM_OUTPUT2("=r" (v), "=m" (*addr)),
67 ASM_OUTPUT2("0" (v), "m" (*addr)));
68}
69
70static inline u32 native_apic_mem_read(u32 reg)
71{
72 return *((volatile u32 *)(APIC_BASE + reg));
73}
74
75static inline void native_apic_msr_write(u32 reg, u32 v)
76{
77 if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
78 reg == APIC_LVR)
79 return;
80
81 wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
82}
83
84static inline u32 native_apic_msr_read(u32 reg)
85{
86 u32 low, high;
87
88 if (reg == APIC_DFR)
89 return -1;
90
91 rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
92 return low;
93}
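
In x2APIC mode the register file moves from MMIO into MSR space; as the two helpers above show, an MMIO offset maps to MSR index APIC_BASE_MSR + (offset >> 4). A worked example:

/* APIC_EOI is MMIO offset 0xB0, so its MSR index is 0x800 + 0xB = 0x80B */
native_apic_msr_write(APIC_EOI, 0);	/* ends up as wrmsr(0x80b, 0, 0) */
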
94
95#ifndef CONFIG_X86_32
96extern int x2apic, x2apic_preenabled;
97extern void check_x2apic(void);
98extern void enable_x2apic(void);
99extern void enable_IR_x2apic(void);
100extern void x2apic_icr_write(u32 low, u32 id);
101static inline int x2apic_enabled(void)
102{
103 int msr, msr2;
104
105 if (!cpu_has_x2apic)
106 return 0;
107
108 rdmsr(MSR_IA32_APICBASE, msr, msr2);
109 if (msr & X2APIC_ENABLE)
110 return 1;
111 return 0;
112}
113#else
114#define x2apic_enabled() 0
115#endif
116
117struct apic_ops {
118 u32 (*read)(u32 reg);
119 void (*write)(u32 reg, u32 v);
120 u64 (*icr_read)(void);
121 void (*icr_write)(u32 low, u32 high);
122 void (*wait_icr_idle)(void);
123 u32 (*safe_wait_icr_idle)(void);
124};
125
126extern struct apic_ops *apic_ops;
127
128#define apic_read (apic_ops->read)
129#define apic_write (apic_ops->write)
130#define apic_icr_read (apic_ops->icr_read)
131#define apic_icr_write (apic_ops->icr_write)
132#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
133#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
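
These macros route every APIC access through the apic_ops table, so the xAPIC (MMIO) and x2APIC (MSR) backends can be swapped at runtime. A sketch of an xAPIC backend built from the helpers declared above (the instance name is hypothetical):

static struct apic_ops sketch_xapic_ops = {
	.read			= native_apic_mem_read,
	.write			= native_apic_mem_write,
	.icr_read		= xapic_icr_read,
	.icr_write		= xapic_icr_write,
	.wait_icr_idle		= xapic_wait_icr_idle,
	.safe_wait_icr_idle	= safe_xapic_wait_icr_idle,
};

/* After "apic_ops = &sketch_xapic_ops;", apic_read(APIC_ID) is an MMIO read. */
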
134
135extern int get_physical_broadcast(void);
136
137#ifdef CONFIG_X86_64
138static inline void ack_x2APIC_irq(void)
139{
140 /* Docs say use 0 for future compatibility */
141 native_apic_msr_write(APIC_EOI, 0);
142}
143#endif
144
145
146static inline void ack_APIC_irq(void)
147{
148 /*
149 * ack_APIC_irq() actually gets compiled as a single instruction
150 * ... yummie.
151 */
152
153 /* Docs say use 0 for future compatibility */
154 apic_write(APIC_EOI, 0);
155}
156
157extern int lapic_get_maxlvt(void);
158extern void clear_local_APIC(void);
159extern void connect_bsp_APIC(void);
160extern void disconnect_bsp_APIC(int virt_wire_setup);
161extern void disable_local_APIC(void);
162extern void lapic_shutdown(void);
163extern int verify_local_APIC(void);
164extern void cache_APIC_registers(void);
165extern void sync_Arb_IDs(void);
166extern void init_bsp_APIC(void);
167extern void setup_local_APIC(void);
168extern void end_local_APIC_setup(void);
169extern void init_apic_mappings(void);
170extern void setup_boot_APIC_clock(void);
171extern void setup_secondary_APIC_clock(void);
172extern int APIC_init_uniprocessor(void);
173extern void enable_NMI_through_LVT0(void);
174
175/*
176 * On 32bit this is mach-xxx local
177 */
178#ifdef CONFIG_X86_64
179extern void early_init_lapic_mapping(void);
180extern int apic_is_clustered_box(void);
181#else
182static inline int apic_is_clustered_box(void)
183{
184 return 0;
185}
186#endif
187
188extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
189extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
190
191
192#else /* !CONFIG_X86_LOCAL_APIC */
193static inline void lapic_shutdown(void) { }
194#define local_apic_timer_c2_ok 1
195static inline void init_apic_mappings(void) { }
196
197#endif /* !CONFIG_X86_LOCAL_APIC */
198
199#endif /* ASM_X86__APIC_H */
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
deleted file mode 100644
index b922c85ac91d..000000000000
--- a/include/asm-x86/apicdef.h
+++ /dev/null
@@ -1,417 +0,0 @@
1#ifndef ASM_X86__APICDEF_H
2#define ASM_X86__APICDEF_H
3
4/*
5 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
6 *
7 * Alan Cox <Alan.Cox@linux.org>, 1995.
8 * Ingo Molnar <mingo@redhat.com>, 1999, 2000
9 */
10
11#define APIC_DEFAULT_PHYS_BASE 0xfee00000
12
13#define APIC_ID 0x20
14
15#define APIC_LVR 0x30
16#define APIC_LVR_MASK 0xFF00FF
17#define GET_APIC_VERSION(x) ((x) & 0xFFu)
18#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
19#ifdef CONFIG_X86_32
20# define APIC_INTEGRATED(x) ((x) & 0xF0u)
21#else
22# define APIC_INTEGRATED(x) (1)
23#endif
24#define APIC_XAPIC(x) ((x) >= 0x14)
25#define APIC_TASKPRI 0x80
26#define APIC_TPRI_MASK 0xFFu
27#define APIC_ARBPRI 0x90
28#define APIC_ARBPRI_MASK 0xFFu
29#define APIC_PROCPRI 0xA0
30#define APIC_EOI 0xB0
31#define APIC_EIO_ACK 0x0
32#define APIC_RRR 0xC0
33#define APIC_LDR 0xD0
34#define APIC_LDR_MASK (0xFFu << 24)
35#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
36#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
37#define APIC_ALL_CPUS 0xFFu
38#define APIC_DFR 0xE0
39#define APIC_DFR_CLUSTER 0x0FFFFFFFul
40#define APIC_DFR_FLAT 0xFFFFFFFFul
41#define APIC_SPIV 0xF0
42#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
43#define APIC_SPIV_APIC_ENABLED (1 << 8)
44#define APIC_ISR 0x100
45#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
46#define APIC_TMR 0x180
47#define APIC_IRR 0x200
48#define APIC_ESR 0x280
49#define APIC_ESR_SEND_CS 0x00001
50#define APIC_ESR_RECV_CS 0x00002
51#define APIC_ESR_SEND_ACC 0x00004
52#define APIC_ESR_RECV_ACC 0x00008
53#define APIC_ESR_SENDILL 0x00020
54#define APIC_ESR_RECVILL 0x00040
55#define APIC_ESR_ILLREGA 0x00080
56#define APIC_ICR 0x300
57#define APIC_DEST_SELF 0x40000
58#define APIC_DEST_ALLINC 0x80000
59#define APIC_DEST_ALLBUT 0xC0000
60#define APIC_ICR_RR_MASK 0x30000
61#define APIC_ICR_RR_INVALID 0x00000
62#define APIC_ICR_RR_INPROG 0x10000
63#define APIC_ICR_RR_VALID 0x20000
64#define APIC_INT_LEVELTRIG 0x08000
65#define APIC_INT_ASSERT 0x04000
66#define APIC_ICR_BUSY 0x01000
67#define APIC_DEST_LOGICAL 0x00800
68#define APIC_DEST_PHYSICAL 0x00000
69#define APIC_DM_FIXED 0x00000
70#define APIC_DM_LOWEST 0x00100
71#define APIC_DM_SMI 0x00200
72#define APIC_DM_REMRD 0x00300
73#define APIC_DM_NMI 0x00400
74#define APIC_DM_INIT 0x00500
75#define APIC_DM_STARTUP 0x00600
76#define APIC_DM_EXTINT 0x00700
77#define APIC_VECTOR_MASK 0x000FF
78#define APIC_ICR2 0x310
79#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
80#define SET_APIC_DEST_FIELD(x) ((x) << 24)
81#define APIC_LVTT 0x320
82#define APIC_LVTTHMR 0x330
83#define APIC_LVTPC 0x340
84#define APIC_LVT0 0x350
85#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
86#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
87#define SET_APIC_TIMER_BASE(x) (((x) << 18))
88#define APIC_TIMER_BASE_CLKIN 0x0
89#define APIC_TIMER_BASE_TMBASE 0x1
90#define APIC_TIMER_BASE_DIV 0x2
91#define APIC_LVT_TIMER_PERIODIC (1 << 17)
92#define APIC_LVT_MASKED (1 << 16)
93#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
94#define APIC_LVT_REMOTE_IRR (1 << 14)
95#define APIC_INPUT_POLARITY (1 << 13)
96#define APIC_SEND_PENDING (1 << 12)
97#define APIC_MODE_MASK 0x700
98#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
99#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
100#define APIC_MODE_FIXED 0x0
101#define APIC_MODE_NMI 0x4
102#define APIC_MODE_EXTINT 0x7
103#define APIC_LVT1 0x360
104#define APIC_LVTERR 0x370
105#define APIC_TMICT 0x380
106#define APIC_TMCCT 0x390
107#define APIC_TDCR 0x3E0
108#define APIC_SELF_IPI 0x3F0
109#define APIC_TDR_DIV_TMBASE (1 << 2)
110#define APIC_TDR_DIV_1 0xB
111#define APIC_TDR_DIV_2 0x0
112#define APIC_TDR_DIV_4 0x1
113#define APIC_TDR_DIV_8 0x2
114#define APIC_TDR_DIV_16 0x3
115#define APIC_TDR_DIV_32 0x8
116#define APIC_TDR_DIV_64 0x9
117#define APIC_TDR_DIV_128 0xA
118#define APIC_EILVT0 0x500
119#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
120#define APIC_EILVT_NR_AMD_10H 4
121#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
122#define APIC_EILVT_MSG_FIX 0x0
123#define APIC_EILVT_MSG_SMI 0x2
124#define APIC_EILVT_MSG_NMI 0x4
125#define APIC_EILVT_MSG_EXT 0x7
126#define APIC_EILVT_MASKED (1 << 16)
127#define APIC_EILVT1 0x510
128#define APIC_EILVT2 0x520
129#define APIC_EILVT3 0x530
130
131#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
132#define APIC_BASE_MSR 0x800
133#define X2APIC_ENABLE (1UL << 10)
134
135#ifdef CONFIG_X86_32
136# define MAX_IO_APICS 64
137#else
138# define MAX_IO_APICS 128
139# define MAX_LOCAL_APIC 32768
140#endif
141
142/*
143 * All x86-64 systems are xAPIC compatible.
144 * In the following, "apicid" is a physical APIC ID.
145 */
146#define XAPIC_DEST_CPUS_SHIFT 4
147#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
148#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
149#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
150#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
151#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
152#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
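
With the 4-bit split, a physical APIC ID decomposes into a cluster number and a CPU number within that cluster. A worked example for a hypothetical apicid of 0x23:

/*
 * APIC_CLUSTER(0x23)   == 0x20   (cluster bits)
 * APIC_CLUSTERID(0x23) == 0x2    (cluster number)
 * APIC_CPUID(0x23)     == 0x3    (cpu within the cluster)
 */
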
153
154/*
155 * The local APIC register structure, memory mapped. Not terribly well
156 * tested, but we might eventually use it in the future - the reason we
157 * cannot use it right now is the P5 APIC: it has an erratum that allows
158 * only 32-bit reads and writes, not 8-bit ones ...
159 */
160#define u32 unsigned int
161
162struct local_apic {
163
164/*000*/ struct { u32 __reserved[4]; } __reserved_01;
165
166/*010*/ struct { u32 __reserved[4]; } __reserved_02;
167
168/*020*/ struct { /* APIC ID Register */
169 u32 __reserved_1 : 24,
170 phys_apic_id : 4,
171 __reserved_2 : 4;
172 u32 __reserved[3];
173 } id;
174
175/*030*/ const
176 struct { /* APIC Version Register */
177 u32 version : 8,
178 __reserved_1 : 8,
179 max_lvt : 8,
180 __reserved_2 : 8;
181 u32 __reserved[3];
182 } version;
183
184/*040*/ struct { u32 __reserved[4]; } __reserved_03;
185
186/*050*/ struct { u32 __reserved[4]; } __reserved_04;
187
188/*060*/ struct { u32 __reserved[4]; } __reserved_05;
189
190/*070*/ struct { u32 __reserved[4]; } __reserved_06;
191
192/*080*/ struct { /* Task Priority Register */
193 u32 priority : 8,
194 __reserved_1 : 24;
195 u32 __reserved_2[3];
196 } tpr;
197
198/*090*/ const
199 struct { /* Arbitration Priority Register */
200 u32 priority : 8,
201 __reserved_1 : 24;
202 u32 __reserved_2[3];
203 } apr;
204
205/*0A0*/ const
206 struct { /* Processor Priority Register */
207 u32 priority : 8,
208 __reserved_1 : 24;
209 u32 __reserved_2[3];
210 } ppr;
211
212/*0B0*/ struct { /* End Of Interrupt Register */
213 u32 eoi;
214 u32 __reserved[3];
215 } eoi;
216
217/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
218
219/*0D0*/ struct { /* Logical Destination Register */
220 u32 __reserved_1 : 24,
221 logical_dest : 8;
222 u32 __reserved_2[3];
223 } ldr;
224
225/*0E0*/ struct { /* Destination Format Register */
226 u32 __reserved_1 : 28,
227 model : 4;
228 u32 __reserved_2[3];
229 } dfr;
230
231/*0F0*/ struct { /* Spurious Interrupt Vector Register */
232 u32 spurious_vector : 8,
233 apic_enabled : 1,
234 focus_cpu : 1,
235 __reserved_2 : 22;
236 u32 __reserved_3[3];
237 } svr;
238
239/*100*/ struct { /* In Service Register */
240/*170*/ u32 bitfield;
241 u32 __reserved[3];
242 } isr [8];
243
244/*180*/ struct { /* Trigger Mode Register */
245/*1F0*/ u32 bitfield;
246 u32 __reserved[3];
247 } tmr [8];
248
249/*200*/ struct { /* Interrupt Request Register */
250/*270*/ u32 bitfield;
251 u32 __reserved[3];
252 } irr [8];
253
254/*280*/ union { /* Error Status Register */
255 struct {
256 u32 send_cs_error : 1,
257 receive_cs_error : 1,
258 send_accept_error : 1,
259 receive_accept_error : 1,
260 __reserved_1 : 1,
261 send_illegal_vector : 1,
262 receive_illegal_vector : 1,
263 illegal_register_address : 1,
264 __reserved_2 : 24;
265 u32 __reserved_3[3];
266 } error_bits;
267 struct {
268 u32 errors;
269 u32 __reserved_3[3];
270 } all_errors;
271 } esr;
272
273/*290*/ struct { u32 __reserved[4]; } __reserved_08;
274
275/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
276
277/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
278
279/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
280
281/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
282
283/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
284
285/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
286
287/*300*/ struct { /* Interrupt Command Register 1 */
288 u32 vector : 8,
289 delivery_mode : 3,
290 destination_mode : 1,
291 delivery_status : 1,
292 __reserved_1 : 1,
293 level : 1,
294 trigger : 1,
295 __reserved_2 : 2,
296 shorthand : 2,
297 __reserved_3 : 12;
298 u32 __reserved_4[3];
299 } icr1;
300
301/*310*/ struct { /* Interrupt Command Register 2 */
302 union {
303 u32 __reserved_1 : 24,
304 phys_dest : 4,
305 __reserved_2 : 4;
306 u32 __reserved_3 : 24,
307 logical_dest : 8;
308 } dest;
309 u32 __reserved_4[3];
310 } icr2;
311
312/*320*/ struct { /* LVT - Timer */
313 u32 vector : 8,
314 __reserved_1 : 4,
315 delivery_status : 1,
316 __reserved_2 : 3,
317 mask : 1,
318 timer_mode : 1,
319 __reserved_3 : 14;
320 u32 __reserved_4[3];
321 } lvt_timer;
322
323/*330*/ struct { /* LVT - Thermal Sensor */
324 u32 vector : 8,
325 delivery_mode : 3,
326 __reserved_1 : 1,
327 delivery_status : 1,
328 __reserved_2 : 3,
329 mask : 1,
330 __reserved_3 : 15;
331 u32 __reserved_4[3];
332 } lvt_thermal;
333
334/*340*/ struct { /* LVT - Performance Counter */
335 u32 vector : 8,
336 delivery_mode : 3,
337 __reserved_1 : 1,
338 delivery_status : 1,
339 __reserved_2 : 3,
340 mask : 1,
341 __reserved_3 : 15;
342 u32 __reserved_4[3];
343 } lvt_pc;
344
345/*350*/ struct { /* LVT - LINT0 */
346 u32 vector : 8,
347 delivery_mode : 3,
348 __reserved_1 : 1,
349 delivery_status : 1,
350 polarity : 1,
351 remote_irr : 1,
352 trigger : 1,
353 mask : 1,
354 __reserved_2 : 15;
355 u32 __reserved_3[3];
356 } lvt_lint0;
357
358/*360*/ struct { /* LVT - LINT1 */
359 u32 vector : 8,
360 delivery_mode : 3,
361 __reserved_1 : 1,
362 delivery_status : 1,
363 polarity : 1,
364 remote_irr : 1,
365 trigger : 1,
366 mask : 1,
367 __reserved_2 : 15;
368 u32 __reserved_3[3];
369 } lvt_lint1;
370
371/*370*/ struct { /* LVT - Error */
372 u32 vector : 8,
373 __reserved_1 : 4,
374 delivery_status : 1,
375 __reserved_2 : 3,
376 mask : 1,
377 __reserved_3 : 15;
378 u32 __reserved_4[3];
379 } lvt_error;
380
381/*380*/ struct { /* Timer Initial Count Register */
382 u32 initial_count;
383 u32 __reserved_2[3];
384 } timer_icr;
385
386/*390*/ const
387 struct { /* Timer Current Count Register */
388 u32 curr_count;
389 u32 __reserved_2[3];
390 } timer_ccr;
391
392/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
393
394/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
395
396/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
397
398/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
399
400/*3E0*/ struct { /* Timer Divide Configuration Register */
401 u32 divisor : 4,
402 __reserved_1 : 28;
403 u32 __reserved_2[3];
404 } timer_dcr;
405
406/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
407
408} __attribute__ ((packed));
409
410#undef u32
411
412#ifdef CONFIG_X86_32
413 #define BAD_APICID 0xFFu
414#else
415 #define BAD_APICID 0xFFFFu
416#endif
417#endif /* ASM_X86__APICDEF_H */
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h
deleted file mode 100644
index de4596b24c23..000000000000
--- a/include/asm-x86/arch_hooks.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef ASM_X86__ARCH_HOOKS_H
2#define ASM_X86__ARCH_HOOKS_H
3
4#include <linux/interrupt.h>
5
6/*
7 * linux/include/asm/arch_hooks.h
8 *
9 * define the architecture specific hooks
10 */
11
12/* these aren't arch hooks, they are generic routines
13 * that can be used by the hooks */
14extern void init_ISA_irqs(void);
15extern irqreturn_t timer_interrupt(int irq, void *dev_id);
16
17/* these are the defined hooks */
18extern void intr_init_hook(void);
19extern void pre_intr_init_hook(void);
20extern void pre_setup_arch_hook(void);
21extern void trap_init_hook(void);
22extern void pre_time_init_hook(void);
23extern void time_init_hook(void);
24extern void mca_nmi_hook(void);
25
26#endif /* ASM_X86__ARCH_HOOKS_H */
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
deleted file mode 100644
index e1355f44d7c3..000000000000
--- a/include/asm-x86/asm.h
+++ /dev/null
@@ -1,47 +0,0 @@
1#ifndef ASM_X86__ASM_H
2#define ASM_X86__ASM_H
3
4#ifdef __ASSEMBLY__
5# define __ASM_FORM(x) x
6# define __ASM_EX_SEC .section __ex_table
7#else
8# define __ASM_FORM(x) " " #x " "
9# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
10#endif
11
12#ifdef CONFIG_X86_32
13# define __ASM_SEL(a,b) __ASM_FORM(a)
14#else
15# define __ASM_SEL(a,b) __ASM_FORM(b)
16#endif
17
18#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
19#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
20
21#define _ASM_PTR __ASM_SEL(.long, .quad)
22#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
23
24#define _ASM_MOV __ASM_SIZE(mov)
25#define _ASM_INC __ASM_SIZE(inc)
26#define _ASM_DEC __ASM_SIZE(dec)
27#define _ASM_ADD __ASM_SIZE(add)
28#define _ASM_SUB __ASM_SIZE(sub)
29#define _ASM_XADD __ASM_SIZE(xadd)
30
31#define _ASM_AX __ASM_REG(ax)
32#define _ASM_BX __ASM_REG(bx)
33#define _ASM_CX __ASM_REG(cx)
34#define _ASM_DX __ASM_REG(dx)
35#define _ASM_SP __ASM_REG(sp)
36#define _ASM_BP __ASM_REG(bp)
37#define _ASM_SI __ASM_REG(si)
38#define _ASM_DI __ASM_REG(di)
39
40/* Exception table entry */
41# define _ASM_EXTABLE(from,to) \
42 __ASM_EX_SEC \
43 _ASM_ALIGN "\n" \
44 _ASM_PTR #from "," #to "\n" \
45 " .previous\n"
46
47#endif /* ASM_X86__ASM_H */
diff --git a/include/asm-x86/atomic.h b/include/asm-x86/atomic.h
deleted file mode 100644
index 4e1b8873c474..000000000000
--- a/include/asm-x86/atomic.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "atomic_32.h"
3#else
4# include "atomic_64.h"
5#endif
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
deleted file mode 100644
index 14d3f0beb889..000000000000
--- a/include/asm-x86/atomic_32.h
+++ /dev/null
@@ -1,259 +0,0 @@
1#ifndef ASM_X86__ATOMIC_32_H
2#define ASM_X86__ATOMIC_32_H
3
4#include <linux/compiler.h>
5#include <asm/processor.h>
6#include <asm/cmpxchg.h>
7
8/*
9 * Atomic operations that C can't guarantee us. Useful for
10 * resource counting etc..
11 */
12
13/*
14 * Make sure gcc doesn't try to be clever and move things around
15 * on us. We need to use _exactly_ the address the user gave us,
16 * not some alias that contains the same information.
17 */
18typedef struct {
19 int counter;
20} atomic_t;
21
22#define ATOMIC_INIT(i) { (i) }
23
24/**
25 * atomic_read - read atomic variable
26 * @v: pointer of type atomic_t
27 *
28 * Atomically reads the value of @v.
29 */
30#define atomic_read(v) ((v)->counter)
31
32/**
33 * atomic_set - set atomic variable
34 * @v: pointer of type atomic_t
35 * @i: required value
36 *
37 * Atomically sets the value of @v to @i.
38 */
39#define atomic_set(v, i) (((v)->counter) = (i))
40
41/**
42 * atomic_add - add integer to atomic variable
43 * @i: integer value to add
44 * @v: pointer of type atomic_t
45 *
46 * Atomically adds @i to @v.
47 */
48static inline void atomic_add(int i, atomic_t *v)
49{
50 asm volatile(LOCK_PREFIX "addl %1,%0"
51 : "+m" (v->counter)
52 : "ir" (i));
53}
54
55/**
56 * atomic_sub - subtract integer from atomic variable
57 * @i: integer value to subtract
58 * @v: pointer of type atomic_t
59 *
60 * Atomically subtracts @i from @v.
61 */
62static inline void atomic_sub(int i, atomic_t *v)
63{
64 asm volatile(LOCK_PREFIX "subl %1,%0"
65 : "+m" (v->counter)
66 : "ir" (i));
67}
68
69/**
70 * atomic_sub_and_test - subtract value from variable and test result
71 * @i: integer value to subtract
72 * @v: pointer of type atomic_t
73 *
74 * Atomically subtracts @i from @v and returns
75 * true if the result is zero, or false for all
76 * other cases.
77 */
78static inline int atomic_sub_and_test(int i, atomic_t *v)
79{
80 unsigned char c;
81
82 asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
83 : "+m" (v->counter), "=qm" (c)
84 : "ir" (i) : "memory");
85 return c;
86}
87
88/**
89 * atomic_inc - increment atomic variable
90 * @v: pointer of type atomic_t
91 *
92 * Atomically increments @v by 1.
93 */
94static inline void atomic_inc(atomic_t *v)
95{
96 asm volatile(LOCK_PREFIX "incl %0"
97 : "+m" (v->counter));
98}
99
100/**
101 * atomic_dec - decrement atomic variable
102 * @v: pointer of type atomic_t
103 *
104 * Atomically decrements @v by 1.
105 */
106static inline void atomic_dec(atomic_t *v)
107{
108 asm volatile(LOCK_PREFIX "decl %0"
109 : "+m" (v->counter));
110}
111
112/**
113 * atomic_dec_and_test - decrement and test
114 * @v: pointer of type atomic_t
115 *
116 * Atomically decrements @v by 1 and
117 * returns true if the result is 0, or false for all other
118 * cases.
119 */
120static inline int atomic_dec_and_test(atomic_t *v)
121{
122 unsigned char c;
123
124 asm volatile(LOCK_PREFIX "decl %0; sete %1"
125 : "+m" (v->counter), "=qm" (c)
126 : : "memory");
127 return c != 0;
128}
129
130/**
131 * atomic_inc_and_test - increment and test
132 * @v: pointer of type atomic_t
133 *
134 * Atomically increments @v by 1
135 * and returns true if the result is zero, or false for all
136 * other cases.
137 */
138static inline int atomic_inc_and_test(atomic_t *v)
139{
140 unsigned char c;
141
142 asm volatile(LOCK_PREFIX "incl %0; sete %1"
143 : "+m" (v->counter), "=qm" (c)
144 : : "memory");
145 return c != 0;
146}
147
148/**
149 * atomic_add_negative - add and test if negative
150 * @v: pointer of type atomic_t
151 * @i: integer value to add
152 *
153 * Atomically adds @i to @v and returns true
154 * if the result is negative, or false when
155 * result is greater than or equal to zero.
156 */
157static inline int atomic_add_negative(int i, atomic_t *v)
158{
159 unsigned char c;
160
161 asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
162 : "+m" (v->counter), "=qm" (c)
163 : "ir" (i) : "memory");
164 return c;
165}
166
167/**
168 * atomic_add_return - add integer and return
169 * @v: pointer of type atomic_t
170 * @i: integer value to add
171 *
172 * Atomically adds @i to @v and returns @i + @v
173 */
174static inline int atomic_add_return(int i, atomic_t *v)
175{
176 int __i;
177#ifdef CONFIG_M386
178 unsigned long flags;
179 if (unlikely(boot_cpu_data.x86 <= 3))
180 goto no_xadd;
181#endif
182 /* Modern 486+ processor */
183 __i = i;
184 asm volatile(LOCK_PREFIX "xaddl %0, %1"
185 : "+r" (i), "+m" (v->counter)
186 : : "memory");
187 return i + __i;
188
189#ifdef CONFIG_M386
190no_xadd: /* Legacy 386 processor */
191 local_irq_save(flags);
192 __i = atomic_read(v);
193 atomic_set(v, i + __i);
194 local_irq_restore(flags);
195 return i + __i;
196#endif
197}
198
199/**
200 * atomic_sub_return - subtract integer and return
201 * @v: pointer of type atomic_t
202 * @i: integer value to subtract
203 *
204 * Atomically subtracts @i from @v and returns @v - @i
205 */
206static inline int atomic_sub_return(int i, atomic_t *v)
207{
208 return atomic_add_return(-i, v);
209}
210
211#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
212#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
213
214/**
215 * atomic_add_unless - add unless the number is already a given value
216 * @v: pointer of type atomic_t
217 * @a: the amount to add to v...
218 * @u: ...unless v is equal to u.
219 *
220 * Atomically adds @a to @v, so long as @v was not already @u.
221 * Returns non-zero if @v was not @u, and zero otherwise.
222 */
223static inline int atomic_add_unless(atomic_t *v, int a, int u)
224{
225 int c, old;
226 c = atomic_read(v);
227 for (;;) {
228 if (unlikely(c == (u)))
229 break;
230 old = atomic_cmpxchg((v), c, c + (a));
231 if (likely(old == c))
232 break;
233 c = old;
234 }
235 return c != (u);
236}
237
238#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
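
atomic_add_unless() is the usual building block for "take a reference only while the object is still live". A minimal sketch of that pattern (struct and function names are hypothetical):

struct obj {
	atomic_t refcnt;
};

static int obj_tryget(struct obj *o)
{
	/* Fails (returns 0) once the count has already dropped to zero. */
	return atomic_inc_not_zero(&o->refcnt);
}
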
239
240#define atomic_inc_return(v) (atomic_add_return(1, v))
241#define atomic_dec_return(v) (atomic_sub_return(1, v))
242
243/* These are x86-specific, used by some header files */
244#define atomic_clear_mask(mask, addr) \
245 asm volatile(LOCK_PREFIX "andl %0,%1" \
246 : : "r" (~(mask)), "m" (*(addr)) : "memory")
247
248#define atomic_set_mask(mask, addr) \
249 asm volatile(LOCK_PREFIX "orl %0,%1" \
250 : : "r" (mask), "m" (*(addr)) : "memory")
251
252/* Atomic operations are already serializing on x86 */
253#define smp_mb__before_atomic_dec() barrier()
254#define smp_mb__after_atomic_dec() barrier()
255#define smp_mb__before_atomic_inc() barrier()
256#define smp_mb__after_atomic_inc() barrier()
257
258#include <asm-generic/atomic.h>
259#endif /* ASM_X86__ATOMIC_32_H */
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
deleted file mode 100644
index 2cb218c4a356..000000000000
--- a/include/asm-x86/atomic_64.h
+++ /dev/null
@@ -1,473 +0,0 @@
1#ifndef ASM_X86__ATOMIC_64_H
2#define ASM_X86__ATOMIC_64_H
3
4#include <asm/alternative.h>
5#include <asm/cmpxchg.h>
6
7/* atomic_t should be a 32-bit signed type */
8
9/*
10 * Atomic operations that C can't guarantee us. Useful for
11 * resource counting etc..
12 */
13
14/*
15 * Make sure gcc doesn't try to be clever and move things around
16 * on us. We need to use _exactly_ the address the user gave us,
17 * not some alias that contains the same information.
18 */
19typedef struct {
20 int counter;
21} atomic_t;
22
23#define ATOMIC_INIT(i) { (i) }
24
25/**
26 * atomic_read - read atomic variable
27 * @v: pointer of type atomic_t
28 *
29 * Atomically reads the value of @v.
30 */
31#define atomic_read(v) ((v)->counter)
32
33/**
34 * atomic_set - set atomic variable
35 * @v: pointer of type atomic_t
36 * @i: required value
37 *
38 * Atomically sets the value of @v to @i.
39 */
40#define atomic_set(v, i) (((v)->counter) = (i))
41
42/**
43 * atomic_add - add integer to atomic variable
44 * @i: integer value to add
45 * @v: pointer of type atomic_t
46 *
47 * Atomically adds @i to @v.
48 */
49static inline void atomic_add(int i, atomic_t *v)
50{
51 asm volatile(LOCK_PREFIX "addl %1,%0"
52 : "=m" (v->counter)
53 : "ir" (i), "m" (v->counter));
54}
55
56/**
57 * atomic_sub - subtract integer from atomic variable
58 * @i: integer value to subtract
59 * @v: pointer of type atomic_t
60 *
61 * Atomically subtracts @i from @v.
62 */
63static inline void atomic_sub(int i, atomic_t *v)
64{
65 asm volatile(LOCK_PREFIX "subl %1,%0"
66 : "=m" (v->counter)
67 : "ir" (i), "m" (v->counter));
68}
69
70/**
71 * atomic_sub_and_test - subtract value from variable and test result
72 * @i: integer value to subtract
73 * @v: pointer of type atomic_t
74 *
75 * Atomically subtracts @i from @v and returns
76 * true if the result is zero, or false for all
77 * other cases.
78 */
79static inline int atomic_sub_and_test(int i, atomic_t *v)
80{
81 unsigned char c;
82
83 asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
84 : "=m" (v->counter), "=qm" (c)
85 : "ir" (i), "m" (v->counter) : "memory");
86 return c;
87}
88
89/**
90 * atomic_inc - increment atomic variable
91 * @v: pointer of type atomic_t
92 *
93 * Atomically increments @v by 1.
94 */
95static inline void atomic_inc(atomic_t *v)
96{
97 asm volatile(LOCK_PREFIX "incl %0"
98 : "=m" (v->counter)
99 : "m" (v->counter));
100}
101
102/**
103 * atomic_dec - decrement atomic variable
104 * @v: pointer of type atomic_t
105 *
106 * Atomically decrements @v by 1.
107 */
108static inline void atomic_dec(atomic_t *v)
109{
110 asm volatile(LOCK_PREFIX "decl %0"
111 : "=m" (v->counter)
112 : "m" (v->counter));
113}
114
115/**
116 * atomic_dec_and_test - decrement and test
117 * @v: pointer of type atomic_t
118 *
119 * Atomically decrements @v by 1 and
120 * returns true if the result is 0, or false for all other
121 * cases.
122 */
123static inline int atomic_dec_and_test(atomic_t *v)
124{
125 unsigned char c;
126
127 asm volatile(LOCK_PREFIX "decl %0; sete %1"
128 : "=m" (v->counter), "=qm" (c)
129 : "m" (v->counter) : "memory");
130 return c != 0;
131}
132
133/**
134 * atomic_inc_and_test - increment and test
135 * @v: pointer of type atomic_t
136 *
137 * Atomically increments @v by 1
138 * and returns true if the result is zero, or false for all
139 * other cases.
140 */
141static inline int atomic_inc_and_test(atomic_t *v)
142{
143 unsigned char c;
144
145 asm volatile(LOCK_PREFIX "incl %0; sete %1"
146 : "=m" (v->counter), "=qm" (c)
147 : "m" (v->counter) : "memory");
148 return c != 0;
149}
150
151/**
152 * atomic_add_negative - add and test if negative
153 * @i: integer value to add
154 * @v: pointer of type atomic_t
155 *
156 * Atomically adds @i to @v and returns true
157 * if the result is negative, or false when
158 * result is greater than or equal to zero.
159 */
160static inline int atomic_add_negative(int i, atomic_t *v)
161{
162 unsigned char c;
163
164 asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
165 : "=m" (v->counter), "=qm" (c)
166 : "ir" (i), "m" (v->counter) : "memory");
167 return c;
168}
169
170/**
171 * atomic_add_return - add and return
172 * @i: integer value to add
173 * @v: pointer of type atomic_t
174 *
175 * Atomically adds @i to @v and returns @i + @v
176 */
177static inline int atomic_add_return(int i, atomic_t *v)
178{
179 int __i = i;
180 asm volatile(LOCK_PREFIX "xaddl %0, %1"
181 : "+r" (i), "+m" (v->counter)
182 : : "memory");
183 return i + __i;
184}
185
186static inline int atomic_sub_return(int i, atomic_t *v)
187{
188 return atomic_add_return(-i, v);
189}
190
191#define atomic_inc_return(v) (atomic_add_return(1, v))
192#define atomic_dec_return(v) (atomic_sub_return(1, v))
193
194/* A 64-bit atomic type */
195
196typedef struct {
197 long counter;
198} atomic64_t;
199
200#define ATOMIC64_INIT(i) { (i) }
201
202/**
203 * atomic64_read - read atomic64 variable
204 * @v: pointer of type atomic64_t
205 *
206 * Atomically reads the value of @v.
207 * Doesn't imply a read memory barrier.
208 */
209#define atomic64_read(v) ((v)->counter)
210
211/**
212 * atomic64_set - set atomic64 variable
213 * @v: pointer to type atomic64_t
214 * @i: required value
215 *
216 * Atomically sets the value of @v to @i.
217 */
218#define atomic64_set(v, i) (((v)->counter) = (i))
219
220/**
221 * atomic64_add - add integer to atomic64 variable
222 * @i: integer value to add
223 * @v: pointer to type atomic64_t
224 *
225 * Atomically adds @i to @v.
226 */
227static inline void atomic64_add(long i, atomic64_t *v)
228{
229 asm volatile(LOCK_PREFIX "addq %1,%0"
230 : "=m" (v->counter)
231 : "er" (i), "m" (v->counter));
232}
233
234/**
235 * atomic64_sub - subtract integer from atomic64 variable
236 * @i: integer value to subtract
237 * @v: pointer to type atomic64_t
238 *
239 * Atomically subtracts @i from @v.
240 */
241static inline void atomic64_sub(long i, atomic64_t *v)
242{
243 asm volatile(LOCK_PREFIX "subq %1,%0"
244 : "=m" (v->counter)
245 : "er" (i), "m" (v->counter));
246}
247
248/**
249 * atomic64_sub_and_test - subtract value from variable and test result
250 * @i: integer value to subtract
251 * @v: pointer to type atomic64_t
252 *
253 * Atomically subtracts @i from @v and returns
254 * true if the result is zero, or false for all
255 * other cases.
256 */
257static inline int atomic64_sub_and_test(long i, atomic64_t *v)
258{
259 unsigned char c;
260
261 asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
262 : "=m" (v->counter), "=qm" (c)
263 : "er" (i), "m" (v->counter) : "memory");
264 return c;
265}
266
267/**
268 * atomic64_inc - increment atomic64 variable
269 * @v: pointer to type atomic64_t
270 *
271 * Atomically increments @v by 1.
272 */
273static inline void atomic64_inc(atomic64_t *v)
274{
275 asm volatile(LOCK_PREFIX "incq %0"
276 : "=m" (v->counter)
277 : "m" (v->counter));
278}
279
280/**
281 * atomic64_dec - decrement atomic64 variable
282 * @v: pointer to type atomic64_t
283 *
284 * Atomically decrements @v by 1.
285 */
286static inline void atomic64_dec(atomic64_t *v)
287{
288 asm volatile(LOCK_PREFIX "decq %0"
289 : "=m" (v->counter)
290 : "m" (v->counter));
291}
292
293/**
294 * atomic64_dec_and_test - decrement and test
295 * @v: pointer to type atomic64_t
296 *
297 * Atomically decrements @v by 1 and
298 * returns true if the result is 0, or false for all other
299 * cases.
300 */
301static inline int atomic64_dec_and_test(atomic64_t *v)
302{
303 unsigned char c;
304
305 asm volatile(LOCK_PREFIX "decq %0; sete %1"
306 : "=m" (v->counter), "=qm" (c)
307 : "m" (v->counter) : "memory");
308 return c != 0;
309}
310
311/**
312 * atomic64_inc_and_test - increment and test
313 * @v: pointer to type atomic64_t
314 *
315 * Atomically increments @v by 1
316 * and returns true if the result is zero, or false for all
317 * other cases.
318 */
319static inline int atomic64_inc_and_test(atomic64_t *v)
320{
321 unsigned char c;
322
323 asm volatile(LOCK_PREFIX "incq %0; sete %1"
324 : "=m" (v->counter), "=qm" (c)
325 : "m" (v->counter) : "memory");
326 return c != 0;
327}
328
329/**
330 * atomic64_add_negative - add and test if negative
331 * @i: integer value to add
332 * @v: pointer to type atomic64_t
333 *
334 * Atomically adds @i to @v and returns true
335 * if the result is negative, or false when
336 * result is greater than or equal to zero.
337 */
338static inline int atomic64_add_negative(long i, atomic64_t *v)
339{
340 unsigned char c;
341
342 asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
343 : "=m" (v->counter), "=qm" (c)
344 : "er" (i), "m" (v->counter) : "memory");
345 return c;
346}
347
348/**
349 * atomic64_add_return - add and return
350 * @i: integer value to add
351 * @v: pointer to type atomic64_t
352 *
353 * Atomically adds @i to @v and returns @i + @v
354 */
355static inline long atomic64_add_return(long i, atomic64_t *v)
356{
357 long __i = i;
358 asm volatile(LOCK_PREFIX "xaddq %0, %1;"
359 : "+r" (i), "+m" (v->counter)
360 : : "memory");
361 return i + __i;
362}
363
364static inline long atomic64_sub_return(long i, atomic64_t *v)
365{
366 return atomic64_add_return(-i, v);
367}
368
369#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
370#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
371
372#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
373#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
374
375#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
376#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
377
378/**
379 * atomic_add_unless - add unless the number is a given value
380 * @v: pointer of type atomic_t
381 * @a: the amount to add to v...
382 * @u: ...unless v is equal to u.
383 *
384 * Atomically adds @a to @v, so long as it was not @u.
385 * Returns non-zero if @v was not @u, and zero otherwise.
386 */
387static inline int atomic_add_unless(atomic_t *v, int a, int u)
388{
389 int c, old;
390 c = atomic_read(v);
391 for (;;) {
392 if (unlikely(c == (u)))
393 break;
394 old = atomic_cmpxchg((v), c, c + (a));
395 if (likely(old == c))
396 break;
397 c = old;
398 }
399 return c != (u);
400}
401
402#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
403
404/**
405 * atomic64_add_unless - add unless the number is a given value
406 * @v: pointer of type atomic64_t
407 * @a: the amount to add to v...
408 * @u: ...unless v is equal to u.
409 *
410 * Atomically adds @a to @v, so long as it was not @u.
411 * Returns non-zero if @v was not @u, and zero otherwise.
412 */
413static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
414{
415 long c, old;
416 c = atomic64_read(v);
417 for (;;) {
418 if (unlikely(c == (u)))
419 break;
420 old = atomic64_cmpxchg((v), c, c + (a));
421 if (likely(old == c))
422 break;
423 c = old;
424 }
425 return c != (u);
426}
427
428/**
429 * atomic_inc_short - increment a short integer
430 * @v: pointer to type short int
431 *
432 * Atomically adds 1 to @v.
433 * Returns the new value of @v.
434 */
435static inline short int atomic_inc_short(short int *v)
436{
437 asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
438 return *v;
439}
440
441/**
442 * atomic_or_long - OR a value into a long integer
443 * @v1: pointer to type unsigned long
444 * @v2: value of type unsigned long
445 *
446 * Atomically ORs @v2 into @v1.
447 * Does not return a value.
448 */
449static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
450{
451 asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
452}
453
454#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
455
456/* These are x86-specific, used by some header files */
457#define atomic_clear_mask(mask, addr) \
458 asm volatile(LOCK_PREFIX "andl %0,%1" \
459 : : "r" (~(mask)), "m" (*(addr)) : "memory")
460
461#define atomic_set_mask(mask, addr) \
462 asm volatile(LOCK_PREFIX "orl %0,%1" \
463 : : "r" ((unsigned)(mask)), "m" (*(addr)) \
464 : "memory")
465
466/* Atomic operations are already serializing on x86 */
467#define smp_mb__before_atomic_dec() barrier()
468#define smp_mb__after_atomic_dec() barrier()
469#define smp_mb__before_atomic_inc() barrier()
470#define smp_mb__after_atomic_inc() barrier()
471
472#include <asm-generic/atomic.h>
473#endif /* ASM_X86__ATOMIC_64_H */
diff --git a/include/asm-x86/auxvec.h b/include/asm-x86/auxvec.h
deleted file mode 100644
index 12c7cac74202..000000000000
--- a/include/asm-x86/auxvec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef ASM_X86__AUXVEC_H
2#define ASM_X86__AUXVEC_H
3/*
4 * Architecture-neutral AT_ values in 0-17, leave some room
5 * for more of them, start the x86-specific ones at 32.
6 */
7#ifdef __i386__
8#define AT_SYSINFO 32
9#endif
10#define AT_SYSINFO_EHDR 33
11
12#endif /* ASM_X86__AUXVEC_H */
diff --git a/include/asm-x86/bigsmp/apic.h b/include/asm-x86/bigsmp/apic.h
deleted file mode 100644
index 1d9543b9d358..000000000000
--- a/include/asm-x86/bigsmp/apic.h
+++ /dev/null
@@ -1,139 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
5#define esr_disable (1)
6
7static inline int apic_id_registered(void)
8{
9 return (1);
10}
11
12static inline cpumask_t target_cpus(void)
13{
14#ifdef CONFIG_SMP
15 return cpu_online_map;
16#else
17 return cpumask_of_cpu(0);
18#endif
19}
20
21#undef APIC_DEST_LOGICAL
22#define APIC_DEST_LOGICAL 0
23#define APIC_DFR_VALUE (APIC_DFR_FLAT)
24#define INT_DELIVERY_MODE (dest_Fixed)
25#define INT_DEST_MODE (0) /* phys delivery to target proc */
26#define NO_BALANCE_IRQ (0)
27#define WAKE_SECONDARY_VIA_INIT
28
29
30static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
31{
32 return (0);
33}
34
35static inline unsigned long check_apicid_present(int bit)
36{
37 return (1);
38}
39
40static inline unsigned long calculate_ldr(int cpu)
41{
42 unsigned long val, id;
43 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
44 id = xapic_phys_to_log_apicid(cpu);
45 val |= SET_APIC_LOGICAL_ID(id);
46 return val;
47}
48
49/*
50 * Set up the logical destination ID.
51 *
52 * Intel recommends setting DFR, LDR and TPR before enabling
53 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
54 * document number 292116). So here it goes...
55 */
56static inline void init_apic_ldr(void)
57{
58 unsigned long val;
59 int cpu = smp_processor_id();
60
61 apic_write(APIC_DFR, APIC_DFR_VALUE);
62 val = calculate_ldr(cpu);
63 apic_write(APIC_LDR, val);
64}
65
66static inline void setup_apic_routing(void)
67{
68 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
69 "Physflat", nr_ioapics);
70}
71
72static inline int multi_timer_check(int apic, int irq)
73{
74 return (0);
75}
76
77static inline int apicid_to_node(int logical_apicid)
78{
79 return apicid_2_node[hard_smp_processor_id()];
80}
81
82static inline int cpu_present_to_apicid(int mps_cpu)
83{
84 if (mps_cpu < NR_CPUS)
85 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
86
87 return BAD_APICID;
88}
89
90static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
91{
92 return physid_mask_of_physid(phys_apicid);
93}
94
95extern u8 cpu_2_logical_apicid[];
96/* Mapping from cpu number to logical apicid */
97static inline int cpu_to_logical_apicid(int cpu)
98{
99 if (cpu >= NR_CPUS)
100 return BAD_APICID;
101 return cpu_physical_id(cpu);
102}
103
104static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
105{
106	/* For clustered systems we don't have a good way to do this yet - hack */
107 return physids_promote(0xFFL);
108}
109
110static inline void setup_portio_remap(void)
111{
112}
113
114static inline void enable_apic_mode(void)
115{
116}
117
118static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
119{
120 return (1);
121}
122
123/* As we are using a single CPU as destination, pick only one CPU here */
124static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
125{
126 int cpu;
127 int apicid;
128
129 cpu = first_cpu(cpumask);
130 apicid = cpu_to_logical_apicid(cpu);
131 return apicid;
132}
133
134static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
135{
136 return cpuid_apic >> index_msb;
137}
138
139#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/bigsmp/apicdef.h b/include/asm-x86/bigsmp/apicdef.h
deleted file mode 100644
index 392c3f5ef2fe..000000000000
--- a/include/asm-x86/bigsmp/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-x86/bigsmp/ipi.h b/include/asm-x86/bigsmp/ipi.h
deleted file mode 100644
index 9404c535b7ec..000000000000
--- a/include/asm-x86/bigsmp/ipi.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18}
19
20static inline void send_IPI_all(int vector)
21{
22 send_IPI_mask(cpu_online_map, vector);
23}
24
25#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
deleted file mode 100644
index 79b4b88505d7..000000000000
--- a/include/asm-x86/bios_ebda.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef ASM_X86__BIOS_EBDA_H
2#define ASM_X86__BIOS_EBDA_H
3
4#include <asm/io.h>
5
6/*
7 * There is a real-mode segmented pointer to the
8 * 4K EBDA area at 0x40E.
9 */
10static inline unsigned int get_bios_ebda(void)
11{
12 unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
13 address <<= 4;
14 return address; /* 0 means none */
15}
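
The 16-bit value stored at 0x40E is a real-mode segment, so shifting it left by four converts it to a linear address. A worked example with a hypothetical BIOS value:

/* If the word at 0x40E holds 0x9FC0, the EBDA starts at 0x9FC0 << 4 = 0x9FC00. */
unsigned int ebda = get_bios_ebda();	/* 0 means no EBDA reported */
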
16
17void reserve_ebda_region(void);
18
19#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
20/*
21 * This is obviously not a great place for this, but we want to be
22 * able to scatter it around anywhere in the kernel.
23 */
24void check_for_bios_corruption(void);
25void start_periodic_check_for_corruption(void);
26#else
27static inline void check_for_bios_corruption(void)
28{
29}
30
31static inline void start_periodic_check_for_corruption(void)
32{
33}
34#endif
35
36#endif /* ASM_X86__BIOS_EBDA_H */
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
deleted file mode 100644
index 451a74762bd4..000000000000
--- a/include/asm-x86/bitops.h
+++ /dev/null
@@ -1,451 +0,0 @@
1#ifndef ASM_X86__BITOPS_H
2#define ASM_X86__BITOPS_H
3
4/*
5 * Copyright 1992, Linus Torvalds.
6 */
7
8#ifndef _LINUX_BITOPS_H
9#error only <linux/bitops.h> can be included directly
10#endif
11
12#include <linux/compiler.h>
13#include <asm/alternative.h>
14
15/*
16 * These have to be done with inline assembly: that way the bit-setting
17 * is guaranteed to be atomic. All bit operations return 0 if the bit
18 * was cleared before the operation and != 0 if it was not.
19 *
20 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
21 */
22
23#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
24/* Technically wrong, but this avoids compilation errors on some gcc
25 versions. */
26#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
27#else
28#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
29#endif
30
31#define ADDR BITOP_ADDR(addr)
32
33/*
34 * We do the locked ops that don't return the old value as
35 * a mask operation on a byte.
36 */
37#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
38#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
39#define CONST_MASK(nr) (1 << ((nr) & 7))
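
For a compile-time-constant bit number the locked operation shrinks to a single byte-wide or/and: the target byte is addr + (nr >> 3) and the mask is 1 << (nr & 7). A worked example:

/*
 * set_bit(10, addr) with constant nr:
 *   CONST_MASK_ADDR(10, addr) -> byte at addr + 1
 *   CONST_MASK(10)            -> 1 << 2 = 0x04
 * i.e. it compiles to "lock orb $0x4,1(%[addr])".
 */
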
40
41/**
42 * set_bit - Atomically set a bit in memory
43 * @nr: the bit to set
44 * @addr: the address to start counting from
45 *
46 * This function is atomic and may not be reordered. See __set_bit()
47 * if you do not require the atomic guarantees.
48 *
49 * Note: there are no guarantees that this function will not be reordered
50 * on non-x86 architectures, so if you are writing portable code,
51 * make sure not to rely on its reordering guarantees.
52 *
53 * Note that @nr may be almost arbitrarily large; this function is not
54 * restricted to acting on a single-word quantity.
55 */
56static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
57{
58 if (IS_IMMEDIATE(nr)) {
59 asm volatile(LOCK_PREFIX "orb %1,%0"
60 : CONST_MASK_ADDR(nr, addr)
61 : "iq" ((u8)CONST_MASK(nr))
62 : "memory");
63 } else {
64 asm volatile(LOCK_PREFIX "bts %1,%0"
65 : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
66 }
67}
68
69/**
70 * __set_bit - Set a bit in memory
71 * @nr: the bit to set
72 * @addr: the address to start counting from
73 *
74 * Unlike set_bit(), this function is non-atomic and may be reordered.
75 * If it's called on the same region of memory simultaneously, the effect
76 * may be that only one operation succeeds.
77 */
78static inline void __set_bit(int nr, volatile unsigned long *addr)
79{
80 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
81}
82
83/**
84 * clear_bit - Clears a bit in memory
85 * @nr: Bit to clear
86 * @addr: Address to start counting from
87 *
88 * clear_bit() is atomic and may not be reordered. However, it does
89 * not contain a memory barrier, so if it is used for locking purposes,
90 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
91 * in order to ensure changes are visible on other processors.
92 */
93static inline void clear_bit(int nr, volatile unsigned long *addr)
94{
95 if (IS_IMMEDIATE(nr)) {
96 asm volatile(LOCK_PREFIX "andb %1,%0"
97 : CONST_MASK_ADDR(nr, addr)
98 : "iq" ((u8)~CONST_MASK(nr)));
99 } else {
100 asm volatile(LOCK_PREFIX "btr %1,%0"
101 : BITOP_ADDR(addr)
102 : "Ir" (nr));
103 }
104}
105
106/*
107 * clear_bit_unlock - Clears a bit in memory
108 * @nr: Bit to clear
109 * @addr: Address to start counting from
110 *
111 * clear_bit_unlock() is atomic and implies release semantics before the
112 * memory operation. It can be used for an unlock.
113 */
114static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
115{
116 barrier();
117 clear_bit(nr, addr);
118}
119
120static inline void __clear_bit(int nr, volatile unsigned long *addr)
121{
122 asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
123}
124
125/*
126 * __clear_bit_unlock - Clears a bit in memory
127 * @nr: Bit to clear
128 * @addr: Address to start counting from
129 *
130 * __clear_bit_unlock() is non-atomic and implies release semantics before
131 * the memory operation. It can be used for an unlock if no other CPUs can
132 * concurrently modify other bits in the word.
133 *
134 * No memory barrier is required here, because x86 cannot reorder stores past
135 * older loads. Same principle as spin_unlock.
136 */
137static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
138{
139 barrier();
140 __clear_bit(nr, addr);
141}
142
143#define smp_mb__before_clear_bit() barrier()
144#define smp_mb__after_clear_bit() barrier()
145
146/**
147 * __change_bit - Toggle a bit in memory
148 * @nr: the bit to change
149 * @addr: the address to start counting from
150 *
151 * Unlike change_bit(), this function is non-atomic and may be reordered.
152 * If it's called on the same region of memory simultaneously, the effect
153 * may be that only one operation succeeds.
154 */
155static inline void __change_bit(int nr, volatile unsigned long *addr)
156{
157 asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
158}
159
160/**
161 * change_bit - Toggle a bit in memory
162 * @nr: Bit to change
163 * @addr: Address to start counting from
164 *
165 * change_bit() is atomic and may not be reordered.
166 * Note that @nr may be almost arbitrarily large; this function is not
167 * restricted to acting on a single-word quantity.
168 */
169static inline void change_bit(int nr, volatile unsigned long *addr)
170{
171 asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
172}
173
174/**
175 * test_and_set_bit - Set a bit and return its old value
176 * @nr: Bit to set
177 * @addr: Address to count from
178 *
179 * This operation is atomic and cannot be reordered.
180 * It also implies a memory barrier.
181 */
182static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
183{
184 int oldbit;
185
186 asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
187 "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
188
189 return oldbit;
190}
191
192/**
193 * test_and_set_bit_lock - Set a bit and return its old value for lock
194 * @nr: Bit to set
195 * @addr: Address to count from
196 *
197 * This is the same as test_and_set_bit on x86.
198 */
199static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
200{
201 return test_and_set_bit(nr, addr);
202}
203
204/**
205 * __test_and_set_bit - Set a bit and return its old value
206 * @nr: Bit to set
207 * @addr: Address to count from
208 *
209 * This operation is non-atomic and can be reordered.
210 * If two examples of this operation race, one can appear to succeed
211 * but actually fail. You must protect multiple accesses with a lock.
212 */
213static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
214{
215 int oldbit;
216
217 asm("bts %2,%1\n\t"
218 "sbb %0,%0"
219 : "=r" (oldbit), ADDR
220 : "Ir" (nr));
221 return oldbit;
222}
223
224/**
225 * test_and_clear_bit - Clear a bit and return its old value
226 * @nr: Bit to clear
227 * @addr: Address to count from
228 *
229 * This operation is atomic and cannot be reordered.
230 * It also implies a memory barrier.
231 */
232static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
233{
234 int oldbit;
235
236 asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
237 "sbb %0,%0"
238 : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
239
240 return oldbit;
241}
242
243/**
244 * __test_and_clear_bit - Clear a bit and return its old value
245 * @nr: Bit to clear
246 * @addr: Address to count from
247 *
248 * This operation is non-atomic and can be reordered.
249 * If two examples of this operation race, one can appear to succeed
250 * but actually fail. You must protect multiple accesses with a lock.
251 */
252static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
253{
254 int oldbit;
255
256 asm volatile("btr %2,%1\n\t"
257 "sbb %0,%0"
258 : "=r" (oldbit), ADDR
259 : "Ir" (nr));
260 return oldbit;
261}
262
263/* WARNING: non atomic and it can be reordered! */
264static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
265{
266 int oldbit;
267
268 asm volatile("btc %2,%1\n\t"
269 "sbb %0,%0"
270 : "=r" (oldbit), ADDR
271 : "Ir" (nr) : "memory");
272
273 return oldbit;
274}
275
276/**
277 * test_and_change_bit - Change a bit and return its old value
278 * @nr: Bit to change
279 * @addr: Address to count from
280 *
281 * This operation is atomic and cannot be reordered.
282 * It also implies a memory barrier.
283 */
284static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
285{
286 int oldbit;
287
288 asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
289 "sbb %0,%0"
290 : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
291
292 return oldbit;
293}
294
295static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
296{
297 return ((1UL << (nr % BITS_PER_LONG)) &
298 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
299}
300
301static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
302{
303 int oldbit;
304
305 asm volatile("bt %2,%1\n\t"
306 "sbb %0,%0"
307 : "=r" (oldbit)
308 : "m" (*(unsigned long *)addr), "Ir" (nr));
309
310 return oldbit;
311}
312
313#if 0 /* Fool kernel-doc since it doesn't do macros yet */
314/**
315 * test_bit - Determine whether a bit is set
316 * @nr: bit number to test
317 * @addr: Address to start counting from
318 */
319static int test_bit(int nr, const volatile unsigned long *addr);
320#endif
321
322#define test_bit(nr, addr) \
323 (__builtin_constant_p((nr)) \
324 ? constant_test_bit((nr), (addr)) \
325 : variable_test_bit((nr), (addr)))
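
__builtin_constant_p() picks the implementation at compile time: a constant bit number becomes a plain load-and-mask, a variable one goes through the "bt" instruction. A hedged usage sketch (variable names are hypothetical):

unsigned long w = 0x5;
int a = test_bit(2, &w);	/* constant nr: load + mask, result 1 */
int n = 0;			/* runtime value */
int b = test_bit(n, &w);	/* variable nr: "bt" path, result 1 */
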
326
327/**
328 * __ffs - find first set bit in word
329 * @word: The word to search
330 *
331 * Undefined if no bit exists, so code should check against 0 first.
332 */
333static inline unsigned long __ffs(unsigned long word)
334{
335 asm("bsf %1,%0"
336 : "=r" (word)
337 : "rm" (word));
338 return word;
339}
340
341/**
342 * ffz - find first zero bit in word
343 * @word: The word to search
344 *
345 * Undefined if no zero exists, so code should check against ~0UL first.
346 */
347static inline unsigned long ffz(unsigned long word)
348{
349 asm("bsf %1,%0"
350 : "=r" (word)
351 : "r" (~word));
352 return word;
353}
354
355/**
356 * __fls - find last set bit in word
357 * @word: The word to search
358 *
359 * Undefined if no set bit exists, so code should check against 0 first.
360 */
361static inline unsigned long __fls(unsigned long word)
362{
363 asm("bsr %1,%0"
364 : "=r" (word)
365 : "rm" (word));
366 return word;
367}
368
369#ifdef __KERNEL__
370/**
371 * ffs - find first set bit in word
372 * @x: the word to search
373 *
374 * This is defined the same way as the libc and compiler builtin ffs
375 * routines, and therefore differs in spirit from the other bitops.
376 *
377 * ffs(value) returns 0 if value is 0 or the position of the first
378 * set bit if value is nonzero. The first (least significant) bit
379 * is at position 1.
380 */
381static inline int ffs(int x)
382{
383 int r;
384#ifdef CONFIG_X86_CMOV
385 asm("bsfl %1,%0\n\t"
386 "cmovzl %2,%0"
387 : "=r" (r) : "rm" (x), "r" (-1));
388#else
389 asm("bsfl %1,%0\n\t"
390 "jnz 1f\n\t"
391 "movl $-1,%0\n"
392 "1:" : "=r" (r) : "rm" (x));
393#endif
394 return r + 1;
395}
396
397/**
398 * fls - find last set bit in word
399 * @x: the word to search
400 *
401 * This is defined in a similar way as the libc and compiler builtin
402 * ffs, but returns the position of the most significant set bit.
403 *
404 * fls(value) returns 0 if value is 0 or the position of the last
405 * set bit if value is nonzero. The last (most significant) bit is
406 * at position 32.
407 */
408static inline int fls(int x)
409{
410 int r;
411#ifdef CONFIG_X86_CMOV
412 asm("bsrl %1,%0\n\t"
413 "cmovzl %2,%0"
414 : "=&r" (r) : "rm" (x), "rm" (-1));
415#else
416 asm("bsrl %1,%0\n\t"
417 "jnz 1f\n\t"
418 "movl $-1,%0\n"
419 "1:" : "=r" (r) : "rm" (x));
420#endif
421 return r + 1;
422}
423#endif /* __KERNEL__ */
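Both return values use the 1-based libc convention, which is easy to check against the GCC builtins. A small illustrative sketch (c_ffs/c_fls are stand-ins, not the kernel functions):

#include <stdio.h>

/* Model of the ffs()/fls() convention: 0 maps to 0, otherwise the
 * 1-based position of the first/last set bit. */
static int c_ffs(int x) { return __builtin_ffs(x); }
static int c_fls(int x) { return x ? 32 - __builtin_clz((unsigned)x) : 0; }

int main(void)
{
	printf("%d %d\n", c_ffs(0), c_fls(0));		/* 0 0 */
	printf("%d %d\n", c_ffs(0x10), c_fls(0x10));	/* 5 5 */
	printf("%d %d\n", c_ffs(3), c_fls(3));		/* 1 2 */
	return 0;
}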
424
425#undef ADDR
426
427#ifdef __KERNEL__
428
429#include <asm-generic/bitops/sched.h>
430
431#define ARCH_HAS_FAST_MULTIPLIER 1
432
433#include <asm-generic/bitops/hweight.h>
434
435#endif /* __KERNEL__ */
436
437#include <asm-generic/bitops/fls64.h>
438
439#ifdef __KERNEL__
440
441#include <asm-generic/bitops/ext2-non-atomic.h>
442
443#define ext2_set_bit_atomic(lock, nr, addr) \
444 test_and_set_bit((nr), (unsigned long *)(addr))
445#define ext2_clear_bit_atomic(lock, nr, addr) \
446 test_and_clear_bit((nr), (unsigned long *)(addr))
447
448#include <asm-generic/bitops/minix.h>
449
450#endif /* __KERNEL__ */
451#endif /* ASM_X86__BITOPS_H */
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h
deleted file mode 100644
index 1d63bd5d5946..000000000000
--- a/include/asm-x86/boot.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef ASM_X86__BOOT_H
2#define ASM_X86__BOOT_H
3
4/* Don't touch these, unless you really know what you're doing. */
5#define DEF_SYSSEG 0x1000
6#define DEF_SYSSIZE 0x7F00
7
8/* Internal svga startup constants */
9#define NORMAL_VGA 0xffff /* 80x25 mode */
10#define EXTENDED_VGA 0xfffe /* 80x50 mode */
11#define ASK_VGA 0xfffd /* ask for it at bootup */
12
13/* Physical address where kernel should be loaded. */
14#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16 & ~(CONFIG_PHYSICAL_ALIGN - 1))
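LOAD_PHYSICAL_ADDR is a plain power-of-two round-up. A worked example with hypothetical config values (0x123456 and the 2 MiB alignment are illustrative, not taken from any real defconfig):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x123456;		/* hypothetical CONFIG_PHYSICAL_START */
	unsigned long align = 0x200000;		/* hypothetical CONFIG_PHYSICAL_ALIGN, 2 MiB */
	unsigned long load  = (start + (align - 1)) & ~(align - 1);

	printf("%#lx\n", load);			/* 0x200000 */
	return 0;
}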
17
18#ifdef CONFIG_X86_64
19#define BOOT_HEAP_SIZE 0x7000
20#define BOOT_STACK_SIZE 0x4000
21#else
22#define BOOT_HEAP_SIZE 0x4000
23#define BOOT_STACK_SIZE 0x1000
24#endif
25
26#endif /* ASM_X86__BOOT_H */
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
deleted file mode 100644
index ccf027e2d97d..000000000000
--- a/include/asm-x86/bootparam.h
+++ /dev/null
@@ -1,111 +0,0 @@
1#ifndef ASM_X86__BOOTPARAM_H
2#define ASM_X86__BOOTPARAM_H
3
4#include <linux/types.h>
5#include <linux/screen_info.h>
6#include <linux/apm_bios.h>
7#include <linux/edd.h>
8#include <asm/e820.h>
9#include <asm/ist.h>
10#include <video/edid.h>
11
12/* setup data types */
13#define SETUP_NONE 0
14#define SETUP_E820_EXT 1
15
16/* extensible setup data list node */
17struct setup_data {
18 __u64 next;
19 __u32 type;
20 __u32 len;
21 __u8 data[0];
22};
23
24struct setup_header {
25 __u8 setup_sects;
26 __u16 root_flags;
27 __u32 syssize;
28 __u16 ram_size;
29#define RAMDISK_IMAGE_START_MASK 0x07FF
30#define RAMDISK_PROMPT_FLAG 0x8000
31#define RAMDISK_LOAD_FLAG 0x4000
32 __u16 vid_mode;
33 __u16 root_dev;
34 __u16 boot_flag;
35 __u16 jump;
36 __u32 header;
37 __u16 version;
38 __u32 realmode_swtch;
39 __u16 start_sys;
40 __u16 kernel_version;
41 __u8 type_of_loader;
42 __u8 loadflags;
43#define LOADED_HIGH (1<<0)
44#define QUIET_FLAG (1<<5)
45#define KEEP_SEGMENTS (1<<6)
46#define CAN_USE_HEAP (1<<7)
47 __u16 setup_move_size;
48 __u32 code32_start;
49 __u32 ramdisk_image;
50 __u32 ramdisk_size;
51 __u32 bootsect_kludge;
52 __u16 heap_end_ptr;
53 __u16 _pad1;
54 __u32 cmd_line_ptr;
55 __u32 initrd_addr_max;
56 __u32 kernel_alignment;
57 __u8 relocatable_kernel;
58 __u8 _pad2[3];
59 __u32 cmdline_size;
60 __u32 hardware_subarch;
61 __u64 hardware_subarch_data;
62 __u32 payload_offset;
63 __u32 payload_length;
64 __u64 setup_data;
65} __attribute__((packed));
66
67struct sys_desc_table {
68 __u16 length;
69 __u8 table[14];
70};
71
72struct efi_info {
73 __u32 efi_loader_signature;
74 __u32 efi_systab;
75 __u32 efi_memdesc_size;
76 __u32 efi_memdesc_version;
77 __u32 efi_memmap;
78 __u32 efi_memmap_size;
79 __u32 efi_systab_hi;
80 __u32 efi_memmap_hi;
81};
82
83/* The so-called "zeropage" */
84struct boot_params {
85 struct screen_info screen_info; /* 0x000 */
86 struct apm_bios_info apm_bios_info; /* 0x040 */
87 __u8 _pad2[12]; /* 0x054 */
88 struct ist_info ist_info; /* 0x060 */
89 __u8 _pad3[16]; /* 0x070 */
90 __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
91 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
92 struct sys_desc_table sys_desc_table; /* 0x0a0 */
93 __u8 _pad4[144]; /* 0x0b0 */
94 struct edid_info edid_info; /* 0x140 */
95 struct efi_info efi_info; /* 0x1c0 */
96 __u32 alt_mem_k; /* 0x1e0 */
97 __u32 scratch; /* Scratch field! */ /* 0x1e4 */
98 __u8 e820_entries; /* 0x1e8 */
99 __u8 eddbuf_entries; /* 0x1e9 */
100 __u8 edd_mbr_sig_buf_entries; /* 0x1ea */
101 __u8 _pad6[6]; /* 0x1eb */
102 struct setup_header hdr; /* setup header */ /* 0x1f1 */
103 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
104 __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
105 struct e820entry e820_map[E820MAX]; /* 0x2d0 */
106 __u8 _pad8[48]; /* 0xcd0 */
107 struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
108 __u8 _pad9[276]; /* 0xeec */
109} __attribute__((packed));
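The setup_data field in the header above chains extensible boot data. A minimal sketch of walking that chain (walk_setup_data is hypothetical, and it assumes the physical addresses in ->next are directly dereferenceable, which real early-boot code has to arrange by mapping them first):

static void walk_setup_data(struct boot_params *bp)
{
	__u64 pa = bp->hdr.setup_data;

	while (pa) {
		struct setup_data *sd = (struct setup_data *)(unsigned long)pa;

		/* sd->type identifies the payload; sd->data holds sd->len
		 * bytes, e.g. extra e820 entries for SETUP_E820_EXT. */
		pa = sd->next;
	}
}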
110
111#endif /* ASM_X86__BOOTPARAM_H */
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
deleted file mode 100644
index 91ad43a54c47..000000000000
--- a/include/asm-x86/bug.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef ASM_X86__BUG_H
2#define ASM_X86__BUG_H
3
4#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG
6
7#ifdef CONFIG_DEBUG_BUGVERBOSE
8
9#ifdef CONFIG_X86_32
10# define __BUG_C0 "2:\t.long 1b, %c0\n"
11#else
12# define __BUG_C0 "2:\t.quad 1b, %c0\n"
13#endif
14
15#define BUG() \
16do { \
17 asm volatile("1:\tud2\n" \
18 ".pushsection __bug_table,\"a\"\n" \
19 __BUG_C0 \
20 "\t.word %c1, 0\n" \
21 "\t.org 2b+%c2\n" \
22 ".popsection" \
23 : : "i" (__FILE__), "i" (__LINE__), \
24 "i" (sizeof(struct bug_entry))); \
25 for (;;) ; \
26} while (0)
27
28#else
29#define BUG() \
30do { \
31 asm volatile("ud2"); \
32 for (;;) ; \
33} while (0)
34#endif
35
36#endif /* !CONFIG_BUG */
37
38#include <asm-generic/bug.h>
39#endif /* ASM_X86__BUG_H */
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
deleted file mode 100644
index dc604985f2ad..000000000000
--- a/include/asm-x86/bugs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef ASM_X86__BUGS_H
2#define ASM_X86__BUGS_H
3
4extern void check_bugs(void);
5
6#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
7int ppro_with_ram_bug(void);
8#else
9static inline int ppro_with_ram_bug(void) { return 0; }
10#endif
11
12#endif /* ASM_X86__BUGS_H */
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
deleted file mode 100644
index 722f27d68105..000000000000
--- a/include/asm-x86/byteorder.h
+++ /dev/null
@@ -1,81 +0,0 @@
1#ifndef ASM_X86__BYTEORDER_H
2#define ASM_X86__BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8
9#ifdef __i386__
10
11static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
12{
13#ifdef CONFIG_X86_BSWAP
14 asm("bswap %0" : "=r" (x) : "0" (x));
15#else
16 asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
17 "rorl $16,%0\n\t" /* swap words */
18 "xchgb %b0,%h0" /* swap higher bytes */
19 : "=q" (x)
20 : "0" (x));
21#endif
22 return x;
23}
24
25static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
26{
27 union {
28 struct {
29 __u32 a;
30 __u32 b;
31 } s;
32 __u64 u;
33 } v;
34 v.u = val;
35#ifdef CONFIG_X86_BSWAP
36 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
37 : "=r" (v.s.a), "=r" (v.s.b)
38 : "0" (v.s.a), "1" (v.s.b));
39#else
40 v.s.a = ___arch__swab32(v.s.a);
41 v.s.b = ___arch__swab32(v.s.b);
42 asm("xchgl %0,%1"
43 : "=r" (v.s.a), "=r" (v.s.b)
44 : "0" (v.s.a), "1" (v.s.b));
45#endif
46 return v.u;
47}
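The non-BSWAP path relies on the identity that a 64-bit byte swap equals swapping each 32-bit half and exchanging the halves. A portable check of that identity (values illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t x  = 0x0102030405060708ULL;
	uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
	uint64_t y  = ((uint64_t)__builtin_bswap32(lo) << 32)
		    | __builtin_bswap32(hi);

	printf("%016llx\n", (unsigned long long)y);	/* 0807060504030201 */
	return 0;
}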
48
49#else /* __i386__ */
50
51static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
52{
53 asm("bswapq %0"
54 : "=r" (x)
55 : "0" (x));
56 return x;
57}
58
59static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
60{
61 asm("bswapl %0"
62 : "=r" (x)
63 : "0" (x));
64 return x;
65}
66
67#endif
68
69/* Do not define swab16. GCC is smart enough to recognize the "C" version and
70 convert it into a rotation or exchange. */
71
72#define __arch__swab64(x) ___arch__swab64(x)
73#define __arch__swab32(x) ___arch__swab32(x)
74
75#define __BYTEORDER_HAS_U64__
76
77#endif /* __GNUC__ */
78
79#include <linux/byteorder/little_endian.h>
80
81#endif /* ASM_X86__BYTEORDER_H */
diff --git a/include/asm-x86/cache.h b/include/asm-x86/cache.h
deleted file mode 100644
index ea3f1cc06a97..000000000000
--- a/include/asm-x86/cache.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef ASM_X86__CACHE_H
2#define ASM_X86__CACHE_H
3
4/* L1 cache line size */
5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7
8#define __read_mostly __attribute__((__section__(".data.read_mostly")))
9
10#ifdef CONFIG_X86_VSMP
11/* vSMP Internode cacheline shift */
12#define INTERNODE_CACHE_SHIFT (12)
13#ifdef CONFIG_SMP
14#define __cacheline_aligned_in_smp \
15 __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
16 __attribute__((__section__(".data.page_aligned")))
17#endif
18#endif
19
20#endif /* ASM_X86__CACHE_H */
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
deleted file mode 100644
index 68840ef1b35a..000000000000
--- a/include/asm-x86/cacheflush.h
+++ /dev/null
@@ -1,118 +0,0 @@
1#ifndef ASM_X86__CACHEFLUSH_H
2#define ASM_X86__CACHEFLUSH_H
3
4/* Keep includes the same across arches. */
5#include <linux/mm.h>
6
7/* Caches aren't brain-dead on the Intel. */
8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
11#define flush_cache_range(vma, start, end) do { } while (0)
12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
13#define flush_dcache_page(page) do { } while (0)
14#define flush_dcache_mmap_lock(mapping) do { } while (0)
15#define flush_dcache_mmap_unlock(mapping) do { } while (0)
16#define flush_icache_range(start, end) do { } while (0)
17#define flush_icache_page(vma, pg) do { } while (0)
18#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
19#define flush_cache_vmap(start, end) do { } while (0)
20#define flush_cache_vunmap(start, end) do { } while (0)
21
22#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 memcpy((dst), (src), (len))
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy((dst), (src), (len))
26
27#define PG_non_WB PG_arch_1
28PAGEFLAG(NonWB, non_WB)
29
30/*
31 * The set_memory_* API can be used to change various attributes of a virtual
32 * address range. The attributes include:
33 * Cacheability : UnCached, WriteCombining, WriteBack
34 * Executability : eXecutable, NoteXecutable
35 * Read/Write : ReadOnly, ReadWrite
36 * Presence : NotPresent
37 *
38 * Within a category, the attributes are mutually exclusive.
39 *
40 * The implementation of this API will take care of various aspects that
41 * are associated with changing such attributes, such as:
42 * - Flushing TLBs
43 * - Flushing CPU caches
44 * - Making sure aliases of the memory behind the mapping don't violate
45 * coherency rules as defined by the CPU in the system.
46 *
47 * What this API does not do:
48 * - Provide exclusion between various callers - including callers that
49 *   operate on other mappings of the same physical page
50 * - Restore default attributes when a page is freed
51 * - Guarantee anything about mappings other than the requested one,
52 *   beyond that they do not violate the rules of the CPU you have.
53 *   Do not depend on any effects on other mappings;
54 *   CPUs other than the one you have may have more relaxed rules.
55 * The caller is required to take care of these.
56 */
57
58int _set_memory_uc(unsigned long addr, int numpages);
59int _set_memory_wc(unsigned long addr, int numpages);
60int _set_memory_wb(unsigned long addr, int numpages);
61int set_memory_uc(unsigned long addr, int numpages);
62int set_memory_wc(unsigned long addr, int numpages);
63int set_memory_wb(unsigned long addr, int numpages);
64int set_memory_x(unsigned long addr, int numpages);
65int set_memory_nx(unsigned long addr, int numpages);
66int set_memory_ro(unsigned long addr, int numpages);
67int set_memory_rw(unsigned long addr, int numpages);
68int set_memory_np(unsigned long addr, int numpages);
69int set_memory_4k(unsigned long addr, int numpages);
70
71int set_memory_array_uc(unsigned long *addr, int addrinarray);
72int set_memory_array_wb(unsigned long *addr, int addrinarray);
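A minimal usage sketch of the API declared above (protect_page is hypothetical; addr must be page-aligned and already mapped, and error handling is kept minimal for illustration):

static int protect_page(unsigned long addr)
{
	int ret = set_memory_ro(addr, 1);	/* 1 page */

	if (ret)
		return ret;
	/* ... the page is read-only in this window ... */
	return set_memory_rw(addr, 1);
}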
73
74/*
75 * For legacy compatibility with the old APIs, a few functions
76 * are provided that work on a "struct page".
77 * These functions operate ONLY on the 1:1 kernel mapping of the
78 * memory that the struct page represents, and internally just
79 * call the set_memory_* function. See the description of the
80 * set_memory_* function for more details on conventions.
81 *
82 * These APIs should be considered *deprecated* and are likely going to
83 * be removed in the future.
84 * The reason for this is the implicit operation on the 1:1 mapping only,
85 * making this not a generally useful API.
86 *
87 * Specifically, many users of the old APIs had a virtual address,
88 * called virt_to_page() or vmalloc_to_page() on that address to
89 * get a struct page* that the old API required.
90 * To convert these cases, use set_memory_*() on the original
91 * virtual address, do not use these functions.
92 */
93
94int set_pages_uc(struct page *page, int numpages);
95int set_pages_wb(struct page *page, int numpages);
96int set_pages_x(struct page *page, int numpages);
97int set_pages_nx(struct page *page, int numpages);
98int set_pages_ro(struct page *page, int numpages);
99int set_pages_rw(struct page *page, int numpages);
100
101
102void clflush_cache_range(void *addr, unsigned int size);
103
104#ifdef CONFIG_DEBUG_RODATA
105void mark_rodata_ro(void);
106extern const int rodata_test_data;
107#endif
108
109#ifdef CONFIG_DEBUG_RODATA_TEST
110int rodata_test(void);
111#else
112static inline int rodata_test(void)
113{
114 return 0;
115}
116#endif
117
118#endif /* ASM_X86__CACHEFLUSH_H */
diff --git a/include/asm-x86/calgary.h b/include/asm-x86/calgary.h
deleted file mode 100644
index 933fd272f826..000000000000
--- a/include/asm-x86/calgary.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Derived from include/asm-powerpc/iommu.h
3 *
4 * Copyright IBM Corporation, 2006-2007
5 *
6 * Author: Jon Mason <jdmason@us.ibm.com>
7 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef ASM_X86__CALGARY_H
25#define ASM_X86__CALGARY_H
26
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/dma-mapping.h>
30#include <linux/timer.h>
31#include <asm/types.h>
32
33struct iommu_table {
34 struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
35 unsigned long it_base; /* mapped address of tce table */
36 unsigned long it_hint; /* Hint for next alloc */
37 unsigned long *it_map; /* A simple allocation bitmap for now */
38 void __iomem *bbar; /* Bridge BAR */
39 u64 tar_val; /* Table Address Register */
40 struct timer_list watchdog_timer;
41 spinlock_t it_lock; /* Protects it_map */
42 unsigned int it_size; /* Size of iommu table in entries */
43 unsigned char it_busno; /* Bus number this table belongs to */
44};
45
46struct cal_chipset_ops {
47 void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
48 void (*tce_cache_blast)(struct iommu_table *tbl);
49 void (*dump_error_regs)(struct iommu_table *tbl);
50};
51
52#define TCE_TABLE_SIZE_UNSPECIFIED ~0
53#define TCE_TABLE_SIZE_64K 0
54#define TCE_TABLE_SIZE_128K 1
55#define TCE_TABLE_SIZE_256K 2
56#define TCE_TABLE_SIZE_512K 3
57#define TCE_TABLE_SIZE_1M 4
58#define TCE_TABLE_SIZE_2M 5
59#define TCE_TABLE_SIZE_4M 6
60#define TCE_TABLE_SIZE_8M 7
61
62extern int use_calgary;
63
64#ifdef CONFIG_CALGARY_IOMMU
65extern int calgary_iommu_init(void);
66extern void detect_calgary(void);
67#else
68static inline int calgary_iommu_init(void) { return 1; }
69static inline void detect_calgary(void) { return; }
70#endif
71
72#endif /* ASM_X86__CALGARY_H */
diff --git a/include/asm-x86/calling.h b/include/asm-x86/calling.h
deleted file mode 100644
index 2bc162e0ec6e..000000000000
--- a/include/asm-x86/calling.h
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * Some macros to handle stack frames in assembly.
3 */
4
5#define R15 0
6#define R14 8
7#define R13 16
8#define R12 24
9#define RBP 32
10#define RBX 40
11
12/* arguments: interrupts/non-tracing syscalls only save up to here */
13#define R11 48
14#define R10 56
15#define R9 64
16#define R8 72
17#define RAX 80
18#define RCX 88
19#define RDX 96
20#define RSI 104
21#define RDI 112
22#define ORIG_RAX 120 /* + error_code */
23/* end of arguments */
24
25/* cpu exception frame or undefined in case of fast syscall. */
26#define RIP 128
27#define CS 136
28#define EFLAGS 144
29#define RSP 152
30#define SS 160
31
32#define ARGOFFSET R11
33#define SWFRAME ORIG_RAX
34
35 .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0
36 subq $9*8+\addskip, %rsp
37 CFI_ADJUST_CFA_OFFSET 9*8+\addskip
38 movq %rdi, 8*8(%rsp)
39 CFI_REL_OFFSET rdi, 8*8
40 movq %rsi, 7*8(%rsp)
41 CFI_REL_OFFSET rsi, 7*8
42 movq %rdx, 6*8(%rsp)
43 CFI_REL_OFFSET rdx, 6*8
44 .if \norcx
45 .else
46 movq %rcx, 5*8(%rsp)
47 CFI_REL_OFFSET rcx, 5*8
48 .endif
49 movq %rax, 4*8(%rsp)
50 CFI_REL_OFFSET rax, 4*8
51 .if \nor891011
52 .else
53 movq %r8, 3*8(%rsp)
54 CFI_REL_OFFSET r8, 3*8
55 movq %r9, 2*8(%rsp)
56 CFI_REL_OFFSET r9, 2*8
57 movq %r10, 1*8(%rsp)
58 CFI_REL_OFFSET r10, 1*8
59 movq %r11, (%rsp)
60 CFI_REL_OFFSET r11, 0*8
61 .endif
62 .endm
63
64#define ARG_SKIP 9*8
65
66 .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \
67 skipr8910=0, skiprdx=0
68 .if \skipr11
69 .else
70 movq (%rsp), %r11
71 CFI_RESTORE r11
72 .endif
73 .if \skipr8910
74 .else
75 movq 1*8(%rsp), %r10
76 CFI_RESTORE r10
77 movq 2*8(%rsp), %r9
78 CFI_RESTORE r9
79 movq 3*8(%rsp), %r8
80 CFI_RESTORE r8
81 .endif
82 .if \skiprax
83 .else
84 movq 4*8(%rsp), %rax
85 CFI_RESTORE rax
86 .endif
87 .if \skiprcx
88 .else
89 movq 5*8(%rsp), %rcx
90 CFI_RESTORE rcx
91 .endif
92 .if \skiprdx
93 .else
94 movq 6*8(%rsp), %rdx
95 CFI_RESTORE rdx
96 .endif
97 movq 7*8(%rsp), %rsi
98 CFI_RESTORE rsi
99 movq 8*8(%rsp), %rdi
100 CFI_RESTORE rdi
101 .if ARG_SKIP+\addskip > 0
102 addq $ARG_SKIP+\addskip, %rsp
103 CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
104 .endif
105 .endm
106
107 .macro LOAD_ARGS offset, skiprax=0
108 movq \offset(%rsp), %r11
109 movq \offset+8(%rsp), %r10
110 movq \offset+16(%rsp), %r9
111 movq \offset+24(%rsp), %r8
112 movq \offset+40(%rsp), %rcx
113 movq \offset+48(%rsp), %rdx
114 movq \offset+56(%rsp), %rsi
115 movq \offset+64(%rsp), %rdi
116 .if \skiprax
117 .else
118 movq \offset+72(%rsp), %rax
119 .endif
120 .endm
121
122#define REST_SKIP 6*8
123
124 .macro SAVE_REST
125 subq $REST_SKIP, %rsp
126 CFI_ADJUST_CFA_OFFSET REST_SKIP
127 movq %rbx, 5*8(%rsp)
128 CFI_REL_OFFSET rbx, 5*8
129 movq %rbp, 4*8(%rsp)
130 CFI_REL_OFFSET rbp, 4*8
131 movq %r12, 3*8(%rsp)
132 CFI_REL_OFFSET r12, 3*8
133 movq %r13, 2*8(%rsp)
134 CFI_REL_OFFSET r13, 2*8
135 movq %r14, 1*8(%rsp)
136 CFI_REL_OFFSET r14, 1*8
137 movq %r15, (%rsp)
138 CFI_REL_OFFSET r15, 0*8
139 .endm
140
141 .macro RESTORE_REST
142 movq (%rsp), %r15
143 CFI_RESTORE r15
144 movq 1*8(%rsp), %r14
145 CFI_RESTORE r14
146 movq 2*8(%rsp), %r13
147 CFI_RESTORE r13
148 movq 3*8(%rsp), %r12
149 CFI_RESTORE r12
150 movq 4*8(%rsp), %rbp
151 CFI_RESTORE rbp
152 movq 5*8(%rsp), %rbx
153 CFI_RESTORE rbx
154 addq $REST_SKIP, %rsp
155 CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
156 .endm
157
158 .macro SAVE_ALL
159 SAVE_ARGS
160 SAVE_REST
161 .endm
162
163 .macro RESTORE_ALL addskip=0
164 RESTORE_REST
165 RESTORE_ARGS 0, \addskip
166 .endm
167
168 .macro icebp
169 .byte 0xf1
170 .endm
diff --git a/include/asm-x86/checksum.h b/include/asm-x86/checksum.h
deleted file mode 100644
index 848850fd7d62..000000000000
--- a/include/asm-x86/checksum.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "checksum_32.h"
3#else
4# include "checksum_64.h"
5#endif
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
deleted file mode 100644
index d041e8cda227..000000000000
--- a/include/asm-x86/checksum_32.h
+++ /dev/null
@@ -1,189 +0,0 @@
1#ifndef ASM_X86__CHECKSUM_32_H
2#define ASM_X86__CHECKSUM_32_H
3
4#include <linux/in6.h>
5
6#include <asm/uaccess.h>
7
8/*
9 * computes the checksum of a memory block at buff, length len,
10 * and adds in "sum" (32-bit)
11 *
12 * returns a 32-bit number suitable for feeding into itself
13 * or csum_tcpudp_magic
14 *
15 * this function must be called with even lengths, except
16 * for the last fragment, which may be odd
17 *
18 * it's best to have buff aligned on a 32-bit boundary
19 */
20asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
21
22/*
23 * the same as csum_partial, but copies from src while it
24 * checksums, and handles user-space pointer exceptions correctly, when needed.
25 *
26 * here it is even more important to align src and dst on a 32-bit (or even
27 * better 64-bit) boundary
28 */
29
30asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
31 int len, __wsum sum,
32 int *src_err_ptr, int *dst_err_ptr);
33
34/*
35 * Note: when you get a NULL pointer exception here this means someone
36 * passed in an incorrect kernel address to one of these functions.
37 *
38 * If you use these functions directly please don't forget the
39 * access_ok().
40 */
41static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
42 int len, __wsum sum)
43{
44 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
45}
46
47static inline __wsum csum_partial_copy_from_user(const void __user *src,
48 void *dst,
49 int len, __wsum sum,
50 int *err_ptr)
51{
52 might_sleep();
53 return csum_partial_copy_generic((__force void *)src, dst,
54 len, sum, err_ptr, NULL);
55}
56
57/*
58 * This is a version of ip_compute_csum() optimized for IP headers,
59 * which always checksum on 4 octet boundaries.
60 *
61 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
62 * Arnt Gulbrandsen.
63 */
64static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
65{
66 unsigned int sum;
67
68 asm volatile("movl (%1), %0 ;\n"
69 "subl $4, %2 ;\n"
70 "jbe 2f ;\n"
71 "addl 4(%1), %0 ;\n"
72 "adcl 8(%1), %0 ;\n"
73 "adcl 12(%1), %0;\n"
74 "1: adcl 16(%1), %0 ;\n"
75 "lea 4(%1), %1 ;\n"
76 "decl %2 ;\n"
77 "jne 1b ;\n"
78 "adcl $0, %0 ;\n"
79 "movl %0, %2 ;\n"
80 "shrl $16, %0 ;\n"
81 "addw %w2, %w0 ;\n"
82 "adcl $0, %0 ;\n"
83 "notl %0 ;\n"
84 "2: ;\n"
85 /* Since the input registers which are loaded with iph and ihl
86 are modified, we must also specify them as outputs, or gcc
87 will assume they contain their original values. */
88 : "=r" (sum), "=r" (iph), "=r" (ihl)
89 : "1" (iph), "2" (ihl)
90 : "memory");
91 return (__force __sum16)sum;
92}
93
94/*
95 * Fold a partial checksum
96 */
97
98static inline __sum16 csum_fold(__wsum sum)
99{
100 asm("addl %1, %0 ;\n"
101 "adcl $0xffff, %0 ;\n"
102 : "=r" (sum)
103 : "r" ((__force u32)sum << 16),
104 "0" ((__force u32)sum & 0xffff0000));
105 return (__force __sum16)(~(__force u32)sum >> 16);
106}
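A portable restatement of the fold above (fold() and the sample value are illustrative): add the two 16-bit halves with end-around carry, then invert.

#include <stdio.h>
#include <stdint.h>

static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (uint16_t)~sum;
}

int main(void)
{
	printf("%04x\n", fold(0x1fffeu));	/* prints 0000 */
	return 0;
}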
107
108static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
109 unsigned short len,
110 unsigned short proto,
111 __wsum sum)
112{
113 asm("addl %1, %0 ;\n"
114 "adcl %2, %0 ;\n"
115 "adcl %3, %0 ;\n"
116 "adcl $0, %0 ;\n"
117 : "=r" (sum)
118 : "g" (daddr), "g"(saddr),
119 "g" ((len + proto) << 8), "0" (sum));
120 return sum;
121}
122
123/*
124 * computes the checksum of the TCP/UDP pseudo-header
125 * returns a 16-bit checksum, already complemented
126 */
127static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
128 unsigned short len,
129 unsigned short proto,
130 __wsum sum)
131{
132 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
133}
134
135/*
136 * this routine is used for miscellaneous IP-like checksums, mainly
137 * in icmp.c
138 */
139
140static inline __sum16 ip_compute_csum(const void *buff, int len)
141{
142 return csum_fold(csum_partial(buff, len, 0));
143}
144
145#define _HAVE_ARCH_IPV6_CSUM
146static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
147 const struct in6_addr *daddr,
148 __u32 len, unsigned short proto,
149 __wsum sum)
150{
151 asm("addl 0(%1), %0 ;\n"
152 "adcl 4(%1), %0 ;\n"
153 "adcl 8(%1), %0 ;\n"
154 "adcl 12(%1), %0 ;\n"
155 "adcl 0(%2), %0 ;\n"
156 "adcl 4(%2), %0 ;\n"
157 "adcl 8(%2), %0 ;\n"
158 "adcl 12(%2), %0 ;\n"
159 "adcl %3, %0 ;\n"
160 "adcl %4, %0 ;\n"
161 "adcl $0, %0 ;\n"
162 : "=&r" (sum)
163 : "r" (saddr), "r" (daddr),
164 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
165
166 return csum_fold(sum);
167}
168
169/*
170 * Copy and checksum to user
171 */
172#define HAVE_CSUM_COPY_USER
173static inline __wsum csum_and_copy_to_user(const void *src,
174 void __user *dst,
175 int len, __wsum sum,
176 int *err_ptr)
177{
178 might_sleep();
179 if (access_ok(VERIFY_WRITE, dst, len))
180 return csum_partial_copy_generic(src, (__force void *)dst,
181 len, sum, NULL, err_ptr);
182
183 if (len)
184 *err_ptr = -EFAULT;
185
186 return (__force __wsum)-1; /* invalid checksum */
187}
188
189#endif /* ASM_X86__CHECKSUM_32_H */
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
deleted file mode 100644
index 110f403beb89..000000000000
--- a/include/asm-x86/checksum_64.h
+++ /dev/null
@@ -1,191 +0,0 @@
1#ifndef ASM_X86__CHECKSUM_64_H
2#define ASM_X86__CHECKSUM_64_H
3
4/*
5 * Checksums for x86-64
6 * Copyright 2002 by Andi Kleen, SuSE Labs
7 * with some code from asm-x86/checksum.h
8 */
9
10#include <linux/compiler.h>
11#include <asm/uaccess.h>
12#include <asm/byteorder.h>
13
14/**
15 * csum_fold - Fold and invert a 32bit checksum.
16 * @sum: 32bit unfolded sum
17 *
18 * Fold a 32bit running checksum to 16bit and invert it. This is usually
19 * the last step before putting a checksum into a packet.
20 * Make sure not to mix with 64bit checksums.
21 */
22static inline __sum16 csum_fold(__wsum sum)
23{
24 asm(" addl %1,%0\n"
25 " adcl $0xffff,%0"
26 : "=r" (sum)
27 : "r" ((__force u32)sum << 16),
28 "0" ((__force u32)sum & 0xffff0000));
29 return (__force __sum16)(~(__force u32)sum >> 16);
30}
31
32/*
33 * This is a version of ip_compute_csum() optimized for IP headers,
34 * which always checksum on 4 octet boundaries.
35 *
36 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
37 * Arnt Gulbrandsen.
38 */
39
40/**
41 * ip_fast_csum - Compute the IPv4 header checksum efficiently.
42 * @iph: ipv4 header
43 * @ihl: length of header / 4
44 */
45static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
46{
47 unsigned int sum;
48
49 asm(" movl (%1), %0\n"
50 " subl $4, %2\n"
51 " jbe 2f\n"
52 " addl 4(%1), %0\n"
53 " adcl 8(%1), %0\n"
54 " adcl 12(%1), %0\n"
55 "1: adcl 16(%1), %0\n"
56 " lea 4(%1), %1\n"
57 " decl %2\n"
58 " jne 1b\n"
59 " adcl $0, %0\n"
60 " movl %0, %2\n"
61 " shrl $16, %0\n"
62 " addw %w2, %w0\n"
63 " adcl $0, %0\n"
64 " notl %0\n"
65 "2:"
66 /* Since the input registers which are loaded with iph and ihl
67 are modified, we must also specify them as outputs, or gcc
68 will assume they contain their original values. */
69 : "=r" (sum), "=r" (iph), "=r" (ihl)
70 : "1" (iph), "2" (ihl)
71 : "memory");
72 return (__force __sum16)sum;
73}
74
75/**
76 * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
77 * @saddr: source address
78 * @daddr: destination address
79 * @len: length of packet
80 * @proto: ip protocol of packet
81 * @sum: initial sum to be added in (32bit unfolded)
82 *
83 * Returns the pseudo header checksum of the input data. Result is
84 * 32bit unfolded.
85 */
86static inline __wsum
87csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
88 unsigned short proto, __wsum sum)
89{
90 asm(" addl %1, %0\n"
91 " adcl %2, %0\n"
92 " adcl %3, %0\n"
93 " adcl $0, %0\n"
94 : "=r" (sum)
95 : "g" (daddr), "g" (saddr),
96 "g" ((len + proto)<<8), "0" (sum));
97 return sum;
98}
99
100
101/**
102 * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
103 * @saddr: source address
104 * @daddr: destination address
105 * @len: length of packet
106 * @proto: ip protocol of packet
107 * @sum: initial sum to be added in (32bit unfolded)
108 *
109 * Returns the 16bit pseudo header checksum of the input data, already
110 * complemented and ready to be filled in.
111 */
112static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
113 unsigned short len,
114 unsigned short proto, __wsum sum)
115{
116 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
117}
118
119/**
120 * csum_partial - Compute an internet checksum.
121 * @buff: buffer to be checksummed
122 * @len: length of buffer.
123 * @sum: initial sum to be added in (32bit unfolded)
124 *
125 * Returns the 32bit unfolded internet checksum of the buffer.
126 * Before filling it in, it needs to be csum_fold()'ed.
127 * buff should be aligned to a 64bit boundary if possible.
128 */
129extern __wsum csum_partial(const void *buff, int len, __wsum sum);
130
131#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
132#define HAVE_CSUM_COPY_USER 1
133
134
135/* Do not call this directly. Use the wrappers below */
136extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
137 int len, __wsum sum,
138 int *src_err_ptr, int *dst_err_ptr);
139
140
141extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
142 int len, __wsum isum, int *errp);
143extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
144 int len, __wsum isum, int *errp);
145extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
146 int len, __wsum sum);
147
148/* Old names. To be removed. */
149#define csum_and_copy_to_user csum_partial_copy_to_user
150#define csum_and_copy_from_user csum_partial_copy_from_user
151
152/**
153 * ip_compute_csum - Compute a 16bit IP checksum.
154 * @buff: buffer address.
155 * @len: length of buffer.
156 *
157 * Returns the 16bit folded/inverted checksum of the passed buffer.
158 * Ready to fill in.
159 */
160extern __sum16 ip_compute_csum(const void *buff, int len);
161
162/**
163 * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
164 * @saddr: source address
165 * @daddr: destination address
166 * @len: length of packet
167 * @proto: protocol of packet
168 * @sum: initial sum (32bit unfolded) to be added in
169 *
170 * Computes an IPv6 pseudo header checksum. This sum is added into the
171 * checksum of UDP/TCP packets and covers the addresses, length and protocol.
172 * Returns the unfolded 32bit checksum.
173 */
174
175struct in6_addr;
176
177#define _HAVE_ARCH_IPV6_CSUM 1
178extern __sum16
179csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
180 __u32 len, unsigned short proto, __wsum sum);
181
182static inline unsigned add32_with_carry(unsigned a, unsigned b)
183{
184 asm("addl %2,%0\n\t"
185 "adcl $0,%0"
186 : "=r" (a)
187 : "0" (a), "r" (b));
188 return a;
189}
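add32_with_carry() is a 32-bit add with end-around carry. A portable equivalent via a 64-bit intermediate (add32_carry is a stand-in name, not kernel API):

#include <stdio.h>
#include <stdint.h>

static uint32_t add32_carry(uint32_t a, uint32_t b)
{
	uint64_t t = (uint64_t)a + b;

	return (uint32_t)(t + (t >> 32));	/* feed the carry back in */
}

int main(void)
{
	printf("%u\n", add32_carry(0xffffffffu, 1));	/* 1 */
	return 0;
}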
190
191#endif /* ASM_X86__CHECKSUM_64_H */
diff --git a/include/asm-x86/cmpxchg.h b/include/asm-x86/cmpxchg.h
deleted file mode 100644
index a460fa088d4c..000000000000
--- a/include/asm-x86/cmpxchg.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "cmpxchg_32.h"
3#else
4# include "cmpxchg_64.h"
5#endif
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
deleted file mode 100644
index 0622e45cdf7c..000000000000
--- a/include/asm-x86/cmpxchg_32.h
+++ /dev/null
@@ -1,344 +0,0 @@
1#ifndef ASM_X86__CMPXCHG_32_H
2#define ASM_X86__CMPXCHG_32_H
3
4#include <linux/bitops.h> /* for LOCK_PREFIX */
5
6/*
7 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
8 * you need to test for the feature in boot_cpu_data.
9 */
10
11#define xchg(ptr, v) \
12 ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
13
14struct __xchg_dummy {
15 unsigned long a[100];
16};
17#define __xg(x) ((struct __xchg_dummy *)(x))
18
19/*
20 * The semantics of CMPXCHG8B are a bit strange, which is why
21 * there is a loop and the loading of %%eax and %%edx has to
22 * be inside. This inlines well in most cases, the cached
23 * cost is around ~38 cycles. (in the future we might want
24 * to do a SIMD/3DNow!/MMX/FPU 64-bit store here, but that
25 * might have an implicit FPU save as a cost, so it's not
26 * clear which path to take.)
27 *
28 * cmpxchg8b must be used with the lock prefix here to allow
29 * the instruction to be executed atomically, see page 3-102
30 * of the instruction set reference 24319102.pdf. We need
31 * the reader side to see the coherent 64bit value.
32 */
33static inline void __set_64bit(unsigned long long *ptr,
34 unsigned int low, unsigned int high)
35{
36 asm volatile("\n1:\t"
37 "movl (%0), %%eax\n\t"
38 "movl 4(%0), %%edx\n\t"
39 LOCK_PREFIX "cmpxchg8b (%0)\n\t"
40 "jnz 1b"
41 : /* no outputs */
42 : "D"(ptr),
43 "b"(low),
44 "c"(high)
45 : "ax", "dx", "memory");
46}
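The same loop shape in C, for illustration only (the kernel must use the asm above because a plain 64-bit store is not atomic on 32-bit x86; the GCC __sync builtin stands in for the locked cmpxchg8b):

static void set_64bit_model(unsigned long long *ptr, unsigned long long val)
{
	unsigned long long old = *ptr;

	/* Reread and retry until the compare-and-swap wins. */
	while (!__sync_bool_compare_and_swap(ptr, old, val))
		old = *ptr;
}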
47
48static inline void __set_64bit_constant(unsigned long long *ptr,
49 unsigned long long value)
50{
51 __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
52}
53
54#define ll_low(x) *(((unsigned int *)&(x)) + 0)
55#define ll_high(x) *(((unsigned int *)&(x)) + 1)
56
57static inline void __set_64bit_var(unsigned long long *ptr,
58 unsigned long long value)
59{
60 __set_64bit(ptr, ll_low(value), ll_high(value));
61}
62
63#define set_64bit(ptr, value) \
64 (__builtin_constant_p((value)) \
65 ? __set_64bit_constant((ptr), (value)) \
66 : __set_64bit_var((ptr), (value)))
67
68#define _set_64bit(ptr, value) \
69 (__builtin_constant_p(value) \
70 ? __set_64bit(ptr, (unsigned int)(value), \
71 (unsigned int)((value) >> 32)) \
72 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
73
74/*
75 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
76 * Note 2: xchg has a side effect, so the volatile qualifier is necessary;
77 * without it the primitive is invalid, since *ptr is an output argument. --ANK
78 */
79static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
80 int size)
81{
82 switch (size) {
83 case 1:
84 asm volatile("xchgb %b0,%1"
85 : "=q" (x)
86 : "m" (*__xg(ptr)), "0" (x)
87 : "memory");
88 break;
89 case 2:
90 asm volatile("xchgw %w0,%1"
91 : "=r" (x)
92 : "m" (*__xg(ptr)), "0" (x)
93 : "memory");
94 break;
95 case 4:
96 asm volatile("xchgl %0,%1"
97 : "=r" (x)
98 : "m" (*__xg(ptr)), "0" (x)
99 : "memory");
100 break;
101 }
102 return x;
103}
104
105/*
106 * Atomic compare and exchange. Compare OLD with MEM, if identical,
107 * store NEW in MEM. Return the initial value in MEM. Success is
108 * indicated by comparing RETURN with OLD.
109 */
110
111#ifdef CONFIG_X86_CMPXCHG
112#define __HAVE_ARCH_CMPXCHG 1
113#define cmpxchg(ptr, o, n) \
114 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
115 (unsigned long)(n), \
116 sizeof(*(ptr))))
117#define sync_cmpxchg(ptr, o, n) \
118 ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
119 (unsigned long)(n), \
120 sizeof(*(ptr))))
121#define cmpxchg_local(ptr, o, n) \
122 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
123 (unsigned long)(n), \
124 sizeof(*(ptr))))
125#endif
126
127#ifdef CONFIG_X86_CMPXCHG64
128#define cmpxchg64(ptr, o, n) \
129 ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
130 (unsigned long long)(n)))
131#define cmpxchg64_local(ptr, o, n) \
132 ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
133 (unsigned long long)(n)))
134#endif
135
136static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
137 unsigned long new, int size)
138{
139 unsigned long prev;
140 switch (size) {
141 case 1:
142 asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
143 : "=a"(prev)
144 : "q"(new), "m"(*__xg(ptr)), "0"(old)
145 : "memory");
146 return prev;
147 case 2:
148 asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
149 : "=a"(prev)
150 : "r"(new), "m"(*__xg(ptr)), "0"(old)
151 : "memory");
152 return prev;
153 case 4:
154 asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
155 : "=a"(prev)
156 : "r"(new), "m"(*__xg(ptr)), "0"(old)
157 : "memory");
158 return prev;
159 }
160 return old;
161}
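A classic usage sketch built on the primitive above (atomic_inc_ulong is hypothetical; the real kernel has dedicated atomic_t helpers): loop until no other CPU changed the value between the read and the exchange.

static void atomic_inc_ulong(volatile unsigned long *p)
{
	unsigned long old, new;

	do {
		old = *p;
		new = old + 1;
	} while (cmpxchg(p, old, new) != old);
}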
162
163/*
164 * Always use locked operations when touching memory shared with a
165 * hypervisor, since the system may be SMP even if the guest kernel
166 * isn't.
167 */
168static inline unsigned long __sync_cmpxchg(volatile void *ptr,
169 unsigned long old,
170 unsigned long new, int size)
171{
172 unsigned long prev;
173 switch (size) {
174 case 1:
175 asm volatile("lock; cmpxchgb %b1,%2"
176 : "=a"(prev)
177 : "q"(new), "m"(*__xg(ptr)), "0"(old)
178 : "memory");
179 return prev;
180 case 2:
181 asm volatile("lock; cmpxchgw %w1,%2"
182 : "=a"(prev)
183 : "r"(new), "m"(*__xg(ptr)), "0"(old)
184 : "memory");
185 return prev;
186 case 4:
187 asm volatile("lock; cmpxchgl %1,%2"
188 : "=a"(prev)
189 : "r"(new), "m"(*__xg(ptr)), "0"(old)
190 : "memory");
191 return prev;
192 }
193 return old;
194}
195
196static inline unsigned long __cmpxchg_local(volatile void *ptr,
197 unsigned long old,
198 unsigned long new, int size)
199{
200 unsigned long prev;
201 switch (size) {
202 case 1:
203 asm volatile("cmpxchgb %b1,%2"
204 : "=a"(prev)
205 : "q"(new), "m"(*__xg(ptr)), "0"(old)
206 : "memory");
207 return prev;
208 case 2:
209 asm volatile("cmpxchgw %w1,%2"
210 : "=a"(prev)
211 : "r"(new), "m"(*__xg(ptr)), "0"(old)
212 : "memory");
213 return prev;
214 case 4:
215 asm volatile("cmpxchgl %1,%2"
216 : "=a"(prev)
217 : "r"(new), "m"(*__xg(ptr)), "0"(old)
218 : "memory");
219 return prev;
220 }
221 return old;
222}
223
224static inline unsigned long long __cmpxchg64(volatile void *ptr,
225 unsigned long long old,
226 unsigned long long new)
227{
228 unsigned long long prev;
229 asm volatile(LOCK_PREFIX "cmpxchg8b %3"
230 : "=A"(prev)
231 : "b"((unsigned long)new),
232 "c"((unsigned long)(new >> 32)),
233 "m"(*__xg(ptr)),
234 "0"(old)
235 : "memory");
236 return prev;
237}
238
239static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
240 unsigned long long old,
241 unsigned long long new)
242{
243 unsigned long long prev;
244 asm volatile("cmpxchg8b %3"
245 : "=A"(prev)
246 : "b"((unsigned long)new),
247 "c"((unsigned long)(new >> 32)),
248 "m"(*__xg(ptr)),
249 "0"(old)
250 : "memory");
251 return prev;
252}
253
254#ifndef CONFIG_X86_CMPXCHG
255/*
256 * Building a kernel capable of running on an 80386. It may be necessary to
257 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
258 * a function for each of the sizes we support.
259 */
260
261extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
262extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
263extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
264
265static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
266 unsigned long new, int size)
267{
268 switch (size) {
269 case 1:
270 return cmpxchg_386_u8(ptr, old, new);
271 case 2:
272 return cmpxchg_386_u16(ptr, old, new);
273 case 4:
274 return cmpxchg_386_u32(ptr, old, new);
275 }
276 return old;
277}
278
279#define cmpxchg(ptr, o, n) \
280({ \
281 __typeof__(*(ptr)) __ret; \
282 if (likely(boot_cpu_data.x86 > 3)) \
283 __ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
284 (unsigned long)(o), (unsigned long)(n), \
285 sizeof(*(ptr))); \
286 else \
287 __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
288 (unsigned long)(o), (unsigned long)(n), \
289 sizeof(*(ptr))); \
290 __ret; \
291})
292#define cmpxchg_local(ptr, o, n) \
293({ \
294 __typeof__(*(ptr)) __ret; \
295 if (likely(boot_cpu_data.x86 > 3)) \
296 __ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
297 (unsigned long)(o), (unsigned long)(n), \
298 sizeof(*(ptr))); \
299 else \
300 __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
301 (unsigned long)(o), (unsigned long)(n), \
302 sizeof(*(ptr))); \
303 __ret; \
304})
305#endif
306
307#ifndef CONFIG_X86_CMPXCHG64
308/*
309 * Building a kernel capable of running on an 80386 or 80486. It may be necessary
310 * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
311 */
312
313extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
314
315#define cmpxchg64(ptr, o, n) \
316({ \
317 __typeof__(*(ptr)) __ret; \
318 if (likely(boot_cpu_data.x86 > 4)) \
319 __ret = (__typeof__(*(ptr)))__cmpxchg64((ptr), \
320 (unsigned long long)(o), \
321 (unsigned long long)(n)); \
322 else \
323 __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
324 (unsigned long long)(o), \
325 (unsigned long long)(n)); \
326 __ret; \
327})
328#define cmpxchg64_local(ptr, o, n) \
329({ \
330 __typeof__(*(ptr)) __ret; \
331 if (likely(boot_cpu_data.x86 > 4)) \
332 __ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
333 (unsigned long long)(o), \
334 (unsigned long long)(n)); \
335 else \
336 __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
337 (unsigned long long)(o), \
338 (unsigned long long)(n)); \
339 __ret; \
340})
341
342#endif
343
344#endif /* ASM_X86__CMPXCHG_32_H */
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
deleted file mode 100644
index 63c1a5e61b99..000000000000
--- a/include/asm-x86/cmpxchg_64.h
+++ /dev/null
@@ -1,185 +0,0 @@
1#ifndef ASM_X86__CMPXCHG_64_H
2#define ASM_X86__CMPXCHG_64_H
3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5
6#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
7 (ptr), sizeof(*(ptr))))
8
9#define __xg(x) ((volatile long *)(x))
10
11static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
12{
13 *ptr = val;
14}
15
16#define _set_64bit set_64bit
17
18/*
19 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
20 * Note 2: xchg has a side effect, so the volatile qualifier is necessary;
21 * without it the primitive is invalid, since *ptr is an output argument. --ANK
22 */
23static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
24 int size)
25{
26 switch (size) {
27 case 1:
28 asm volatile("xchgb %b0,%1"
29 : "=q" (x)
30 : "m" (*__xg(ptr)), "0" (x)
31 : "memory");
32 break;
33 case 2:
34 asm volatile("xchgw %w0,%1"
35 : "=r" (x)
36 : "m" (*__xg(ptr)), "0" (x)
37 : "memory");
38 break;
39 case 4:
40 asm volatile("xchgl %k0,%1"
41 : "=r" (x)
42 : "m" (*__xg(ptr)), "0" (x)
43 : "memory");
44 break;
45 case 8:
46 asm volatile("xchgq %0,%1"
47 : "=r" (x)
48 : "m" (*__xg(ptr)), "0" (x)
49 : "memory");
50 break;
51 }
52 return x;
53}
54
55/*
56 * Atomic compare and exchange. Compare OLD with MEM, if identical,
57 * store NEW in MEM. Return the initial value in MEM. Success is
58 * indicated by comparing RETURN with OLD.
59 */
60
61#define __HAVE_ARCH_CMPXCHG 1
62
63static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
64 unsigned long new, int size)
65{
66 unsigned long prev;
67 switch (size) {
68 case 1:
69 asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
70 : "=a"(prev)
71 : "q"(new), "m"(*__xg(ptr)), "0"(old)
72 : "memory");
73 return prev;
74 case 2:
75 asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
76 : "=a"(prev)
77 : "r"(new), "m"(*__xg(ptr)), "0"(old)
78 : "memory");
79 return prev;
80 case 4:
81 asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
82 : "=a"(prev)
83 : "r"(new), "m"(*__xg(ptr)), "0"(old)
84 : "memory");
85 return prev;
86 case 8:
87 asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
88 : "=a"(prev)
89 : "r"(new), "m"(*__xg(ptr)), "0"(old)
90 : "memory");
91 return prev;
92 }
93 return old;
94}
95
96/*
97 * Always use locked operations when touching memory shared with a
98 * hypervisor, since the system may be SMP even if the guest kernel
99 * isn't.
100 */
101static inline unsigned long __sync_cmpxchg(volatile void *ptr,
102 unsigned long old,
103 unsigned long new, int size)
104{
105 unsigned long prev;
106 switch (size) {
107 case 1:
108 asm volatile("lock; cmpxchgb %b1,%2"
109 : "=a"(prev)
110 : "q"(new), "m"(*__xg(ptr)), "0"(old)
111 : "memory");
112 return prev;
113 case 2:
114 asm volatile("lock; cmpxchgw %w1,%2"
115 : "=a"(prev)
116 : "r"(new), "m"(*__xg(ptr)), "0"(old)
117 : "memory");
118 return prev;
119 case 4:
120 asm volatile("lock; cmpxchgl %1,%2"
121 : "=a"(prev)
122 : "r"(new), "m"(*__xg(ptr)), "0"(old)
123 : "memory");
124 return prev;
125 }
126 return old;
127}
128
129static inline unsigned long __cmpxchg_local(volatile void *ptr,
130 unsigned long old,
131 unsigned long new, int size)
132{
133 unsigned long prev;
134 switch (size) {
135 case 1:
136 asm volatile("cmpxchgb %b1,%2"
137 : "=a"(prev)
138 : "q"(new), "m"(*__xg(ptr)), "0"(old)
139 : "memory");
140 return prev;
141 case 2:
142 asm volatile("cmpxchgw %w1,%2"
143 : "=a"(prev)
144 : "r"(new), "m"(*__xg(ptr)), "0"(old)
145 : "memory");
146 return prev;
147 case 4:
148 asm volatile("cmpxchgl %k1,%2"
149 : "=a"(prev)
150 : "r"(new), "m"(*__xg(ptr)), "0"(old)
151 : "memory");
152 return prev;
153 case 8:
154 asm volatile("cmpxchgq %1,%2"
155 : "=a"(prev)
156 : "r"(new), "m"(*__xg(ptr)), "0"(old)
157 : "memory");
158 return prev;
159 }
160 return old;
161}
162
163#define cmpxchg(ptr, o, n) \
164 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
165 (unsigned long)(n), sizeof(*(ptr))))
166#define cmpxchg64(ptr, o, n) \
167({ \
168 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
169 cmpxchg((ptr), (o), (n)); \
170})
171#define cmpxchg_local(ptr, o, n) \
172 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
173 (unsigned long)(n), \
174 sizeof(*(ptr))))
175#define sync_cmpxchg(ptr, o, n) \
176 ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
177 (unsigned long)(n), \
178 sizeof(*(ptr))))
179#define cmpxchg64_local(ptr, o, n) \
180({ \
181 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
182 cmpxchg_local((ptr), (o), (n)); \
183})
184
185#endif /* ASM_X86__CMPXCHG_64_H */
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
deleted file mode 100644
index 6732b150949e..000000000000
--- a/include/asm-x86/compat.h
+++ /dev/null
@@ -1,218 +0,0 @@
1#ifndef ASM_X86__COMPAT_H
2#define ASM_X86__COMPAT_H
3
4/*
5 * Architecture specific compatibility types
6 */
7#include <linux/types.h>
8#include <linux/sched.h>
9#include <asm/user32.h>
10
11#define COMPAT_USER_HZ 100
12
13typedef u32 compat_size_t;
14typedef s32 compat_ssize_t;
15typedef s32 compat_time_t;
16typedef s32 compat_clock_t;
17typedef s32 compat_pid_t;
18typedef u16 __compat_uid_t;
19typedef u16 __compat_gid_t;
20typedef u32 __compat_uid32_t;
21typedef u32 __compat_gid32_t;
22typedef u16 compat_mode_t;
23typedef u32 compat_ino_t;
24typedef u16 compat_dev_t;
25typedef s32 compat_off_t;
26typedef s64 compat_loff_t;
27typedef u16 compat_nlink_t;
28typedef u16 compat_ipc_pid_t;
29typedef s32 compat_daddr_t;
30typedef u32 compat_caddr_t;
31typedef __kernel_fsid_t compat_fsid_t;
32typedef s32 compat_timer_t;
33typedef s32 compat_key_t;
34
35typedef s32 compat_int_t;
36typedef s32 compat_long_t;
37typedef s64 __attribute__((aligned(4))) compat_s64;
38typedef u32 compat_uint_t;
39typedef u32 compat_ulong_t;
40typedef u64 __attribute__((aligned(4))) compat_u64;
41
42struct compat_timespec {
43 compat_time_t tv_sec;
44 s32 tv_nsec;
45};
46
47struct compat_timeval {
48 compat_time_t tv_sec;
49 s32 tv_usec;
50};
51
52struct compat_stat {
53 compat_dev_t st_dev;
54 u16 __pad1;
55 compat_ino_t st_ino;
56 compat_mode_t st_mode;
57 compat_nlink_t st_nlink;
58 __compat_uid_t st_uid;
59 __compat_gid_t st_gid;
60 compat_dev_t st_rdev;
61 u16 __pad2;
62 u32 st_size;
63 u32 st_blksize;
64 u32 st_blocks;
65 u32 st_atime;
66 u32 st_atime_nsec;
67 u32 st_mtime;
68 u32 st_mtime_nsec;
69 u32 st_ctime;
70 u32 st_ctime_nsec;
71 u32 __unused4;
72 u32 __unused5;
73};
74
75struct compat_flock {
76 short l_type;
77 short l_whence;
78 compat_off_t l_start;
79 compat_off_t l_len;
80 compat_pid_t l_pid;
81};
82
83#define F_GETLK64 12 /* using 'struct flock64' */
84#define F_SETLK64 13
85#define F_SETLKW64 14
86
87/*
88 * IA32 uses 4 byte alignment for 64 bit quantities,
89 * so we need to pack this structure.
90 */
91struct compat_flock64 {
92 short l_type;
93 short l_whence;
94 compat_loff_t l_start;
95 compat_loff_t l_len;
96 compat_pid_t l_pid;
97} __attribute__((packed));
98
99struct compat_statfs {
100 int f_type;
101 int f_bsize;
102 int f_blocks;
103 int f_bfree;
104 int f_bavail;
105 int f_files;
106 int f_ffree;
107 compat_fsid_t f_fsid;
108 int f_namelen; /* SunOS ignores this field. */
109 int f_frsize;
110 int f_spare[5];
111};
112
113#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
114#define COMPAT_RLIM_INFINITY 0xffffffff
115
116typedef u32 compat_old_sigset_t; /* at least 32 bits */
117
118#define _COMPAT_NSIG 64
119#define _COMPAT_NSIG_BPW 32
120
121typedef u32 compat_sigset_word;
122
123#define COMPAT_OFF_T_MAX 0x7fffffff
124#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
125
126struct compat_ipc64_perm {
127 compat_key_t key;
128 __compat_uid32_t uid;
129 __compat_gid32_t gid;
130 __compat_uid32_t cuid;
131 __compat_gid32_t cgid;
132 unsigned short mode;
133 unsigned short __pad1;
134 unsigned short seq;
135 unsigned short __pad2;
136 compat_ulong_t unused1;
137 compat_ulong_t unused2;
138};
139
140struct compat_semid64_ds {
141 struct compat_ipc64_perm sem_perm;
142 compat_time_t sem_otime;
143 compat_ulong_t __unused1;
144 compat_time_t sem_ctime;
145 compat_ulong_t __unused2;
146 compat_ulong_t sem_nsems;
147 compat_ulong_t __unused3;
148 compat_ulong_t __unused4;
149};
150
151struct compat_msqid64_ds {
152 struct compat_ipc64_perm msg_perm;
153 compat_time_t msg_stime;
154 compat_ulong_t __unused1;
155 compat_time_t msg_rtime;
156 compat_ulong_t __unused2;
157 compat_time_t msg_ctime;
158 compat_ulong_t __unused3;
159 compat_ulong_t msg_cbytes;
160 compat_ulong_t msg_qnum;
161 compat_ulong_t msg_qbytes;
162 compat_pid_t msg_lspid;
163 compat_pid_t msg_lrpid;
164 compat_ulong_t __unused4;
165 compat_ulong_t __unused5;
166};
167
168struct compat_shmid64_ds {
169 struct compat_ipc64_perm shm_perm;
170 compat_size_t shm_segsz;
171 compat_time_t shm_atime;
172 compat_ulong_t __unused1;
173 compat_time_t shm_dtime;
174 compat_ulong_t __unused2;
175 compat_time_t shm_ctime;
176 compat_ulong_t __unused3;
177 compat_pid_t shm_cpid;
178 compat_pid_t shm_lpid;
179 compat_ulong_t shm_nattch;
180 compat_ulong_t __unused4;
181 compat_ulong_t __unused5;
182};
183
184/*
185 * The type of struct elf_prstatus.pr_reg in compatible core dumps.
186 */
187typedef struct user_regs_struct32 compat_elf_gregset_t;
188
189/*
190 * A pointer passed in from user mode. This should not
191 * be used for syscall parameters, just declare them
192 * as pointers because the syscall entry code will have
193 * appropriately converted them already.
194 */
195typedef u32 compat_uptr_t;
196
197static inline void __user *compat_ptr(compat_uptr_t uptr)
198{
199 return (void __user *)(unsigned long)uptr;
200}
201
202static inline compat_uptr_t ptr_to_compat(void __user *uptr)
203{
204 return (u32)(unsigned long)uptr;
205}
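An illustrative round trip (compat_ptr_demo and the address are hypothetical): a kernel pointer below 4 GiB survives ptr_to_compat()/compat_ptr() unchanged, which is why compat tasks may only be handed low addresses.

static void compat_ptr_demo(void)
{
	void __user *up = (void __user *)0x12345678UL;
	compat_uptr_t cp = ptr_to_compat(up);	/* 0x12345678 */
	void __user *back = compat_ptr(cp);	/* back == up */

	(void)back;
}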
206
207static inline void __user *compat_alloc_user_space(long len)
208{
209 struct pt_regs *regs = task_pt_regs(current);
210 return (void __user *)regs->sp - len;
211}
212
213static inline int is_compat_task(void)
214{
215 return current_thread_info()->status & TS_COMPAT;
216}
217
218#endif /* ASM_X86__COMPAT_H */
diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h
deleted file mode 100644
index 83a115083f0d..000000000000
--- a/include/asm-x86/cpu.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef ASM_X86__CPU_H
2#define ASM_X86__CPU_H
3
4#include <linux/device.h>
5#include <linux/cpu.h>
6#include <linux/topology.h>
7#include <linux/nodemask.h>
8#include <linux/percpu.h>
9
10struct x86_cpu {
11 struct cpu cpu;
12};
13
14#ifdef CONFIG_HOTPLUG_CPU
15extern int arch_register_cpu(int num);
16extern void arch_unregister_cpu(int);
17#endif
18
19DECLARE_PER_CPU(int, cpu_state);
20#endif /* ASM_X86__CPU_H */
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
deleted file mode 100644
index adfeae6586e1..000000000000
--- a/include/asm-x86/cpufeature.h
+++ /dev/null
@@ -1,271 +0,0 @@
1/*
2 * Defines x86 CPU feature bits
3 */
4#ifndef ASM_X86__CPUFEATURE_H
5#define ASM_X86__CPUFEATURE_H
6
7#include <asm/required-features.h>
8
9#define NCAPINTS 9 /* N 32-bit words worth of info */
10
11/*
12 * Note: If the comment begins with a quoted string, that string is used
13 * in /proc/cpuinfo instead of the macro name. If the string is "",
14 * this feature bit is not displayed in /proc/cpuinfo at all.
15 */
16
17/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
18#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
19#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
20#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
21#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
22#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
23#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
24#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
25#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
26#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
27#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
28#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
29#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
30#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
31#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
32#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */
33 /* (plus FCMOVcc, FCOMI with FPU) */
34#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
35#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
36#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
37#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */
38#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
39#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
40#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
41#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
42#define X86_FEATURE_XMM (0*32+25) /* "sse" */
43#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */
44#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */
45#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
46#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */
47#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
48#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
49
50/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
51/* Don't duplicate feature flags which are redundant with Intel! */
52#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
53#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
54#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
55#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
56#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */
57#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */
58#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
59#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
60#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
61#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
62
63/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
64#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
65#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
66#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
67
68/* Other features, Linux-defined mapping, word 3 */
69/* This range is used for feature bits which conflict or are synthesized */
70#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
71#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
72#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
73#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
74/* cpu types for specific tunings: */
75#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */
76#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */
77#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */
78#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */
79#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
80#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
81#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FDP */
82#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
84#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
85#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
86#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
87#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */
88#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */
89#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
90#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
91#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
92#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
93#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
94#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
95
96/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
97#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
98#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */
99#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
100#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */
101#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
102#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */
103#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */
104#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
105#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
106#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
107#define X86_FEATURE_CID (4*32+10) /* Context ID */
108#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */
109#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
110#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
111#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
112#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
113#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
114#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
115#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
116#define X86_FEATURE_AES (4*32+25) /* AES instructions */
117#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
118#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
119#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
120
121/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
122#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
123#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */
124#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
125#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */
126#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
127#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
128#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */
129#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */
130#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */
131#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */
132
133/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
134#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
135#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If set, HyperThreading not valid */
136#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */
137#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */
138#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */
139#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */
140#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */
141#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
142#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
143#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
144#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
145#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
146#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
147#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
148
149/*
150 * Auxiliary flags: Linux defined - For features scattered in various
151 * CPUID levels like 0x6, 0xA etc
152 */
153#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
154
155/* Virtualization flags: Linux defined */
156#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
157#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
158#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
159#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
160#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
161
162#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
163
164#include <linux/bitops.h>
165
166extern const char * const x86_cap_flags[NCAPINTS*32];
167extern const char * const x86_power_flags[32];
168
169#define test_cpu_cap(c, bit) \
170 test_bit(bit, (unsigned long *)((c)->x86_capability))
171
172#define cpu_has(c, bit) \
173 (__builtin_constant_p(bit) && \
174 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
175 (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
176 (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
177 (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
178 (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
179 (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
180 (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
181 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
182 ? 1 : \
183 test_cpu_cap(c, bit))
184
185#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
186
187#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
188#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
189#define setup_clear_cpu_cap(bit) do { \
190 clear_cpu_cap(&boot_cpu_data, bit); \
191 set_bit(bit, (unsigned long *)cleared_cpu_caps); \
192} while (0)
193#define setup_force_cpu_cap(bit) do { \
194 set_cpu_cap(&boot_cpu_data, bit); \
195 clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
196} while (0)
197
198#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
199#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME)
200#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
201#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
202#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
203#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE)
204#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
205#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
206#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
207#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
208#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
209#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
210#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
211#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
212#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
213#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
214#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
215#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
216#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR)
217#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR)
218#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
219#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
220#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
221#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
222#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
223#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
224#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
225#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
226#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
227#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
228#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
229#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
230#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
231#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
232#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
233#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
234#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
235#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
236#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
237#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
238#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
239#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
240
241#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
242# define cpu_has_invlpg 1
243#else
244# define cpu_has_invlpg (boot_cpu_data.x86 > 3)
245#endif
246
247#ifdef CONFIG_X86_64
248
249#undef cpu_has_vme
250#define cpu_has_vme 0
251
252#undef cpu_has_pae
253#define cpu_has_pae ___BUG___
254
255#undef cpu_has_mp
256#define cpu_has_mp 1
257
258#undef cpu_has_k6_mtrr
259#define cpu_has_k6_mtrr 0
260
261#undef cpu_has_cyrix_arr
262#define cpu_has_cyrix_arr 0
263
264#undef cpu_has_centaur_mcr
265#define cpu_has_centaur_mcr 0
266
267#endif /* CONFIG_X86_64 */
268
269#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
270
271#endif /* ASM_X86__CPUFEATURE_H */
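
A note on the cpu_has() machinery removed above: when the queried bit is covered by the build's REQUIRED_MASK* words, the macro folds to a compile-time 1, so callers only pay for test_bit() on features the kernel configuration cannot assume. A minimal caller sketch under that header; fast_sse2_copy() is a hypothetical helper, not part of the patch:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/cpufeature.h>

extern void fast_sse2_copy(void *dst, const void *src, size_t len); /* hypothetical */

static void copy_buffer(void *dst, const void *src, size_t len)
{
	/* cpu_has_xmm2 expands to boot_cpu_has(X86_FEATURE_XMM2); on a
	 * build whose REQUIRED_MASK0 includes the XMM2 bit, the branch
	 * is resolved at compile time and the slow path is discarded. */
	if (cpu_has_xmm2)
		fast_sse2_copy(dst, src, len);
	else
		memcpy(dst, src, len);
}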
diff --git a/include/asm-x86/cputime.h b/include/asm-x86/cputime.h
deleted file mode 100644
index 6d68ad7e0ea3..000000000000
--- a/include/asm-x86/cputime.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/cputime.h>
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
deleted file mode 100644
index a863ead856f3..000000000000
--- a/include/asm-x86/current.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef ASM_X86__CURRENT_H
2#define ASM_X86__CURRENT_H
3
4#ifdef CONFIG_X86_32
5#include <linux/compiler.h>
6#include <asm/percpu.h>
7
8struct task_struct;
9
10DECLARE_PER_CPU(struct task_struct *, current_task);
11static __always_inline struct task_struct *get_current(void)
12{
13 return x86_read_percpu(current_task);
14}
15
16#else /* X86_32 */
17
18#ifndef __ASSEMBLY__
19#include <asm/pda.h>
20
21struct task_struct;
22
23static __always_inline struct task_struct *get_current(void)
24{
25 return read_pda(pcurrent);
26}
27
28#else /* __ASSEMBLY__ */
29
30#include <asm/asm-offsets.h>
31#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
32
33#endif /* __ASSEMBLY__ */
34
35#endif /* X86_32 */
36
37#define current get_current()
38
39#endif /* ASM_X86__CURRENT_H */
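
Both halves of this header converge on the same call site convention: 'current' is a per-cpu read on 32-bit and a PDA read on 64-bit, but callers never see the difference. A hedged usage sketch, assuming a kernel context:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/current.h>

static void report_caller(void)
{
	/* 'current' expands to get_current(): a pointer to the
	 * task_struct of the task running on this CPU. */
	printk(KERN_DEBUG "running as pid %d (%s)\n",
	       current->pid, current->comm);
}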
diff --git a/include/asm-x86/debugreg.h b/include/asm-x86/debugreg.h
deleted file mode 100644
index ecb6907c3ea4..000000000000
--- a/include/asm-x86/debugreg.h
+++ /dev/null
@@ -1,70 +0,0 @@
1#ifndef ASM_X86__DEBUGREG_H
2#define ASM_X86__DEBUGREG_H
3
4
5/* Define the register numbers for the specific debug registers.
6 Registers 0-3 contain the addresses we wish to trap on. */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */
20#define DR_TRAP3 (0x8) /* db3 */
21
22#define DR_STEP (0x4000) /* single-step */
23#define DR_SWITCH (0x8000) /* task switch */
24
25/* Now define a bunch of things for manipulating the control register.
26 The top two bytes of the control register consist of 4 fields of 4
27 bits - each field corresponds to one of the four debug registers,
28 and indicates what types of access we trap on, and how large the data
29 field is that we are looking at */
30
31#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
32#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
33
34#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
35#define DR_RW_WRITE (0x1)
36#define DR_RW_READ (0x3)
37
38#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
39#define DR_LEN_2 (0x4)
40#define DR_LEN_4 (0xC)
41#define DR_LEN_8 (0x8)
42
43/* The low byte of the control register determines which registers are
44 enabled. There are 4 fields of two bits. One bit is "local", meaning
45 that the processor will reset the bit after a task switch, and the other
46 is global, meaning that we have to explicitly reset the bit. With Linux,
47 you can use either one, since we explicitly zero the register when we enter
48 kernel mode. */
49
50#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
51#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
52#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
53
54#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
55#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
56
57/* The second byte of the control register has a few special things.
58 We can slow the instruction pipeline for instructions coming via the
59 gdt or the ldt if we want to. I am not sure why this is an advantage */
60
61#ifdef __i386__
62#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
63#else
64#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
65#endif
66
67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
69
70#endif /* ASM_X86__DEBUGREG_H */
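
The shift/size macros above compose a DR7 value field by field: per-register enable bits sit in the low byte (DR_ENABLE_SIZE bits apiece) and the type/length nibbles start at DR_CONTROL_SHIFT. A sketch of the arithmetic for a 4-byte write watchpoint on debug register 0 (illustrative only; actually arming DR0/DR7 goes through ptrace or the kernel's debug register accessors):

#include <asm/debugreg.h>

static unsigned long dr7_write_watch_dr0(void)
{
	unsigned long dr7 = 0;

	/* type + length nibble for register 0 starts at bit 16 */
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
			<< (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE);
	/* local-enable bit for register 0, in the low byte */
	dr7 |= 1UL << (DR_LOCAL_ENABLE_SHIFT + 0 * DR_ENABLE_SIZE);

	return dr7;
}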
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
deleted file mode 100644
index 8a0da95b4fc5..000000000000
--- a/include/asm-x86/delay.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef ASM_X86__DELAY_H
2#define ASM_X86__DELAY_H
3
4/*
5 * Copyright (C) 1993 Linus Torvalds
6 *
7 * Delay routines calling functions in arch/x86/lib/delay.c
8 */
9
10/* Undefined functions to get link-time errors */
11extern void __bad_udelay(void);
12extern void __bad_ndelay(void);
13
14extern void __udelay(unsigned long usecs);
15extern void __ndelay(unsigned long nsecs);
16extern void __const_udelay(unsigned long xloops);
17extern void __delay(unsigned long loops);
18
19/* 0x10c7 is 2**32 / 1000000 (rounded up) */
20#define udelay(n) (__builtin_constant_p(n) ? \
21 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
22 __udelay(n))
23
24/* 0x5 is 2**32 / 1000000000 (rounded up) */
25#define ndelay(n) (__builtin_constant_p(n) ? \
26 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
27 __ndelay(n))
28
29void use_tsc_delay(void);
30
31#endif /* ASM_X86__DELAY_H */
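
The magic multipliers follow directly from fixed-point arithmetic: __const_udelay() treats its argument as a 0.32 fixed-point fraction scaled against the calibrated loop count, so microseconds scale by 2^32 / 10^6 = 4294.97, rounded up to 4295 = 0x10c7, and nanoseconds by 2^32 / 10^9 = 4.3, rounded up to 5. A usage sketch:

#include <linux/delay.h>

static void settle_hw(void)
{
	udelay(10);	/* constant arg: folds to __const_udelay(10 * 0x10c7) */
	ndelay(300);	/* constant arg: folds to __const_udelay(300 * 5) */
}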
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
deleted file mode 100644
index f06adac7938c..000000000000
--- a/include/asm-x86/desc.h
+++ /dev/null
@@ -1,409 +0,0 @@
1#ifndef ASM_X86__DESC_H
2#define ASM_X86__DESC_H
3
4#ifndef __ASSEMBLY__
5#include <asm/desc_defs.h>
6#include <asm/ldt.h>
7#include <asm/mmu.h>
8#include <linux/smp.h>
9
10static inline void fill_ldt(struct desc_struct *desc,
11 const struct user_desc *info)
12{
13 desc->limit0 = info->limit & 0x0ffff;
14 desc->base0 = info->base_addr & 0x0000ffff;
15
16 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
17 desc->type = (info->read_exec_only ^ 1) << 1;
18 desc->type |= info->contents << 2;
19 desc->s = 1;
20 desc->dpl = 0x3;
21 desc->p = info->seg_not_present ^ 1;
22 desc->limit = (info->limit & 0xf0000) >> 16;
23 desc->avl = info->useable;
24 desc->d = info->seg_32bit;
25 desc->g = info->limit_in_pages;
26 desc->base2 = (info->base_addr & 0xff000000) >> 24;
27 /*
28 * Don't allow setting of the lm bit. It is useless anyway
29 * because 64bit system calls require __USER_CS:
30 */
31 desc->l = 0;
32}
33
34extern struct desc_ptr idt_descr;
35extern gate_desc idt_table[];
36
37struct gdt_page {
38 struct desc_struct gdt[GDT_ENTRIES];
39} __attribute__((aligned(PAGE_SIZE)));
40DECLARE_PER_CPU(struct gdt_page, gdt_page);
41
42static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
43{
44 return per_cpu(gdt_page, cpu).gdt;
45}
46
47#ifdef CONFIG_X86_64
48
49static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
50 unsigned dpl, unsigned ist, unsigned seg)
51{
52 gate->offset_low = PTR_LOW(func);
53 gate->segment = __KERNEL_CS;
54 gate->ist = ist;
55 gate->p = 1;
56 gate->dpl = dpl;
57 gate->zero0 = 0;
58 gate->zero1 = 0;
59 gate->type = type;
60 gate->offset_middle = PTR_MIDDLE(func);
61 gate->offset_high = PTR_HIGH(func);
62}
63
64#else
65static inline void pack_gate(gate_desc *gate, unsigned char type,
66 unsigned long base, unsigned dpl, unsigned flags,
67 unsigned short seg)
68{
69 gate->a = (seg << 16) | (base & 0xffff);
70 gate->b = (base & 0xffff0000) |
71 (((0x80 | type | (dpl << 5)) & 0xff) << 8);
72}
73
74#endif
75
76static inline int desc_empty(const void *ptr)
77{
78 const u32 *desc = ptr;
79 return !(desc[0] | desc[1]);
80}
81
82#ifdef CONFIG_PARAVIRT
83#include <asm/paravirt.h>
84#else
85#define load_TR_desc() native_load_tr_desc()
86#define load_gdt(dtr) native_load_gdt(dtr)
87#define load_idt(dtr) native_load_idt(dtr)
88#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
89#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
90
91#define store_gdt(dtr) native_store_gdt(dtr)
92#define store_idt(dtr) native_store_idt(dtr)
93#define store_tr(tr) (tr = native_store_tr())
94#define store_ldt(ldt) asm("sldt %0":"=m" (ldt))
95
96#define load_TLS(t, cpu) native_load_tls(t, cpu)
97#define set_ldt native_set_ldt
98
99#define write_ldt_entry(dt, entry, desc) \
100 native_write_ldt_entry(dt, entry, desc)
101#define write_gdt_entry(dt, entry, desc, type) \
102 native_write_gdt_entry(dt, entry, desc, type)
103#define write_idt_entry(dt, entry, g) \
104 native_write_idt_entry(dt, entry, g)
105
106static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
107{
108}
109
110static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
111{
112}
113#endif /* CONFIG_PARAVIRT */
114
115static inline void native_write_idt_entry(gate_desc *idt, int entry,
116 const gate_desc *gate)
117{
118 memcpy(&idt[entry], gate, sizeof(*gate));
119}
120
121static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
122 const void *desc)
123{
124 memcpy(&ldt[entry], desc, 8);
125}
126
127static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
128 const void *desc, int type)
129{
130 unsigned int size;
131 switch (type) {
132 case DESC_TSS:
133 size = sizeof(tss_desc);
134 break;
135 case DESC_LDT:
136 size = sizeof(ldt_desc);
137 break;
138 default:
139 size = sizeof(struct desc_struct);
140 break;
141 }
142 memcpy(&gdt[entry], desc, size);
143}
144
145static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
146 unsigned long limit, unsigned char type,
147 unsigned char flags)
148{
149 desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
150 desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
151 (limit & 0x000f0000) | ((type & 0xff) << 8) |
152 ((flags & 0xf) << 20);
153 desc->p = 1;
154}
155
156
157static inline void set_tssldt_descriptor(void *d, unsigned long addr,
158 unsigned type, unsigned size)
159{
160#ifdef CONFIG_X86_64
161 struct ldttss_desc64 *desc = d;
162 memset(desc, 0, sizeof(*desc));
163 desc->limit0 = size & 0xFFFF;
164 desc->base0 = PTR_LOW(addr);
165 desc->base1 = PTR_MIDDLE(addr) & 0xFF;
166 desc->type = type;
167 desc->p = 1;
168 desc->limit1 = (size >> 16) & 0xF;
169 desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
170 desc->base3 = PTR_HIGH(addr);
171#else
172 pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
173#endif
174}
175
176static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
177{
178 struct desc_struct *d = get_cpu_gdt_table(cpu);
179 tss_desc tss;
180
181 /*
182 * sizeof(unsigned long) coming from an extra "long" at the end
183 * of the iobitmap. See tss_struct definition in processor.h
184 *
185 * -1? seg base+limit should be pointing to the address of the
186 * last valid byte
187 */
188 set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
189 IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
190 sizeof(unsigned long) - 1);
191 write_gdt_entry(d, entry, &tss, DESC_TSS);
192}
193
194#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
195
196static inline void native_set_ldt(const void *addr, unsigned int entries)
197{
198 if (likely(entries == 0))
199 asm volatile("lldt %w0"::"q" (0));
200 else {
201 unsigned cpu = smp_processor_id();
202 ldt_desc ldt;
203
204 set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
205 entries * LDT_ENTRY_SIZE - 1);
206 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
207 &ldt, DESC_LDT);
208 asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
209 }
210}
211
212static inline void native_load_tr_desc(void)
213{
214 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
215}
216
217static inline void native_load_gdt(const struct desc_ptr *dtr)
218{
219 asm volatile("lgdt %0"::"m" (*dtr));
220}
221
222static inline void native_load_idt(const struct desc_ptr *dtr)
223{
224 asm volatile("lidt %0"::"m" (*dtr));
225}
226
227static inline void native_store_gdt(struct desc_ptr *dtr)
228{
229 asm volatile("sgdt %0":"=m" (*dtr));
230}
231
232static inline void native_store_idt(struct desc_ptr *dtr)
233{
234 asm volatile("sidt %0":"=m" (*dtr));
235}
236
237static inline unsigned long native_store_tr(void)
238{
239 unsigned long tr;
240 asm volatile("str %0":"=r" (tr));
241 return tr;
242}
243
244static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
245{
246 unsigned int i;
247 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
248
249 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
250 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
251}
252
253#define _LDT_empty(info) \
254 ((info)->base_addr == 0 && \
255 (info)->limit == 0 && \
256 (info)->contents == 0 && \
257 (info)->read_exec_only == 1 && \
258 (info)->seg_32bit == 0 && \
259 (info)->limit_in_pages == 0 && \
260 (info)->seg_not_present == 1 && \
261 (info)->useable == 0)
262
263#ifdef CONFIG_X86_64
264#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
265#else
266#define LDT_empty(info) (_LDT_empty(info))
267#endif
268
269static inline void clear_LDT(void)
270{
271 set_ldt(NULL, 0);
272}
273
274/*
275 * load one particular LDT into the current CPU
276 */
277static inline void load_LDT_nolock(mm_context_t *pc)
278{
279 set_ldt(pc->ldt, pc->size);
280}
281
282static inline void load_LDT(mm_context_t *pc)
283{
284 preempt_disable();
285 load_LDT_nolock(pc);
286 preempt_enable();
287}
288
289static inline unsigned long get_desc_base(const struct desc_struct *desc)
290{
291 return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
292}
293
294static inline unsigned long get_desc_limit(const struct desc_struct *desc)
295{
296 return desc->limit0 | (desc->limit << 16);
297}
298
299static inline void _set_gate(int gate, unsigned type, void *addr,
300 unsigned dpl, unsigned ist, unsigned seg)
301{
302 gate_desc s;
303 pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
304 /*
305 * does not need to be atomic because it is only done once at
306 * setup time
307 */
308 write_idt_entry(idt_table, gate, &s);
309}
310
311/*
312 * This needs to use 'idt_table' rather than 'idt', and
313 * thus use the _nonmapped_ version of the IDT, as the
314 * Pentium F0 0F bugfix can have resulted in the mapped
315 * IDT being write-protected.
316 */
317static inline void set_intr_gate(unsigned int n, void *addr)
318{
319 BUG_ON((unsigned)n > 0xFF);
320 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
321}
322
323#define SYS_VECTOR_FREE 0
324#define SYS_VECTOR_ALLOCED 1
325
326extern int first_system_vector;
327extern char system_vectors[];
328
329static inline void alloc_system_vector(int vector)
330{
331 if (system_vectors[vector] == SYS_VECTOR_FREE) {
332 system_vectors[vector] = SYS_VECTOR_ALLOCED;
333 if (first_system_vector > vector)
334 first_system_vector = vector;
335 } else
336 BUG();
337}
338
339static inline void alloc_intr_gate(unsigned int n, void *addr)
340{
341 alloc_system_vector(n);
342 set_intr_gate(n, addr);
343}
344
345/*
346 * This routine sets up an interrupt gate at descriptor privilege level 3.
347 */
348static inline void set_system_intr_gate(unsigned int n, void *addr)
349{
350 BUG_ON((unsigned)n > 0xFF);
351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
352}
353
354static inline void set_system_trap_gate(unsigned int n, void *addr)
355{
356 BUG_ON((unsigned)n > 0xFF);
357 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
358}
359
360static inline void set_trap_gate(unsigned int n, void *addr)
361{
362 BUG_ON((unsigned)n > 0xFF);
363 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
364}
365
366static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
367{
368 BUG_ON((unsigned)n > 0xFF);
369 _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
370}
371
372static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
373{
374 BUG_ON((unsigned)n > 0xFF);
375 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
376}
377
378static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
379{
380 BUG_ON((unsigned)n > 0xFF);
381 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
382}
383
384#else
385/*
386 * GET_DESC_BASE reads the descriptor base of the specified segment.
387 *
388 * Args:
389 * idx - descriptor index
390 * gdt - GDT pointer
391 * base - 32bit register to which the base will be written
392 * lo_w - lo word of the "base" register
393 * lo_b - lo byte of the "base" register
394 * hi_b - hi byte of the low word of the "base" register
395 *
396 * Example:
397 * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
398 * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
399 */
400#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
401 movb idx * 8 + 4(gdt), lo_b; \
402 movb idx * 8 + 7(gdt), hi_b; \
403 shll $16, base; \
404 movw idx * 8 + 2(gdt), lo_w;
405
406
407#endif /* __ASSEMBLY__ */
408
409#endif /* ASM_X86__DESC_H */
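
The gate helpers above are all thin wrappers around _set_gate(). A sketch of how setup-time code wires handlers into the IDT; my_handler is a hypothetical low-level entry stub and the vector numbers are arbitrary:

#include <linux/init.h>
#include <asm/desc.h>

extern void my_handler(void);	/* hypothetical asm entry stub */

static void __init install_example_gates(void)
{
	/* DPL-0 interrupt gate: reachable from hardware/kernel only */
	set_intr_gate(0xf0, my_handler);
	/* DPL-3 interrupt gate: also reachable via 'int' from user mode */
	set_system_intr_gate(0xf1, my_handler);
}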
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
deleted file mode 100644
index b881db664b46..000000000000
--- a/include/asm-x86/desc_defs.h
+++ /dev/null
@@ -1,95 +0,0 @@
1/* Written 2000 by Andi Kleen */
2#ifndef ASM_X86__DESC_DEFS_H
3#define ASM_X86__DESC_DEFS_H
4
5/*
6 * Segment descriptor structure definitions, usable from both x86_64 and i386
7 * archs.
8 */
9
10#ifndef __ASSEMBLY__
11
12#include <linux/types.h>
13
14/*
15 * FIXME: Accessing the desc_struct through its fields is more elegant,
16 * and should be the one valid thing to do. However, a lot of open code
17 * still touches the a and b accessors, and doing this allows us to do it
18 * incrementally. We keep the signature as a struct, rather than a union,
19 * so we can get rid of it transparently in the future -- glommer
20 */
21/* 8 byte segment descriptor */
22struct desc_struct {
23 union {
24 struct {
25 unsigned int a;
26 unsigned int b;
27 };
28 struct {
29 u16 limit0;
30 u16 base0;
31 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
32 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
33 };
34 };
35} __attribute__((packed));
36
37enum {
38 GATE_INTERRUPT = 0xE,
39 GATE_TRAP = 0xF,
40 GATE_CALL = 0xC,
41 GATE_TASK = 0x5,
42};
43
44/* 16byte gate */
45struct gate_struct64 {
46 u16 offset_low;
47 u16 segment;
48 unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
49 u16 offset_middle;
50 u32 offset_high;
51 u32 zero1;
52} __attribute__((packed));
53
54#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF)
55#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF)
56#define PTR_HIGH(x) ((unsigned long long)(x) >> 32)
57
58enum {
59 DESC_TSS = 0x9,
60 DESC_LDT = 0x2,
61 DESCTYPE_S = 0x10, /* !system */
62};
63
64/* LDT or TSS descriptor in the GDT. 16 bytes. */
65struct ldttss_desc64 {
66 u16 limit0;
67 u16 base0;
68 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
69 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
70 u32 base3;
71 u32 zero1;
72} __attribute__((packed));
73
74#ifdef CONFIG_X86_64
75typedef struct gate_struct64 gate_desc;
76typedef struct ldttss_desc64 ldt_desc;
77typedef struct ldttss_desc64 tss_desc;
78#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
79#define gate_segment(g) ((g).segment)
80#else
81typedef struct desc_struct gate_desc;
82typedef struct desc_struct ldt_desc;
83typedef struct desc_struct tss_desc;
84#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
85#define gate_segment(g) ((g).a >> 16)
86#endif
87
88struct desc_ptr {
89 unsigned short size;
90 unsigned long address;
91} __attribute__((packed));
92
93#endif /* !__ASSEMBLY__ */
94
95#endif /* ASM_X86__DESC_DEFS_H */
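
On 64-bit, the handler address is scattered across three offset fields of the 16-byte gate; the PTR_* macros above slice it up, and gate_offset() reassembles it. A round-trip sketch under the same definitions:

#include <asm/desc_defs.h>

static void split_offset(unsigned long func, struct gate_struct64 *g)
{
	g->offset_low    = PTR_LOW(func);	/* bits  0..15 */
	g->offset_middle = PTR_MIDDLE(func);	/* bits 16..31 */
	g->offset_high   = PTR_HIGH(func);	/* bits 32..63 */
	/* gate_offset() on this gate now yields 'func' again */
}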
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
deleted file mode 100644
index 1bece04c7d9d..000000000000
--- a/include/asm-x86/device.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef ASM_X86__DEVICE_H
2#define ASM_X86__DEVICE_H
3
4struct dev_archdata {
5#ifdef CONFIG_ACPI
6 void *acpi_handle;
7#endif
8#ifdef CONFIG_X86_64
9 struct dma_mapping_ops *dma_ops;
10#endif
11#ifdef CONFIG_DMAR
12 void *iommu; /* hook for IOMMU specific extension */
13#endif
14};
15
16#endif /* ASM_X86__DEVICE_H */
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
deleted file mode 100644
index f9530f23f1d6..000000000000
--- a/include/asm-x86/div64.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef ASM_X86__DIV64_H
2#define ASM_X86__DIV64_H
3
4#ifdef CONFIG_X86_32
5
6#include <linux/types.h>
7
8/*
9 * do_div() is NOT a C function. It wants to return
10 * two values (the quotient and the remainder), but
11 * since that doesn't work very well in C, what it
12 * does is:
13 *
14 * - modifies the 64-bit dividend _in_place_
15 * - returns the 32-bit remainder
16 *
17 * This ends up being the most efficient "calling
18 * convention" on x86.
19 */
20#define do_div(n, base) \
21({ \
22 unsigned long __upper, __low, __high, __mod, __base; \
23 __base = (base); \
24 asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
25 __upper = __high; \
26 if (__high) { \
27 __upper = __high % (__base); \
28 __high = __high / (__base); \
29 } \
30 asm("divl %2":"=a" (__low), "=d" (__mod) \
31 : "rm" (__base), "0" (__low), "1" (__upper)); \
32 asm("":"=A" (n) : "a" (__low), "d" (__high)); \
33 __mod; \
34})
35
36static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
37{
38 union {
39 u64 v64;
40 u32 v32[2];
41 } d = { dividend };
42 u32 upper;
43
44 upper = d.v32[1];
45 d.v32[1] = 0;
46 if (upper >= divisor) {
47 d.v32[1] = upper / divisor;
48 upper %= divisor;
49 }
50 asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
51 "rm" (divisor), "0" (d.v32[0]), "1" (upper));
52 return d.v64;
53}
54#define div_u64_rem div_u64_rem
55
56#else
57# include <asm-generic/div64.h>
58#endif /* CONFIG_X86_32 */
59
60#endif /* ASM_X86__DIV64_H */
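
do_div() mutates the dividend in place and hands back the remainder, which is why call sites look inverted compared to ordinary division. A sketch of the canonical nanoseconds-to-milliseconds pattern:

#include <linux/types.h>
#include <asm/div64.h>

static u64 ns_to_ms(u64 ns, u32 *rem)
{
	/* after do_div(), 'ns' holds the quotient (milliseconds) and
	 * the macro's value is the 32-bit remainder (leftover ns) */
	*rem = do_div(ns, 1000000);
	return ns;
}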
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
deleted file mode 100644
index 219c33d6361c..000000000000
--- a/include/asm-x86/dma-mapping.h
+++ /dev/null
@@ -1,308 +0,0 @@
1#ifndef ASM_X86__DMA_MAPPING_H
2#define ASM_X86__DMA_MAPPING_H
3
4/*
5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
6 * documentation.
7 */
8
9#include <linux/scatterlist.h>
10#include <asm/io.h>
11#include <asm/swiotlb.h>
12#include <asm-generic/dma-coherent.h>
13
14extern dma_addr_t bad_dma_address;
15extern int iommu_merge;
16extern struct device x86_dma_fallback_dev;
17extern int panic_on_overflow;
18
19struct dma_mapping_ops {
20 int (*mapping_error)(struct device *dev,
21 dma_addr_t dma_addr);
22 void* (*alloc_coherent)(struct device *dev, size_t size,
23 dma_addr_t *dma_handle, gfp_t gfp);
24 void (*free_coherent)(struct device *dev, size_t size,
25 void *vaddr, dma_addr_t dma_handle);
26 dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
27 size_t size, int direction);
28 void (*unmap_single)(struct device *dev, dma_addr_t addr,
29 size_t size, int direction);
30 void (*sync_single_for_cpu)(struct device *hwdev,
31 dma_addr_t dma_handle, size_t size,
32 int direction);
33 void (*sync_single_for_device)(struct device *hwdev,
34 dma_addr_t dma_handle, size_t size,
35 int direction);
36 void (*sync_single_range_for_cpu)(struct device *hwdev,
37 dma_addr_t dma_handle, unsigned long offset,
38 size_t size, int direction);
39 void (*sync_single_range_for_device)(struct device *hwdev,
40 dma_addr_t dma_handle, unsigned long offset,
41 size_t size, int direction);
42 void (*sync_sg_for_cpu)(struct device *hwdev,
43 struct scatterlist *sg, int nelems,
44 int direction);
45 void (*sync_sg_for_device)(struct device *hwdev,
46 struct scatterlist *sg, int nelems,
47 int direction);
48 int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
49 int nents, int direction);
50 void (*unmap_sg)(struct device *hwdev,
51 struct scatterlist *sg, int nents,
52 int direction);
53 int (*dma_supported)(struct device *hwdev, u64 mask);
54 int is_phys;
55};
56
57extern struct dma_mapping_ops *dma_ops;
58
59static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
60{
61#ifdef CONFIG_X86_32
62 return dma_ops;
63#else
64 if (unlikely(!dev) || !dev->archdata.dma_ops)
65 return dma_ops;
66 else
67 return dev->archdata.dma_ops;
68#endif /* CONFIG_X86_32 */
69}
70
71/* Make sure we keep the same behaviour */
72static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
73{
74#ifdef CONFIG_X86_32
75 return 0;
76#else
77 struct dma_mapping_ops *ops = get_dma_ops(dev);
78 if (ops->mapping_error)
79 return ops->mapping_error(dev, dma_addr);
80
81 return (dma_addr == bad_dma_address);
82#endif
83}
84
85#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
86#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
87#define dma_is_consistent(d, h) (1)
88
89extern int dma_supported(struct device *hwdev, u64 mask);
90extern int dma_set_mask(struct device *dev, u64 mask);
91
92extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
93 dma_addr_t *dma_addr, gfp_t flag);
94
95static inline dma_addr_t
96dma_map_single(struct device *hwdev, void *ptr, size_t size,
97 int direction)
98{
99 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
100
101 BUG_ON(!valid_dma_direction(direction));
102 return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
103}
104
105static inline void
106dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
107 int direction)
108{
109 struct dma_mapping_ops *ops = get_dma_ops(dev);
110
111 BUG_ON(!valid_dma_direction(direction));
112 if (ops->unmap_single)
113 ops->unmap_single(dev, addr, size, direction);
114}
115
116static inline int
117dma_map_sg(struct device *hwdev, struct scatterlist *sg,
118 int nents, int direction)
119{
120 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
121
122 BUG_ON(!valid_dma_direction(direction));
123 return ops->map_sg(hwdev, sg, nents, direction);
124}
125
126static inline void
127dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
128 int direction)
129{
130 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
131
132 BUG_ON(!valid_dma_direction(direction));
133 if (ops->unmap_sg)
134 ops->unmap_sg(hwdev, sg, nents, direction);
135}
136
137static inline void
138dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
139 size_t size, int direction)
140{
141 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
142
143 BUG_ON(!valid_dma_direction(direction));
144 if (ops->sync_single_for_cpu)
145 ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
146 flush_write_buffers();
147}
148
149static inline void
150dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
151 size_t size, int direction)
152{
153 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
154
155 BUG_ON(!valid_dma_direction(direction));
156 if (ops->sync_single_for_device)
157 ops->sync_single_for_device(hwdev, dma_handle, size, direction);
158 flush_write_buffers();
159}
160
161static inline void
162dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
163 unsigned long offset, size_t size, int direction)
164{
165 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
166
167 BUG_ON(!valid_dma_direction(direction));
168 if (ops->sync_single_range_for_cpu)
169 ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
170 size, direction);
171 flush_write_buffers();
172}
173
174static inline void
175dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
176 unsigned long offset, size_t size,
177 int direction)
178{
179 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
180
181 BUG_ON(!valid_dma_direction(direction));
182 if (ops->sync_single_range_for_device)
183 ops->sync_single_range_for_device(hwdev, dma_handle,
184 offset, size, direction);
185 flush_write_buffers();
186}
187
188static inline void
189dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
190 int nelems, int direction)
191{
192 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
193
194 BUG_ON(!valid_dma_direction(direction));
195 if (ops->sync_sg_for_cpu)
196 ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
197 flush_write_buffers();
198}
199
200static inline void
201dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
202 int nelems, int direction)
203{
204 struct dma_mapping_ops *ops = get_dma_ops(hwdev);
205
206 BUG_ON(!valid_dma_direction(direction));
207 if (ops->sync_sg_for_device)
208 ops->sync_sg_for_device(hwdev, sg, nelems, direction);
209
210 flush_write_buffers();
211}
212
213static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
214 size_t offset, size_t size,
215 int direction)
216{
217 struct dma_mapping_ops *ops = get_dma_ops(dev);
218
219 BUG_ON(!valid_dma_direction(direction));
220 return ops->map_single(dev, page_to_phys(page) + offset,
221 size, direction);
222}
223
224static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
225 size_t size, int direction)
226{
227 dma_unmap_single(dev, addr, size, direction);
228}
229
230static inline void
231dma_cache_sync(struct device *dev, void *vaddr, size_t size,
232 enum dma_data_direction dir)
233{
234 flush_write_buffers();
235}
236
237static inline int dma_get_cache_alignment(void)
238{
239 /* no easy way to get cache size on all x86, so return the
240 * maximum possible, to be safe */
241 return boot_cpu_data.x86_clflush_size;
242}
243
244static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
245 gfp_t gfp)
246{
247 unsigned long dma_mask = 0;
248
249 dma_mask = dev->coherent_dma_mask;
250 if (!dma_mask)
251 dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
252
253 return dma_mask;
254}
255
256static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
257{
258#ifdef CONFIG_X86_64
259 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
260
261 if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
262 gfp |= GFP_DMA32;
263#endif
264 return gfp;
265}
266
267static inline void *
268dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
269 gfp_t gfp)
270{
271 struct dma_mapping_ops *ops = get_dma_ops(dev);
272 void *memory;
273
274 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
275
276 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
277 return memory;
278
279 if (!dev) {
280 dev = &x86_dma_fallback_dev;
281 gfp |= GFP_DMA;
282 }
283
284 if (!is_device_dma_capable(dev))
285 return NULL;
286
287 if (!ops->alloc_coherent)
288 return NULL;
289
290 return ops->alloc_coherent(dev, size, dma_handle,
291 dma_alloc_coherent_gfp_flags(dev, gfp));
292}
293
294static inline void dma_free_coherent(struct device *dev, size_t size,
295 void *vaddr, dma_addr_t bus)
296{
297 struct dma_mapping_ops *ops = get_dma_ops(dev);
298
299 WARN_ON(irqs_disabled()); /* for portability */
300
301 if (dma_release_from_coherent(dev, get_order(size), vaddr))
302 return;
303
304 if (ops->free_coherent)
305 ops->free_coherent(dev, size, vaddr, bus);
306}
307
308#endif /* ASM_X86__DMA_MAPPING_H */
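
These wrappers implement the standard streaming-DMA pattern: map, check for a mapping failure, hand the bus address to the device, unmap. A hedged driver-side sketch; 'dev' and 'buf' come from a hypothetical driver context:

#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program 'handle' into the device's DMA engine and wait ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}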
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
deleted file mode 100644
index c9f7a4eec555..000000000000
--- a/include/asm-x86/dma.h
+++ /dev/null
@@ -1,318 +0,0 @@
1/*
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992.
6 */
7
8#ifndef ASM_X86__DMA_H
9#define ASM_X86__DMA_H
10
11#include <linux/spinlock.h> /* And spinlocks */
12#include <asm/io.h> /* need byte IO */
13#include <linux/delay.h>
14
15#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
16#define dma_outb outb_p
17#else
18#define dma_outb outb
19#endif
20
21#define dma_inb inb
22
23/*
24 * NOTES about DMA transfers:
25 *
26 * controller 1: channels 0-3, byte operations, ports 00-1F
27 * controller 2: channels 4-7, word operations, ports C0-DF
28 *
29 * - ALL registers are 8 bits only, regardless of transfer size
30 * - channel 4 is not used - cascades 1 into 2.
31 * - channels 0-3 are byte - addresses/counts are for physical bytes
32 * - channels 5-7 are word - addresses/counts are for physical words
33 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
34 * - transfer count loaded to registers is 1 less than actual count
35 * - controller 2 offsets are all even (2x offsets for controller 1)
36 * - page registers for 5-7 don't use data bit 0, represent 128K pages
37 * - page registers for 0-3 use bit 0, represent 64K pages
38 *
39 * DMA transfers are limited to the lower 16MB of _physical_ memory.
40 * Note that addresses loaded into registers must be _physical_ addresses,
41 * not logical addresses (which may differ if paging is active).
42 *
43 * Address mapping for channels 0-3:
44 *
45 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
46 * | ... | | ... | | ... |
47 * | ... | | ... | | ... |
48 * | ... | | ... | | ... |
49 * P7 ... P0 A7 ... A0 A7 ... A0
50 * | Page | Addr MSB | Addr LSB | (DMA registers)
51 *
52 * Address mapping for channels 5-7:
53 *
54 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
55 * | ... | \ \ ... \ \ \ ... \ \
56 * | ... | \ \ ... \ \ \ ... \ (not used)
57 * | ... | \ \ ... \ \ \ ... \
58 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
59 * | Page | Addr MSB | Addr LSB | (DMA registers)
60 *
61 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
62 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
63 * the hardware level, so odd-byte transfers aren't possible).
64 *
65 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
66 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
67 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
68 *
69 */
70
71#define MAX_DMA_CHANNELS 8
72
73#ifdef CONFIG_X86_32
74
75/* The maximum address that we can perform a DMA transfer to on this platform */
76#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)
77
78#else
79
80/* 16MB ISA DMA zone */
81#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT)
82
83/* 4GB broken PCI/AGP hardware bus master zone */
84#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
85
86/* Compat define for old dma zone */
87#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
88
89#endif
90
91/* 8237 DMA controllers */
92#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
93#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
94
95/* DMA controller registers */
96#define DMA1_CMD_REG 0x08 /* command register (w) */
97#define DMA1_STAT_REG 0x08 /* status register (r) */
98#define DMA1_REQ_REG 0x09 /* request register (w) */
99#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
100#define DMA1_MODE_REG 0x0B /* mode register (w) */
101#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
102#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
103#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
104#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
105#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
106
107#define DMA2_CMD_REG 0xD0 /* command register (w) */
108#define DMA2_STAT_REG 0xD0 /* status register (r) */
109#define DMA2_REQ_REG 0xD2 /* request register (w) */
110#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
111#define DMA2_MODE_REG 0xD6 /* mode register (w) */
112#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
113#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
114#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
115#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
116#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
117
118#define DMA_ADDR_0 0x00 /* DMA address registers */
119#define DMA_ADDR_1 0x02
120#define DMA_ADDR_2 0x04
121#define DMA_ADDR_3 0x06
122#define DMA_ADDR_4 0xC0
123#define DMA_ADDR_5 0xC4
124#define DMA_ADDR_6 0xC8
125#define DMA_ADDR_7 0xCC
126
127#define DMA_CNT_0 0x01 /* DMA count registers */
128#define DMA_CNT_1 0x03
129#define DMA_CNT_2 0x05
130#define DMA_CNT_3 0x07
131#define DMA_CNT_4 0xC2
132#define DMA_CNT_5 0xC6
133#define DMA_CNT_6 0xCA
134#define DMA_CNT_7 0xCE
135
136#define DMA_PAGE_0 0x87 /* DMA page registers */
137#define DMA_PAGE_1 0x83
138#define DMA_PAGE_2 0x81
139#define DMA_PAGE_3 0x82
140#define DMA_PAGE_5 0x8B
141#define DMA_PAGE_6 0x89
142#define DMA_PAGE_7 0x8A
143
144/* I/O to memory, no autoinit, increment, single mode */
145#define DMA_MODE_READ 0x44
146/* memory to I/O, no autoinit, increment, single mode */
147#define DMA_MODE_WRITE 0x48
148/* pass thru DREQ->HRQ, DACK<-HLDA only */
149#define DMA_MODE_CASCADE 0xC0
150
151#define DMA_AUTOINIT 0x10
152
153
154extern spinlock_t dma_spin_lock;
155
156static inline unsigned long claim_dma_lock(void)
157{
158 unsigned long flags;
159 spin_lock_irqsave(&dma_spin_lock, flags);
160 return flags;
161}
162
163static inline void release_dma_lock(unsigned long flags)
164{
165 spin_unlock_irqrestore(&dma_spin_lock, flags);
166}
167
168/* enable/disable a specific DMA channel */
169static inline void enable_dma(unsigned int dmanr)
170{
171 if (dmanr <= 3)
172 dma_outb(dmanr, DMA1_MASK_REG);
173 else
174 dma_outb(dmanr & 3, DMA2_MASK_REG);
175}
176
177static inline void disable_dma(unsigned int dmanr)
178{
179 if (dmanr <= 3)
180 dma_outb(dmanr | 4, DMA1_MASK_REG);
181 else
182 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
183}
184
185/* Clear the 'DMA Pointer Flip Flop'.
186 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
187 * Use this once to initialize the FF to a known state.
188 * After that, keep track of it. :-)
189 * --- In order to do that, the DMA routines below should ---
190 * --- only be used while holding the DMA lock ! ---
191 */
192static inline void clear_dma_ff(unsigned int dmanr)
193{
194 if (dmanr <= 3)
195 dma_outb(0, DMA1_CLEAR_FF_REG);
196 else
197 dma_outb(0, DMA2_CLEAR_FF_REG);
198}
199
200/* set mode (above) for a specific DMA channel */
201static inline void set_dma_mode(unsigned int dmanr, char mode)
202{
203 if (dmanr <= 3)
204 dma_outb(mode | dmanr, DMA1_MODE_REG);
205 else
206 dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
207}
208
209/* Set only the page register bits of the transfer address.
210 * This is used for successive transfers when we know the contents of
211 * the lower 16 bits of the DMA current address register, but a 64k boundary
212 * may have been crossed.
213 */
214static inline void set_dma_page(unsigned int dmanr, char pagenr)
215{
216 switch (dmanr) {
217 case 0:
218 dma_outb(pagenr, DMA_PAGE_0);
219 break;
220 case 1:
221 dma_outb(pagenr, DMA_PAGE_1);
222 break;
223 case 2:
224 dma_outb(pagenr, DMA_PAGE_2);
225 break;
226 case 3:
227 dma_outb(pagenr, DMA_PAGE_3);
228 break;
229 case 5:
230 dma_outb(pagenr & 0xfe, DMA_PAGE_5);
231 break;
232 case 6:
233 dma_outb(pagenr & 0xfe, DMA_PAGE_6);
234 break;
235 case 7:
236 dma_outb(pagenr & 0xfe, DMA_PAGE_7);
237 break;
238 }
239}
240
241
242/* Set transfer address & page bits for specific DMA channel.
243 * Assumes dma flipflop is clear.
244 */
245static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
246{
247 set_dma_page(dmanr, a>>16);
248 if (dmanr <= 3) {
249 dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
250 dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
251 } else {
252 dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
253 dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
254 }
255}
256
257
258/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
259 * a specific DMA channel.
260 * You must ensure the parameters are valid.
261 * NOTE: from a manual: "the number of transfers is one more
262 * than the initial word count"! This is taken into account.
263 * Assumes dma flip-flop is clear.
264 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
265 */
266static inline void set_dma_count(unsigned int dmanr, unsigned int count)
267{
268 count--;
269 if (dmanr <= 3) {
270 dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
271 dma_outb((count >> 8) & 0xff,
272 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
273 } else {
274 dma_outb((count >> 1) & 0xff,
275 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
276 dma_outb((count >> 9) & 0xff,
277 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
278 }
279}
280
281
282/* Get DMA residue count. After a DMA transfer, this
283 * should return zero. Reading this while a DMA transfer is
284 * still in progress will return unpredictable results.
285 * If called before the channel has been used, it may return 1.
286 * Otherwise, it returns the number of _bytes_ left to transfer.
287 *
288 * Assumes DMA flip-flop is clear.
289 */
290static inline int get_dma_residue(unsigned int dmanr)
291{
292 unsigned int io_port;
293 /* using short to get 16-bit wrap around */
294 unsigned short count;
295
296 io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
297 : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
298
299 count = 1 + dma_inb(io_port);
300 count += dma_inb(io_port) << 8;
301
302 return (dmanr <= 3) ? count : (count << 1);
303}
304
305
306/* These are in kernel/dma.c: */
307extern int request_dma(unsigned int dmanr, const char *device_id);
308extern void free_dma(unsigned int dmanr);
309
310/* From PCI */
311
312#ifdef CONFIG_PCI
313extern int isa_dma_bridge_buggy;
314#else
315#define isa_dma_bridge_buggy (0)
316#endif
317
318#endif /* ASM_X86__DMA_H */
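
Per the flip-flop comments above, the address and count register pairs are only coherent while dma_spin_lock is held, so a full channel setup is bracketed by claim/release. An illustrative device-to-memory sequence ('phys' must be a physical address below 16MB):

#include <asm/dma.h>

static void program_isa_read(unsigned int chan, unsigned int phys,
			     unsigned int count)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);			/* known flip-flop state */
	set_dma_mode(chan, DMA_MODE_READ);	/* I/O -> memory */
	set_dma_addr(chan, phys);
	set_dma_count(chan, count);		/* bytes; even for ch 5-7 */
	enable_dma(chan);

	release_dma_lock(flags);
}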
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h
deleted file mode 100644
index 1cff6fe81fa5..000000000000
--- a/include/asm-x86/dmi.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef ASM_X86__DMI_H
2#define ASM_X86__DMI_H
3
4#include <asm/io.h>
5
6#define DMI_MAX_DATA 2048
7
8extern int dmi_alloc_index;
9extern char dmi_alloc_data[DMI_MAX_DATA];
10
11/* This is so early that there is no good way to allocate dynamic memory.
12 Allocate data in a BSS array. */
13static inline void *dmi_alloc(unsigned len)
14{
15 int idx = dmi_alloc_index;
16 if ((dmi_alloc_index + len) > DMI_MAX_DATA)
17 return NULL;
18 dmi_alloc_index += len;
19 return dmi_alloc_data + idx;
20}
21
22/* Use early IO mappings for DMI because it's initialized early */
23#define dmi_ioremap early_ioremap
24#define dmi_iounmap early_iounmap
25
26#endif /* ASM_X86__DMI_H */
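
dmi_alloc() is a bump allocator over a fixed BSS pool, usable before the normal allocators exist. A small sketch, assuming the caller already knows the string length:

#include <linux/string.h>
#include <asm/dmi.h>

static char *dmi_strdup_early(const char *s, unsigned int len)
{
	char *p = dmi_alloc(len + 1);	/* NULL once the 2048-byte pool is spent */

	if (p)
		memcpy(p, s, len + 1);	/* copy including the NUL */
	return p;
}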
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h
deleted file mode 100644
index c3c953a45b21..000000000000
--- a/include/asm-x86/ds.h
+++ /dev/null
@@ -1,238 +0,0 @@
1/*
2 * Debug Store (DS) support
3 *
4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for branch trace store (BTS) and
6 * precise-event based sampling (PEBS).
7 *
8 * It manages:
9 * - per-thread and per-cpu allocation of BTS and PEBS
10 * - buffer memory allocation (optional)
11 * - buffer overflow handling
12 * - buffer access
13 *
14 * It assumes:
15 * - get_task_struct on all parameter tasks
16 * - current is allowed to trace parameter tasks
17 *
18 *
19 * Copyright (C) 2007-2008 Intel Corporation.
20 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
21 */
22
23#ifndef ASM_X86__DS_H
24#define ASM_X86__DS_H
25
26#ifdef CONFIG_X86_DS
27
28#include <linux/types.h>
29#include <linux/init.h>
30
31
32struct task_struct;
33
34/*
35 * Request BTS or PEBS
36 *
37 * Due to alignment constraints, the actual buffer may be slightly
38 * smaller than the requested or provided buffer.
39 *
40 * Returns 0 on success; -Eerrno otherwise
41 *
42 * task: the task to request recording for;
43 * NULL for per-cpu recording on the current cpu
44 * base: the base pointer for the (non-pageable) buffer;
45 * NULL if buffer allocation requested
46 * size: the size of the requested or provided buffer
47 * ovfl: pointer to a function to be called on buffer overflow;
48 * NULL if cyclic buffer requested
49 */
50typedef void (*ds_ovfl_callback_t)(struct task_struct *);
51extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
52 ds_ovfl_callback_t ovfl);
53extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
54 ds_ovfl_callback_t ovfl);
55
56/*
57 * Release BTS or PEBS resources
58 *
59 * Frees buffers allocated on ds_request.
60 *
61 * Returns 0 on success; -Eerrno otherwise
62 *
63 * task: the task to release resources for;
64 * NULL to release resources for the current cpu
65 */
66extern int ds_release_bts(struct task_struct *task);
67extern int ds_release_pebs(struct task_struct *task);
68
69/*
70 * Return the (array) index of the write pointer.
71 * (assuming an array of BTS/PEBS records)
72 *
73 * Returns -Eerrno on error
74 *
75 * task: the task to access;
76 * NULL to access the current cpu
77 * pos (out): if not NULL, will hold the result
78 */
79extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
80extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
81
82/*
83 * Return the (array) index one record beyond the end of the array.
84 * (assuming an array of BTS/PEBS records)
85 *
86 * Returns -Eerrno on error
87 *
88 * task: the task to access;
89 * NULL to access the current cpu
90 * pos (out): if not NULL, will hold the result
91 */
92extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
93extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
94
95/*
96 * Provide a pointer to the BTS/PEBS record at parameter index.
97 * (assuming an array of BTS/PEBS records)
98 *
99 * The pointer points directly into the buffer. The user is
100 * responsible for copying the record.
101 *
102 * Returns the size of a single record on success; -Eerrno on error
103 *
104 * task: the task to access;
105 * NULL to access the current cpu
106 * index: the index of the requested record
107 * record (out): pointer to the requested record
108 */
109extern int ds_access_bts(struct task_struct *task,
110 size_t index, const void **record);
111extern int ds_access_pebs(struct task_struct *task,
112 size_t index, const void **record);
113
114/*
115 * Write one or more BTS/PEBS records at the write pointer index and
116 * advance the write pointer.
117 *
118 * If size is not a multiple of the record size, trailing bytes are
119 * zeroed out.
120 *
121 * May result in one or more overflow notifications.
122 *
123 * If called during overflow handling, that is, with index >=
124 * interrupt threshold, the write will wrap around.
125 *
126 * An overflow notification is given if and when the interrupt
127 * threshold is reached during or after the write.
128 *
129 * Returns the number of bytes written or -Eerrno.
130 *
131 * task: the task to access;
132 * NULL to access the current cpu
133 * buffer: the buffer to write
134 * size: the size of the buffer
135 */
136extern int ds_write_bts(struct task_struct *task,
137 const void *buffer, size_t size);
138extern int ds_write_pebs(struct task_struct *task,
139 const void *buffer, size_t size);
140
141/*
142 * Same as ds_write_bts/pebs, but omit ownership checks.
143 *
144 * This is needed to let a task other than the owner of the
145 * BTS/PEBS buffer, or the parameter task itself, write into the
146 * respective buffer.
147 */
148extern int ds_unchecked_write_bts(struct task_struct *task,
149 const void *buffer, size_t size);
150extern int ds_unchecked_write_pebs(struct task_struct *task,
151 const void *buffer, size_t size);
152
153/*
154 * Reset the write pointer of the BTS/PEBS buffer.
155 *
156 * Returns 0 on success; -Eerrno on error
157 *
158 * task: the task to access;
159 * NULL to access the current cpu
160 */
161extern int ds_reset_bts(struct task_struct *task);
162extern int ds_reset_pebs(struct task_struct *task);
163
164/*
165 * Clear the BTS/PEBS buffer and reset the write pointer.
166 * The entire buffer will be zeroed out.
167 *
168 * Returns 0 on success; -Eerrno on error
169 *
170 * task: the task to access;
171 * NULL to access the current cpu
172 */
173extern int ds_clear_bts(struct task_struct *task);
174extern int ds_clear_pebs(struct task_struct *task);
175
176/*
177 * Provide the PEBS counter reset value.
178 *
179 * Returns 0 on success; -Eerrno on error
180 *
181 * task: the task to access;
182 * NULL to access the current cpu
183 * value (out): the counter reset value
184 */
185extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
186
187/*
188 * Set the PEBS counter reset value.
189 *
190 * Returns 0 on success; -Eerrno on error
191 *
192 * task: the task to access;
193 * NULL to access the current cpu
194 * value: the new counter reset value
195 */
196extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
197
198/*
199 * Initialization
200 */
201struct cpuinfo_x86;
202extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
203
204
205
206/*
207 * The DS context - part of struct thread_struct.
208 */
209struct ds_context {
210 /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
211 unsigned char *ds;
212 /* the owner of the BTS and PEBS configuration, respectively */
213 struct task_struct *owner[2];
214 /* buffer overflow notification function for BTS and PEBS */
215 ds_ovfl_callback_t callback[2];
216 /* the original buffer address */
217 void *buffer[2];
218 /* the number of allocated pages for on-request allocated buffers */
219 unsigned int pages[2];
220 /* use count */
221 unsigned long count;
222 /* a pointer to the context location inside the thread_struct
223 * or the per_cpu context array */
224 struct ds_context **this;
225	/* a pointer to the task owning this context, or NULL if the
226 * context is owned by a cpu */
227 struct task_struct *task;
228};
229
230/* called by exit_thread() to free leftover contexts */
231extern void ds_free(struct ds_context *context);
232
233#else /* CONFIG_X86_DS */
234
235#define ds_init_intel(config) do {} while (0)
236
237#endif /* CONFIG_X86_DS */
238#endif /* ASM_X86__DS_H */
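
For orientation, a minimal sketch of how a consumer would drive the request/release API documented above, assuming a CONFIG_X86_DS kernel; the callback body and buffer size are illustrative, not taken from this patch:

/* Trace BTS on the current cpu; NULL base asks DS to allocate the buffer. */
static void my_bts_overflow(struct task_struct *task)
{
	/* drain records here via ds_get_bts_index()/ds_access_bts() */
}

static int trace_this_cpu(void)
{
	int err = ds_request_bts(NULL, NULL, 4096, my_bts_overflow);

	if (err)
		return err;
	/* ... workload runs; the callback fires as the buffer fills ... */
	return ds_release_bts(NULL);
}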
diff --git a/include/asm-x86/dwarf2.h b/include/asm-x86/dwarf2.h
deleted file mode 100644
index 21d1bc32ad7c..000000000000
--- a/include/asm-x86/dwarf2.h
+++ /dev/null
@@ -1,61 +0,0 @@
1#ifndef ASM_X86__DWARF2_H
2#define ASM_X86__DWARF2_H
3
4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files"
6#endif
7
8/*
9 Macros for dwarf2 CFI unwind table entries.
10 See "as.info" for details on these pseudo ops. Unfortunately
11 they are only supported in very new binutils, so define them
12 away for older versions.
13 */
14
15#ifdef CONFIG_AS_CFI
16
17#define CFI_STARTPROC .cfi_startproc
18#define CFI_ENDPROC .cfi_endproc
19#define CFI_DEF_CFA .cfi_def_cfa
20#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
21#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
22#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
23#define CFI_OFFSET .cfi_offset
24#define CFI_REL_OFFSET .cfi_rel_offset
25#define CFI_REGISTER .cfi_register
26#define CFI_RESTORE .cfi_restore
27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined
30
31#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
32#define CFI_SIGNAL_FRAME .cfi_signal_frame
33#else
34#define CFI_SIGNAL_FRAME
35#endif
36
37#else
38
39/* Due to the structure of pre-existing code, don't use assembler line
40 comment character # to ignore the arguments. Instead, use a dummy macro. */
41.macro cfi_ignore a=0, b=0, c=0, d=0
42.endm
43
44#define CFI_STARTPROC cfi_ignore
45#define CFI_ENDPROC cfi_ignore
46#define CFI_DEF_CFA cfi_ignore
47#define CFI_DEF_CFA_REGISTER cfi_ignore
48#define CFI_DEF_CFA_OFFSET cfi_ignore
49#define CFI_ADJUST_CFA_OFFSET cfi_ignore
50#define CFI_OFFSET cfi_ignore
51#define CFI_REL_OFFSET cfi_ignore
52#define CFI_REGISTER cfi_ignore
53#define CFI_RESTORE cfi_ignore
54#define CFI_REMEMBER_STATE cfi_ignore
55#define CFI_RESTORE_STATE cfi_ignore
56#define CFI_UNDEFINED cfi_ignore
57#define CFI_SIGNAL_FRAME cfi_ignore
58
59#endif
60
61#endif /* ASM_X86__DWARF2_H */
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
deleted file mode 100644
index 5abbdec06bd2..000000000000
--- a/include/asm-x86/e820.h
+++ /dev/null
@@ -1,146 +0,0 @@
1#ifndef ASM_X86__E820_H
2#define ASM_X86__E820_H
3#define E820MAP 0x2d0 /* our map */
4#define E820MAX 128 /* number of entries in E820MAP */
5
6/*
7 * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
8 * constrained space in the zeropage. If we have more nodes than
9 * that, and if we've booted off EFI firmware, then the EFI tables
10 * passed us from the EFI firmware can list more nodes. Size our
11 * internal memory map tables to have room for these additional
12 * nodes, based on up to three entries per node for which the
13 * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
14 * plus E820MAX, allowing space for the possible duplicate E820
15 * entries that might need room in the same arrays, prior to the
16 * call to sanitize_e820_map() to remove duplicates. The allowance
17 * of three memory map entries per node is "enough" entries for
18 * the initial hardware platform motivating this mechanism to make
19 * use of additional EFI map entries. Future platforms may want
20 * to allow more than three entries per node or otherwise refine
21 * this size.
22 */
23
24/*
25 * Odd: 'make headers_check' complains about numa.h if I try
26 * to collapse the next two #ifdef lines to a single line:
27 * #if defined(__KERNEL__) && defined(CONFIG_EFI)
28 */
29#ifdef __KERNEL__
30#ifdef CONFIG_EFI
31#include <linux/numa.h>
32#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
33#else /* ! CONFIG_EFI */
34#define E820_X_MAX E820MAX
35#endif
36#else /* ! __KERNEL__ */
37#define E820_X_MAX E820MAX
38#endif
39
40#define E820NR 0x1e8 /* # entries in E820MAP */
41
42#define E820_RAM 1
43#define E820_RESERVED 2
44#define E820_ACPI 3
45#define E820_NVS 4
46#define E820_UNUSABLE 5
47
48/* reserved RAM used by kernel itself */
49#define E820_RESERVED_KERN 128
50
51#ifndef __ASSEMBLY__
52struct e820entry {
53 __u64 addr; /* start of memory segment */
54 __u64 size; /* size of memory segment */
55 __u32 type; /* type of memory segment */
56} __attribute__((packed));
57
58struct e820map {
59 __u32 nr_map;
60 struct e820entry map[E820_X_MAX];
61};
62
63#ifdef __KERNEL__
64/* see comment in arch/x86/kernel/e820.c */
65extern struct e820map e820;
66extern struct e820map e820_saved;
67
68extern unsigned long pci_mem_start;
69extern int e820_any_mapped(u64 start, u64 end, unsigned type);
70extern int e820_all_mapped(u64 start, u64 end, unsigned type);
71extern void e820_add_region(u64 start, u64 size, int type);
72extern void e820_print_map(char *who);
73extern int
74sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, int *pnr_map);
75extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
76 unsigned new_type);
77extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
78 int checktype);
79extern void update_e820(void);
80extern void e820_setup_gap(void);
81extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
82 unsigned long start_addr, unsigned long long end_addr);
83struct setup_data;
84extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
85
86#if defined(CONFIG_X86_64) || \
87 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
88extern void e820_mark_nosave_regions(unsigned long limit_pfn);
89#else
90static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
91{
92}
93#endif
94
95#ifdef CONFIG_MEMTEST
96extern void early_memtest(unsigned long start, unsigned long end);
97#else
98static inline void early_memtest(unsigned long start, unsigned long end)
99{
100}
101#endif
102
103extern unsigned long end_user_pfn;
104
105extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
106extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
107extern void reserve_early(u64 start, u64 end, char *name);
108extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
109extern void free_early(u64 start, u64 end);
110extern void early_res_to_bootmem(u64 start, u64 end);
111extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
112
113extern unsigned long e820_end_of_ram_pfn(void);
114extern unsigned long e820_end_of_low_ram_pfn(void);
115extern int e820_find_active_region(const struct e820entry *ei,
116 unsigned long start_pfn,
117 unsigned long last_pfn,
118 unsigned long *ei_startpfn,
119 unsigned long *ei_endpfn);
120extern void e820_register_active_regions(int nid, unsigned long start_pfn,
121 unsigned long end_pfn);
122extern u64 e820_hole_size(u64 start, u64 end);
123extern void finish_e820_parsing(void);
124extern void e820_reserve_resources(void);
125extern void e820_reserve_resources_late(void);
126extern void setup_memory_map(void);
127extern char *default_machine_specific_memory_setup(void);
128extern char *machine_specific_memory_setup(void);
129extern char *memory_setup(void);
130#endif /* __KERNEL__ */
131#endif /* __ASSEMBLY__ */
132
133#define ISA_START_ADDRESS 0xa0000
134#define ISA_END_ADDRESS 0x100000
135#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
136
137#define BIOS_BEGIN 0x000a0000
138#define BIOS_END 0x00100000
139
140#ifdef __KERNEL__
141#include <linux/ioport.h>
142
143#define HIGH_MEMORY (1024*1024)
144#endif /* __KERNEL__ */
145
146#endif /* ASM_X86__E820_H */
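
As a sketch of how the map declared above is walked (kernel context assumed; the RAM-accounting loop itself is illustrative):

/* Sum the bytes of all E820_RAM entries in the global map. */
static u64 e820_total_ram_bytes(void)
{
	u64 total = 0;
	u32 i;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820.map[i].type == E820_RAM)
			total += e820.map[i].size;
	}
	return total;
}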
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
deleted file mode 100644
index 9493c5b27bbd..000000000000
--- a/include/asm-x86/edac.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_X86__EDAC_H
2#define ASM_X86__EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static inline void atomic_scrub(void *va, u32 size)
7{
8 u32 i, *virt_addr = va;
9
10 /*
11 * Very carefully read and write to memory atomically so we
12 * are interrupt, DMA and SMP safe.
13 */
14 for (i = 0; i < size / 4; i++, virt_addr++)
15 asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
16}
17
18#endif /* ASM_X86__EDAC_H */
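
How an EDAC driver might call the helper above on a page that took a correctable error (a sketch; the lowmem assumption is the caller's responsibility):

/* Rewrite a page in place so the memory controller recomputes its ECC. */
static void scrub_one_page(struct page *page)
{
	void *virt = page_address(page);	/* assumes a lowmem page */

	if (virt)
		atomic_scrub(virt, PAGE_SIZE);
}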
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
deleted file mode 100644
index 313438e63348..000000000000
--- a/include/asm-x86/efi.h
+++ /dev/null
@@ -1,110 +0,0 @@
1#ifndef ASM_X86__EFI_H
2#define ASM_X86__EFI_H
3
4#ifdef CONFIG_X86_32
5
6extern unsigned long asmlinkage efi_call_phys(void *, ...);
7
8#define efi_call_phys0(f) efi_call_phys(f)
9#define efi_call_phys1(f, a1) efi_call_phys(f, a1)
10#define efi_call_phys2(f, a1, a2) efi_call_phys(f, a1, a2)
11#define efi_call_phys3(f, a1, a2, a3) efi_call_phys(f, a1, a2, a3)
12#define efi_call_phys4(f, a1, a2, a3, a4) \
13 efi_call_phys(f, a1, a2, a3, a4)
14#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
15 efi_call_phys(f, a1, a2, a3, a4, a5)
16#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
17 efi_call_phys(f, a1, a2, a3, a4, a5, a6)
18/*
19 * Wrap all the virtual calls in a way that forces the parameters onto the stack.
20 */
21
22#define efi_call_virt(f, args...) \
23 ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
24
25#define efi_call_virt0(f) efi_call_virt(f)
26#define efi_call_virt1(f, a1) efi_call_virt(f, a1)
27#define efi_call_virt2(f, a1, a2) efi_call_virt(f, a1, a2)
28#define efi_call_virt3(f, a1, a2, a3) efi_call_virt(f, a1, a2, a3)
29#define efi_call_virt4(f, a1, a2, a3, a4) \
30 efi_call_virt(f, a1, a2, a3, a4)
31#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
32 efi_call_virt(f, a1, a2, a3, a4, a5)
33#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
34 efi_call_virt(f, a1, a2, a3, a4, a5, a6)
35
36#define efi_ioremap(addr, size) ioremap_cache(addr, size)
37
38#else /* !CONFIG_X86_32 */
39
40#define MAX_EFI_IO_PAGES 100
41
42extern u64 efi_call0(void *fp);
43extern u64 efi_call1(void *fp, u64 arg1);
44extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
45extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);
46extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
47extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,
48 u64 arg4, u64 arg5);
49extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
50 u64 arg4, u64 arg5, u64 arg6);
51
52#define efi_call_phys0(f) \
53 efi_call0((void *)(f))
54#define efi_call_phys1(f, a1) \
55 efi_call1((void *)(f), (u64)(a1))
56#define efi_call_phys2(f, a1, a2) \
57 efi_call2((void *)(f), (u64)(a1), (u64)(a2))
58#define efi_call_phys3(f, a1, a2, a3) \
59 efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3))
60#define efi_call_phys4(f, a1, a2, a3, a4) \
61 efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
62 (u64)(a4))
63#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
64 efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
65 (u64)(a4), (u64)(a5))
66#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
67 efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
68 (u64)(a4), (u64)(a5), (u64)(a6))
69
70#define efi_call_virt0(f) \
71 efi_call0((void *)(efi.systab->runtime->f))
72#define efi_call_virt1(f, a1) \
73 efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
74#define efi_call_virt2(f, a1, a2) \
75 efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
76#define efi_call_virt3(f, a1, a2, a3) \
77 efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
78 (u64)(a3))
79#define efi_call_virt4(f, a1, a2, a3, a4) \
80 efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
81 (u64)(a3), (u64)(a4))
82#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
83 efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
84 (u64)(a3), (u64)(a4), (u64)(a5))
85#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
86 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
87 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
88
89extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
90
91#endif /* CONFIG_X86_32 */
92
93extern void efi_reserve_early(void);
94extern void efi_call_phys_prelog(void);
95extern void efi_call_phys_epilog(void);
96
97#ifndef CONFIG_EFI
98/*
99 * If EFI is not configured, have the EFI calls return -ENOSYS.
100 */
101#define efi_call0(_f) (-ENOSYS)
102#define efi_call1(_f, _a1) (-ENOSYS)
103#define efi_call2(_f, _a1, _a2) (-ENOSYS)
104#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS)
105#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
106#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
107#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
108#endif /* CONFIG_EFI */
109
110#endif /* ASM_X86__EFI_H */
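
For illustration, the virtual-call wrappers above are consumed roughly like this (mirroring the style of arch/x86/kernel/efi.c; efi_status_t, efi_time_t and the efi global come from linux/efi.h):

/* Invoke the EFI GetTime runtime service through the arch wrapper. */
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return efi_call_virt2(get_time, tm, tc);
}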
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
deleted file mode 100644
index 26bc15f01e78..000000000000
--- a/include/asm-x86/elf.h
+++ /dev/null
@@ -1,336 +0,0 @@
1#ifndef ASM_X86__ELF_H
2#define ASM_X86__ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10#include <asm/auxvec.h>
11
12typedef unsigned long elf_greg_t;
13
14#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
15typedef elf_greg_t elf_gregset_t[ELF_NGREG];
16
17typedef struct user_i387_struct elf_fpregset_t;
18
19#ifdef __i386__
20
21typedef struct user_fxsr_struct elf_fpxregset_t;
22
23#define R_386_NONE 0
24#define R_386_32 1
25#define R_386_PC32 2
26#define R_386_GOT32 3
27#define R_386_PLT32 4
28#define R_386_COPY 5
29#define R_386_GLOB_DAT 6
30#define R_386_JMP_SLOT 7
31#define R_386_RELATIVE 8
32#define R_386_GOTOFF 9
33#define R_386_GOTPC 10
34#define R_386_NUM 11
35
36/*
37 * These are used to set parameters in the core dumps.
38 */
39#define ELF_CLASS ELFCLASS32
40#define ELF_DATA ELFDATA2LSB
41#define ELF_ARCH EM_386
42
43#else
44
45/* x86-64 relocation types */
46#define R_X86_64_NONE 0 /* No reloc */
47#define R_X86_64_64 1 /* Direct 64 bit */
48#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
49#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
50#define R_X86_64_PLT32 4 /* 32 bit PLT address */
51#define R_X86_64_COPY 5 /* Copy symbol at runtime */
52#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
53#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
54#define R_X86_64_RELATIVE 8 /* Adjust by program base */
55#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
56 offset to GOT */
57#define R_X86_64_32 10 /* Direct 32 bit zero extended */
58#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
59#define R_X86_64_16 12 /* Direct 16 bit zero extended */
60#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
61#define R_X86_64_8 14 /* Direct 8 bit sign extended */
62#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
63
64#define R_X86_64_NUM 16
65
66/*
67 * These are used to set parameters in the core dumps.
68 */
69#define ELF_CLASS ELFCLASS64
70#define ELF_DATA ELFDATA2LSB
71#define ELF_ARCH EM_X86_64
72
73#endif
74
75#include <asm/vdso.h>
76
77extern unsigned int vdso_enabled;
78
79/*
80 * This is used to ensure we don't load something for the wrong architecture.
81 */
82#define elf_check_arch_ia32(x) \
83 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
84
85#include <asm/processor.h>
86#include <asm/system.h>
87
88#ifdef CONFIG_X86_32
89#include <asm/desc.h>
90
91#define elf_check_arch(x) elf_check_arch_ia32(x)
92
93/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
94 contains a pointer to a function which might be registered using `atexit'.
95   This provides a means for the dynamic linker to call DT_FINI functions for
96   shared libraries that have been loaded before the code runs.
97
98   A value of 0 means we have no such handler.
99
100 We might as well make sure everything else is cleared too (except for %esp),
101 just to make things more deterministic.
102 */
103#define ELF_PLAT_INIT(_r, load_addr) \
104 do { \
105 _r->bx = 0; _r->cx = 0; _r->dx = 0; \
106 _r->si = 0; _r->di = 0; _r->bp = 0; \
107 _r->ax = 0; \
108} while (0)
109
110/*
111 * regs is struct pt_regs, pr_reg is elf_gregset_t (which is
112 * now struct user_regs; they are different)
113 */
114
115#define ELF_CORE_COPY_REGS(pr_reg, regs) \
116do { \
117 pr_reg[0] = regs->bx; \
118 pr_reg[1] = regs->cx; \
119 pr_reg[2] = regs->dx; \
120 pr_reg[3] = regs->si; \
121 pr_reg[4] = regs->di; \
122 pr_reg[5] = regs->bp; \
123 pr_reg[6] = regs->ax; \
124 pr_reg[7] = regs->ds & 0xffff; \
125 pr_reg[8] = regs->es & 0xffff; \
126 pr_reg[9] = regs->fs & 0xffff; \
127 savesegment(gs, pr_reg[10]); \
128 pr_reg[11] = regs->orig_ax; \
129 pr_reg[12] = regs->ip; \
130 pr_reg[13] = regs->cs & 0xffff; \
131 pr_reg[14] = regs->flags; \
132 pr_reg[15] = regs->sp; \
133 pr_reg[16] = regs->ss & 0xffff; \
134} while (0);
135
136#define ELF_PLATFORM (utsname()->machine)
137#define set_personality_64bit() do { } while (0)
138
139#else /* CONFIG_X86_32 */
140
141/*
142 * This is used to ensure we don't load something for the wrong architecture.
143 */
144#define elf_check_arch(x) \
145 ((x)->e_machine == EM_X86_64)
146
147#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
148
149static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
150{
151 loadsegment(fs, 0);
152 loadsegment(ds, __USER32_DS);
153 loadsegment(es, __USER32_DS);
154 load_gs_index(0);
155 regs->ip = ip;
156 regs->sp = sp;
157 regs->flags = X86_EFLAGS_IF;
158 regs->cs = __USER32_CS;
159 regs->ss = __USER32_DS;
160}
161
162static inline void elf_common_init(struct thread_struct *t,
163 struct pt_regs *regs, const u16 ds)
164{
165 regs->ax = regs->bx = regs->cx = regs->dx = 0;
166 regs->si = regs->di = regs->bp = 0;
167 regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
168 regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
169 t->fs = t->gs = 0;
170 t->fsindex = t->gsindex = 0;
171 t->ds = t->es = ds;
172}
173
174#define ELF_PLAT_INIT(_r, load_addr) \
175do { \
176 elf_common_init(&current->thread, _r, 0); \
177 clear_thread_flag(TIF_IA32); \
178} while (0)
179
180#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
181 elf_common_init(&current->thread, regs, __USER_DS)
182
183#define compat_start_thread(regs, ip, sp) \
184do { \
185 start_ia32_thread(regs, ip, sp); \
186 set_fs(USER_DS); \
187} while (0)
188
189#define COMPAT_SET_PERSONALITY(ex) \
190do { \
191 if (test_thread_flag(TIF_IA32)) \
192 clear_thread_flag(TIF_ABI_PENDING); \
193 else \
194 set_thread_flag(TIF_ABI_PENDING); \
195 current->personality |= force_personality32; \
196} while (0)
197
198#define COMPAT_ELF_PLATFORM ("i686")
199
200/*
201 * regs is struct pt_regs, pr_reg is elf_gregset_t (which is
202 * now struct user_regs; they are different). Assumes current is the process
203 * getting dumped.
204 */
205
206#define ELF_CORE_COPY_REGS(pr_reg, regs) \
207do { \
208 unsigned v; \
209 (pr_reg)[0] = (regs)->r15; \
210 (pr_reg)[1] = (regs)->r14; \
211 (pr_reg)[2] = (regs)->r13; \
212 (pr_reg)[3] = (regs)->r12; \
213 (pr_reg)[4] = (regs)->bp; \
214 (pr_reg)[5] = (regs)->bx; \
215 (pr_reg)[6] = (regs)->r11; \
216 (pr_reg)[7] = (regs)->r10; \
217 (pr_reg)[8] = (regs)->r9; \
218 (pr_reg)[9] = (regs)->r8; \
219 (pr_reg)[10] = (regs)->ax; \
220 (pr_reg)[11] = (regs)->cx; \
221 (pr_reg)[12] = (regs)->dx; \
222 (pr_reg)[13] = (regs)->si; \
223 (pr_reg)[14] = (regs)->di; \
224 (pr_reg)[15] = (regs)->orig_ax; \
225 (pr_reg)[16] = (regs)->ip; \
226 (pr_reg)[17] = (regs)->cs; \
227 (pr_reg)[18] = (regs)->flags; \
228 (pr_reg)[19] = (regs)->sp; \
229 (pr_reg)[20] = (regs)->ss; \
230 (pr_reg)[21] = current->thread.fs; \
231 (pr_reg)[22] = current->thread.gs; \
232 asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
233 asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
234 asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
235 asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
236} while (0);
237
238/* I'm not sure if we can use '-' here */
239#define ELF_PLATFORM ("x86_64")
240extern void set_personality_64bit(void);
241extern unsigned int sysctl_vsyscall32;
242extern int force_personality32;
243
244#endif /* !CONFIG_X86_32 */
245
246#define CORE_DUMP_USE_REGSET
247#define USE_ELF_CORE_DUMP
248#define ELF_EXEC_PAGESIZE 4096
249
250/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
251 use of this is to invoke "./ld.so someprog" to test out a new version of
252 the loader. We need to make sure that it is out of the way of the program
253 that it will "exec", and that there is sufficient room for the brk. */
254
255#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
256
257/* This yields a mask that user programs can use to figure out what
258 instruction set this CPU supports. This could be done in user space,
259 but it's not easy, and we've already done it here. */
260
261#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
262
263/* This yields a string that ld.so will use to load implementation
264 specific libraries for optimization. This is more specific in
265 intent than poking at uname or /proc/cpuinfo.
266
267 For the moment, we have only optimizations for the Intel generations,
268 but that could change... */
269
270#define SET_PERSONALITY(ex) set_personality_64bit()
271
272/*
273 * An executable for which elf_read_implies_exec() returns TRUE will
274 * have the READ_IMPLIES_EXEC personality flag set automatically.
275 */
276#define elf_read_implies_exec(ex, executable_stack) \
277 (executable_stack != EXSTACK_DISABLE_X)
278
279struct task_struct;
280
281#define ARCH_DLINFO_IA32(vdso_enabled) \
282do { \
283 if (vdso_enabled) { \
284 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
285 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
286 } \
287} while (0)
288
289#ifdef CONFIG_X86_32
290
291#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
292
293#define ARCH_DLINFO ARCH_DLINFO_IA32(vdso_enabled)
294
295/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
296
297#else /* CONFIG_X86_32 */
298
299#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */
300
301/* 1GB for 64bit, 8MB for 32bit */
302#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
303
304#define ARCH_DLINFO \
305do { \
306 if (vdso_enabled) \
307 NEW_AUX_ENT(AT_SYSINFO_EHDR, \
308 (unsigned long)current->mm->context.vdso); \
309} while (0)
310
311#define AT_SYSINFO 32
312
313#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
314
315#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
316
317#endif /* !CONFIG_X86_32 */
318
319#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
320
321#define VDSO_ENTRY \
322 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
323
324struct linux_binprm;
325
326#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
327extern int arch_setup_additional_pages(struct linux_binprm *bprm,
328 int executable_stack);
329
330extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
331#define compat_arch_setup_additional_pages syscall32_setup_pages
332
333extern unsigned long arch_randomize_brk(struct mm_struct *mm);
334#define arch_randomize_brk arch_randomize_brk
335
336#endif /* ASM_X86__ELF_H */
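
The ELF_CORE_COPY_REGS macro above is what the generic core-dump path expands, roughly as in include/linux/elfcore.h (shown here as a sketch):

static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
				      struct pt_regs *regs)
{
	ELF_CORE_COPY_REGS((*elfregs), regs)
}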
diff --git a/include/asm-x86/emergency-restart.h b/include/asm-x86/emergency-restart.h
deleted file mode 100644
index 190d0d8b71e3..000000000000
--- a/include/asm-x86/emergency-restart.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_X86__EMERGENCY_RESTART_H
2#define ASM_X86__EMERGENCY_RESTART_H
3
4enum reboot_type {
5 BOOT_TRIPLE = 't',
6 BOOT_KBD = 'k',
7#ifdef CONFIG_X86_32
8 BOOT_BIOS = 'b',
9#endif
10 BOOT_ACPI = 'a',
11 BOOT_EFI = 'e'
12};
13
14extern enum reboot_type reboot_type;
15
16extern void machine_emergency_restart(void);
17
18#endif /* ASM_X86__EMERGENCY_RESTART_H */
diff --git a/include/asm-x86/errno.h b/include/asm-x86/errno.h
deleted file mode 100644
index 4c82b503d92f..000000000000
--- a/include/asm-x86/errno.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/errno.h>
diff --git a/include/asm-x86/es7000/apic.h b/include/asm-x86/es7000/apic.h
deleted file mode 100644
index 380f0b4f17ed..000000000000
--- a/include/asm-x86/es7000/apic.h
+++ /dev/null
@@ -1,193 +0,0 @@
1#ifndef __ASM_ES7000_APIC_H
2#define __ASM_ES7000_APIC_H
3
4#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
5#define esr_disable (1)
6
7static inline int apic_id_registered(void)
8{
9 return (1);
10}
11
12static inline cpumask_t target_cpus(void)
13{
14#if defined CONFIG_ES7000_CLUSTERED_APIC
15 return CPU_MASK_ALL;
16#else
17 return cpumask_of_cpu(smp_processor_id());
18#endif
19}
20
21#if defined CONFIG_ES7000_CLUSTERED_APIC
22#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
23#define INT_DELIVERY_MODE (dest_LowestPrio)
24#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */
25#define NO_BALANCE_IRQ (1)
26#undef WAKE_SECONDARY_VIA_INIT
27#define WAKE_SECONDARY_VIA_MIP
28#else
29#define APIC_DFR_VALUE (APIC_DFR_FLAT)
30#define INT_DELIVERY_MODE (dest_Fixed)
31#define INT_DEST_MODE (0) /* phys delivery to target procs */
32#define NO_BALANCE_IRQ (0)
33#undef APIC_DEST_LOGICAL
34#define APIC_DEST_LOGICAL 0x0
35#define WAKE_SECONDARY_VIA_INIT
36#endif
37
38static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
39{
40 return 0;
41}
42static inline unsigned long check_apicid_present(int bit)
43{
44 return physid_isset(bit, phys_cpu_present_map);
45}
46
47#define apicid_cluster(apicid) (apicid & 0xF0)
48
49static inline unsigned long calculate_ldr(int cpu)
50{
51 unsigned long id;
52 id = xapic_phys_to_log_apicid(cpu);
53 return (SET_APIC_LOGICAL_ID(id));
54}
55
56/*
57 * Set up the logical destination ID.
58 *
59 * Intel recommends setting DFR, LDR and TPR before enabling
60 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
61 * document number 292116). So here it goes...
62 */
63static inline void init_apic_ldr(void)
64{
65 unsigned long val;
66 int cpu = smp_processor_id();
67
68 apic_write(APIC_DFR, APIC_DFR_VALUE);
69 val = calculate_ldr(cpu);
70 apic_write(APIC_LDR, val);
71}
72
73#ifndef CONFIG_X86_GENERICARCH
74extern void enable_apic_mode(void);
75#endif
76
77extern int apic_version [MAX_APICS];
78static inline void setup_apic_routing(void)
79{
80 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
81 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
82 (apic_version[apic] == 0x14) ?
83 "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
84}
85
86static inline int multi_timer_check(int apic, int irq)
87{
88 return 0;
89}
90
91static inline int apicid_to_node(int logical_apicid)
92{
93 return 0;
94}
95
96
97static inline int cpu_present_to_apicid(int mps_cpu)
98{
99 if (!mps_cpu)
100 return boot_cpu_physical_apicid;
101 else if (mps_cpu < NR_CPUS)
102 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
103 else
104 return BAD_APICID;
105}
106
107static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
108{
109 static int id = 0;
110 physid_mask_t mask;
111 mask = physid_mask_of_physid(id);
112 ++id;
113 return mask;
114}
115
116extern u8 cpu_2_logical_apicid[];
117/* Mapping from cpu number to logical apicid */
118static inline int cpu_to_logical_apicid(int cpu)
119{
120#ifdef CONFIG_SMP
121 if (cpu >= NR_CPUS)
122 return BAD_APICID;
123 return (int)cpu_2_logical_apicid[cpu];
124#else
125 return logical_smp_processor_id();
126#endif
127}
128
129static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
130{
131 /* For clustered we don't have a good way to do this yet - hack */
132 return physids_promote(0xff);
133}
134
135
136static inline void setup_portio_remap(void)
137{
138}
139
140extern unsigned int boot_cpu_physical_apicid;
141static inline int check_phys_apicid_present(int cpu_physical_apicid)
142{
143 boot_cpu_physical_apicid = read_apic_id();
144 return (1);
145}
146
147static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
148{
149 int num_bits_set;
150 int cpus_found = 0;
151 int cpu;
152 int apicid;
153
154 num_bits_set = cpus_weight(cpumask);
155 /* Return id to all */
156 if (num_bits_set == NR_CPUS)
157#if defined CONFIG_ES7000_CLUSTERED_APIC
158 return 0xFF;
159#else
160 return cpu_to_logical_apicid(0);
161#endif
162 /*
163	 * The cpus in the mask must all be on the same apic cluster. If they
164	 * are not on the same apicid cluster, return the default value of TARGET_CPUS.
165 */
166 cpu = first_cpu(cpumask);
167 apicid = cpu_to_logical_apicid(cpu);
168 while (cpus_found < num_bits_set) {
169 if (cpu_isset(cpu, cpumask)) {
170 int new_apicid = cpu_to_logical_apicid(cpu);
171 if (apicid_cluster(apicid) !=
172 apicid_cluster(new_apicid)){
173 printk ("%s: Not a valid mask!\n", __func__);
174#if defined CONFIG_ES7000_CLUSTERED_APIC
175 return 0xFF;
176#else
177 return cpu_to_logical_apicid(0);
178#endif
179 }
180 apicid = new_apicid;
181 cpus_found++;
182 }
183 cpu++;
184 }
185 return apicid;
186}
187
188static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
189{
190 return cpuid_apic >> index_msb;
191}
192
193#endif /* __ASM_ES7000_APIC_H */
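
To make the clustering rule used above concrete: apicid_cluster() keeps only the high nibble of the APIC ID, so IDs 0x50..0x5F fall in one cluster (a worked example, not part of the patch):

int a = apicid_cluster(0x53);	/* 0x50 */
int b = apicid_cluster(0x5c);	/* 0x50: same cluster as a */
int c = apicid_cluster(0x61);	/* 0x60: a different cluster */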
diff --git a/include/asm-x86/es7000/apicdef.h b/include/asm-x86/es7000/apicdef.h
deleted file mode 100644
index 8b234a3cb851..000000000000
--- a/include/asm-x86/es7000/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_ES7000_APICDEF_H
2#define __ASM_ES7000_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-x86/es7000/ipi.h b/include/asm-x86/es7000/ipi.h
deleted file mode 100644
index 632a955fcc0a..000000000000
--- a/include/asm-x86/es7000/ipi.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef __ASM_ES7000_IPI_H
2#define __ASM_ES7000_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15 if (!cpus_empty(mask))
16 send_IPI_mask(mask, vector);
17}
18
19static inline void send_IPI_all(int vector)
20{
21 send_IPI_mask(cpu_online_map, vector);
22}
23
24#endif /* __ASM_ES7000_IPI_H */
diff --git a/include/asm-x86/es7000/mpparse.h b/include/asm-x86/es7000/mpparse.h
deleted file mode 100644
index ed5a3caae141..000000000000
--- a/include/asm-x86/es7000/mpparse.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef __ASM_ES7000_MPPARSE_H
2#define __ASM_ES7000_MPPARSE_H
3
4#include <linux/acpi.h>
5
6extern int parse_unisys_oem (char *oemptr);
7extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
8extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
9extern void setup_unisys(void);
10
11#ifndef CONFIG_X86_GENERICARCH
12extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
13extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
14 char *productid);
15#endif
16
17#ifdef CONFIG_ACPI
18
19static inline int es7000_check_dsdt(void)
20{
21 struct acpi_table_header header;
22
23 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
24 !strncmp(header.oem_id, "UNISYS", 6))
25 return 1;
26 return 0;
27}
28#endif
29
30#endif /* __ASM_ES7000_MPPARSE_H */
diff --git a/include/asm-x86/es7000/wakecpu.h b/include/asm-x86/es7000/wakecpu.h
deleted file mode 100644
index 3ffc5a7bf667..000000000000
--- a/include/asm-x86/es7000/wakecpu.h
+++ /dev/null
@@ -1,59 +0,0 @@
1#ifndef __ASM_ES7000_WAKECPU_H
2#define __ASM_ES7000_WAKECPU_H
3
4/*
5 * This file copes with machines that wake up secondary CPUs by the
6 * INIT, INIT, STARTUP sequence.
7 */
8
9#ifdef CONFIG_ES7000_CLUSTERED_APIC
10#define WAKE_SECONDARY_VIA_MIP
11#else
12#define WAKE_SECONDARY_VIA_INIT
13#endif
14
15#ifdef WAKE_SECONDARY_VIA_MIP
16extern int es7000_start_cpu(int cpu, unsigned long eip);
17static inline int
18wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
19{
20 int boot_error = 0;
21 boot_error = es7000_start_cpu(phys_apicid, start_eip);
22 return boot_error;
23}
24#endif
25
26#define TRAMPOLINE_LOW phys_to_virt(0x467)
27#define TRAMPOLINE_HIGH phys_to_virt(0x469)
28
29#define boot_cpu_apicid boot_cpu_physical_apicid
30
31static inline void wait_for_init_deassert(atomic_t *deassert)
32{
33#ifdef WAKE_SECONDARY_VIA_INIT
34 while (!atomic_read(deassert))
35 cpu_relax();
36#endif
37 return;
38}
39
40/* Nothing to do for most platforms, since cleared by the INIT cycle */
41static inline void smp_callin_clear_local_apic(void)
42{
43}
44
45static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
46{
47}
48
49static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
50{
51}
52
53#if APIC_DEBUG
54 #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
55#else
56 #define inquire_remote_apic(apicid) {}
57#endif
58
59#endif /* __ASM_ES7000_WAKECPU_H */
diff --git a/include/asm-x86/fb.h b/include/asm-x86/fb.h
deleted file mode 100644
index aca38dbd9a64..000000000000
--- a/include/asm-x86/fb.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef ASM_X86__FB_H
2#define ASM_X86__FB_H
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 if (boot_cpu_data.x86 > 3)
12 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
13}
14
15#ifdef CONFIG_X86_32
16extern int fb_is_primary_device(struct fb_info *info);
17#else
18static inline int fb_is_primary_device(struct fb_info *info) { return 0; }
19#endif
20
21#endif /* ASM_X86__FB_H */
diff --git a/include/asm-x86/fcntl.h b/include/asm-x86/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/include/asm-x86/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fcntl.h>
diff --git a/include/asm-x86/fixmap.h b/include/asm-x86/fixmap.h
deleted file mode 100644
index 78e33a1bc591..000000000000
--- a/include/asm-x86/fixmap.h
+++ /dev/null
@@ -1,68 +0,0 @@
1#ifndef ASM_X86__FIXMAP_H
2#define ASM_X86__FIXMAP_H
3
4#ifdef CONFIG_X86_32
5# include "fixmap_32.h"
6#else
7# include "fixmap_64.h"
8#endif
9
10extern int fixmaps_set;
11
12void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
13void native_set_fixmap(enum fixed_addresses idx,
14 unsigned long phys, pgprot_t flags);
15
16#ifndef CONFIG_PARAVIRT
17static inline void __set_fixmap(enum fixed_addresses idx,
18 unsigned long phys, pgprot_t flags)
19{
20 native_set_fixmap(idx, phys, flags);
21}
22#endif
23
24#define set_fixmap(idx, phys) \
25 __set_fixmap(idx, phys, PAGE_KERNEL)
26
27/*
28 * Some hardware wants to get fixmapped without caching.
29 */
30#define set_fixmap_nocache(idx, phys) \
31 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
32
33#define clear_fixmap(idx) \
34 __set_fixmap(idx, 0, __pgprot(0))
35
36#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
37#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
38
39extern void __this_fixmap_does_not_exist(void);
40
41/*
42 * 'index to address' translation. If anyone tries to use the idx
43 * directly without translation, we catch the bug with a NULL-dereference
44 * kernel oops. Illegal ranges of incoming indices are caught too.
45 */
46static __always_inline unsigned long fix_to_virt(const unsigned int idx)
47{
48 /*
49 * this branch gets completely eliminated after inlining,
50 * except when someone tries to use fixaddr indices in an
51 * illegal way. (such as mixing up address types or using
52 * out-of-range indices).
53 *
54 * If it doesn't get removed, the linker will complain
55	 * loudly with a reasonably clear error message.
56 */
57 if (idx >= __end_of_fixed_addresses)
58 __this_fixmap_does_not_exist();
59
60 return __fix_to_virt(idx);
61}
62
63static inline unsigned long virt_to_fix(const unsigned long vaddr)
64{
65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
66 return __virt_to_fix(vaddr);
67}
68#endif /* ASM_X86__FIXMAP_H */
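
A sketch of the usual pattern for the helpers above; the choice of FIX_APIC_BASE and the physical address are illustrative:

/* Map a device page at its compile-time-fixed virtual slot, uncached. */
static void __iomem *map_lapic_regs(unsigned long phys)
{
	set_fixmap_nocache(FIX_APIC_BASE, phys);
	return (void __iomem *)fix_to_virt(FIX_APIC_BASE);
}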
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
deleted file mode 100644
index 8844002da0e0..000000000000
--- a/include/asm-x86/fixmap_32.h
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */
12
13#ifndef ASM_X86__FIXMAP_32_H
14#define ASM_X86__FIXMAP_32_H
15
16
17/* used by vmalloc.c, vsyscall.lds.S.
18 *
19 * Leave one empty page between vmalloc'ed areas and
20 * the start of the fixmap.
21 */
22extern unsigned long __FIXADDR_TOP;
23#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
24#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
25
26#ifndef __ASSEMBLY__
27#include <linux/kernel.h>
28#include <asm/acpi.h>
29#include <asm/apicdef.h>
30#include <asm/page.h>
31#ifdef CONFIG_HIGHMEM
32#include <linux/threads.h>
33#include <asm/kmap_types.h>
34#endif
35
36/*
37 * Here we define all the compile-time 'special' virtual
38 * addresses. The point is to have a constant address at
39 * compile time, but to set the physical address only
40 * in the boot process. We allocate these special addresses
41 * from the end of virtual memory (0xfffff000) backwards.
42 * Also this lets us do fail-safe vmalloc(): we
43 * can guarantee that these special addresses and
44 * vmalloc()-ed addresses never overlap.
45 *
46 * These 'compile-time allocated' memory buffers are
47 * fixed-size 4k pages (or larger if used with an increment
48 * higher than 1). Use set_fixmap(idx,phys) to associate
49 * physical memory with fixmap indices.
50 *
51 * TLB entries of such buffers will not be flushed across
52 * task switches.
53 */
54enum fixed_addresses {
55 FIX_HOLE,
56 FIX_VDSO,
57 FIX_DBGP_BASE,
58 FIX_EARLYCON_MEM_BASE,
59#ifdef CONFIG_X86_LOCAL_APIC
60 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
61#endif
62#ifdef CONFIG_X86_IO_APIC
63 FIX_IO_APIC_BASE_0,
64 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
65#endif
66#ifdef CONFIG_X86_VISWS_APIC
67 FIX_CO_CPU, /* Cobalt timer */
68 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
69 FIX_LI_PCIA, /* Lithium PCI Bridge A */
70 FIX_LI_PCIB, /* Lithium PCI Bridge B */
71#endif
72#ifdef CONFIG_X86_F00F_BUG
73 FIX_F00F_IDT, /* Virtual mapping for IDT */
74#endif
75#ifdef CONFIG_X86_CYCLONE_TIMER
76 FIX_CYCLONE_TIMER, /*cyclone timer register*/
77#endif
78#ifdef CONFIG_HIGHMEM
79 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
80 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
81#endif
82#ifdef CONFIG_PCI_MMCONFIG
83 FIX_PCIE_MCFG,
84#endif
85#ifdef CONFIG_PARAVIRT
86 FIX_PARAVIRT_BOOTMAP,
87#endif
88 __end_of_permanent_fixed_addresses,
89 /*
90 * 256 temporary boot-time mappings, used by early_ioremap(),
91 * before ioremap() is functional.
92 *
93 * We round it up to the next 256 pages boundary so that we
94 * can have a single pgd entry and a single pte table:
95 */
96#define NR_FIX_BTMAPS 64
97#define FIX_BTMAPS_SLOTS 4
98 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
99 (__end_of_permanent_fixed_addresses & 255),
100 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
101 FIX_WP_TEST,
102#ifdef CONFIG_ACPI
103 FIX_ACPI_BEGIN,
104 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
105#endif
106#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
107 FIX_OHCI1394_BASE,
108#endif
109 __end_of_fixed_addresses
110};
111
112extern void reserve_top_address(unsigned long reserve);
113
114
115#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
116
117#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
118#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
119#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
120#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
121
122#endif /* !__ASSEMBLY__ */
123#endif /* ASM_X86__FIXMAP_32_H */
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
deleted file mode 100644
index dab4751d1307..000000000000
--- a/include/asm-x86/fixmap_64.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 */
10
11#ifndef ASM_X86__FIXMAP_64_H
12#define ASM_X86__FIXMAP_64_H
13
14#include <linux/kernel.h>
15#include <asm/acpi.h>
16#include <asm/apicdef.h>
17#include <asm/page.h>
18#include <asm/vsyscall.h>
19#include <asm/efi.h>
20
21/*
22 * Here we define all the compile-time 'special' virtual
23 * addresses. The point is to have a constant address at
24 * compile time, but to set the physical address only
25 * in the boot process.
26 *
27 * These 'compile-time allocated' memory buffers are
28 * fixed-size 4k pages (or larger if used with an increment
29 * higher than 1). Use set_fixmap(idx,phys) to associate
30 * physical memory with fixmap indices.
31 *
32 * TLB entries of such buffers will not be flushed across
33 * task switches.
34 */
35
36enum fixed_addresses {
37 VSYSCALL_LAST_PAGE,
38 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
39 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
40 VSYSCALL_HPET,
41 FIX_DBGP_BASE,
42 FIX_EARLYCON_MEM_BASE,
43 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
44 FIX_IO_APIC_BASE_0,
45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
46 FIX_EFI_IO_MAP_LAST_PAGE,
47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
48 + MAX_EFI_IO_PAGES - 1,
49#ifdef CONFIG_PARAVIRT
50 FIX_PARAVIRT_BOOTMAP,
51#endif
52 __end_of_permanent_fixed_addresses,
53#ifdef CONFIG_ACPI
54 FIX_ACPI_BEGIN,
55 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
56#endif
57#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
58 FIX_OHCI1394_BASE,
59#endif
60 /*
61 * 256 temporary boot-time mappings, used by early_ioremap(),
62 * before ioremap() is functional.
63 *
64 * We round it up to the next 256 pages boundary so that we
65 * can have a single pgd entry and a single pte table:
66 */
67#define NR_FIX_BTMAPS 64
68#define FIX_BTMAPS_SLOTS 4
69 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
70 (__end_of_permanent_fixed_addresses & 255),
71 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
72 __end_of_fixed_addresses
73};
74
75#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
76#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
77#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
78
79/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
80#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
81#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
82
83#endif /* ASM_X86__FIXMAP_64_H */
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
deleted file mode 100644
index 7d83a3a83e37..000000000000
--- a/include/asm-x86/floppy.h
+++ /dev/null
@@ -1,281 +0,0 @@
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef ASM_X86__FLOPPY_H
11#define ASM_X86__FLOPPY_H
12
13#include <linux/vmalloc.h>
14
15/*
16 * The DMA channel used by the floppy controller cannot access data at
17 * addresses >= 16MB
18 *
19 * Went back to the 1MB limit, as some people had problems with the floppy
20 * driver otherwise. It doesn't matter much for performance anyway, as most
21 * floppy accesses go through the track buffer.
22 */
23#define _CROSS_64KB(a, s, vdma) \
24 (!(vdma) && \
25 ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
26
27#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1)
28
29
30#define SW fd_routine[use_virtual_dma & 1]
31#define CSW fd_routine[can_use_virtual_dma & 1]
32
33
34#define fd_inb(port) inb_p(port)
35#define fd_outb(value, port) outb_p(value, port)
36
37#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy")
38#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
39#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
40#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
41#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
42#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
43#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
44#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
45
46#define FLOPPY_CAN_FALLBACK_ON_NODMA
47
48static int virtual_dma_count;
49static int virtual_dma_residue;
50static char *virtual_dma_addr;
51static int virtual_dma_mode;
52static int doing_pdma;
53
54static irqreturn_t floppy_hardint(int irq, void *dev_id)
55{
56 unsigned char st;
57
58#undef TRACE_FLPY_INT
59
60#ifdef TRACE_FLPY_INT
61 static int calls;
62 static int bytes;
63 static int dma_wait;
64#endif
65 if (!doing_pdma)
66 return floppy_interrupt(irq, dev_id);
67
68#ifdef TRACE_FLPY_INT
69 if (!calls)
70 bytes = virtual_dma_count;
71#endif
72
73 {
74 int lcount;
75 char *lptr;
76
77 st = 1;
78 for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
79 lcount; lcount--, lptr++) {
80 st = inb(virtual_dma_port + 4) & 0xa0;
81 if (st != 0xa0)
82 break;
83 if (virtual_dma_mode)
84 outb_p(*lptr, virtual_dma_port + 5);
85 else
86 *lptr = inb_p(virtual_dma_port + 5);
87 }
88 virtual_dma_count = lcount;
89 virtual_dma_addr = lptr;
90 st = inb(virtual_dma_port + 4);
91 }
92
93#ifdef TRACE_FLPY_INT
94 calls++;
95#endif
96 if (st == 0x20)
97 return IRQ_HANDLED;
98 if (!(st & 0x20)) {
99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count = 0;
101#ifdef TRACE_FLPY_INT
102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait);
105 calls = 0;
106 dma_wait = 0;
107#endif
108 doing_pdma = 0;
109 floppy_interrupt(irq, dev_id);
110 return IRQ_HANDLED;
111 }
112#ifdef TRACE_FLPY_INT
113 if (!virtual_dma_count)
114 dma_wait++;
115#endif
116 return IRQ_HANDLED;
117}
118
119static void fd_disable_dma(void)
120{
121 if (!(can_use_virtual_dma & 1))
122 disable_dma(FLOPPY_DMA);
123 doing_pdma = 0;
124 virtual_dma_residue += virtual_dma_count;
125 virtual_dma_count = 0;
126}
127
128static int vdma_request_dma(unsigned int dmanr, const char *device_id)
129{
130 return 0;
131}
132
133static void vdma_nop(unsigned int dummy)
134{
135}
136
137
138static int vdma_get_dma_residue(unsigned int dummy)
139{
140 return virtual_dma_count + virtual_dma_residue;
141}
142
143
144static int fd_request_irq(void)
145{
146 if (can_use_virtual_dma)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,
148 IRQF_DISABLED, "floppy", NULL);
149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt,
151 IRQF_DISABLED, "floppy", NULL);
152}
153
154static unsigned long dma_mem_alloc(unsigned long size)
155{
156 return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size));
157}
158
159
160static unsigned long vdma_mem_alloc(unsigned long size)
161{
162 return (unsigned long)vmalloc(size);
163
164}
165
166#define nodma_mem_alloc(size) vdma_mem_alloc(size)
167
168static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
169{
170 if ((unsigned long)addr >= (unsigned long)high_memory)
171 vfree((void *)addr);
172 else
173 free_pages(addr, get_order(size));
174}
175
176#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
177
178static void _fd_chose_dma_mode(char *addr, unsigned long size)
179{
180 if (can_use_virtual_dma == 2) {
181 if ((unsigned long)addr >= (unsigned long)high_memory ||
182 isa_virt_to_bus(addr) >= 0x1000000 ||
183 _CROSS_64KB(addr, size, 0))
184 use_virtual_dma = 1;
185 else
186 use_virtual_dma = 0;
187 } else {
188 use_virtual_dma = can_use_virtual_dma & 1;
189 }
190}
191
192#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
193
194
195static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
196{
197 doing_pdma = 1;
198 virtual_dma_port = io;
199 virtual_dma_mode = (mode == DMA_MODE_WRITE);
200 virtual_dma_addr = addr;
201 virtual_dma_count = size;
202 virtual_dma_residue = 0;
203 return 0;
204}
205
206static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
207{
208#ifdef FLOPPY_SANITY_CHECK
209 if (CROSS_64KB(addr, size)) {
210 printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
211 return -1;
212 }
213#endif
214 /* actual, physical DMA */
215 doing_pdma = 0;
216 clear_dma_ff(FLOPPY_DMA);
217 set_dma_mode(FLOPPY_DMA, mode);
218 set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr));
219 set_dma_count(FLOPPY_DMA, size);
220 enable_dma(FLOPPY_DMA);
221 return 0;
222}
223
224static struct fd_routine_l {
225 int (*_request_dma)(unsigned int dmanr, const char *device_id);
226 void (*_free_dma)(unsigned int dmanr);
227 int (*_get_dma_residue)(unsigned int dummy);
228 unsigned long (*_dma_mem_alloc)(unsigned long size);
229 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
230} fd_routine[] = {
231 {
232 request_dma,
233 free_dma,
234 get_dma_residue,
235 dma_mem_alloc,
236 hard_dma_setup
237 },
238 {
239 vdma_request_dma,
240 vdma_nop,
241 vdma_get_dma_residue,
242 vdma_mem_alloc,
243 vdma_dma_setup
244 }
245};
246
247
248static int FDC1 = 0x3f0;
249static int FDC2 = -1;
250
251/*
252 * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
253 * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
254 * coincides with another rtc CMOS user. Paul G.
255 */
256#define FLOPPY0_TYPE \
257({ \
258 unsigned long flags; \
259 unsigned char val; \
260 spin_lock_irqsave(&rtc_lock, flags); \
261 val = (CMOS_READ(0x10) >> 4) & 15; \
262 spin_unlock_irqrestore(&rtc_lock, flags); \
263 val; \
264})
265
266#define FLOPPY1_TYPE \
267({ \
268 unsigned long flags; \
269 unsigned char val; \
270 spin_lock_irqsave(&rtc_lock, flags); \
271 val = CMOS_READ(0x10) & 15; \
272 spin_unlock_irqrestore(&rtc_lock, flags); \
273 val; \
274})
275
276#define N_FDC 2
277#define N_DRIVE 8
278
279#define EXTRA_FLOPPY_PARAMS
280
281#endif /* ASM_X86__FLOPPY_H */
diff --git a/include/asm-x86/frame.h b/include/asm-x86/frame.h
deleted file mode 100644
index 06850a7194e1..000000000000
--- a/include/asm-x86/frame.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifdef __ASSEMBLY__
2
3#include <asm/dwarf2.h>
4
5/* The annotation hides the frame from the unwinder and makes it look
6   like an ordinary ebp save/restore. This avoids some special cases for
7   the frame pointer code later. */
8#ifdef CONFIG_FRAME_POINTER
9 .macro FRAME
10 pushl %ebp
11 CFI_ADJUST_CFA_OFFSET 4
12 CFI_REL_OFFSET ebp,0
13 movl %esp,%ebp
14 .endm
15 .macro ENDFRAME
16 popl %ebp
17 CFI_ADJUST_CFA_OFFSET -4
18 CFI_RESTORE ebp
19 .endm
20#else
21 .macro FRAME
22 .endm
23 .macro ENDFRAME
24 .endm
25#endif
26
27#endif /* __ASSEMBLY__ */
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
deleted file mode 100644
index 1bb6f9bbe1ab..000000000000
--- a/include/asm-x86/ftrace.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef ASM_X86__FTRACE_H
2#define ASM_X86__FTRACE_H
3
4#ifdef CONFIG_FTRACE
5#define MCOUNT_ADDR ((long)(mcount))
6#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
7
8#ifndef __ASSEMBLY__
9extern void mcount(void);
10
11static inline unsigned long ftrace_call_adjust(unsigned long addr)
12{
13 /*
14 * call mcount is "e8 <4 byte offset>"
15 * The addr points to the 4 byte offset and the caller of this
16 * function wants the pointer to e8. Simply subtract one.
17 */
18 return addr - 1;
19}
20#endif
21
22#endif /* CONFIG_FTRACE */
23
24#endif /* ASM_X86__FTRACE_H */
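
A worked example of the adjustment described above (not kernel code): the 5-byte call is 'e8' followed by a 4-byte relative offset, and addr points at the offset, one byte past the opcode:

/*
 *   e8 xx xx xx xx      call rel32
 *      ^-- addr points here; addr - 1 is the instruction start
 */
unsigned long call_site = ftrace_call_adjust(addr);	/* == addr - 1 */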
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
deleted file mode 100644
index 06b924ef6fa5..000000000000
--- a/include/asm-x86/futex.h
+++ /dev/null
@@ -1,140 +0,0 @@
1#ifndef ASM_X86__FUTEX_H
2#define ASM_X86__FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <linux/uaccess.h>
8
9#include <asm/asm.h>
10#include <asm/errno.h>
11#include <asm/processor.h>
12#include <asm/system.h>
13
14#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
15 asm volatile("1:\t" insn "\n" \
16 "2:\t.section .fixup,\"ax\"\n" \
17 "3:\tmov\t%3, %1\n" \
18 "\tjmp\t2b\n" \
19 "\t.previous\n" \
20 _ASM_EXTABLE(1b, 3b) \
21 : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
22 : "i" (-EFAULT), "0" (oparg), "1" (0))
23
24#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
25 asm volatile("1:\tmovl %2, %0\n" \
26 "\tmovl\t%0, %3\n" \
27 "\t" insn "\n" \
28 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
29 "\tjnz\t1b\n" \
30 "3:\t.section .fixup,\"ax\"\n" \
31 "4:\tmov\t%5, %1\n" \
32 "\tjmp\t3b\n" \
33 "\t.previous\n" \
34 _ASM_EXTABLE(1b, 4b) \
35 _ASM_EXTABLE(2b, 4b) \
36 : "=&a" (oldval), "=&r" (ret), \
37 "+m" (*uaddr), "=&r" (tem) \
38 : "r" (oparg), "i" (-EFAULT), "1" (0))
39
40static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
41{
42 int op = (encoded_op >> 28) & 7;
43 int cmp = (encoded_op >> 24) & 15;
44 int oparg = (encoded_op << 8) >> 20;
45 int cmparg = (encoded_op << 20) >> 20;
46 int oldval = 0, ret, tem;
47
48 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
49 oparg = 1 << oparg;
50
51 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
52 return -EFAULT;
53
54#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
55 /* Real i386 machines can only support FUTEX_OP_SET */
56 if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3)
57 return -ENOSYS;
58#endif
59
60 pagefault_disable();
61
62 switch (op) {
63 case FUTEX_OP_SET:
64 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
65 break;
66 case FUTEX_OP_ADD:
67 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
68 uaddr, oparg);
69 break;
70 case FUTEX_OP_OR:
71 __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
72 break;
73 case FUTEX_OP_ANDN:
74 __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
75 break;
76 case FUTEX_OP_XOR:
77 __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
78 break;
79 default:
80 ret = -ENOSYS;
81 }
82
83 pagefault_enable();
84
85 if (!ret) {
86 switch (cmp) {
87 case FUTEX_OP_CMP_EQ:
88 ret = (oldval == cmparg);
89 break;
90 case FUTEX_OP_CMP_NE:
91 ret = (oldval != cmparg);
92 break;
93 case FUTEX_OP_CMP_LT:
94 ret = (oldval < cmparg);
95 break;
96 case FUTEX_OP_CMP_GE:
97 ret = (oldval >= cmparg);
98 break;
99 case FUTEX_OP_CMP_LE:
100 ret = (oldval <= cmparg);
101 break;
102 case FUTEX_OP_CMP_GT:
103 ret = (oldval > cmparg);
104 break;
105 default:
106 ret = -ENOSYS;
107 }
108 }
109 return ret;
110}
111
112static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
113 int newval)
114{
115
116#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
117 /* Real i386 machines have no cmpxchg instruction */
118 if (boot_cpu_data.x86 == 3)
119 return -ENOSYS;
120#endif
121
122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
123 return -EFAULT;
124
125 asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
126 "2:\t.section .fixup, \"ax\"\n"
127 "3:\tmov %2, %0\n"
128 "\tjmp 2b\n"
129 "\t.previous\n"
130 _ASM_EXTABLE(1b, 3b)
131 : "=a" (oldval), "+m" (*uaddr)
132 : "i" (-EFAULT), "r" (newval), "0" (oldval)
133 : "memory"
134 );
135
136 return oldval;
137}
138
139#endif
140#endif /* ASM_X86__FUTEX_H */
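
futex_atomic_op_inuser() unpacks four fields from encoded_op: the operation in bits 28-31, the comparison in bits 24-27, and two 12-bit sign-extended arguments below them; the left-then-right shift pairs perform the sign extension. A standalone decode of a sample word (field values chosen arbitrarily):

#include <stdio.h>

int main(void)
{
    /* op = 1 (FUTEX_OP_ADD), cmp = 3 (FUTEX_OP_CMP_LE),
     * oparg = 5, cmparg = 7 */
    int encoded_op = (1 << 28) | (3 << 24) | (5 << 12) | 7;

    int op     = (encoded_op >> 28) & 7;
    int cmp    = (encoded_op >> 24) & 15;
    int oparg  = (encoded_op << 8) >> 20;   /* arithmetic shift sign-extends */
    int cmparg = (encoded_op << 20) >> 20;

    printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
    return 0;
}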
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
deleted file mode 100644
index 605edb39ef9e..000000000000
--- a/include/asm-x86/gart.h
+++ /dev/null
@@ -1,73 +0,0 @@
1#ifndef ASM_X86__GART_H
2#define ASM_X86__GART_H
3
4#include <asm/e820.h>
5
6extern void set_up_gart_resume(u32, u32);
7
8extern int fallback_aper_order;
9extern int fallback_aper_force;
10extern int fix_aperture;
11
12/* PTE bits. */
13#define GPTE_VALID 1
14#define GPTE_COHERENT 2
15
16/* Aperture control register bits. */
17#define GARTEN (1<<0)
18#define DISGARTCPU (1<<4)
19#define DISGARTIO (1<<5)
20
21/* GART cache control register bits. */
22#define INVGART (1<<0)
23#define GARTPTEERR (1<<1)
24
25/* K8 On-cpu GART registers */
26#define AMD64_GARTAPERTURECTL 0x90
27#define AMD64_GARTAPERTUREBASE 0x94
28#define AMD64_GARTTABLEBASE 0x98
29#define AMD64_GARTCACHECTL 0x9c
30#define AMD64_GARTEN (1<<0)
31
32extern int agp_amd64_init(void);
33
34static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
35{
36 u32 tmp, ctl;
37
38 /* address of the mappings table */
39 addr >>= 12;
40 tmp = (u32) addr<<4;
41 tmp &= ~0xf;
42 pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
43
44 /* Enable GART translation for this hammer. */
45 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
46 ctl |= GARTEN;
47 ctl &= ~(DISGARTCPU | DISGARTIO);
48 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
49}
50
51static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
52{
53 if (!aper_base)
54 return 0;
55
56 if (aper_base + aper_size > 0x100000000ULL) {
57 printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
58 return 0;
59 }
60 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
61 printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
62 return 0;
63 }
64 if (aper_size < min_size) {
65 printk(KERN_INFO "Aperture too small (%d MB); need at least %d MB\n",
66 aper_size>>20, min_size>>20);
67 return 0;
68 }
69
70 return 1;
71}
72
73#endif /* ASM_X86__GART_H */
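
enable_gart_translation() stores the GART table's physical address in AMD64_GARTTABLEBASE with address bits 39..12 placed at register bits 31..4, which is what the shift-right-12 / shift-left-4 pair computes. The packing in isolation (sample address only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t addr = 0x12345000ULL;  /* 4K-aligned GART table address */
    uint32_t tmp;

    addr >>= 12;                    /* drop the page-offset bits */
    tmp = (uint32_t)addr << 4;      /* field starts at register bit 4 */
    tmp &= ~0xfU;                   /* low nibble is reserved */

    printf("AMD64_GARTTABLEBASE <- 0x%08x\n", tmp);
    return 0;
}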
diff --git a/include/asm-x86/genapic.h b/include/asm-x86/genapic.h
deleted file mode 100644
index d48bee663a6f..000000000000
--- a/include/asm-x86/genapic.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "genapic_32.h"
3#else
4# include "genapic_64.h"
5#endif
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
deleted file mode 100644
index 6fe4f81bfcf9..000000000000
--- a/include/asm-x86/genapic_32.h
+++ /dev/null
@@ -1,126 +0,0 @@
1#ifndef ASM_X86__GENAPIC_32_H
2#define ASM_X86__GENAPIC_32_H
3
4#include <asm/mpspec.h>
5
6/*
7 * Generic APIC driver interface.
8 *
9 * A straightforward mapping of the APIC-related parts of the
10 * x86 subarchitecture interface to a dynamic object.
11 *
12 * This is used by the "generic" x86 subarchitecture.
13 *
14 * Copyright 2003 Andi Kleen, SuSE Labs.
15 */
16
17struct mpc_config_bus;
18struct mp_config_table;
19struct mpc_config_processor;
20
21struct genapic {
22 char *name;
23 int (*probe)(void);
24
25 int (*apic_id_registered)(void);
26 cpumask_t (*target_cpus)(void);
27 int int_delivery_mode;
28 int int_dest_mode;
29 int ESR_DISABLE;
30 int apic_destination_logical;
31 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
32 unsigned long (*check_apicid_present)(int apicid);
33 int no_balance_irq;
34 int no_ioapic_check;
35 void (*init_apic_ldr)(void);
36 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
37
38 void (*setup_apic_routing)(void);
39 int (*multi_timer_check)(int apic, int irq);
40 int (*apicid_to_node)(int logical_apicid);
41 int (*cpu_to_logical_apicid)(int cpu);
42 int (*cpu_present_to_apicid)(int mps_cpu);
43 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
44 void (*setup_portio_remap)(void);
45 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
46 void (*enable_apic_mode)(void);
47 u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
48
49 /* mpparse */
50 /* When one of the next two hooks returns 1, the genapic
51 is switched to this one. Essentially they are additional probe
52 functions. */
53 int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
54 char *productid);
55 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
56
57 unsigned (*get_apic_id)(unsigned long x);
58 unsigned long apic_id_mask;
59 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
60 cpumask_t (*vector_allocation_domain)(int cpu);
61
62#ifdef CONFIG_SMP
63 /* ipi */
64 void (*send_IPI_mask)(cpumask_t mask, int vector);
65 void (*send_IPI_allbutself)(int vector);
66 void (*send_IPI_all)(int vector);
67#endif
68};
69
70#define APICFUNC(x) .x = x,
71
72/* More functions could probably be marked IPIFUNC and save some space
73 in UP GENERICARCH kernels, but I don't have the nerve right now
74 to untangle this mess. -AK */
75#ifdef CONFIG_SMP
76#define IPIFUNC(x) APICFUNC(x)
77#else
78#define IPIFUNC(x)
79#endif
80
81#define APIC_INIT(aname, aprobe) \
82{ \
83 .name = aname, \
84 .probe = aprobe, \
85 .int_delivery_mode = INT_DELIVERY_MODE, \
86 .int_dest_mode = INT_DEST_MODE, \
87 .no_balance_irq = NO_BALANCE_IRQ, \
88 .ESR_DISABLE = esr_disable, \
89 .apic_destination_logical = APIC_DEST_LOGICAL, \
90 APICFUNC(apic_id_registered) \
91 APICFUNC(target_cpus) \
92 APICFUNC(check_apicid_used) \
93 APICFUNC(check_apicid_present) \
94 APICFUNC(init_apic_ldr) \
95 APICFUNC(ioapic_phys_id_map) \
96 APICFUNC(setup_apic_routing) \
97 APICFUNC(multi_timer_check) \
98 APICFUNC(apicid_to_node) \
99 APICFUNC(cpu_to_logical_apicid) \
100 APICFUNC(cpu_present_to_apicid) \
101 APICFUNC(apicid_to_cpu_present) \
102 APICFUNC(setup_portio_remap) \
103 APICFUNC(check_phys_apicid_present) \
104 APICFUNC(mps_oem_check) \
105 APICFUNC(get_apic_id) \
106 .apic_id_mask = APIC_ID_MASK, \
107 APICFUNC(cpu_mask_to_apicid) \
108 APICFUNC(vector_allocation_domain) \
109 APICFUNC(acpi_madt_oem_check) \
110 IPIFUNC(send_IPI_mask) \
111 IPIFUNC(send_IPI_allbutself) \
112 IPIFUNC(send_IPI_all) \
113 APICFUNC(enable_apic_mode) \
114 APICFUNC(phys_pkg_id) \
115}
116
117extern struct genapic *genapic;
118
119enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
120#define get_uv_system_type() UV_NONE
121#define is_uv_system() 0
122#define uv_wakeup_secondary(a, b) 1
123#define uv_system_init() do {} while (0)
124
125
126#endif /* ASM_X86__GENAPIC_32_H */
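
APIC_INIT() builds a struct genapic almost entirely out of APICFUNC(x), which expands to the designated initializer ".x = x," and so wires each member to the same-named function supplied by the subarchitecture. A cut-down model of the pattern:

#include <stdio.h>

struct ops {
    const char *name;
    int (*probe)(void);
};

static int probe(void) { return 1; }

/* Same shape as APICFUNC(): member name and function name must match. */
#define OPSFUNC(x) .x = x,

static struct ops demo = {
    .name = "demo",
    OPSFUNC(probe)
};

int main(void)
{
    printf("%s probe() -> %d\n", demo.name, demo.probe());
    return 0;
}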
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
deleted file mode 100644
index ed6a4886c082..000000000000
--- a/include/asm-x86/genapic_64.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef ASM_X86__GENAPIC_64_H
2#define ASM_X86__GENAPIC_64_H
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC sub-arch data struct.
9 *
10 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
11 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
12 * James Cleverdon.
13 */
14
15struct genapic {
16 char *name;
17 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
18 u32 int_delivery_mode;
19 u32 int_dest_mode;
20 int (*apic_id_registered)(void);
21 cpumask_t (*target_cpus)(void);
22 cpumask_t (*vector_allocation_domain)(int cpu);
23 void (*init_apic_ldr)(void);
24 /* ipi */
25 void (*send_IPI_mask)(cpumask_t mask, int vector);
26 void (*send_IPI_allbutself)(int vector);
27 void (*send_IPI_all)(int vector);
28 void (*send_IPI_self)(int vector);
29 /* */
30 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
31 unsigned int (*phys_pkg_id)(int index_msb);
32 unsigned int (*get_apic_id)(unsigned long x);
33 unsigned long (*set_apic_id)(unsigned int id);
34 unsigned long apic_id_mask;
35};
36
37extern struct genapic *genapic;
38
39extern struct genapic apic_flat;
40extern struct genapic apic_physflat;
41extern struct genapic apic_x2apic_cluster;
42extern struct genapic apic_x2apic_phys;
43extern int acpi_madt_oem_check(char *, char *);
44
45extern void apic_send_IPI_self(int vector);
46enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
47extern enum uv_system_type get_uv_system_type(void);
48extern int is_uv_system(void);
49
50extern struct genapic apic_x2apic_uv_x;
51DECLARE_PER_CPU(int, x2apic_extra_bits);
52extern void uv_cpu_init(void);
53extern void uv_system_init(void);
54extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
55
56extern void setup_apic_routing(void);
57
58#endif /* ASM_X86__GENAPIC_64_H */
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
deleted file mode 100644
index 3f3444be2638..000000000000
--- a/include/asm-x86/geode.h
+++ /dev/null
@@ -1,253 +0,0 @@
1/*
2 * AMD Geode definitions
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public License
7 * as published by the Free Software Foundation.
8 */
9
10#ifndef ASM_X86__GEODE_H
11#define ASM_X86__GEODE_H
12
13#include <asm/processor.h>
14#include <linux/io.h>
15
16/* Generic southbridge functions */
17
18#define GEODE_DEV_PMS 0
19#define GEODE_DEV_ACPI 1
20#define GEODE_DEV_GPIO 2
21#define GEODE_DEV_MFGPT 3
22
23extern int geode_get_dev_base(unsigned int dev);
24
25/* Useful macros */
26#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
27#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
28#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
29#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
30
31/* MSRS */
32
33#define MSR_GLIU_P2D_RO0 0x10000029
34
35#define MSR_LX_GLD_MSR_CONFIG 0x48002001
36#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
37 * sheet has the wrong value */
38#define MSR_GLCP_SYS_RSTPLL 0x4C000014
39#define MSR_GLCP_DOTPLL 0x4C000015
40
41#define MSR_LBAR_SMB 0x5140000B
42#define MSR_LBAR_GPIO 0x5140000C
43#define MSR_LBAR_MFGPT 0x5140000D
44#define MSR_LBAR_ACPI 0x5140000E
45#define MSR_LBAR_PMS 0x5140000F
46
47#define MSR_DIVIL_SOFT_RESET 0x51400017
48
49#define MSR_PIC_YSEL_LOW 0x51400020
50#define MSR_PIC_YSEL_HIGH 0x51400021
51#define MSR_PIC_ZSEL_LOW 0x51400022
52#define MSR_PIC_ZSEL_HIGH 0x51400023
53#define MSR_PIC_IRQM_LPC 0x51400025
54
55#define MSR_MFGPT_IRQ 0x51400028
56#define MSR_MFGPT_NR 0x51400029
57#define MSR_MFGPT_SETUP 0x5140002B
58
59#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
60
61#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
62#define MSR_GX_MSR_PADSEL 0xC0002011
63
64/* Resource Sizes */
65
66#define LBAR_GPIO_SIZE 0xFF
67#define LBAR_MFGPT_SIZE 0x40
68#define LBAR_ACPI_SIZE 0x40
69#define LBAR_PMS_SIZE 0x80
70
71/* ACPI registers (PMS block) */
72
73/*
74 * PM1_EN is only valid for 16 bit reads when VSA is enabled.
75 * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
76 * with a 32 bit read at offset 0x0.
77 */
78
79#define PM1_STS 0x00
80#define PM1_EN 0x02
81#define PM1_CNT 0x08
82#define PM2_CNT 0x0C
83#define PM_TMR 0x10
84#define PM_GPE0_STS 0x18
85#define PM_GPE0_EN 0x1C
86
87/* PMC registers (PMS block) */
88
89#define PM_SSD 0x00
90#define PM_SCXA 0x04
91#define PM_SCYA 0x08
92#define PM_OUT_SLPCTL 0x0C
93#define PM_SCLK 0x10
94#define PM_SED 0x1
95#define PM_SCXD 0x18
96#define PM_SCYD 0x1C
97#define PM_IN_SLPCTL 0x20
98#define PM_WKD 0x30
99#define PM_WKXD 0x34
100#define PM_RD 0x38
101#define PM_WKXA 0x3C
102#define PM_FSD 0x40
103#define PM_TSD 0x44
104#define PM_PSD 0x48
105#define PM_NWKD 0x4C
106#define PM_AWKD 0x50
107#define PM_SSC 0x54
108
109/* VSA2 magic values */
110
111#define VSA_VRC_INDEX 0xAC1C
112#define VSA_VRC_DATA 0xAC1E
113#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
114#define VSA_VR_SIGNATURE 0x0003
115#define VSA_VR_MEM_SIZE 0x0200
116#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
117#define GSW_VSA_SIG 0x534d /* General Software signature */
118/* GPIO */
119
120#define GPIO_OUTPUT_VAL 0x00
121#define GPIO_OUTPUT_ENABLE 0x04
122#define GPIO_OUTPUT_OPEN_DRAIN 0x08
123#define GPIO_OUTPUT_INVERT 0x0C
124#define GPIO_OUTPUT_AUX1 0x10
125#define GPIO_OUTPUT_AUX2 0x14
126#define GPIO_PULL_UP 0x18
127#define GPIO_PULL_DOWN 0x1C
128#define GPIO_INPUT_ENABLE 0x20
129#define GPIO_INPUT_INVERT 0x24
130#define GPIO_INPUT_FILTER 0x28
131#define GPIO_INPUT_EVENT_COUNT 0x2C
132#define GPIO_READ_BACK 0x30
133#define GPIO_INPUT_AUX1 0x34
134#define GPIO_EVENTS_ENABLE 0x38
135#define GPIO_LOCK_ENABLE 0x3C
136#define GPIO_POSITIVE_EDGE_EN 0x40
137#define GPIO_NEGATIVE_EDGE_EN 0x44
138#define GPIO_POSITIVE_EDGE_STS 0x48
139#define GPIO_NEGATIVE_EDGE_STS 0x4C
140
141#define GPIO_MAP_X 0xE0
142#define GPIO_MAP_Y 0xE4
143#define GPIO_MAP_Z 0xE8
144#define GPIO_MAP_W 0xEC
145
146static inline u32 geode_gpio(unsigned int nr)
147{
148 BUG_ON(nr > 28);
149 return 1 << nr;
150}
151
152extern void geode_gpio_set(u32, unsigned int);
153extern void geode_gpio_clear(u32, unsigned int);
154extern int geode_gpio_isset(u32, unsigned int);
155extern void geode_gpio_setup_event(unsigned int, int, int);
156extern void geode_gpio_set_irq(unsigned int, unsigned int);
157
158static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
159{
160 geode_gpio_setup_event(gpio, pair, 0);
161}
162
163static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
164{
165 geode_gpio_setup_event(gpio, pair, 1);
166}
167
168/* Specific geode tests */
169
170static inline int is_geode_gx(void)
171{
172 return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) &&
173 (boot_cpu_data.x86 == 5) &&
174 (boot_cpu_data.x86_model == 5));
175}
176
177static inline int is_geode_lx(void)
178{
179 return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
180 (boot_cpu_data.x86 == 5) &&
181 (boot_cpu_data.x86_model == 10));
182}
183
184static inline int is_geode(void)
185{
186 return (is_geode_gx() || is_geode_lx());
187}
188
189#ifdef CONFIG_MGEODE_LX
190extern int geode_has_vsa2(void);
191#else
192static inline int geode_has_vsa2(void)
193{
194 return 0;
195}
196#endif
197
198/* MFGPTs */
199
200#define MFGPT_MAX_TIMERS 8
201#define MFGPT_TIMER_ANY (-1)
202
203#define MFGPT_DOMAIN_WORKING 1
204#define MFGPT_DOMAIN_STANDBY 2
205#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
206
207#define MFGPT_CMP1 0
208#define MFGPT_CMP2 1
209
210#define MFGPT_EVENT_IRQ 0
211#define MFGPT_EVENT_NMI 1
212#define MFGPT_EVENT_RESET 3
213
214#define MFGPT_REG_CMP1 0
215#define MFGPT_REG_CMP2 2
216#define MFGPT_REG_COUNTER 4
217#define MFGPT_REG_SETUP 6
218
219#define MFGPT_SETUP_CNTEN (1 << 15)
220#define MFGPT_SETUP_CMP2 (1 << 14)
221#define MFGPT_SETUP_CMP1 (1 << 13)
222#define MFGPT_SETUP_SETUP (1 << 12)
223#define MFGPT_SETUP_STOPEN (1 << 11)
224#define MFGPT_SETUP_EXTEN (1 << 10)
225#define MFGPT_SETUP_REVEN (1 << 5)
226#define MFGPT_SETUP_CLKSEL (1 << 4)
227
228static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
229{
230 u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
231 outw(value, base + reg + (timer * 8));
232}
233
234static inline u16 geode_mfgpt_read(int timer, u16 reg)
235{
236 u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
237 return inw(base + reg + (timer * 8));
238}
239
240extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
241extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
242extern int geode_mfgpt_alloc_timer(int timer, int domain);
243
244#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
245#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
246
247#ifdef CONFIG_GEODE_MFGPT_TIMER
248extern int __init mfgpt_timer_setup(void);
249#else
250static inline int mfgpt_timer_setup(void) { return 0; }
251#endif
252
253#endif /* ASM_X86__GEODE_H */
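
geode_mfgpt_read()/geode_mfgpt_write() address a timer register as base + reg + timer * 8, since each MFGPT owns an 8-byte window of I/O space. The offset arithmetic alone, with a made-up base (the real one comes from geode_get_dev_base(GEODE_DEV_MFGPT)):

#include <stdio.h>

#define MFGPT_REG_COUNTER 4
#define MFGPT_REG_SETUP   6

int main(void)
{
    unsigned int base = 0x6200;     /* hypothetical LBAR_MFGPT base */
    int timer = 2;

    printf("timer %d: counter at 0x%x, setup at 0x%x\n", timer,
           base + MFGPT_REG_COUNTER + timer * 8,
           base + MFGPT_REG_SETUP + timer * 8);
    return 0;
}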
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
deleted file mode 100644
index 497fb980d962..000000000000
--- a/include/asm-x86/gpio.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Generic GPIO API implementation for x86.
3 *
4 * Derived from the generic GPIO API for powerpc:
5 *
6 * Copyright (c) 2007-2008 MontaVista Software, Inc.
7 *
8 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef ASM_X86__GPIO_H
17#define ASM_X86__GPIO_H
18
19#include <asm-generic/gpio.h>
20
21#ifdef CONFIG_GPIOLIB
22
23/*
24 * Just call gpiolib.
25 */
26static inline int gpio_get_value(unsigned int gpio)
27{
28 return __gpio_get_value(gpio);
29}
30
31static inline void gpio_set_value(unsigned int gpio, int value)
32{
33 __gpio_set_value(gpio, value);
34}
35
36static inline int gpio_cansleep(unsigned int gpio)
37{
38 return __gpio_cansleep(gpio);
39}
40
41/*
42 * Not implemented yet.
43 */
44static inline int gpio_to_irq(unsigned int gpio)
45{
46 return -ENOSYS;
47}
48
49static inline int irq_to_gpio(unsigned int irq)
50{
51 return -EINVAL;
52}
53
54#endif /* CONFIG_GPIOLIB */
55
56#endif /* ASM_X86__GPIO_H */
diff --git a/include/asm-x86/hardirq.h b/include/asm-x86/hardirq.h
deleted file mode 100644
index 000787df66e6..000000000000
--- a/include/asm-x86/hardirq.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "hardirq_32.h"
3#else
4# include "hardirq_64.h"
5#endif
6
7extern u64 arch_irq_stat_cpu(unsigned int cpu);
8#define arch_irq_stat_cpu arch_irq_stat_cpu
9
10extern u64 arch_irq_stat(void);
11#define arch_irq_stat arch_irq_stat
diff --git a/include/asm-x86/hardirq_32.h b/include/asm-x86/hardirq_32.h
deleted file mode 100644
index 700fe230d919..000000000000
--- a/include/asm-x86/hardirq_32.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef ASM_X86__HARDIRQ_32_H
2#define ASM_X86__HARDIRQ_32_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned long idle_timestamp;
10 unsigned int __nmi_count; /* arch dependent */
11 unsigned int apic_timer_irqs; /* arch dependent */
12 unsigned int irq0_irqs;
13 unsigned int irq_resched_count;
14 unsigned int irq_call_count;
15 unsigned int irq_tlb_count;
16 unsigned int irq_thermal_count;
17 unsigned int irq_spurious_count;
18} ____cacheline_aligned irq_cpustat_t;
19
20DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
21
22#define __ARCH_IRQ_STAT
23#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
24
25void ack_bad_irq(unsigned int irq);
26#include <linux/irq_cpustat.h>
27
28#endif /* ASM_X86__HARDIRQ_32_H */
diff --git a/include/asm-x86/hardirq_64.h b/include/asm-x86/hardirq_64.h
deleted file mode 100644
index f8bd2919a8ce..000000000000
--- a/include/asm-x86/hardirq_64.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef ASM_X86__HARDIRQ_64_H
2#define ASM_X86__HARDIRQ_64_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6#include <asm/pda.h>
7#include <asm/apic.h>
8
9/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
10#define MAX_HARDIRQS_PER_CPU NR_VECTORS
11
12#define __ARCH_IRQ_STAT 1
13
14#define local_softirq_pending() read_pda(__softirq_pending)
15
16#define __ARCH_SET_SOFTIRQ_PENDING 1
17
18#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
19#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
20
21extern void ack_bad_irq(unsigned int irq);
22
23#endif /* ASM_X86__HARDIRQ_64_H */
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
deleted file mode 100644
index bc3f6a280316..000000000000
--- a/include/asm-x86/highmem.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * highmem.h: virtual kernel memory mappings for high memory
3 *
4 * Used in CONFIG_HIGHMEM systems for memory pages which
5 * are not addressable by direct kernel virtual addresses.
6 *
7 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
8 * Gerhard.Wichert@pdb.siemens.de
9 *
10 *
11 * Redesigned the x86 32-bit VM architecture to deal with
12 * up to 16 Terabyte physical memory. With current x86 CPUs
13 * we now support up to 64 Gigabytes physical RAM.
14 *
15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
16 */
17
18#ifndef ASM_X86__HIGHMEM_H
19#define ASM_X86__HIGHMEM_H
20
21#ifdef __KERNEL__
22
23#include <linux/interrupt.h>
24#include <linux/threads.h>
25#include <asm/kmap_types.h>
26#include <asm/tlbflush.h>
27#include <asm/paravirt.h>
28
29/* declarations for highmem.c */
30extern unsigned long highstart_pfn, highend_pfn;
31
32extern pte_t *kmap_pte;
33extern pgprot_t kmap_prot;
34extern pte_t *pkmap_page_table;
35
36/*
37 * Right now we initialize only a single pte table. It can be extended
38 * easily; subsequent pte tables have to be allocated in one physical
39 * chunk of RAM.
40 */
41/*
42 * Ordering is:
43 *
44 * FIXADDR_TOP
45 * fixed_addresses
46 * FIXADDR_START
47 * temp fixed addresses
48 * FIXADDR_BOOT_START
49 * Persistent kmap area
50 * PKMAP_BASE
51 * VMALLOC_END
52 * Vmalloc area
53 * VMALLOC_START
54 * high_memory
55 */
56#define LAST_PKMAP_MASK (LAST_PKMAP-1)
57#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
58#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
59
60extern void *kmap_high(struct page *page);
61extern void kunmap_high(struct page *page);
62
63void *kmap(struct page *page);
64void kunmap(struct page *page);
65void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
66void *kmap_atomic(struct page *page, enum km_type type);
67void kunmap_atomic(void *kvaddr, enum km_type type);
68void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
69struct page *kmap_atomic_to_page(void *ptr);
70
71#ifndef CONFIG_PARAVIRT
72#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
73#endif
74
75#define flush_cache_kmaps() do { } while (0)
76
77extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
78 unsigned long end_pfn);
79
80#endif /* __KERNEL__ */
81
82#endif /* ASM_X86__HIGHMEM_H */
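
PKMAP_NR() and PKMAP_ADDR() are exact inverses over the persistent-kmap window, converting between a virtual address and its pkmap slot by shifting relative to PKMAP_BASE. A round-trip check with illustrative values for PAGE_SHIFT and PKMAP_BASE (the real constants come from the mm headers):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PKMAP_BASE      0xff800000UL
#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
    unsigned long nr = 37;

    assert(PKMAP_NR(PKMAP_ADDR(nr)) == nr);
    printf("slot %lu <-> address 0x%lx\n", nr, PKMAP_ADDR(nr));
    return 0;
}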
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h
deleted file mode 100644
index 58b273f6ef07..000000000000
--- a/include/asm-x86/hpet.h
+++ /dev/null
@@ -1,114 +0,0 @@
1#ifndef ASM_X86__HPET_H
2#define ASM_X86__HPET_H
3
4#include <linux/msi.h>
5
6#ifdef CONFIG_HPET_TIMER
7
8#define HPET_MMAP_SIZE 1024
9
10#define HPET_ID 0x000
11#define HPET_PERIOD 0x004
12#define HPET_CFG 0x010
13#define HPET_STATUS 0x020
14#define HPET_COUNTER 0x0f0
15
16#define HPET_Tn_CFG(n) (0x100 + 0x20 * n)
17#define HPET_Tn_CMP(n) (0x108 + 0x20 * n)
18#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n)
19
20#define HPET_T0_CFG 0x100
21#define HPET_T0_CMP 0x108
22#define HPET_T0_ROUTE 0x110
23#define HPET_T1_CFG 0x120
24#define HPET_T1_CMP 0x128
25#define HPET_T1_ROUTE 0x130
26#define HPET_T2_CFG 0x140
27#define HPET_T2_CMP 0x148
28#define HPET_T2_ROUTE 0x150
29
30#define HPET_ID_REV 0x000000ff
31#define HPET_ID_NUMBER 0x00001f00
32#define HPET_ID_64BIT 0x00002000
33#define HPET_ID_LEGSUP 0x00008000
34#define HPET_ID_VENDOR 0xffff0000
35#define HPET_ID_NUMBER_SHIFT 8
36#define HPET_ID_VENDOR_SHIFT 16
37
38#define HPET_ID_VENDOR_8086 0x8086
39
40#define HPET_CFG_ENABLE 0x001
41#define HPET_CFG_LEGACY 0x002
42#define HPET_LEGACY_8254 2
43#define HPET_LEGACY_RTC 8
44
45#define HPET_TN_LEVEL 0x0002
46#define HPET_TN_ENABLE 0x0004
47#define HPET_TN_PERIODIC 0x0008
48#define HPET_TN_PERIODIC_CAP 0x0010
49#define HPET_TN_64BIT_CAP 0x0020
50#define HPET_TN_SETVAL 0x0040
51#define HPET_TN_32BIT 0x0100
52#define HPET_TN_ROUTE 0x3e00
53#define HPET_TN_FSB 0x4000
54#define HPET_TN_FSB_CAP 0x8000
55#define HPET_TN_ROUTE_SHIFT 9
56
57/* Max HPET Period is 10^8 femto sec as in HPET spec */
58#define HPET_MAX_PERIOD 100000000UL
59/*
60 * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
61 * then the 32 bit HPET counter wraps around in less than 0.5 sec.
62 */
63#define HPET_MIN_PERIOD 100000UL
64
65/* hpet memory map physical address */
66extern unsigned long hpet_address;
67extern unsigned long force_hpet_address;
68extern int hpet_force_user;
69extern int is_hpet_enabled(void);
70extern int hpet_enable(void);
71extern void hpet_disable(void);
72extern unsigned long hpet_readl(unsigned long a);
73extern void force_hpet_resume(void);
74
75extern void hpet_msi_unmask(unsigned int irq);
76extern void hpet_msi_mask(unsigned int irq);
77extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
78extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
79
80#ifdef CONFIG_PCI_MSI
81extern int arch_setup_hpet_msi(unsigned int irq);
82#else
83static inline int arch_setup_hpet_msi(unsigned int irq)
84{
85 return -EINVAL;
86}
87#endif
88
89#ifdef CONFIG_HPET_EMULATE_RTC
90
91#include <linux/interrupt.h>
92
93typedef irqreturn_t (*rtc_irq_handler)(int interrupt, void *cookie);
94extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
95extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
96extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
97 unsigned char sec);
98extern int hpet_set_periodic_freq(unsigned long freq);
99extern int hpet_rtc_dropped_irq(void);
100extern int hpet_rtc_timer_init(void);
101extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
102extern int hpet_register_irq_handler(rtc_irq_handler handler);
103extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
104
105#endif /* CONFIG_HPET_EMULATE_RTC */
106
107#else /* CONFIG_HPET_TIMER */
108
109static inline int hpet_enable(void) { return 0; }
110static inline int is_hpet_enabled(void) { return 0; }
111#define hpet_readl(a) 0
112
113#endif
114#endif /* ASM_X86__HPET_H */
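
The per-timer HPET registers sit 0x20 bytes apart starting at 0x100, so the HPET_Tn_*() macros reproduce the fixed HPET_T0/T1/T2 constants listed above. A quick consistency check:

#include <assert.h>

#define HPET_Tn_CFG(n)      (0x100 + 0x20 * (n))
#define HPET_Tn_CMP(n)      (0x108 + 0x20 * (n))
#define HPET_Tn_ROUTE(n)    (0x110 + 0x20 * (n))

int main(void)
{
    assert(HPET_Tn_CFG(1)   == 0x120);  /* HPET_T1_CFG   */
    assert(HPET_Tn_CMP(2)   == 0x148);  /* HPET_T2_CMP   */
    assert(HPET_Tn_ROUTE(0) == 0x110);  /* HPET_T0_ROUTE */
    return 0;
}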
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
deleted file mode 100644
index 0b7ec5dc0884..000000000000
--- a/include/asm-x86/hugetlb.h
+++ /dev/null
@@ -1,93 +0,0 @@
1#ifndef ASM_X86__HUGETLB_H
2#define ASM_X86__HUGETLB_H
3
4#include <asm/page.h>
5
6
7static inline int is_hugepage_only_range(struct mm_struct *mm,
8 unsigned long addr,
9 unsigned long len) {
10 return 0;
11}
12
13/*
14 * If the arch doesn't supply something else, assume that hugepage
15 * size aligned regions are ok without further preparation.
16 */
17static inline int prepare_hugepage_range(struct file *file,
18 unsigned long addr, unsigned long len)
19{
20 struct hstate *h = hstate_file(file);
21 if (len & ~huge_page_mask(h))
22 return -EINVAL;
23 if (addr & ~huge_page_mask(h))
24 return -EINVAL;
25 return 0;
26}
27
28static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
29}
30
31static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
32 unsigned long addr, unsigned long end,
33 unsigned long floor,
34 unsigned long ceiling)
35{
36 free_pgd_range(tlb, addr, end, floor, ceiling);
37}
38
39static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
40 pte_t *ptep, pte_t pte)
41{
42 set_pte_at(mm, addr, ptep, pte);
43}
44
45static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
46 unsigned long addr, pte_t *ptep)
47{
48 return ptep_get_and_clear(mm, addr, ptep);
49}
50
51static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
52 unsigned long addr, pte_t *ptep)
53{
54}
55
56static inline int huge_pte_none(pte_t pte)
57{
58 return pte_none(pte);
59}
60
61static inline pte_t huge_pte_wrprotect(pte_t pte)
62{
63 return pte_wrprotect(pte);
64}
65
66static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
67 unsigned long addr, pte_t *ptep)
68{
69 ptep_set_wrprotect(mm, addr, ptep);
70}
71
72static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
73 unsigned long addr, pte_t *ptep,
74 pte_t pte, int dirty)
75{
76 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
77}
78
79static inline pte_t huge_ptep_get(pte_t *ptep)
80{
81 return *ptep;
82}
83
84static inline int arch_prepare_hugepage(struct page *page)
85{
86 return 0;
87}
88
89static inline void arch_release_hugepage(struct page *page)
90{
91}
92
93#endif /* ASM_X86__HUGETLB_H */
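
prepare_hugepage_range() performs a pure alignment check: both addr and len are tested against ~huge_page_mask(h), i.e. against size - 1 for the file's huge page size. The same test in isolation (2 MB pages assumed for illustration):

#include <stdio.h>

int main(void)
{
    unsigned long sz_mask = (2UL << 20) - 1;    /* size - 1 for 2 MB pages */
    unsigned long addr = 0x40000000UL;
    unsigned long len  = 4UL << 20;

    if ((addr & sz_mask) || (len & sz_mask))
        printf("rejected (-EINVAL)\n");
    else
        printf("range accepted\n");
    return 0;
}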
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
deleted file mode 100644
index 749d042f0556..000000000000
--- a/include/asm-x86/hw_irq.h
+++ /dev/null
@@ -1,131 +0,0 @@
1#ifndef ASM_X86__HW_IRQ_H
2#define ASM_X86__HW_IRQ_H
3
4/*
5 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
6 *
7 * moved some of the old arch/i386/kernel/irq.h to here. VY
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 *
12 * hacked by Andi Kleen for x86-64.
13 * unified by tglx
14 */
15
16#include <asm/irq_vectors.h>
17
18#ifndef __ASSEMBLY__
19
20#include <linux/percpu.h>
21#include <linux/profile.h>
22#include <linux/smp.h>
23
24#include <asm/atomic.h>
25#include <asm/irq.h>
26#include <asm/sections.h>
27
28#define platform_legacy_irq(irq) ((irq) < 16)
29
30/* Interrupt handlers registered during init_IRQ */
31extern void apic_timer_interrupt(void);
32extern void error_interrupt(void);
33extern void spurious_interrupt(void);
34extern void thermal_interrupt(void);
35extern void reschedule_interrupt(void);
36
37extern void invalidate_interrupt(void);
38extern void invalidate_interrupt0(void);
39extern void invalidate_interrupt1(void);
40extern void invalidate_interrupt2(void);
41extern void invalidate_interrupt3(void);
42extern void invalidate_interrupt4(void);
43extern void invalidate_interrupt5(void);
44extern void invalidate_interrupt6(void);
45extern void invalidate_interrupt7(void);
46
47extern void irq_move_cleanup_interrupt(void);
48extern void threshold_interrupt(void);
49
50extern void call_function_interrupt(void);
51extern void call_function_single_interrupt(void);
52
53/* PIC specific functions */
54extern void disable_8259A_irq(unsigned int irq);
55extern void enable_8259A_irq(unsigned int irq);
56extern int i8259A_irq_pending(unsigned int irq);
57extern void make_8259A_irq(unsigned int irq);
58extern void init_8259A(int aeoi);
59
60/* IOAPIC */
61#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
62extern unsigned long io_apic_irqs;
63
64extern void init_VISWS_APIC_irqs(void);
65extern void setup_IO_APIC(void);
66extern void disable_IO_APIC(void);
67extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
68extern void setup_ioapic_dest(void);
69
70#ifdef CONFIG_X86_64
71extern void enable_IO_APIC(void);
72#endif
73
74/* IPI functions */
75#ifdef CONFIG_X86_32
76extern void send_IPI_self(int vector);
77#endif
78extern void send_IPI(int dest, int vector);
79
80/* Statistics */
81extern atomic_t irq_err_count;
82extern atomic_t irq_mis_count;
83
84/* EISA */
85extern void eisa_set_level_irq(unsigned int irq);
86
87/* Voyager functions */
88extern asmlinkage void vic_cpi_interrupt(void);
89extern asmlinkage void vic_sys_interrupt(void);
90extern asmlinkage void vic_cmn_interrupt(void);
91extern asmlinkage void qic_timer_interrupt(void);
92extern asmlinkage void qic_invalidate_interrupt(void);
93extern asmlinkage void qic_reschedule_interrupt(void);
94extern asmlinkage void qic_enable_irq_interrupt(void);
95extern asmlinkage void qic_call_function_interrupt(void);
96
97/* SMP */
98extern void smp_apic_timer_interrupt(struct pt_regs *);
99extern void smp_spurious_interrupt(struct pt_regs *);
100extern void smp_error_interrupt(struct pt_regs *);
101#ifdef CONFIG_X86_SMP
102extern void smp_reschedule_interrupt(struct pt_regs *);
103extern void smp_call_function_interrupt(struct pt_regs *);
104extern void smp_call_function_single_interrupt(struct pt_regs *);
105#ifdef CONFIG_X86_32
106extern void smp_invalidate_interrupt(struct pt_regs *);
107#else
108extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
109#endif
110#endif
111
112#ifdef CONFIG_X86_32
113extern void (*const interrupt[NR_VECTORS])(void);
114#endif
115
116typedef int vector_irq_t[NR_VECTORS];
117DECLARE_PER_CPU(vector_irq_t, vector_irq);
118
119#ifdef CONFIG_X86_IO_APIC
120extern void lock_vector_lock(void);
121extern void unlock_vector_lock(void);
122extern void __setup_vector_irq(int cpu);
123#else
124static inline void lock_vector_lock(void) {}
125static inline void unlock_vector_lock(void) {}
126static inline void __setup_vector_irq(int cpu) {}
127#endif
128
129#endif /* !__ASSEMBLY__ */
130
131#endif /* ASM_X86__HW_IRQ_H */
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h
deleted file mode 100644
index cc011a3bc1c2..000000000000
--- a/include/asm-x86/hypertransport.h
+++ /dev/null
@@ -1,45 +0,0 @@
1#ifndef ASM_X86__HYPERTRANSPORT_H
2#define ASM_X86__HYPERTRANSPORT_H
3
4/*
5 * Constants for x86 Hypertransport Interrupts.
6 */
7
8#define HT_IRQ_LOW_BASE 0xf8000000
9
10#define HT_IRQ_LOW_VECTOR_SHIFT 16
11#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
12#define HT_IRQ_LOW_VECTOR(v) \
13 (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
14
15#define HT_IRQ_LOW_DEST_ID_SHIFT 8
16#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
17#define HT_IRQ_LOW_DEST_ID(v) \
18 (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
19
20#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
21#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
22
23#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
24#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
25
26
27#define HT_IRQ_LOW_MT_FIXED 0x0000000
28#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
29#define HT_IRQ_LOW_MT_SMI 0x0000008
30#define HT_IRQ_LOW_MT_NMI 0x000000c
31#define HT_IRQ_LOW_MT_INIT 0x0000010
32#define HT_IRQ_LOW_MT_STARTUP 0x0000014
33#define HT_IRQ_LOW_MT_EXTINT 0x0000018
34#define HT_IRQ_LOW_MT_LINT1 0x000008c
35#define HT_IRQ_LOW_MT_LINT0 0x0000098
36
37#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
38
39
40#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
41#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
42#define HT_IRQ_HIGH_DEST_ID(v) \
43 ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
44
45#endif /* ASM_X86__HYPERTRANSPORT_H */
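
HT_IRQ_LOW_VECTOR() and HT_IRQ_LOW_DEST_ID() shift their arguments into bits 23..16 and 15..8 of the low interrupt word and mask off anything that overflows the field. Composing a sample word on top of HT_IRQ_LOW_BASE (vector and destination chosen arbitrarily):

#include <stdio.h>

#define HT_IRQ_LOW_BASE       0xf8000000u
#define HT_IRQ_LOW_VECTOR(v)  (((v) << 16) & 0x00ff0000)
#define HT_IRQ_LOW_DEST_ID(v) (((v) << 8) & 0x0000ff00)

int main(void)
{
    unsigned int low = HT_IRQ_LOW_BASE
                     | HT_IRQ_LOW_VECTOR(0x31)   /* vector 0x31    */
                     | HT_IRQ_LOW_DEST_ID(0x05); /* APIC dest ID 5 */

    printf("HT low word: 0x%08x\n", low);        /* 0xf8310500 */
    return 0;
}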
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
deleted file mode 100644
index 9ba862a4eac0..000000000000
--- a/include/asm-x86/i387.h
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * Copyright (C) 1994 Linus Torvalds
3 *
4 * Pentium III FXSR, SSE support
5 * General FPU state handling cleanups
6 * Gareth Hughes <gareth@valinux.com>, May 2000
7 * x86-64 work by Andi Kleen 2002
8 */
9
10#ifndef ASM_X86__I387_H
11#define ASM_X86__I387_H
12
13#include <linux/sched.h>
14#include <linux/kernel_stat.h>
15#include <linux/regset.h>
16#include <linux/hardirq.h>
17#include <asm/asm.h>
18#include <asm/processor.h>
19#include <asm/sigcontext.h>
20#include <asm/user.h>
21#include <asm/uaccess.h>
22#include <asm/xsave.h>
23
24extern unsigned int sig_xstate_size;
25extern void fpu_init(void);
26extern void mxcsr_feature_mask_init(void);
27extern int init_fpu(struct task_struct *child);
28extern asmlinkage void math_state_restore(void);
29extern void init_thread_xstate(void);
30extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
31
32extern user_regset_active_fn fpregs_active, xfpregs_active;
33extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
34extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;
35
36extern struct _fpx_sw_bytes fx_sw_reserved;
37#ifdef CONFIG_IA32_EMULATION
38extern unsigned int sig_xstate_ia32_size;
39extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
40struct _fpstate_ia32;
41struct _xstate_ia32;
42extern int save_i387_xstate_ia32(void __user *buf);
43extern int restore_i387_xstate_ia32(void __user *buf);
44#endif
45
46#define X87_FSW_ES (1 << 7) /* Exception Summary */
47
48#ifdef CONFIG_X86_64
49
50/* Ignore delayed exceptions from user space */
51static inline void tolerant_fwait(void)
52{
53 asm volatile("1: fwait\n"
54 "2:\n"
55 _ASM_EXTABLE(1b, 2b));
56}
57
58static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
59{
60 int err;
61
62 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
63 "2:\n"
64 ".section .fixup,\"ax\"\n"
65 "3: movl $-1,%[err]\n"
66 " jmp 2b\n"
67 ".previous\n"
68 _ASM_EXTABLE(1b, 3b)
69 : [err] "=r" (err)
70#if 0 /* See comment in __save_init_fpu() below. */
71 : [fx] "r" (fx), "m" (*fx), "0" (0));
72#else
73 : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
74#endif
75 return err;
76}
77
78static inline int restore_fpu_checking(struct task_struct *tsk)
79{
80 if (task_thread_info(tsk)->status & TS_XSAVE)
81 return xrstor_checking(&tsk->thread.xstate->xsave);
82 else
83 return fxrstor_checking(&tsk->thread.xstate->fxsave);
84}
85
86/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
87 is pending. Clear the x87 state here by setting it to fixed
88 values. The kernel data segment can sometimes be 0 and sometimes
89 the new user value. Both should be ok.
90 Use the PDA as a safe address because it should already be in L1. */
91static inline void clear_fpu_state(struct task_struct *tsk)
92{
93 struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
94 struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
95
96 /*
97 * xsave header may indicate the init state of the FP.
98 */
99 if ((task_thread_info(tsk)->status & TS_XSAVE) &&
100 !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
101 return;
102
103 if (unlikely(fx->swd & X87_FSW_ES))
104 asm volatile("fnclex");
105 alternative_input(ASM_NOP8 ASM_NOP2,
106 " emms\n" /* clear stack tags */
107 " fildl %%gs:0", /* load to clear state */
108 X86_FEATURE_FXSAVE_LEAK);
109}
110
111static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
112{
113 int err;
114
115 asm volatile("1: rex64/fxsave (%[fx])\n\t"
116 "2:\n"
117 ".section .fixup,\"ax\"\n"
118 "3: movl $-1,%[err]\n"
119 " jmp 2b\n"
120 ".previous\n"
121 _ASM_EXTABLE(1b, 3b)
122 : [err] "=r" (err), "=m" (*fx)
123#if 0 /* See comment in __fxsave_clear() below. */
124 : [fx] "r" (fx), "0" (0));
125#else
126 : [fx] "cdaSDb" (fx), "0" (0));
127#endif
128 if (unlikely(err) &&
129 __clear_user(fx, sizeof(struct i387_fxsave_struct)))
130 err = -EFAULT;
131 /* No need to clear here because the caller clears USED_MATH */
132 return err;
133}
134
135static inline void fxsave(struct task_struct *tsk)
136{
137 /* Using "rex64; fxsave %0" is broken because, if the memory operand
138 uses any extended registers for addressing, a second REX prefix
139 will be generated (to the assembler, rex64 followed by semicolon
140 is a separate instruction), and hence the 64-bitness is lost. */
141#if 0
142 /* Using "fxsaveq %0" would be the ideal choice, but is only supported
143 starting with gas 2.16. */
144 __asm__ __volatile__("fxsaveq %0"
145 : "=m" (tsk->thread.xstate->fxsave));
146#elif 0
147 /* Using, as a workaround, the properly prefixed form below isn't
148 accepted by any binutils version so far released, complaining that
149 the same type of prefix is used twice if an extended register is
150 needed for addressing (fix submitted to mainline 2005-11-21). */
151 __asm__ __volatile__("rex64/fxsave %0"
152 : "=m" (tsk->thread.xstate->fxsave));
153#else
154 /* This, however, we can work around by forcing the compiler to select
155 an addressing mode that doesn't require extended registers. */
156 __asm__ __volatile__("rex64/fxsave (%1)"
157 : "=m" (tsk->thread.xstate->fxsave)
158 : "cdaSDb" (&tsk->thread.xstate->fxsave));
159#endif
160}
161
162static inline void __save_init_fpu(struct task_struct *tsk)
163{
164 if (task_thread_info(tsk)->status & TS_XSAVE)
165 xsave(tsk);
166 else
167 fxsave(tsk);
168
169 clear_fpu_state(tsk);
170 task_thread_info(tsk)->status &= ~TS_USEDFPU;
171}
172
173#else /* CONFIG_X86_32 */
174
175extern void finit(void);
176
177static inline void tolerant_fwait(void)
178{
179 asm volatile("fnclex ; fwait");
180}
181
182static inline void restore_fpu(struct task_struct *tsk)
183{
184 if (task_thread_info(tsk)->status & TS_XSAVE) {
185 xrstor_checking(&tsk->thread.xstate->xsave);
186 return;
187 }
188 /*
189 * The "nop" is needed to make the instructions the same
190 * length.
191 */
192 alternative_input(
193 "nop ; frstor %1",
194 "fxrstor %1",
195 X86_FEATURE_FXSR,
196 "m" (tsk->thread.xstate->fxsave));
197}
198
199/* We need a safe address that is cheap to find and that is already
200 in L1 during context switch. The best choices are unfortunately
201 different for UP and SMP. */
202#ifdef CONFIG_SMP
203#define safe_address (__per_cpu_offset[0])
204#else
205#define safe_address (kstat_cpu(0).cpustat.user)
206#endif
207
208/*
209 * These must be called with preempt disabled
210 */
211static inline void __save_init_fpu(struct task_struct *tsk)
212{
213 if (task_thread_info(tsk)->status & TS_XSAVE) {
214 struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
215 struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
216
217 xsave(tsk);
218
219 /*
220 * xsave header may indicate the init state of the FP.
221 */
222 if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
223 goto end;
224
225 if (unlikely(fx->swd & X87_FSW_ES))
226 asm volatile("fnclex");
227
228 /*
229 * we can do a simple return here or be paranoid :)
230 */
231 goto clear_state;
232 }
233
234 /* Use more nops than strictly needed in case the compiler
235 varies the code. */
236 alternative_input(
237 "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
238 "fxsave %[fx]\n"
239 "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
240 X86_FEATURE_FXSR,
241 [fx] "m" (tsk->thread.xstate->fxsave),
242 [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
243clear_state:
244 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
245 is pending. Clear the x87 state here by setting it to fixed
246 values. safe_address is an arbitrary variable that should be in L1. */
247 alternative_input(
248 GENERIC_NOP8 GENERIC_NOP2,
249 "emms\n\t" /* clear stack tags */
250 "fildl %[addr]", /* set F?P to defined value */
251 X86_FEATURE_FXSAVE_LEAK,
252 [addr] "m" (safe_address));
253end:
254 task_thread_info(tsk)->status &= ~TS_USEDFPU;
255}
256
257#endif /* CONFIG_X86_64 */
258
259/*
260 * Signal frame handlers...
261 */
262extern int save_i387_xstate(void __user *buf);
263extern int restore_i387_xstate(void __user *buf);
264
265static inline void __unlazy_fpu(struct task_struct *tsk)
266{
267 if (task_thread_info(tsk)->status & TS_USEDFPU) {
268 __save_init_fpu(tsk);
269 stts();
270 } else
271 tsk->fpu_counter = 0;
272}
273
274static inline void __clear_fpu(struct task_struct *tsk)
275{
276 if (task_thread_info(tsk)->status & TS_USEDFPU) {
277 tolerant_fwait();
278 task_thread_info(tsk)->status &= ~TS_USEDFPU;
279 stts();
280 }
281}
282
283static inline void kernel_fpu_begin(void)
284{
285 struct thread_info *me = current_thread_info();
286 preempt_disable();
287 if (me->status & TS_USEDFPU)
288 __save_init_fpu(me->task);
289 else
290 clts();
291}
292
293static inline void kernel_fpu_end(void)
294{
295 stts();
296 preempt_enable();
297}
298
299/*
300 * Some instructions, like VIA's padlock instructions, generate a spurious
301 * DNA fault but don't modify SSE registers. These instructions also
302 * get used from interrupt context. To prevent such kernel instructions in
303 * interrupt context from interacting wrongly with other user/kernel fpu
304 * usage, we should use them only within irq_ts_save()/irq_ts_restore().
305 */
306static inline int irq_ts_save(void)
307{
308 /*
309 * If we are in process context, we are ok to take a spurious DNA fault;
310 * doing clts() in process context would require preemption to
311 * be disabled or some heavy lifting like kernel_fpu_begin().
312 */
313 if (!in_interrupt())
314 return 0;
315
316 if (read_cr0() & X86_CR0_TS) {
317 clts();
318 return 1;
319 }
320
321 return 0;
322}
323
324static inline void irq_ts_restore(int TS_state)
325{
326 if (TS_state)
327 stts();
328}
329
330#ifdef CONFIG_X86_64
331
332static inline void save_init_fpu(struct task_struct *tsk)
333{
334 __save_init_fpu(tsk);
335 stts();
336}
337
338#define unlazy_fpu __unlazy_fpu
339#define clear_fpu __clear_fpu
340
341#else /* CONFIG_X86_32 */
342
343/*
344 * These disable preemption on their own and are safe
345 */
346static inline void save_init_fpu(struct task_struct *tsk)
347{
348 preempt_disable();
349 __save_init_fpu(tsk);
350 stts();
351 preempt_enable();
352}
353
354static inline void unlazy_fpu(struct task_struct *tsk)
355{
356 preempt_disable();
357 __unlazy_fpu(tsk);
358 preempt_enable();
359}
360
361static inline void clear_fpu(struct task_struct *tsk)
362{
363 preempt_disable();
364 __clear_fpu(tsk);
365 preempt_enable();
366}
367
368#endif /* CONFIG_X86_64 */
369
370/*
371 * i387 state interaction
372 */
373static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
374{
375 if (cpu_has_fxsr) {
376 return tsk->thread.xstate->fxsave.cwd;
377 } else {
378 return (unsigned short)tsk->thread.xstate->fsave.cwd;
379 }
380}
381
382static inline unsigned short get_fpu_swd(struct task_struct *tsk)
383{
384 if (cpu_has_fxsr) {
385 return tsk->thread.xstate->fxsave.swd;
386 } else {
387 return (unsigned short)tsk->thread.xstate->fsave.swd;
388 }
389}
390
391static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
392{
393 if (cpu_has_xmm) {
394 return tsk->thread.xstate->fxsave.mxcsr;
395 } else {
396 return MXCSR_DEFAULT;
397 }
398}
399
400#endif /* ASM_X86__I387_H */
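
The comment above irq_ts_save() implies a strict bracket: save (which clears CR0.TS only when called from interrupt context), run the fault-prone instruction, then restore. A userspace model of that contract, with in_interrupt() and the TS bit simulated by plain variables (the real functions touch CR0 via clts()/stts()):

#include <stdio.h>

static int in_irq = 1;      /* simulated in_interrupt() */
static int cr0_ts = 1;      /* simulated CR0.TS bit     */

static int irq_ts_save(void)
{
    if (!in_irq)
        return 0;           /* process context: spurious DNA fault is ok */
    if (cr0_ts) {
        cr0_ts = 0;         /* clts() */
        return 1;
    }
    return 0;
}

static void irq_ts_restore(int ts_state)
{
    if (ts_state)
        cr0_ts = 1;         /* stts() */
}

int main(void)
{
    int ts = irq_ts_save();
    /* ... a PadLock-style instruction would run here ... */
    irq_ts_restore(ts);
    printf("TS restored: %d\n", cr0_ts);
    return 0;
}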
diff --git a/include/asm-x86/i8253.h b/include/asm-x86/i8253.h
deleted file mode 100644
index 15a5b530044e..000000000000
--- a/include/asm-x86/i8253.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_X86__I8253_H
2#define ASM_X86__I8253_H
3
4/* i8253A PIT registers */
5#define PIT_MODE 0x43
6#define PIT_CH0 0x40
7#define PIT_CH2 0x42
8
9extern spinlock_t i8253_lock;
10
11extern struct clock_event_device *global_clock_event;
12
13extern void setup_pit_timer(void);
14
15#define inb_pit inb_p
16#define outb_pit outb_p
17
18#endif /* ASM_X86__I8253_H */
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
deleted file mode 100644
index 23c1b3baaecd..000000000000
--- a/include/asm-x86/i8259.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef ASM_X86__I8259_H
2#define ASM_X86__I8259_H
3
4#include <linux/delay.h>
5
6extern unsigned int cached_irq_mask;
7
8#define __byte(x, y) (((unsigned char *)&(y))[x])
9#define cached_master_mask (__byte(0, cached_irq_mask))
10#define cached_slave_mask (__byte(1, cached_irq_mask))
11
12/* i8259A PIC registers */
13#define PIC_MASTER_CMD 0x20
14#define PIC_MASTER_IMR 0x21
15#define PIC_MASTER_ISR PIC_MASTER_CMD
16#define PIC_MASTER_POLL PIC_MASTER_ISR
17#define PIC_MASTER_OCW3 PIC_MASTER_ISR
18#define PIC_SLAVE_CMD 0xa0
19#define PIC_SLAVE_IMR 0xa1
20
21/* i8259A PIC related value */
22#define PIC_CASCADE_IR 2
23#define MASTER_ICW4_DEFAULT 0x01
24#define SLAVE_ICW4_DEFAULT 0x01
25#define PIC_ICW4_AEOI 2
26
27extern spinlock_t i8259A_lock;
28
29extern void init_8259A(int auto_eoi);
30extern void enable_8259A_irq(unsigned int irq);
31extern void disable_8259A_irq(unsigned int irq);
32extern unsigned int startup_8259A_irq(unsigned int irq);
33
34/* the PIC may need a careful delay on some platforms, hence specific calls */
35static inline unsigned char inb_pic(unsigned int port)
36{
37 unsigned char value = inb(port);
38
39 /*
40 * the delay for some accesses to the PIC on the motherboard or in the
41 * chipset must be at least one microsecond, so be safe here:
42 */
43 udelay(2);
44
45 return value;
46}
47
48static inline void outb_pic(unsigned char value, unsigned int port)
49{
50 outb(value, port);
51 /*
52 * the delay for some accesses to the PIC on the motherboard or in the
53 * chipset must be at least one microsecond, so be safe here:
54 */
55 udelay(2);
56}
57
58extern struct irq_chip i8259A_chip;
59
60extern void mask_8259A(void);
61extern void unmask_8259A(void);
62
63#endif /* ASM_X86__I8259_H */
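
cached_master_mask and cached_slave_mask alias bytes 0 and 1 of the combined 16-bit cached_irq_mask through the __byte() pointer cast. On little-endian x86 byte 0 maps to the master PIC and byte 1 to the slave, as this standalone demonstration shows:

#include <stdio.h>

#define __byte(x, y) (((unsigned char *)&(y))[x])

int main(void)
{
    unsigned int cached_irq_mask = 0xffff;      /* everything masked */

    __byte(0, cached_irq_mask) &= ~(1 << 2);    /* unmask master IRQ2 */
    __byte(1, cached_irq_mask) &= ~(1 << 0);    /* unmask slave IRQ8  */

    printf("mask now 0x%04x\n", cached_irq_mask & 0xffff);  /* 0xfefb */
    return 0;
}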
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
deleted file mode 100644
index f932f7ad51dd..000000000000
--- a/include/asm-x86/ia32.h
+++ /dev/null
@@ -1,170 +0,0 @@
1#ifndef ASM_X86__IA32_H
2#define ASM_X86__IA32_H
3
4
5#ifdef CONFIG_IA32_EMULATION
6
7#include <linux/compat.h>
8
9/*
10 * 32 bit structures for IA32 support.
11 */
12
13#include <asm/sigcontext32.h>
14
15/* signal.h */
16struct sigaction32 {
17 unsigned int sa_handler; /* Really a pointer, but need to deal
18 with 32 bits */
19 unsigned int sa_flags;
20 unsigned int sa_restorer; /* Another 32 bit pointer */
21 compat_sigset_t sa_mask; /* A 32 bit mask */
22};
23
24struct old_sigaction32 {
25 unsigned int sa_handler; /* Really a pointer, but need to deal
26 with 32 bits */
27 compat_old_sigset_t sa_mask; /* A 32 bit mask */
28 unsigned int sa_flags;
29 unsigned int sa_restorer; /* Another 32 bit pointer */
30};
31
32typedef struct sigaltstack_ia32 {
33 unsigned int ss_sp;
34 int ss_flags;
35 unsigned int ss_size;
36} stack_ia32_t;
37
38struct ucontext_ia32 {
39 unsigned int uc_flags;
40 unsigned int uc_link;
41 stack_ia32_t uc_stack;
42 struct sigcontext_ia32 uc_mcontext;
43 compat_sigset_t uc_sigmask; /* mask last for extensibility */
44};
45
46/* This matches struct stat64 in glibc2.2, hence the absolutely
47 * insane amounts of padding around dev_t's.
48 */
49struct stat64 {
50 unsigned long long st_dev;
51 unsigned char __pad0[4];
52
53#define STAT64_HAS_BROKEN_ST_INO 1
54 unsigned int __st_ino;
55
56 unsigned int st_mode;
57 unsigned int st_nlink;
58
59 unsigned int st_uid;
60 unsigned int st_gid;
61
62 unsigned long long st_rdev;
63 unsigned char __pad3[4];
64
65 long long st_size;
66 unsigned int st_blksize;
67
68 long long st_blocks;/* Number 512-byte blocks allocated */
69
70 unsigned st_atime;
71 unsigned st_atime_nsec;
72 unsigned st_mtime;
73 unsigned st_mtime_nsec;
74 unsigned st_ctime;
75 unsigned st_ctime_nsec;
76
77 unsigned long long st_ino;
78} __attribute__((packed));
79
80typedef struct compat_siginfo {
81 int si_signo;
82 int si_errno;
83 int si_code;
84
85 union {
86 int _pad[((128 / sizeof(int)) - 3)];
87
88 /* kill() */
89 struct {
90 unsigned int _pid; /* sender's pid */
91 unsigned int _uid; /* sender's uid */
92 } _kill;
93
94 /* POSIX.1b timers */
95 struct {
96 compat_timer_t _tid; /* timer id */
97 int _overrun; /* overrun count */
98 compat_sigval_t _sigval; /* same as below */
99 int _sys_private; /* not to be passed to user */
100 int _overrun_incr; /* amount to add to overrun */
101 } _timer;
102
103 /* POSIX.1b signals */
104 struct {
105 unsigned int _pid; /* sender's pid */
106 unsigned int _uid; /* sender's uid */
107 compat_sigval_t _sigval;
108 } _rt;
109
110 /* SIGCHLD */
111 struct {
112 unsigned int _pid; /* which child */
113 unsigned int _uid; /* sender's uid */
114 int _status; /* exit code */
115 compat_clock_t _utime;
116 compat_clock_t _stime;
117 } _sigchld;
118
119 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
120 struct {
121 unsigned int _addr; /* faulting insn/memory ref. */
122 } _sigfault;
123
124 /* SIGPOLL */
125 struct {
126 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
127 int _fd;
128 } _sigpoll;
129 } _sifields;
130} compat_siginfo_t;
131
132struct sigframe32 {
133 u32 pretcode;
134 int sig;
135 struct sigcontext_ia32 sc;
136 struct _fpstate_ia32 fpstate;
137 unsigned int extramask[_COMPAT_NSIG_WORDS-1];
138};
139
140struct rt_sigframe32 {
141 u32 pretcode;
142 int sig;
143 u32 pinfo;
144 u32 puc;
145 compat_siginfo_t info;
146 struct ucontext_ia32 uc;
147 struct _fpstate_ia32 fpstate;
148};
149
150struct ustat32 {
151 __u32 f_tfree;
152 compat_ino_t f_tinode;
153 char f_fname[6];
154 char f_fpack[6];
155};
156
157#define IA32_STACK_TOP IA32_PAGE_OFFSET
158
159#ifdef __KERNEL__
160struct linux_binprm;
161extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
162 unsigned long stack_top, int exec_stack);
163struct mm_struct;
164extern void ia32_pick_mmap_layout(struct mm_struct *mm);
165
166#endif
167
168#endif /* CONFIG_IA32_EMULATION */
169
170#endif /* ASM_X86__IA32_H */
diff --git a/include/asm-x86/ia32_unistd.h b/include/asm-x86/ia32_unistd.h
deleted file mode 100644
index dbd887d8a5a5..000000000000
--- a/include/asm-x86/ia32_unistd.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_X86__IA32_UNISTD_H
2#define ASM_X86__IA32_UNISTD_H
3
4/*
5 * This file contains the system call numbers of the ia32 port;
6 * it is for the kernel only.
7 * Only add syscalls here where some part of the kernel needs to know
8 * the number. This should be otherwise in sync with asm-x86/unistd_32.h. -AK
9 */
10
11#define __NR_ia32_restart_syscall 0
12#define __NR_ia32_exit 1
13#define __NR_ia32_read 3
14#define __NR_ia32_write 4
15#define __NR_ia32_sigreturn 119
16#define __NR_ia32_rt_sigreturn 173
17
18#endif /* ASM_X86__IA32_UNISTD_H */
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
deleted file mode 100644
index 32227f0188dd..000000000000
--- a/include/asm-x86/idle.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef ASM_X86__IDLE_H
2#define ASM_X86__IDLE_H
3
4#define IDLE_START 1
5#define IDLE_END 2
6
7struct notifier_block;
8void idle_notifier_register(struct notifier_block *n);
9void idle_notifier_unregister(struct notifier_block *n);
10
11void enter_idle(void);
12void exit_idle(void);
13
14void c1e_remove_cpu(int cpu);
15
16#endif /* ASM_X86__IDLE_H */
diff --git a/include/asm-x86/intel_arch_perfmon.h b/include/asm-x86/intel_arch_perfmon.h
deleted file mode 100644
index 07c03c6c9a16..000000000000
--- a/include/asm-x86/intel_arch_perfmon.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef ASM_X86__INTEL_ARCH_PERFMON_H
2#define ASM_X86__INTEL_ARCH_PERFMON_H
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
18#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
19 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
20
21union cpuid10_eax {
22 struct {
23 unsigned int version_id:8;
24 unsigned int num_counters:8;
25 unsigned int bit_width:8;
26 unsigned int mask_length:8;
27 } split;
28 unsigned int full;
29};
30
31#endif /* ASM_X86__INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
deleted file mode 100644
index a233f835e0b5..000000000000
--- a/include/asm-x86/io.h
+++ /dev/null
@@ -1,91 +0,0 @@
1#ifndef ASM_X86__IO_H
2#define ASM_X86__IO_H
3
4#define ARCH_HAS_IOREMAP_WC
5
6#include <linux/compiler.h>
7
8#define build_mmio_read(name, size, type, reg, barrier) \
9static inline type name(const volatile void __iomem *addr) \
10{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
11:"m" (*(volatile type __force *)addr) barrier); return ret; }
12
13#define build_mmio_write(name, size, type, reg, barrier) \
14static inline void name(type val, volatile void __iomem *addr) \
15{ asm volatile("mov" size " %0,%1": :reg (val), \
16"m" (*(volatile type __force *)addr) barrier); }
17
18build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
19build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
20build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
21
22build_mmio_read(__readb, "b", unsigned char, "=q", )
23build_mmio_read(__readw, "w", unsigned short, "=r", )
24build_mmio_read(__readl, "l", unsigned int, "=r", )
25
26build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
27build_mmio_write(writew, "w", unsigned short, "r", :"memory")
28build_mmio_write(writel, "l", unsigned int, "r", :"memory")
29
30build_mmio_write(__writeb, "b", unsigned char, "q", )
31build_mmio_write(__writew, "w", unsigned short, "r", )
32build_mmio_write(__writel, "l", unsigned int, "r", )
33
34#define readb_relaxed(a) __readb(a)
35#define readw_relaxed(a) __readw(a)
36#define readl_relaxed(a) __readl(a)
37#define __raw_readb __readb
38#define __raw_readw __readw
39#define __raw_readl __readl
40
41#define __raw_writeb __writeb
42#define __raw_writew __writew
43#define __raw_writel __writel
44
45#define mmiowb() barrier()
46
47#ifdef CONFIG_X86_64
48build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
49build_mmio_read(__readq, "q", unsigned long, "=r", )
50build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
51build_mmio_write(__writeq, "q", unsigned long, "r", )
52
53#define readq_relaxed(a) __readq(a)
54#define __raw_readq __readq
55#define __raw_writeq writeq
56
57/* Let people know we have them */
58#define readq readq
59#define writeq writeq
60#endif
61
62extern int iommu_bio_merge;
63
64#ifdef CONFIG_X86_32
65# include "io_32.h"
66#else
67# include "io_64.h"
68#endif
69
70extern void *xlate_dev_mem_ptr(unsigned long phys);
71extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
72
73extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
74 unsigned long prot_val);
75extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
76
77/*
78 * early_ioremap() and early_iounmap() are for temporary early boot-time
79 * mappings, before the real ioremap() is functional.
80 * A boot-time mapping is currently limited to at most 16 pages.
81 */
82extern void early_ioremap_init(void);
83extern void early_ioremap_clear(void);
84extern void early_ioremap_reset(void);
85extern void *early_ioremap(unsigned long offset, unsigned long size);
86extern void *early_memremap(unsigned long offset, unsigned long size);
87extern void early_iounmap(void *addr, unsigned long size);
88extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
89
90
91#endif /* ASM_X86__IO_H */
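
The build_mmio_read()/build_mmio_write() macros above stamp out all the readX/writeX accessors. For reference, this is roughly what one instantiation expands to (a hand expansion, not part of the patch):

/* Hand expansion of build_mmio_read(readl, "l", unsigned int, "=r", :"memory") */
static inline unsigned int readl(const volatile void __iomem *addr)
{
	unsigned int ret;

	asm volatile("movl %1,%0"
		     : "=r" (ret)
		     : "m" (*(volatile unsigned int __force *)addr)
		     : "memory");	/* the barrier argument */
	return ret;
}
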
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
deleted file mode 100644
index 4f7d878bda18..000000000000
--- a/include/asm-x86/io_32.h
+++ /dev/null
@@ -1,284 +0,0 @@
1#ifndef ASM_X86__IO_32_H
2#define ASM_X86__IO_32_H
3
4#include <linux/string.h>
5#include <linux/compiler.h>
6
7/*
8 * This file contains the definitions for the x86 IO instructions
9 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
10 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
11 * versions of the single-IO instructions (inb_p/inw_p/..).
12 *
13 * This file is not meant to be obfuscating: it's just complicated
14 * to (a) handle it all in a way that lets gcc optimize it
15 * as well as possible and (b) avoid writing the same thing
16 * over and over again with slight variations and possibly making a
17 * mistake somewhere.
18 */
19
20/*
21 * Thanks to James van Artsdalen for a better timing-fix than
22 * the two short jumps: using outb's to a nonexistent port seems
23 * to guarantee better timings even on fast machines.
24 *
25 * On the other hand, I'd like to be sure of a non-existent port:
26 * I feel a bit unsafe about using 0x80 (should be safe, though)
27 *
28 * Linus
29 */
30
31/*
32 * A bit simplified and optimized by Jan Hubicka
33 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
34 *
35 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
36 * isa_read[wl] and isa_write[wl] fixed
37 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
38 */
39
40#define IO_SPACE_LIMIT 0xffff
41
42#define XQUAD_PORTIO_BASE 0xfe400000
43#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
44
45#ifdef __KERNEL__
46
47#include <asm-generic/iomap.h>
48
49#include <linux/vmalloc.h>
50
51/*
52 * Convert a virtual cached pointer to an uncached pointer
53 */
54#define xlate_dev_kmem_ptr(p) p
55
56/**
57 * virt_to_phys - map virtual addresses to physical
58 * @address: address to remap
59 *
60 * The returned physical address is the physical (CPU) mapping for
61 * the memory address given. It is only valid to use this function on
62 * addresses directly mapped or allocated via kmalloc.
63 *
64 * This function does not give bus mappings for DMA transfers. In
65 * almost all conceivable cases a device driver should not be using
66 * this function.
67 */
68
69static inline unsigned long virt_to_phys(volatile void *address)
70{
71 return __pa(address);
72}
73
74/**
75 * phys_to_virt - map physical address to virtual
76 * @address: address to remap
77 *
78 * The returned virtual address is a current CPU mapping for
79 * the memory address given. It is only valid to use this function on
80 * addresses that have a kernel mapping.
81 *
82 * This function does not handle bus mappings for DMA transfers. In
83 * almost all conceivable cases a device driver should not be using
84 * this function.
85 */
86
87static inline void *phys_to_virt(unsigned long address)
88{
89 return __va(address);
90}
91
92/*
93 * Change "struct page" to physical address.
94 */
95#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
96
97/**
98 * ioremap - map bus memory into CPU space
99 * @offset: bus address of the memory
100 * @size: size of the resource to map
101 *
102 * ioremap performs a platform specific sequence of operations to
103 * make bus memory CPU accessible via the readb/readw/readl/writeb/
104 * writew/writel functions and the other mmio helpers. The returned
105 * address is not guaranteed to be usable directly as a virtual
106 * address.
107 *
108 * If the area you are trying to map is a PCI BAR you should have a
109 * look at pci_iomap().
110 */
111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
113extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
114 unsigned long prot_val);
115
116/*
117 * The default ioremap() behavior is non-cached:
118 */
119static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
120{
121 return ioremap_nocache(offset, size);
122}
123
124extern void iounmap(volatile void __iomem *addr);
125
126/*
127 * ISA I/O bus memory addresses are 1:1 with the physical address.
128 */
129#define isa_virt_to_bus virt_to_phys
130#define isa_page_to_bus page_to_phys
131#define isa_bus_to_virt phys_to_virt
132
133/*
134 * However, PCI ones are not necessarily 1:1, and therefore these interfaces
135 * are forbidden in portable PCI drivers.
136 *
137 * Allow them on x86 for legacy drivers, though.
138 */
139#define virt_to_bus virt_to_phys
140#define bus_to_virt phys_to_virt
141
142static inline void
143memset_io(volatile void __iomem *addr, unsigned char val, int count)
144{
145 memset((void __force *)addr, val, count);
146}
147
148static inline void
149memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
150{
151 __memcpy(dst, (const void __force *)src, count);
152}
153
154static inline void
155memcpy_toio(volatile void __iomem *dst, const void *src, int count)
156{
157 __memcpy((void __force *)dst, src, count);
158}
159
160/*
161 * ISA space is 'always mapped' on a typical x86 system, no need to
162 * explicitly ioremap() it. The fact that the ISA IO space is mapped
163 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
164 * are physical addresses. The following constant pointer can be
165 * used as the IO-area pointer (it can be iounmapped as well, so the
166 * analogy with PCI is quite close):
167 */
168#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
169
170/*
171 * Cache management
172 *
173 * This is needed for two cases:
174 * 1. Out of order aware processors
175 * 2. Accidentally out of order processors (PPro errata #51)
176 */
177
178#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
179
180static inline void flush_write_buffers(void)
181{
182 asm volatile("lock; addl $0,0(%%esp)": : :"memory");
183}
184
185#else
186
187#define flush_write_buffers() do { } while (0)
188
189#endif
190
191#endif /* __KERNEL__ */
192
193extern void native_io_delay(void);
194
195extern int io_delay_type;
196extern void io_delay_init(void);
197
198#if defined(CONFIG_PARAVIRT)
199#include <asm/paravirt.h>
200#else
201
202static inline void slow_down_io(void)
203{
204 native_io_delay();
205#ifdef REALLY_SLOW_IO
206 native_io_delay();
207 native_io_delay();
208 native_io_delay();
209#endif
210}
211
212#endif
213
214#define __BUILDIO(bwl, bw, type) \
215static inline void out##bwl(unsigned type value, int port) \
216{ \
217 out##bwl##_local(value, port); \
218} \
219 \
220static inline unsigned type in##bwl(int port) \
221{ \
222 return in##bwl##_local(port); \
223}
224
225#define BUILDIO(bwl, bw, type) \
226static inline void out##bwl##_local(unsigned type value, int port) \
227{ \
228 asm volatile("out" #bwl " %" #bw "0, %w1" \
229 : : "a"(value), "Nd"(port)); \
230} \
231 \
232static inline unsigned type in##bwl##_local(int port) \
233{ \
234 unsigned type value; \
235 asm volatile("in" #bwl " %w1, %" #bw "0" \
236 : "=a"(value) : "Nd"(port)); \
237 return value; \
238} \
239 \
240static inline void out##bwl##_local_p(unsigned type value, int port) \
241{ \
242 out##bwl##_local(value, port); \
243 slow_down_io(); \
244} \
245 \
246static inline unsigned type in##bwl##_local_p(int port) \
247{ \
248 unsigned type value = in##bwl##_local(port); \
249 slow_down_io(); \
250 return value; \
251} \
252 \
253__BUILDIO(bwl, bw, type) \
254 \
255static inline void out##bwl##_p(unsigned type value, int port) \
256{ \
257 out##bwl(value, port); \
258 slow_down_io(); \
259} \
260 \
261static inline unsigned type in##bwl##_p(int port) \
262{ \
263 unsigned type value = in##bwl(port); \
264 slow_down_io(); \
265 return value; \
266} \
267 \
268static inline void outs##bwl(int port, const void *addr, unsigned long count) \
269{ \
270 asm volatile("rep; outs" #bwl \
271 : "+S"(addr), "+c"(count) : "d"(port)); \
272} \
273 \
274static inline void ins##bwl(int port, void *addr, unsigned long count) \
275{ \
276 asm volatile("rep; ins" #bwl \
277 : "+D"(addr), "+c"(count) : "d"(port)); \
278}
279
280BUILDIO(b, b, char)
281BUILDIO(w, w, short)
282BUILDIO(l, , int)
283
284#endif /* ASM_X86__IO_32_H */
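
BUILDIO() likewise stamps out the port accessors; BUILDIO(b, b, char) yields outb()/inb() built on the _local pair. A hand expansion of that inner pair (again for reference, not part of the patch):

/* Hand expansion of the _local pair from BUILDIO(b, b, char) */
static inline void outb_local(unsigned char value, int port)
{
	asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
}

static inline unsigned char inb_local(int port)
{
	unsigned char value;

	asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
	return value;
}
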
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
deleted file mode 100644
index ee6e086b7dfe..000000000000
--- a/include/asm-x86/io_64.h
+++ /dev/null
@@ -1,244 +0,0 @@
1#ifndef ASM_X86__IO_64_H
2#define ASM_X86__IO_64_H
3
4
5/*
6 * This file contains the definitions for the x86 IO instructions
7 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
8 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
9 * versions of the single-IO instructions (inb_p/inw_p/..).
10 *
11 * This file is not meant to be obfuscating: it's just complicated
12 * to (a) handle it all in a way that lets gcc optimize it
13 * as well as possible and (b) avoid writing the same thing
14 * over and over again with slight variations and possibly making a
15 * mistake somewhere.
16 */
17
18/*
19 * Thanks to James van Artsdalen for a better timing-fix than
20 * the two short jumps: using outb's to a nonexistent port seems
21 * to guarantee better timings even on fast machines.
22 *
23 * On the other hand, I'd like to be sure of a non-existent port:
24 * I feel a bit unsafe about using 0x80 (should be safe, though)
25 *
26 * Linus
27 */
28
29/*
30 * A bit simplified and optimized by Jan Hubicka
31 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
32 *
33 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
34 * isa_read[wl] and isa_write[wl] fixed
35 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
36 */
37
38extern void native_io_delay(void);
39
40extern int io_delay_type;
41extern void io_delay_init(void);
42
43#if defined(CONFIG_PARAVIRT)
44#include <asm/paravirt.h>
45#else
46
47static inline void slow_down_io(void)
48{
49 native_io_delay();
50#ifdef REALLY_SLOW_IO
51 native_io_delay();
52 native_io_delay();
53 native_io_delay();
54#endif
55}
56#endif
57
58/*
59 * Talk about misusing macros..
60 */
61#define __OUT1(s, x) \
62static inline void out##s(unsigned x value, unsigned short port) {
63
64#define __OUT2(s, s1, s2) \
65asm volatile ("out" #s " %" s1 "0,%" s2 "1"
66
67#ifndef REALLY_SLOW_IO
68#define REALLY_SLOW_IO
69#define UNSET_REALLY_SLOW_IO
70#endif
71
72#define __OUT(s, s1, x) \
73 __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
74 } \
75 __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
76 slow_down_io(); \
77}
78
79#define __IN1(s) \
80static inline RETURN_TYPE in##s(unsigned short port) \
81{ \
82 RETURN_TYPE _v;
83
84#define __IN2(s, s1, s2) \
85 asm volatile ("in" #s " %" s2 "1,%" s1 "0"
86
87#define __IN(s, s1, i...) \
88 __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
89 return _v; \
90 } \
91 __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
92 slow_down_io(); \
93 return _v; }
94
95#ifdef UNSET_REALLY_SLOW_IO
96#undef REALLY_SLOW_IO
97#endif
98
99#define __INS(s) \
100static inline void ins##s(unsigned short port, void *addr, \
101 unsigned long count) \
102{ \
103 asm volatile ("rep ; ins" #s \
104 : "=D" (addr), "=c" (count) \
105 : "d" (port), "0" (addr), "1" (count)); \
106}
107
108#define __OUTS(s) \
109static inline void outs##s(unsigned short port, const void *addr, \
110 unsigned long count) \
111{ \
112 asm volatile ("rep ; outs" #s \
113 : "=S" (addr), "=c" (count) \
114 : "d" (port), "0" (addr), "1" (count)); \
115}
116
117#define RETURN_TYPE unsigned char
118__IN(b, "")
119#undef RETURN_TYPE
120#define RETURN_TYPE unsigned short
121__IN(w, "")
122#undef RETURN_TYPE
123#define RETURN_TYPE unsigned int
124__IN(l, "")
125#undef RETURN_TYPE
126
127__OUT(b, "b", char)
128__OUT(w, "w", short)
129__OUT(l, , int)
130
131__INS(b)
132__INS(w)
133__INS(l)
134
135__OUTS(b)
136__OUTS(w)
137__OUTS(l)
138
139#define IO_SPACE_LIMIT 0xffff
140
141#if defined(__KERNEL__) && defined(__x86_64__)
142
143#include <linux/vmalloc.h>
144
145#ifndef __i386__
146/*
147 * Change virtual addresses to physical addresses and vice versa.
148 * These are pretty trivial.
149 */
150static inline unsigned long virt_to_phys(volatile void *address)
151{
152 return __pa(address);
153}
154
155static inline void *phys_to_virt(unsigned long address)
156{
157 return __va(address);
158}
159#endif
160
161/*
162 * Change "struct page" to physical address.
163 */
164#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
165
166#include <asm-generic/iomap.h>
167
168/*
169 * This one maps high-address device memory and turns off caching for that area;
170 * it's useful if some control registers are in such an area and write combining
171 * or read caching is not desirable:
172 */
173extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
174extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
175extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
176 unsigned long prot_val);
177
178/*
179 * The default ioremap() behavior is non-cached:
180 */
181static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
182{
183 return ioremap_nocache(offset, size);
184}
185
186extern void iounmap(volatile void __iomem *addr);
187
188extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
189
190/*
191 * ISA I/O bus memory addresses are 1:1 with the physical address.
192 */
193#define isa_virt_to_bus virt_to_phys
194#define isa_page_to_bus page_to_phys
195#define isa_bus_to_virt phys_to_virt
196
197/*
198 * However, PCI ones are not necessarily 1:1, and therefore these interfaces
199 * are forbidden in portable PCI drivers.
200 *
201 * Allow them on x86 for legacy drivers, though.
202 */
203#define virt_to_bus virt_to_phys
204#define bus_to_virt phys_to_virt
205
206void __memcpy_fromio(void *, unsigned long, unsigned);
207void __memcpy_toio(unsigned long, const void *, unsigned);
208
209static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
210 unsigned len)
211{
212 __memcpy_fromio(to, (unsigned long)from, len);
213}
214
215static inline void memcpy_toio(volatile void __iomem *to, const void *from,
216 unsigned len)
217{
218 __memcpy_toio((unsigned long)to, from, len);
219}
220
221void memset_io(volatile void __iomem *a, int b, size_t c);
222
223/*
224 * ISA space is 'always mapped' on a typical x86 system, no need to
225 * explicitly ioremap() it. The fact that the ISA IO space is mapped
226 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
227 * are physical addresses. The following constant pointer can be
228 * used as the IO-area pointer (it can be iounmapped as well, so the
229 * analogy with PCI is quite close):
230 */
231#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
232
233#define flush_write_buffers()
234
235#define BIO_VMERGE_BOUNDARY iommu_bio_merge
236
237/*
238 * Convert a virtual cached pointer to an uncached pointer
239 */
240#define xlate_dev_kmem_ptr(p) p
241
242#endif /* __KERNEL__ */
243
244#endif /* ASM_X86__IO_64_H */
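
The __OUT/__IN macro chains above are harder to read than the 32-bit BUILDIO; __OUT(b, "b", char) ultimately produces outb() and outb_p(). A hand expansion of the non-pausing variant:

/* Hand expansion of __OUT(b, "b", char), non-pausing variant */
static inline void outb(unsigned char value, unsigned short port)
{
	asm volatile("outb %b0,%w1" : : "a" (value), "Nd" (port));
}
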
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
deleted file mode 100644
index d35cbd7aa587..000000000000
--- a/include/asm-x86/io_apic.h
+++ /dev/null
@@ -1,204 +0,0 @@
1#ifndef ASM_X86__IO_APIC_H
2#define ASM_X86__IO_APIC_H
3
4#include <linux/types.h>
5#include <asm/mpspec.h>
6#include <asm/apicdef.h>
7#include <asm/irq_vectors.h>
8
9/*
10 * Intel IO-APIC support for SMP and UP systems.
11 *
12 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
13 */
14
15/* I/O Unit Redirection Table */
16#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
17#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
18#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
19#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
20#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
21#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
22#define IO_APIC_REDIR_MASKED (1 << 16)
23
24/*
25 * The structure of the IO-APIC:
26 */
27union IO_APIC_reg_00 {
28 u32 raw;
29 struct {
30 u32 __reserved_2 : 14,
31 LTS : 1,
32 delivery_type : 1,
33 __reserved_1 : 8,
34 ID : 8;
35 } __attribute__ ((packed)) bits;
36};
37
38union IO_APIC_reg_01 {
39 u32 raw;
40 struct {
41 u32 version : 8,
42 __reserved_2 : 7,
43 PRQ : 1,
44 entries : 8,
45 __reserved_1 : 8;
46 } __attribute__ ((packed)) bits;
47};
48
49union IO_APIC_reg_02 {
50 u32 raw;
51 struct {
52 u32 __reserved_2 : 24,
53 arbitration : 4,
54 __reserved_1 : 4;
55 } __attribute__ ((packed)) bits;
56};
57
58union IO_APIC_reg_03 {
59 u32 raw;
60 struct {
61 u32 boot_DT : 1,
62 __reserved_1 : 31;
63 } __attribute__ ((packed)) bits;
64};
65
66enum ioapic_irq_destination_types {
67 dest_Fixed = 0,
68 dest_LowestPrio = 1,
69 dest_SMI = 2,
70 dest__reserved_1 = 3,
71 dest_NMI = 4,
72 dest_INIT = 5,
73 dest__reserved_2 = 6,
74 dest_ExtINT = 7
75};
76
77struct IO_APIC_route_entry {
78 __u32 vector : 8,
79 delivery_mode : 3, /* 000: FIXED
80 * 001: lowest prio
81 * 111: ExtINT
82 */
83 dest_mode : 1, /* 0: physical, 1: logical */
84 delivery_status : 1,
85 polarity : 1,
86 irr : 1,
87 trigger : 1, /* 0: edge, 1: level */
88 mask : 1, /* 0: enabled, 1: disabled */
89 __reserved_2 : 15;
90
91 __u32 __reserved_3 : 24,
92 dest : 8;
93} __attribute__ ((packed));
94
95struct IR_IO_APIC_route_entry {
96 __u64 vector : 8,
97 zero : 3,
98 index2 : 1,
99 delivery_status : 1,
100 polarity : 1,
101 irr : 1,
102 trigger : 1,
103 mask : 1,
104 reserved : 31,
105 format : 1,
106 index : 15;
107} __attribute__ ((packed));
108
109#ifdef CONFIG_X86_IO_APIC
110
111/*
112 * # of IO-APICs and # of IRQ routing registers
113 */
114extern int nr_ioapics;
115extern int nr_ioapic_registers[MAX_IO_APICS];
116
117/*
118 * MP-BIOS irq configuration table structures:
119 */
120
121#define MP_MAX_IOAPIC_PIN 127
122
123struct mp_config_ioapic {
124 unsigned long mp_apicaddr;
125 unsigned int mp_apicid;
126 unsigned char mp_type;
127 unsigned char mp_apicver;
128 unsigned char mp_flags;
129};
130
131struct mp_config_intsrc {
132 unsigned int mp_dstapic;
133 unsigned char mp_type;
134 unsigned char mp_irqtype;
135 unsigned short mp_irqflag;
136 unsigned char mp_srcbus;
137 unsigned char mp_srcbusirq;
138 unsigned char mp_dstirq;
139};
140
141/* I/O APIC entries */
142extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
143
144/* # of MP IRQ source entries */
145extern int mp_irq_entries;
146
147/* MP IRQ source entries */
148extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
149
150/* non-0 if default (table-less) MP configuration */
151extern int mpc_default_type;
152
153/* Older SiS APIC requires we rewrite the index register */
154extern int sis_apic_bug;
155
156/* 1 if "noapic" boot option passed */
157extern int skip_ioapic_setup;
158
159/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
160extern int timer_through_8259;
161
162static inline void disable_ioapic_setup(void)
163{
164 skip_ioapic_setup = 1;
165}
166
167/*
168 * If we use the IO-APIC for IRQ routing, disable automatic
169 * assignment of PCI IRQ's.
170 */
171#define io_apic_assign_pci_irqs \
172 (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
173
174#ifdef CONFIG_ACPI
175extern int io_apic_get_unique_id(int ioapic, int apic_id);
176extern int io_apic_get_version(int ioapic);
177extern int io_apic_get_redir_entries(int ioapic);
178extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
179 int edge_level, int active_high_low);
180#endif /* CONFIG_ACPI */
181
182extern int (*ioapic_renumber_irq)(int ioapic, int irq);
183extern void ioapic_init_mappings(void);
184
185#ifdef CONFIG_X86_64
186extern int save_mask_IO_APIC_setup(void);
187extern void restore_IO_APIC_setup(void);
188extern void reinit_intr_remapped_IO_APIC(int);
189#endif
190
191extern int probe_nr_irqs(void);
192
193#else /* !CONFIG_X86_IO_APIC */
194#define io_apic_assign_pci_irqs 0
195static const int timer_through_8259 = 0;
196static inline void ioapic_init_mappings(void) { }
197
198static inline int probe_nr_irqs(void)
199{
200 return NR_IRQS;
201}
202#endif
203
204#endif /* ASM_X86__IO_APIC_H */
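
As a hedged sketch of filling the IO_APIC_route_entry bitfields above (the helper name and values are illustrative, not from this tree):

/* Hedged sketch: build a masked, level-triggered entry for vector 0x31. */
#include <linux/string.h>

static struct IO_APIC_route_entry make_example_entry(void)
{
	struct IO_APIC_route_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.vector = 0x31;			/* IDT vector to raise */
	entry.delivery_mode = dest_Fixed;	/* 000: fixed delivery */
	entry.dest_mode = 0;			/* physical destination */
	entry.trigger = 1;			/* level-triggered */
	entry.mask = 1;				/* keep masked until ready */
	entry.dest = 0;				/* target APIC ID 0 */
	return entry;
}
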
diff --git a/include/asm-x86/ioctl.h b/include/asm-x86/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/include/asm-x86/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
deleted file mode 100644
index 06752a649044..000000000000
--- a/include/asm-x86/ioctls.h
+++ /dev/null
@@ -1,94 +0,0 @@
1#ifndef ASM_X86__IOCTLS_H
2#define ASM_X86__IOCTLS_H
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T', 0x2A, struct termios2)
51#define TCSETS2 _IOW('T', 0x2B, struct termios2)
52#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
53#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
54#define TIOCGRS485 0x542E
55#define TIOCSRS485 0x542F
56#define TIOCGPTN _IOR('T', 0x30, unsigned int)
57 /* Get Pty Number (of pty-mux device) */
58#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
59#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
60#define TCSETX 0x5433
61#define TCSETXF 0x5434
62#define TCSETXW 0x5435
63
64#define FIONCLEX 0x5450
65#define FIOCLEX 0x5451
66#define FIOASYNC 0x5452
67#define TIOCSERCONFIG 0x5453
68#define TIOCSERGWILD 0x5454
69#define TIOCSERSWILD 0x5455
70#define TIOCGLCKTRMIOS 0x5456
71#define TIOCSLCKTRMIOS 0x5457
72#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
73#define TIOCSERGETLSR 0x5459 /* Get line status register */
74#define TIOCSERGETMULTI 0x545A /* Get multiport config */
75#define TIOCSERSETMULTI 0x545B /* Set multiport config */
76
77#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
78#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
79#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
80#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
81#define FIOQSIZE 0x5460
82
83/* Used for packet mode */
84#define TIOCPKT_DATA 0
85#define TIOCPKT_FLUSHREAD 1
86#define TIOCPKT_FLUSHWRITE 2
87#define TIOCPKT_STOP 4
88#define TIOCPKT_START 8
89#define TIOCPKT_NOSTOP 16
90#define TIOCPKT_DOSTOP 32
91
92#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
93
94#endif /* ASM_X86__IOCTLS_H */
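
These numbers are userspace-visible; for instance, TIOCGWINSZ is the standard way to query the terminal size from a normal program (a self-contained userspace example, not kernel code):

/* Userspace sketch: query the terminal size with TIOCGWINSZ. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
		printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
	return 0;
}
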
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
deleted file mode 100644
index 2daaffcda52f..000000000000
--- a/include/asm-x86/iommu.h
+++ /dev/null
@@ -1,50 +0,0 @@
1#ifndef ASM_X86__IOMMU_H
2#define ASM_X86__IOMMU_H
3
4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void);
6extern struct dma_mapping_ops nommu_dma_ops;
7extern int force_iommu, no_iommu;
8extern int iommu_detected;
9extern int dmar_disabled;
10extern int forbid_dac;
11
12extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
13
14/* 10 seconds */
15#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
16
17#ifdef CONFIG_GART_IOMMU
18extern int gart_iommu_aperture;
19extern int gart_iommu_aperture_allowed;
20extern int gart_iommu_aperture_disabled;
21
22extern void early_gart_iommu_check(void);
23extern void gart_iommu_init(void);
24extern void gart_iommu_shutdown(void);
25extern void __init gart_parse_options(char *);
26extern void gart_iommu_hole_init(void);
27
28#else
29#define gart_iommu_aperture 0
30#define gart_iommu_aperture_allowed 0
31#define gart_iommu_aperture_disabled 1
32
33static inline void early_gart_iommu_check(void)
34{
35}
36static inline void gart_iommu_init(void)
37{
38}
39static inline void gart_iommu_shutdown(void)
40{
41}
42static inline void gart_parse_options(char *options)
43{
44}
45static inline void gart_iommu_hole_init(void)
46{
47}
48#endif
49
50#endif /* ASM_X86__IOMMU_H */
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
deleted file mode 100644
index 910304fbdc8f..000000000000
--- a/include/asm-x86/ipcbuf.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef ASM_X86__IPCBUF_H
2#define ASM_X86__IPCBUF_H
3
4/*
5 * The ipc64_perm structure for the x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm {
15 __kernel_key_t key;
16 __kernel_uid32_t uid;
17 __kernel_gid32_t gid;
18 __kernel_uid32_t cuid;
19 __kernel_gid32_t cgid;
20 __kernel_mode_t mode;
21 unsigned short __pad1;
22 unsigned short seq;
23 unsigned short __pad2;
24 unsigned long __unused1;
25 unsigned long __unused2;
26};
27
28#endif /* ASM_X86__IPCBUF_H */
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
deleted file mode 100644
index 30a692cfaff8..000000000000
--- a/include/asm-x86/ipi.h
+++ /dev/null
@@ -1,138 +0,0 @@
1#ifndef ASM_X86__IPI_H
2#define ASM_X86__IPI_H
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC InterProcessor Interrupt code.
9 *
10 * Moved to include file by James Cleverdon from
11 * arch/x86-64/kernel/smp.c
12 *
13 * Copyrights from kernel/smp.c:
14 *
15 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
16 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
17 * (c) 2002,2003 Andi Kleen, SuSE Labs.
18 * Subject to the GNU Public License, v.2
19 */
20
21#include <asm/hw_irq.h>
22#include <asm/apic.h>
23#include <asm/smp.h>
24
25/*
26 * The following functions deal with sending IPIs between CPUs.
27 *
28 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
29 */
30
31static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
32 unsigned int dest)
33{
34 unsigned int icr = shortcut | dest;
35
36 switch (vector) {
37 default:
38 icr |= APIC_DM_FIXED | vector;
39 break;
40 case NMI_VECTOR:
41 icr |= APIC_DM_NMI;
42 break;
43 }
44 return icr;
45}
46
47static inline int __prepare_ICR2(unsigned int mask)
48{
49 return SET_APIC_DEST_FIELD(mask);
50}
51
52static inline void __xapic_wait_icr_idle(void)
53{
54 while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
55 cpu_relax();
56}
57
58static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
59 unsigned int dest)
60{
61 /*
62 * Subtle. In the case of the 'never do double writes' workaround
63 * we have to lock out interrupts to be safe. As we don't care
64 * about the value read, we use an atomic rmw access to avoid costly
65 * cli/sti. Otherwise we use an even cheaper single atomic write
66 * to the APIC.
67 */
68 unsigned int cfg;
69
70 /*
71 * Wait for idle.
72 */
73 __xapic_wait_icr_idle();
74
75 /*
76 * No need to touch the target chip field
77 */
78 cfg = __prepare_ICR(shortcut, vector, dest);
79
80 /*
81 * Send the IPI. The write to APIC_ICR fires this off.
82 */
83 native_apic_mem_write(APIC_ICR, cfg);
84}
85
86/*
87 * This is used to send an IPI with no shorthand notation (the destination is
88 * specified in bits 56 to 63 of the ICR).
89 */
90static inline void __send_IPI_dest_field(unsigned int mask, int vector,
91 unsigned int dest)
92{
93 unsigned long cfg;
94
95 /*
96 * Wait for idle.
97 */
98 if (unlikely(vector == NMI_VECTOR))
99 safe_apic_wait_icr_idle();
100 else
101 __xapic_wait_icr_idle();
102
103 /*
104 * prepare target chip field
105 */
106 cfg = __prepare_ICR2(mask);
107 native_apic_mem_write(APIC_ICR2, cfg);
108
109 /*
110 * program the ICR
111 */
112 cfg = __prepare_ICR(0, vector, dest);
113
114 /*
115 * Send the IPI. The write to APIC_ICR fires this off.
116 */
117 native_apic_mem_write(APIC_ICR, cfg);
118}
119
120static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
121{
122 unsigned long flags;
123 unsigned long query_cpu;
124
125 /*
126 * Hack. The clustered APIC addressing mode doesn't allow us to send
127 * to an arbitrary mask, so I do a unicast to each CPU instead.
128 * - mbligh
129 */
130 local_irq_save(flags);
131 for_each_cpu_mask_nr(query_cpu, mask) {
132 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
133 vector, APIC_DEST_PHYSICAL);
134 }
135 local_irq_restore(flags);
136}
137
138#endif /* ASM_X86__IPI_H */
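
send_IPI_mask_sequence() above walks the mask and unicasts; sending to a single CPU reduces to one iteration of that loop. A hedged sketch (the helper name is hypothetical):

/* Hedged sketch: unicast a fixed-delivery IPI to one CPU, mirroring
 * one iteration of send_IPI_mask_sequence() above. */
static inline void send_IPI_one(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
			      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}
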
diff --git a/include/asm-x86/irq.h b/include/asm-x86/irq.h
deleted file mode 100644
index 1e5f2909c1db..000000000000
--- a/include/asm-x86/irq.h
+++ /dev/null
@@ -1,50 +0,0 @@
1#ifndef ASM_X86__IRQ_H
2#define ASM_X86__IRQ_H
3/*
4 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
5 *
6 * IRQ/IPI changes taken from work by Thomas Radke
7 * <tomsoft@informatik.tu-chemnitz.de>
8 */
9
10#include <asm/apicdef.h>
11#include <asm/irq_vectors.h>
12
13static inline int irq_canonicalize(int irq)
14{
15 return ((irq == 2) ? 9 : irq);
16}
17
18#ifdef CONFIG_X86_LOCAL_APIC
19# define ARCH_HAS_NMI_WATCHDOG
20#endif
21
22#ifdef CONFIG_4KSTACKS
23 extern void irq_ctx_init(int cpu);
24 extern void irq_ctx_exit(int cpu);
25# define __ARCH_HAS_DO_SOFTIRQ
26#else
27# define irq_ctx_init(cpu) do { } while (0)
28# define irq_ctx_exit(cpu) do { } while (0)
29# ifdef CONFIG_X86_64
30# define __ARCH_HAS_DO_SOFTIRQ
31# endif
32#endif
33
34#ifdef CONFIG_IRQBALANCE
35extern int irqbalance_disable(char *str);
36#endif
37
38#ifdef CONFIG_HOTPLUG_CPU
39#include <linux/cpumask.h>
40extern void fixup_irqs(cpumask_t map);
41#endif
42
43extern unsigned int do_IRQ(struct pt_regs *regs);
44extern void init_IRQ(void);
45extern void native_init_IRQ(void);
46
47/* Interrupt vector management */
48extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
49
50#endif /* ASM_X86__IRQ_H */
diff --git a/include/asm-x86/irq_regs.h b/include/asm-x86/irq_regs.h
deleted file mode 100644
index 89c898ab298b..000000000000
--- a/include/asm-x86/irq_regs.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "irq_regs_32.h"
3#else
4# include "irq_regs_64.h"
5#endif
diff --git a/include/asm-x86/irq_regs_32.h b/include/asm-x86/irq_regs_32.h
deleted file mode 100644
index 316a3b258871..000000000000
--- a/include/asm-x86/irq_regs_32.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Per-cpu current frame pointer - the location of the last exception frame on
3 * the stack, stored in the per-cpu area.
4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef ASM_X86__IRQ_REGS_32_H
8#define ASM_X86__IRQ_REGS_32_H
9
10#include <asm/percpu.h>
11
12DECLARE_PER_CPU(struct pt_regs *, irq_regs);
13
14static inline struct pt_regs *get_irq_regs(void)
15{
16 return x86_read_percpu(irq_regs);
17}
18
19static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
20{
21 struct pt_regs *old_regs;
22
23 old_regs = get_irq_regs();
24 x86_write_percpu(irq_regs, new_regs);
25
26 return old_regs;
27}
28
29#endif /* ASM_X86__IRQ_REGS_32_H */
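
The usual calling pattern brackets an interrupt with set_irq_regs(); a sketch mirroring what do_IRQ() does with these helpers (simplified, dispatch elided):

/* Hedged sketch of the save/restore pattern around an interrupt entry. */
unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* ... dispatch; nested code may call get_irq_regs() ... */

	set_irq_regs(old_regs);	/* restore for the interrupted context */
	return 1;
}
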
diff --git a/include/asm-x86/irq_regs_64.h b/include/asm-x86/irq_regs_64.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/include/asm-x86/irq_regs_64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/include/asm-x86/irq_remapping.h b/include/asm-x86/irq_remapping.h
deleted file mode 100644
index 78242c6ffa58..000000000000
--- a/include/asm-x86/irq_remapping.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _ASM_IRQ_REMAPPING_H
2#define _ASM_IRQ_REMAPPING_H
3
4extern int x2apic;
5
6#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
7
8#endif
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
deleted file mode 100644
index a8d065d85f57..000000000000
--- a/include/asm-x86/irq_vectors.h
+++ /dev/null
@@ -1,164 +0,0 @@
1#ifndef ASM_X86__IRQ_VECTORS_H
2#define ASM_X86__IRQ_VECTORS_H
3
4#include <linux/threads.h>
5
6#define NMI_VECTOR 0x02
7
8/*
9 * IDT vectors usable for external interrupt sources start
10 * at 0x20:
11 */
12#define FIRST_EXTERNAL_VECTOR 0x20
13
14#ifdef CONFIG_X86_32
15# define SYSCALL_VECTOR 0x80
16#else
17# define IA32_SYSCALL_VECTOR 0x80
18#endif
19
20/*
21 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
22 * cleanup after irq migration.
23 */
24#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
25
26/*
27 * Vectors 0x30-0x3f are used for ISA interrupts.
28 */
29#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
30#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
31#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
32#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
33#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
34#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
35#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
36#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
37#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
38#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
39#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
40#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
41#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
42#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
43#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
44#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
45
46/*
47 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
48 *
49 * Some of the following vectors are 'rare'; they are merged
50 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
51 * TLB, reschedule and local APIC vectors are performance-critical.
52 *
53 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
54 */
55#ifdef CONFIG_X86_32
56
57# define SPURIOUS_APIC_VECTOR 0xff
58# define ERROR_APIC_VECTOR 0xfe
59# define INVALIDATE_TLB_VECTOR 0xfd
60# define RESCHEDULE_VECTOR 0xfc
61# define CALL_FUNCTION_VECTOR 0xfb
62# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
63# define THERMAL_APIC_VECTOR 0xf0
64
65#else
66
67#define SPURIOUS_APIC_VECTOR 0xff
68#define ERROR_APIC_VECTOR 0xfe
69#define RESCHEDULE_VECTOR 0xfd
70#define CALL_FUNCTION_VECTOR 0xfc
71#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
72#define THERMAL_APIC_VECTOR 0xfa
73#define THRESHOLD_APIC_VECTOR 0xf9
74#define UV_BAU_MESSAGE 0xf8
75#define INVALIDATE_TLB_VECTOR_END 0xf7
76#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
77
78#define NUM_INVALIDATE_TLB_VECTORS 8
79
80#endif
81
82/*
83 * Local APIC timer IRQ vector is on a different priority level,
84 * to work around the 'lost local interrupt if more than 2 IRQ
85 * sources per level' errata.
86 */
87#define LOCAL_TIMER_VECTOR 0xef
88
89/*
90 * First APIC vector available to drivers (vectors 0x30-0xee): we
91 * start at 0x31(0x41) to spread out vectors evenly between priority
92 * levels. (0x80 is the syscall vector.)
93 */
94#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
95
96#define NR_VECTORS 256
97
98#define FPU_IRQ 13
99
100#define FIRST_VM86_IRQ 3
101#define LAST_VM86_IRQ 15
102#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
103
104#ifdef CONFIG_X86_64
105# if NR_CPUS < MAX_IO_APICS
106# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
107# else
108# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
109# endif
110
111#elif !defined(CONFIG_X86_VOYAGER)
112
113# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
114
115# define NR_IRQS 224
116
117# else /* IO_APIC || PARAVIRT */
118
119# define NR_IRQS 16
120
121# endif
122
123#else /* !VISWS && !VOYAGER */
124
125# define NR_IRQS 224
126
127#endif /* VISWS */
128
129/* Voyager specific defines */
130/* These define the CPIs we use in linux */
131#define VIC_CPI_LEVEL0 0
132#define VIC_CPI_LEVEL1 1
133/* now the fake CPIs */
134#define VIC_TIMER_CPI 2
135#define VIC_INVALIDATE_CPI 3
136#define VIC_RESCHEDULE_CPI 4
137#define VIC_ENABLE_IRQ_CPI 5
138#define VIC_CALL_FUNCTION_CPI 6
139#define VIC_CALL_FUNCTION_SINGLE_CPI 7
140
141/* Now the QIC CPIs: Since we don't need the two initial levels,
142 * these are 2 less than the VIC CPIs */
143#define QIC_CPI_OFFSET 1
144#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
145#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
146#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
147#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
148#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
149#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
150
151#define VIC_START_FAKE_CPI VIC_TIMER_CPI
152#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
153
154/* this is the SYS_INT CPI. */
155#define VIC_SYS_INT 8
156#define VIC_CMN_INT 15
157
158/* This is the boot CPI for alternate processors. It gets overwritten
159 * by the above once the system has activated all available processors */
160#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
161#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
162
163
164#endif /* ASM_X86__IRQ_VECTORS_H */
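
The vector layout above is all simple arithmetic: IRQ0_VECTOR = 0x20 + 0x10 = 0x30, IRQ15_VECTOR = 0x3f, and FIRST_DEVICE_VECTOR = 0x3f + 2 = 0x41. A hedged sketch of compile-time checks (the function is illustrative, not from this tree):

/* Hedged sketch: compile-time checks of the vector arithmetic above. */
#include <linux/kernel.h>

static inline void check_vector_layout(void)
{
	BUILD_BUG_ON(IRQ0_VECTOR != 0x30);		/* 0x20 + 0x10 */
	BUILD_BUG_ON(IRQ15_VECTOR != 0x3f);		/* 0x30 + 15 */
	BUILD_BUG_ON(FIRST_DEVICE_VECTOR != 0x41);	/* 0x3f + 2 */
}
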
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
deleted file mode 100644
index 2bdab21f0898..000000000000
--- a/include/asm-x86/irqflags.h
+++ /dev/null
@@ -1,211 +0,0 @@
1#ifndef _X86_IRQFLAGS_H_
2#define _X86_IRQFLAGS_H_
3
4#include <asm/processor-flags.h>
5
6#ifndef __ASSEMBLY__
7/*
8 * Interrupt control:
9 */
10
11static inline unsigned long native_save_fl(void)
12{
13 unsigned long flags;
14
15 asm volatile("# __raw_save_flags\n\t"
16 "pushf ; pop %0"
17 : "=g" (flags)
18 : /* no input */
19 : "memory");
20
21 return flags;
22}
23
24static inline void native_restore_fl(unsigned long flags)
25{
26 asm volatile("push %0 ; popf"
27 : /* no output */
28 :"g" (flags)
29 :"memory", "cc");
30}
31
32static inline void native_irq_disable(void)
33{
34 asm volatile("cli": : :"memory");
35}
36
37static inline void native_irq_enable(void)
38{
39 asm volatile("sti": : :"memory");
40}
41
42static inline void native_safe_halt(void)
43{
44 asm volatile("sti; hlt": : :"memory");
45}
46
47static inline void native_halt(void)
48{
49 asm volatile("hlt": : :"memory");
50}
51
52#endif
53
54#ifdef CONFIG_PARAVIRT
55#include <asm/paravirt.h>
56#else
57#ifndef __ASSEMBLY__
58
59static inline unsigned long __raw_local_save_flags(void)
60{
61 return native_save_fl();
62}
63
64static inline void raw_local_irq_restore(unsigned long flags)
65{
66 native_restore_fl(flags);
67}
68
69static inline void raw_local_irq_disable(void)
70{
71 native_irq_disable();
72}
73
74static inline void raw_local_irq_enable(void)
75{
76 native_irq_enable();
77}
78
79/*
80 * Used in the idle loop; sti takes one instruction cycle
81 * to complete:
82 */
83static inline void raw_safe_halt(void)
84{
85 native_safe_halt();
86}
87
88/*
89 * Used when interrupts are already enabled or to
90 * shut down the processor:
91 */
92static inline void halt(void)
93{
94 native_halt();
95}
96
97/*
98 * For spinlocks, etc:
99 */
100static inline unsigned long __raw_local_irq_save(void)
101{
102 unsigned long flags = __raw_local_save_flags();
103
104 raw_local_irq_disable();
105
106 return flags;
107}
108#else
109
110#define ENABLE_INTERRUPTS(x) sti
111#define DISABLE_INTERRUPTS(x) cli
112
113#ifdef CONFIG_X86_64
114#define SWAPGS swapgs
115/*
116 * Currently paravirt can't handle swapgs nicely when we
117 * don't have a stack we can rely on (such as a user space
118 * stack). So we either find a way around these or just fault
119 * and emulate if a guest tries to call swapgs directly.
120 *
121 * Either way, this is a good way to document that we don't
122 * have a reliable stack. x86_64 only.
123 */
124#define SWAPGS_UNSAFE_STACK swapgs
125
126#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
127
128#define INTERRUPT_RETURN iretq
129#define USERGS_SYSRET64 \
130 swapgs; \
131 sysretq;
132#define USERGS_SYSRET32 \
133 swapgs; \
134 sysretl
135#define ENABLE_INTERRUPTS_SYSEXIT32 \
136 swapgs; \
137 sti; \
138 sysexit
139
140#else
141#define INTERRUPT_RETURN iret
142#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
143#define GET_CR0_INTO_EAX movl %cr0, %eax
144#endif
145
146
147#endif /* __ASSEMBLY__ */
148#endif /* CONFIG_PARAVIRT */
149
150#ifndef __ASSEMBLY__
151#define raw_local_save_flags(flags) \
152 do { (flags) = __raw_local_save_flags(); } while (0)
153
154#define raw_local_irq_save(flags) \
155 do { (flags) = __raw_local_irq_save(); } while (0)
156
157static inline int raw_irqs_disabled_flags(unsigned long flags)
158{
159 return !(flags & X86_EFLAGS_IF);
160}
161
162static inline int raw_irqs_disabled(void)
163{
164 unsigned long flags = __raw_local_save_flags();
165
166 return raw_irqs_disabled_flags(flags);
167}
168
169#else
170
171#ifdef CONFIG_X86_64
172#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
173#define ARCH_LOCKDEP_SYS_EXIT_IRQ \
174 TRACE_IRQS_ON; \
175 sti; \
176 SAVE_REST; \
177 LOCKDEP_SYS_EXIT; \
178 RESTORE_REST; \
179 cli; \
180 TRACE_IRQS_OFF;
181
182#else
183#define ARCH_LOCKDEP_SYS_EXIT \
184 pushl %eax; \
185 pushl %ecx; \
186 pushl %edx; \
187 call lockdep_sys_exit; \
188 popl %edx; \
189 popl %ecx; \
190 popl %eax;
191
192#define ARCH_LOCKDEP_SYS_EXIT_IRQ
193#endif
194
195#ifdef CONFIG_TRACE_IRQFLAGS
196# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
197# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
198#else
199# define TRACE_IRQS_ON
200# define TRACE_IRQS_OFF
201#endif
202#ifdef CONFIG_DEBUG_LOCK_ALLOC
203# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
204# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
205# else
206# define LOCKDEP_SYS_EXIT
207# define LOCKDEP_SYS_EXIT_IRQ
208# endif
209
210#endif /* __ASSEMBLY__ */
211#endif
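
Typical use of the raw flag helpers above is a short critical section; a minimal sketch (the function name is illustrative):

/* Hedged sketch: a short critical section using the raw helpers above. */
static void critical_section_example(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* save flags, then cli */
	/* ... work that must not be interrupted on this CPU ... */
	raw_local_irq_restore(flags);	/* popf restores the prior IF state */
}
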
diff --git a/include/asm-x86/ist.h b/include/asm-x86/ist.h
deleted file mode 100644
index 35a2fe9bc921..000000000000
--- a/include/asm-x86/ist.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef ASM_X86__IST_H
2#define ASM_X86__IST_H
3
4/*
5 * Include file for the interface to IST BIOS
6 * Copyright 2002 Andy Grover <andrew.grover@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19
20#include <linux/types.h>
21
22struct ist_info {
23 __u32 signature;
24 __u32 command;
25 __u32 event;
26 __u32 perf_level;
27};
28
29#ifdef __KERNEL__
30
31extern struct ist_info ist_info;
32
33#endif /* __KERNEL__ */
34#endif /* ASM_X86__IST_H */
diff --git a/include/asm-x86/k8.h b/include/asm-x86/k8.h
deleted file mode 100644
index 2bbaf4370a55..000000000000
--- a/include/asm-x86/k8.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef ASM_X86__K8_H
2#define ASM_X86__K8_H
3
4#include <linux/pci.h>
5
6extern struct pci_device_id k8_nb_ids[];
7
8extern int early_is_k8_nb(u32 value);
9extern struct pci_dev **k8_northbridges;
10extern int num_k8_northbridges;
11extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void);
13extern int k8_scan_nodes(unsigned long start, unsigned long end);
14
15#endif /* ASM_X86__K8_H */
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
deleted file mode 100644
index fbbab66ee9df..000000000000
--- a/include/asm-x86/kdebug.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef ASM_X86__KDEBUG_H
2#define ASM_X86__KDEBUG_H
3
4#include <linux/notifier.h>
5
6struct pt_regs;
7
8/* Grossly misnamed. */
9enum die_val {
10 DIE_OOPS = 1,
11 DIE_INT3,
12 DIE_DEBUG,
13 DIE_PANIC,
14 DIE_NMI,
15 DIE_DIE,
16 DIE_NMIWATCHDOG,
17 DIE_KERNELDEBUG,
18 DIE_TRAP,
19 DIE_GPF,
20 DIE_CALL,
21 DIE_NMI_IPI,
22 DIE_PAGE_FAULT,
23 DIE_NMIUNKNOWN,
24};
25
26extern void printk_address(unsigned long address, int reliable);
27extern void die(const char *, struct pt_regs *,long);
28extern int __must_check __die(const char *, struct pt_regs *, long);
29extern void show_registers(struct pt_regs *regs);
30extern void show_trace(struct task_struct *t, struct pt_regs *regs,
31 unsigned long *sp, unsigned long bp);
32extern void __show_regs(struct pt_regs *regs, int all);
33extern void show_regs(struct pt_regs *regs);
34extern unsigned long oops_begin(void);
35extern void oops_end(unsigned long, struct pt_regs *, int signr);
36
37#endif /* ASM_X86__KDEBUG_H */
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
deleted file mode 100644
index ea09600d6129..000000000000
--- a/include/asm-x86/kexec.h
+++ /dev/null
@@ -1,175 +0,0 @@
1#ifndef ASM_X86__KEXEC_H
2#define ASM_X86__KEXEC_H
3
4#ifdef CONFIG_X86_32
5# define PA_CONTROL_PAGE 0
6# define VA_CONTROL_PAGE 1
7# define PA_PGD 2
8# define VA_PGD 3
9# define PA_PTE_0 4
10# define VA_PTE_0 5
11# define PA_PTE_1 6
12# define VA_PTE_1 7
13# define PA_SWAP_PAGE 8
14# ifdef CONFIG_X86_PAE
15# define PA_PMD_0 9
16# define VA_PMD_0 10
17# define PA_PMD_1 11
18# define VA_PMD_1 12
19# define PAGES_NR 13
20# else
21# define PAGES_NR 9
22# endif
23#else
24# define PA_CONTROL_PAGE 0
25# define VA_CONTROL_PAGE 1
26# define PA_PGD 2
27# define VA_PGD 3
28# define PA_PUD_0 4
29# define VA_PUD_0 5
30# define PA_PMD_0 6
31# define VA_PMD_0 7
32# define PA_PTE_0 8
33# define VA_PTE_0 9
34# define PA_PUD_1 10
35# define VA_PUD_1 11
36# define PA_PMD_1 12
37# define VA_PMD_1 13
38# define PA_PTE_1 14
39# define VA_PTE_1 15
40# define PA_TABLE_PAGE 16
41# define PAGES_NR 17
42#endif
43
44#ifdef CONFIG_X86_32
45# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
46#endif
47
48#ifndef __ASSEMBLY__
49
50#include <linux/string.h>
51
52#include <asm/page.h>
53#include <asm/ptrace.h>
54
55/*
56 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
57 * i.e. the highest page that is mapped directly into kernel memory,
58 * so kmap is not required.
59 *
60 * So far x86_64 is limited to 40 physical address bits.
61 */
62#ifdef CONFIG_X86_32
63/* Maximum physical address we can use pages from */
64# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
65/* Maximum address we can reach in physical address mode */
66# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
67/* Maximum address we can use for the control code buffer */
68# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
69
70# define KEXEC_CONTROL_PAGE_SIZE 4096
71
72/* The native architecture */
73# define KEXEC_ARCH KEXEC_ARCH_386
74
75/* We can also handle crash dumps from 64 bit kernel. */
76# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
77#else
78/* Maximum physical address we can use pages from */
79# define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL)
80/* Maximum address we can reach in physical address mode */
81# define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
82/* Maximum address we can use for the control pages */
83# define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
84
85/* Allocate one page for the pdp and the second for the code */
86# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)
87
88/* The native architecture */
89# define KEXEC_ARCH KEXEC_ARCH_X86_64
90#endif
91
92/*
93 * The CPU does not save ss and sp on the stack if execution is already
94 * running in kernel mode at the time of an NMI. This code
95 * fixes it.
96 */
97static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
98 struct pt_regs *oldregs)
99{
100#ifdef CONFIG_X86_32
101 newregs->sp = (unsigned long)&(oldregs->sp);
102 asm volatile("xorl %%eax, %%eax\n\t"
103 "movw %%ss, %%ax\n\t"
104 :"=a"(newregs->ss));
105#endif
106}
107
108/*
109 * This function is responsible for capturing register state if coming
110 * in via panic; otherwise it just fixes up ss and sp if coming in via
111 * a kernel-mode exception.
112 */
113static inline void crash_setup_regs(struct pt_regs *newregs,
114 struct pt_regs *oldregs)
115{
116 if (oldregs) {
117 memcpy(newregs, oldregs, sizeof(*newregs));
118 crash_fixup_ss_esp(newregs, oldregs);
119 } else {
120#ifdef CONFIG_X86_32
121 asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
122 asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
123 asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
124 asm volatile("movl %%esi,%0" : "=m"(newregs->si));
125 asm volatile("movl %%edi,%0" : "=m"(newregs->di));
126 asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
127 asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
128 asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
129 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
130 asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
131 asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
132 asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
133 asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
134#else
135 asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
136 asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
137 asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
138 asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
139 asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
140 asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
141 asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
142 asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
143 asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
144 asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
145 asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
146 asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
147 asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
148 asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
149 asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
150 asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
151 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
152 asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
153 asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
154#endif
155 newregs->ip = (unsigned long)current_text_addr();
156 }
157}
158
159#ifdef CONFIG_X86_32
160asmlinkage unsigned long
161relocate_kernel(unsigned long indirection_page,
162 unsigned long control_page,
163 unsigned long start_address,
164 unsigned int has_pae,
165 unsigned int preserve_context);
166#else
167NORET_TYPE void
168relocate_kernel(unsigned long indirection_page,
169 unsigned long page_list,
170 unsigned long start_address) ATTRIB_NORET;
171#endif
172
173#endif /* __ASSEMBLY__ */
174
175#endif /* ASM_X86__KEXEC_H */
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
deleted file mode 100644
index d283863354de..000000000000
--- a/include/asm-x86/kgdb.h
+++ /dev/null
@@ -1,79 +0,0 @@
1#ifndef ASM_X86__KGDB_H
2#define ASM_X86__KGDB_H
3
4/*
5 * Copyright (C) 2001-2004 Amit S. Kale
6 * Copyright (C) 2008 Wind River Systems, Inc.
7 */
8
9/*
10 * BUFMAX defines the maximum number of characters in the inbound/outbound
11 * buffers; at least NUMREGBYTES*2 are needed for register packets,
12 * and a longer buffer is needed to list all threads.
13 */
14#define BUFMAX 1024
15
16/*
17 * Note that this register image is in a different order than
18 * the register image that Linux produces at interrupt time.
19 *
20 * Linux's register image is defined by struct pt_regs in ptrace.h.
21 * Just why GDB uses a different order is a historical mystery.
22 */
23#ifdef CONFIG_X86_32
24enum regnames {
25 GDB_AX, /* 0 */
26 GDB_CX, /* 1 */
27 GDB_DX, /* 2 */
28 GDB_BX, /* 3 */
29 GDB_SP, /* 4 */
30 GDB_BP, /* 5 */
31 GDB_SI, /* 6 */
32 GDB_DI, /* 7 */
33 GDB_PC, /* 8 also known as eip */
34 GDB_PS, /* 9 also known as eflags */
35 GDB_CS, /* 10 */
36 GDB_SS, /* 11 */
37 GDB_DS, /* 12 */
38 GDB_ES, /* 13 */
39 GDB_FS, /* 14 */
40 GDB_GS, /* 15 */
41};
42#define NUMREGBYTES ((GDB_GS+1)*4)
43#else /* ! CONFIG_X86_32 */
44enum regnames64 {
45 GDB_AX, /* 0 */
46 GDB_BX, /* 1 */
47 GDB_CX, /* 2 */
48 GDB_DX, /* 3 */
49 GDB_SI, /* 4 */
50 GDB_DI, /* 5 */
51 GDB_BP, /* 6 */
52 GDB_SP, /* 7 */
53 GDB_R8, /* 8 */
54 GDB_R9, /* 9 */
55 GDB_R10, /* 10 */
56 GDB_R11, /* 11 */
57 GDB_R12, /* 12 */
58 GDB_R13, /* 13 */
59 GDB_R14, /* 14 */
60 GDB_R15, /* 15 */
61 GDB_PC, /* 16 */
62};
63
64enum regnames32 {
65 GDB_PS = 34,
66 GDB_CS,
67 GDB_SS,
68};
69#define NUMREGBYTES ((GDB_SS+1)*4)
70#endif /* CONFIG_X86_32 */
71
72static inline void arch_kgdb_breakpoint(void)
73{
74 asm(" int $3");
75}
76#define BREAK_INSTR_SIZE 1
77#define CACHE_FLUSH_IS_SAFE 1
78
79#endif /* ASM_X86__KGDB_H */
diff --git a/include/asm-x86/kmap_types.h b/include/asm-x86/kmap_types.h
deleted file mode 100644
index 89f44493e643..000000000000
--- a/include/asm-x86/kmap_types.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef ASM_X86__KMAP_TYPES_H
2#define ASM_X86__KMAP_TYPES_H
3
4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
5# define D(n) __KM_FENCE_##n ,
6#else
7# define D(n)
8#endif
9
10enum km_type {
11D(0) KM_BOUNCE_READ,
12D(1) KM_SKB_SUNRPC_DATA,
13D(2) KM_SKB_DATA_SOFTIRQ,
14D(3) KM_USER0,
15D(4) KM_USER1,
16D(5) KM_BIO_SRC_IRQ,
17D(6) KM_BIO_DST_IRQ,
18D(7) KM_PTE0,
19D(8) KM_PTE1,
20D(9) KM_IRQ0,
21D(10) KM_IRQ1,
22D(11) KM_SOFTIRQ0,
23D(12) KM_SOFTIRQ1,
24D(13) KM_TYPE_NR
25};
26
27#undef D
28
29#endif /* ASM_X86__KMAP_TYPES_H */
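
The km_type slots above index the per-CPU fixmap used by kmap_atomic(). A hedged sketch of era-typical usage, assuming the two-argument kmap_atomic()/kunmap_atomic() of this period:

/* Hedged sketch: era-typical use of a km_type slot with kmap_atomic(). */
#include <linux/highmem.h>
#include <linux/string.h>

static void zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* claim the USER0 slot */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);			/* release the slot */
}
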
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
deleted file mode 100644
index 8a0748d01036..000000000000
--- a/include/asm-x86/kprobes.h
+++ /dev/null
@@ -1,88 +0,0 @@
1#ifndef ASM_X86__KPROBES_H
2#define ASM_X86__KPROBES_H
3/*
4 * Kernel Probes (KProbes)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright (C) IBM Corporation, 2002, 2004
21 *
22 * See arch/x86/kernel/kprobes.c for x86 kprobes history.
23 */
24#include <linux/types.h>
25#include <linux/ptrace.h>
26#include <linux/percpu.h>
27
28#define __ARCH_WANT_KPROBES_INSN_SLOT
29
30struct pt_regs;
31struct kprobe;
32
33typedef u8 kprobe_opcode_t;
34#define BREAKPOINT_INSTRUCTION 0xcc
35#define RELATIVEJUMP_INSTRUCTION 0xe9
36#define MAX_INSN_SIZE 16
37#define MAX_STACK_SIZE 64
38#define MIN_STACK_SIZE(ADDR) \
39 (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
40 THREAD_SIZE - (unsigned long)(ADDR))) \
41 ? (MAX_STACK_SIZE) \
42 : (((unsigned long)current_thread_info()) + \
43 THREAD_SIZE - (unsigned long)(ADDR)))
44
45#define flush_insn_slot(p) do { } while (0)
46
47extern const int kretprobe_blacklist_size;
48
49void arch_remove_kprobe(struct kprobe *p);
50void kretprobe_trampoline(void);
51
52/* Architecture specific copy of original instruction */
53struct arch_specific_insn {
54 /* copy of the original instruction */
55 kprobe_opcode_t *insn;
56 /*
57 * boostable = -1: This instruction type is not boostable.
58 * boostable = 0: This instruction type is boostable.
59 * boostable = 1: This instruction has been boosted: we have
60 * added a relative jump after the instruction copy in insn,
61 * so no single-step and fixup are needed (unless there's
62 * a post_handler or break_handler).
63 */
64 int boostable;
65};
66
67struct prev_kprobe {
68 struct kprobe *kp;
69 unsigned long status;
70 unsigned long old_flags;
71 unsigned long saved_flags;
72};
73
74/* per-cpu kprobe control block */
75struct kprobe_ctlblk {
76 unsigned long kprobe_status;
77 unsigned long kprobe_old_flags;
78 unsigned long kprobe_saved_flags;
79 unsigned long *jprobe_saved_sp;
80 struct pt_regs jprobe_saved_regs;
81 kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
82 struct prev_kprobe prev_kprobe;
83};
84
85extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
86extern int kprobe_exceptions_notify(struct notifier_block *self,
87 unsigned long val, void *data);
88#endif /* ASM_X86__KPROBES_H */
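
/*
 * A sketch, assumed from typical jprobe handling rather than taken
 * from this patch: MIN_STACK_SIZE() caps how much stack is saved
 * before a jprobe handler runs, never copying past the top of the
 * current thread's stack. kcb is an illustrative pointer to the
 * per-cpu struct kprobe_ctlblk above.
 */
	memcpy(kcb->jprobes_stack, (void *)addr, MIN_STACK_SIZE(addr));
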
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
deleted file mode 100644
index ba0dd791fadf..000000000000
--- a/include/asm-x86/kvm.h
+++ /dev/null
@@ -1,211 +0,0 @@
1#ifndef ASM_X86__KVM_H
2#define ASM_X86__KVM_H
3
4/*
5 * KVM x86 specific structures and definitions
6 *
7 */
8
9#include <asm/types.h>
10#include <linux/ioctl.h>
11
12/* Architectural interrupt line count. */
13#define KVM_NR_INTERRUPTS 256
14
15struct kvm_memory_alias {
16 __u32 slot; /* this has a different namespace than memory slots */
17 __u32 flags;
18 __u64 guest_phys_addr;
19 __u64 memory_size;
20 __u64 target_phys_addr;
21};
22
23/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
24struct kvm_pic_state {
25 __u8 last_irr; /* edge detection */
26 __u8 irr; /* interrupt request register */
27 __u8 imr; /* interrupt mask register */
28 __u8 isr; /* interrupt service register */
29 __u8 priority_add; /* highest irq priority */
30 __u8 irq_base;
31 __u8 read_reg_select;
32 __u8 poll;
33 __u8 special_mask;
34 __u8 init_state;
35 __u8 auto_eoi;
36 __u8 rotate_on_auto_eoi;
37 __u8 special_fully_nested_mode;
38 __u8 init4; /* true if 4 byte init */
39 __u8 elcr; /* PIIX edge/trigger selection */
40 __u8 elcr_mask;
41};
42
43#define KVM_IOAPIC_NUM_PINS 24
44struct kvm_ioapic_state {
45 __u64 base_address;
46 __u32 ioregsel;
47 __u32 id;
48 __u32 irr;
49 __u32 pad;
50 union {
51 __u64 bits;
52 struct {
53 __u8 vector;
54 __u8 delivery_mode:3;
55 __u8 dest_mode:1;
56 __u8 delivery_status:1;
57 __u8 polarity:1;
58 __u8 remote_irr:1;
59 __u8 trig_mode:1;
60 __u8 mask:1;
61 __u8 reserve:7;
62 __u8 reserved[4];
63 __u8 dest_id;
64 } fields;
65 } redirtbl[KVM_IOAPIC_NUM_PINS];
66};
67
68#define KVM_IRQCHIP_PIC_MASTER 0
69#define KVM_IRQCHIP_PIC_SLAVE 1
70#define KVM_IRQCHIP_IOAPIC 2
71
72/* for KVM_GET_REGS and KVM_SET_REGS */
73struct kvm_regs {
74 /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
75 __u64 rax, rbx, rcx, rdx;
76 __u64 rsi, rdi, rsp, rbp;
77 __u64 r8, r9, r10, r11;
78 __u64 r12, r13, r14, r15;
79 __u64 rip, rflags;
80};
81
82/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
83#define KVM_APIC_REG_SIZE 0x400
84struct kvm_lapic_state {
85 char regs[KVM_APIC_REG_SIZE];
86};
87
88struct kvm_segment {
89 __u64 base;
90 __u32 limit;
91 __u16 selector;
92 __u8 type;
93 __u8 present, dpl, db, s, l, g, avl;
94 __u8 unusable;
95 __u8 padding;
96};
97
98struct kvm_dtable {
99 __u64 base;
100 __u16 limit;
101 __u16 padding[3];
102};
103
104
105/* for KVM_GET_SREGS and KVM_SET_SREGS */
106struct kvm_sregs {
107 /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
108 struct kvm_segment cs, ds, es, fs, gs, ss;
109 struct kvm_segment tr, ldt;
110 struct kvm_dtable gdt, idt;
111 __u64 cr0, cr2, cr3, cr4, cr8;
112 __u64 efer;
113 __u64 apic_base;
114 __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
115};
116
117/* for KVM_GET_FPU and KVM_SET_FPU */
118struct kvm_fpu {
119 __u8 fpr[8][16];
120 __u16 fcw;
121 __u16 fsw;
122 __u8 ftwx; /* in fxsave format */
123 __u8 pad1;
124 __u16 last_opcode;
125 __u64 last_ip;
126 __u64 last_dp;
127 __u8 xmm[16][16];
128 __u32 mxcsr;
129 __u32 pad2;
130};
131
132struct kvm_msr_entry {
133 __u32 index;
134 __u32 reserved;
135 __u64 data;
136};
137
138/* for KVM_GET_MSRS and KVM_SET_MSRS */
139struct kvm_msrs {
140 __u32 nmsrs; /* number of msrs in entries */
141 __u32 pad;
142
143 struct kvm_msr_entry entries[0];
144};
145
146/* for KVM_GET_MSR_INDEX_LIST */
147struct kvm_msr_list {
148 __u32 nmsrs; /* number of msrs in entries */
149 __u32 indices[0];
150};
151
152
153struct kvm_cpuid_entry {
154 __u32 function;
155 __u32 eax;
156 __u32 ebx;
157 __u32 ecx;
158 __u32 edx;
159 __u32 padding;
160};
161
162/* for KVM_SET_CPUID */
163struct kvm_cpuid {
164 __u32 nent;
165 __u32 padding;
166 struct kvm_cpuid_entry entries[0];
167};
168
169struct kvm_cpuid_entry2 {
170 __u32 function;
171 __u32 index;
172 __u32 flags;
173 __u32 eax;
174 __u32 ebx;
175 __u32 ecx;
176 __u32 edx;
177 __u32 padding[3];
178};
179
180#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
181#define KVM_CPUID_FLAG_STATEFUL_FUNC 2
182#define KVM_CPUID_FLAG_STATE_READ_NEXT 4
183
184/* for KVM_SET_CPUID2 */
185struct kvm_cpuid2 {
186 __u32 nent;
187 __u32 padding;
188 struct kvm_cpuid_entry2 entries[0];
189};
190
191/* for KVM_GET_PIT and KVM_SET_PIT */
192struct kvm_pit_channel_state {
193 __u32 count; /* can be 65536 */
194 __u16 latched_count;
195 __u8 count_latched;
196 __u8 status_latched;
197 __u8 status;
198 __u8 read_state;
199 __u8 write_state;
200 __u8 write_latch;
201 __u8 rw_mode;
202 __u8 mode;
203 __u8 bcd;
204 __u8 gate;
205 __s64 count_load_time;
206};
207
208struct kvm_pit_state {
209 struct kvm_pit_channel_state channels[3];
210};
211#endif /* ASM_X86__KVM_H */
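
/*
 * A userspace sketch, assumed and not from this patch: these
 * structures are exchanged over vcpu ioctls (needs <sys/ioctl.h>,
 * <stdio.h> and <linux/kvm.h>). vcpu_fd is an illustrative file
 * descriptor obtained from KVM_CREATE_VCPU.
 */
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0)
		printf("rip = 0x%llx\n", (unsigned long long)regs.rip);
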
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
deleted file mode 100644
index 411fb8cfb24e..000000000000
--- a/include/asm-x86/kvm_host.h
+++ /dev/null
@@ -1,752 +0,0 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This header defines architecture specific interfaces, x86 version
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. See
7 * the COPYING file in the top-level directory.
8 *
9 */
10
11#ifndef ASM_X86__KVM_HOST_H
12#define ASM_X86__KVM_HOST_H
13
14#include <linux/types.h>
15#include <linux/mm.h>
16#include <linux/mmu_notifier.h>
17
18#include <linux/kvm.h>
19#include <linux/kvm_para.h>
20#include <linux/kvm_types.h>
21
22#include <asm/pvclock-abi.h>
23#include <asm/desc.h>
24
25#define KVM_MAX_VCPUS 16
26#define KVM_MEMORY_SLOTS 32
27/* memory slots that are not exposed to userspace */
28#define KVM_PRIVATE_MEM_SLOTS 4
29
30#define KVM_PIO_PAGE_OFFSET 1
31#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
32
33#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
34#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
35#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
36 0xFFFFFF0000000000ULL)
37
38#define KVM_GUEST_CR0_MASK \
39 (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
40 | X86_CR0_NW | X86_CR0_CD)
41#define KVM_VM_CR0_ALWAYS_ON \
42 (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
43 | X86_CR0_MP)
44#define KVM_GUEST_CR4_MASK \
45 (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
46#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
47#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
48
49#define INVALID_PAGE (~(hpa_t)0)
50#define UNMAPPED_GVA (~(gpa_t)0)
51
52/* shadow tables are PAE even on non-PAE hosts */
53#define KVM_HPAGE_SHIFT 21
54#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
55#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
56
57#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
58
59#define DE_VECTOR 0
60#define DB_VECTOR 1
61#define BP_VECTOR 3
62#define OF_VECTOR 4
63#define BR_VECTOR 5
64#define UD_VECTOR 6
65#define NM_VECTOR 7
66#define DF_VECTOR 8
67#define TS_VECTOR 10
68#define NP_VECTOR 11
69#define SS_VECTOR 12
70#define GP_VECTOR 13
71#define PF_VECTOR 14
72#define MF_VECTOR 16
73#define MC_VECTOR 18
74
75#define SELECTOR_TI_MASK (1 << 2)
76#define SELECTOR_RPL_MASK 0x03
77
78#define IOPL_SHIFT 12
79
80#define KVM_ALIAS_SLOTS 4
81
82#define KVM_PERMILLE_MMU_PAGES 20
83#define KVM_MIN_ALLOC_MMU_PAGES 64
84#define KVM_MMU_HASH_SHIFT 10
85#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
86#define KVM_MIN_FREE_MMU_PAGES 5
87#define KVM_REFILL_PAGES 25
88#define KVM_MAX_CPUID_ENTRIES 40
89#define KVM_NR_VAR_MTRR 8
90
91extern spinlock_t kvm_lock;
92extern struct list_head vm_list;
93
94struct kvm_vcpu;
95struct kvm;
96
97enum kvm_reg {
98 VCPU_REGS_RAX = 0,
99 VCPU_REGS_RCX = 1,
100 VCPU_REGS_RDX = 2,
101 VCPU_REGS_RBX = 3,
102 VCPU_REGS_RSP = 4,
103 VCPU_REGS_RBP = 5,
104 VCPU_REGS_RSI = 6,
105 VCPU_REGS_RDI = 7,
106#ifdef CONFIG_X86_64
107 VCPU_REGS_R8 = 8,
108 VCPU_REGS_R9 = 9,
109 VCPU_REGS_R10 = 10,
110 VCPU_REGS_R11 = 11,
111 VCPU_REGS_R12 = 12,
112 VCPU_REGS_R13 = 13,
113 VCPU_REGS_R14 = 14,
114 VCPU_REGS_R15 = 15,
115#endif
116 VCPU_REGS_RIP,
117 NR_VCPU_REGS
118};
119
120enum {
121 VCPU_SREG_ES,
122 VCPU_SREG_CS,
123 VCPU_SREG_SS,
124 VCPU_SREG_DS,
125 VCPU_SREG_FS,
126 VCPU_SREG_GS,
127 VCPU_SREG_TR,
128 VCPU_SREG_LDTR,
129};
130
131#include <asm/kvm_x86_emulate.h>
132
133#define KVM_NR_MEM_OBJS 40
134
135struct kvm_guest_debug {
136 int enabled;
137 unsigned long bp[4];
138 int singlestep;
139};
140
141/*
142 * We don't want allocation failures within the mmu code, so we preallocate
143 * enough memory for a single page fault in a cache.
144 */
145struct kvm_mmu_memory_cache {
146 int nobjs;
147 void *objects[KVM_NR_MEM_OBJS];
148};
149
150#define NR_PTE_CHAIN_ENTRIES 5
151
152struct kvm_pte_chain {
153 u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
154 struct hlist_node link;
155};
156
157/*
158 * kvm_mmu_page_role, below, is defined as:
159 *
160 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
161 * bits 4:7 - page table level for this shadow (1-4)
162 * bits 8:9 - page table quadrant for 2-level guests
163 * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
164 * bits 17:19 - common access permissions for all ptes in this shadow page
165 */
166union kvm_mmu_page_role {
167 unsigned word;
168 struct {
169 unsigned glevels:4;
170 unsigned level:4;
171 unsigned quadrant:2;
172 unsigned pad_for_nice_hex_output:6;
173 unsigned metaphysical:1;
174 unsigned access:3;
175 unsigned invalid:1;
176 };
177};
178
179struct kvm_mmu_page {
180 struct list_head link;
181 struct hlist_node hash_link;
182
183 /*
184 * The following two entries are used to key the shadow page in the
185 * hash table.
186 */
187 gfn_t gfn;
188 union kvm_mmu_page_role role;
189
190 u64 *spt;
191 /* hold the gfn of each spte inside spt */
192 gfn_t *gfns;
193 unsigned long slot_bitmap; /* One bit set per slot which has memory
194 * in this shadow page.
195 */
196 int multimapped; /* More than one parent_pte? */
197 int root_count; /* Currently serving as active root */
198 bool unsync;
199 bool unsync_children;
200 union {
201 u64 *parent_pte; /* !multimapped */
202 struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
203 };
204 DECLARE_BITMAP(unsync_child_bitmap, 512);
205};
206
207struct kvm_pv_mmu_op_buffer {
208 void *ptr;
209 unsigned len;
210 unsigned processed;
211 char buf[512] __aligned(sizeof(long));
212};
213
214/*
215 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
216 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
217 * mode.
218 */
219struct kvm_mmu {
220 void (*new_cr3)(struct kvm_vcpu *vcpu);
221 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
222 void (*free)(struct kvm_vcpu *vcpu);
223 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
224 void (*prefetch_page)(struct kvm_vcpu *vcpu,
225 struct kvm_mmu_page *page);
226 int (*sync_page)(struct kvm_vcpu *vcpu,
227 struct kvm_mmu_page *sp);
228 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
229 hpa_t root_hpa;
230 int root_level;
231 int shadow_root_level;
232
233 u64 *pae_root;
234};
235
236struct kvm_vcpu_arch {
237 u64 host_tsc;
238 int interrupt_window_open;
239 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
240 DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
241 /*
242 * rip and regs accesses must go through
243 * kvm_{register,rip}_{read,write} functions.
244 */
245 unsigned long regs[NR_VCPU_REGS];
246 u32 regs_avail;
247 u32 regs_dirty;
248
249 unsigned long cr0;
250 unsigned long cr2;
251 unsigned long cr3;
252 unsigned long cr4;
253 unsigned long cr8;
254 u64 pdptrs[4]; /* pae */
255 u64 shadow_efer;
256 u64 apic_base;
257 struct kvm_lapic *apic; /* kernel irqchip context */
258 int mp_state;
259 int sipi_vector;
260 u64 ia32_misc_enable_msr;
261 bool tpr_access_reporting;
262
263 struct kvm_mmu mmu;
264 /* only needed in kvm_pv_mmu_op() path, but it's hot so
265 * put it here to avoid allocation */
266 struct kvm_pv_mmu_op_buffer mmu_op_buffer;
267
268 struct kvm_mmu_memory_cache mmu_pte_chain_cache;
269 struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
270 struct kvm_mmu_memory_cache mmu_page_cache;
271 struct kvm_mmu_memory_cache mmu_page_header_cache;
272
273 gfn_t last_pt_write_gfn;
274 int last_pt_write_count;
275 u64 *last_pte_updated;
276 gfn_t last_pte_gfn;
277
278 struct {
279 gfn_t gfn; /* presumed gfn during guest pte update */
280 pfn_t pfn; /* pfn corresponding to that gfn */
281 int largepage;
282 unsigned long mmu_seq;
283 } update_pte;
284
285 struct i387_fxsave_struct host_fx_image;
286 struct i387_fxsave_struct guest_fx_image;
287
288 gva_t mmio_fault_cr2;
289 struct kvm_pio_request pio;
290 void *pio_data;
291
292 struct kvm_queued_exception {
293 bool pending;
294 bool has_error_code;
295 u8 nr;
296 u32 error_code;
297 } exception;
298
299 struct kvm_queued_interrupt {
300 bool pending;
301 u8 nr;
302 } interrupt;
303
304 struct {
305 int active;
306 u8 save_iopl;
307 struct kvm_save_segment {
308 u16 selector;
309 unsigned long base;
310 u32 limit;
311 u32 ar;
312 } tr, es, ds, fs, gs;
313 } rmode;
314 int halt_request; /* real mode on Intel only */
315
316 int cpuid_nent;
317 struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
318 /* emulate context */
319
320 struct x86_emulate_ctxt emulate_ctxt;
321
322 gpa_t time;
323 struct pvclock_vcpu_time_info hv_clock;
324 unsigned int hv_clock_tsc_khz;
325 unsigned int time_offset;
326 struct page *time_page;
327
328 bool nmi_pending;
329 bool nmi_injected;
330
331 u64 mtrr[0x100];
332};
333
334struct kvm_mem_alias {
335 gfn_t base_gfn;
336 unsigned long npages;
337 gfn_t target_gfn;
338};
339
340struct kvm_arch{
341 int naliases;
342 struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
343
344 unsigned int n_free_mmu_pages;
345 unsigned int n_requested_mmu_pages;
346 unsigned int n_alloc_mmu_pages;
347 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
348 /*
349 * Hash table of struct kvm_mmu_page.
350 */
351 struct list_head active_mmu_pages;
352 struct list_head assigned_dev_head;
353 struct dmar_domain *intel_iommu_domain;
354 struct kvm_pic *vpic;
355 struct kvm_ioapic *vioapic;
356 struct kvm_pit *vpit;
357 struct hlist_head irq_ack_notifier_list;
358
359 int round_robin_prev_vcpu;
360 unsigned int tss_addr;
361 struct page *apic_access_page;
362
363 gpa_t wall_clock;
364
365 struct page *ept_identity_pagetable;
366 bool ept_identity_pagetable_done;
367};
368
369struct kvm_vm_stat {
370 u32 mmu_shadow_zapped;
371 u32 mmu_pte_write;
372 u32 mmu_pte_updated;
373 u32 mmu_pde_zapped;
374 u32 mmu_flooded;
375 u32 mmu_recycled;
376 u32 mmu_cache_miss;
377 u32 mmu_unsync;
378 u32 remote_tlb_flush;
379 u32 lpages;
380};
381
382struct kvm_vcpu_stat {
383 u32 pf_fixed;
384 u32 pf_guest;
385 u32 tlb_flush;
386 u32 invlpg;
387
388 u32 exits;
389 u32 io_exits;
390 u32 mmio_exits;
391 u32 signal_exits;
392 u32 irq_window_exits;
393 u32 nmi_window_exits;
394 u32 halt_exits;
395 u32 halt_wakeup;
396 u32 request_irq_exits;
397 u32 irq_exits;
398 u32 host_state_reload;
399 u32 efer_reload;
400 u32 fpu_reload;
401 u32 insn_emulation;
402 u32 insn_emulation_fail;
403 u32 hypercalls;
404 u32 irq_injections;
405};
406
407struct descriptor_table {
408 u16 limit;
409 unsigned long base;
410} __attribute__((packed));
411
412struct kvm_x86_ops {
413 int (*cpu_has_kvm_support)(void); /* __init */
414 int (*disabled_by_bios)(void); /* __init */
415 void (*hardware_enable)(void *dummy); /* __init */
416 void (*hardware_disable)(void *dummy);
417 void (*check_processor_compatibility)(void *rtn);
418 int (*hardware_setup)(void); /* __init */
419 void (*hardware_unsetup)(void); /* __exit */
420 bool (*cpu_has_accelerated_tpr)(void);
421
422 /* Create, but do not attach this VCPU */
423 struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
424 void (*vcpu_free)(struct kvm_vcpu *vcpu);
425 int (*vcpu_reset)(struct kvm_vcpu *vcpu);
426
427 void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
428 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
429 void (*vcpu_put)(struct kvm_vcpu *vcpu);
430
431 int (*set_guest_debug)(struct kvm_vcpu *vcpu,
432 struct kvm_debug_guest *dbg);
433 void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
434 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
435 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
436 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
437 void (*get_segment)(struct kvm_vcpu *vcpu,
438 struct kvm_segment *var, int seg);
439 int (*get_cpl)(struct kvm_vcpu *vcpu);
440 void (*set_segment)(struct kvm_vcpu *vcpu,
441 struct kvm_segment *var, int seg);
442 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
443 void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
444 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
445 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
446 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
447 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
448 void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
449 void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
450 void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
451 void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
452 unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
453 void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
454 int *exception);
455 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
456 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
457 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
458
459 void (*tlb_flush)(struct kvm_vcpu *vcpu);
460
461 void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
462 int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
463 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
464 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
465 unsigned char *hypercall_addr);
466 int (*get_irq)(struct kvm_vcpu *vcpu);
467 void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
468 void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
469 bool has_error_code, u32 error_code);
470 bool (*exception_injected)(struct kvm_vcpu *vcpu);
471 void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
472 void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
473 struct kvm_run *run);
474
475 int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
476 int (*get_tdp_level)(void);
477};
478
479extern struct kvm_x86_ops *kvm_x86_ops;
480
481int kvm_mmu_module_init(void);
482void kvm_mmu_module_exit(void);
483
484void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
485int kvm_mmu_create(struct kvm_vcpu *vcpu);
486int kvm_mmu_setup(struct kvm_vcpu *vcpu);
487void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
488void kvm_mmu_set_base_ptes(u64 base_pte);
489void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
490 u64 dirty_mask, u64 nx_mask, u64 x_mask);
491
492int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
493void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
494void kvm_mmu_zap_all(struct kvm *kvm);
495unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
496void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
497
498int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
499
500int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
501 const void *val, int bytes);
502int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
503 gpa_t addr, unsigned long *ret);
504
505extern bool tdp_enabled;
506
507enum emulation_result {
508 EMULATE_DONE, /* no further processing */
509 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
510 EMULATE_FAIL, /* can't emulate this instruction */
511};
512
513#define EMULTYPE_NO_DECODE (1 << 0)
514#define EMULTYPE_TRAP_UD (1 << 1)
515int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
516 unsigned long cr2, u16 error_code, int emulation_type);
517void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
518void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
519void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
520void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
521 unsigned long *rflags);
522
523unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
524void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
525 unsigned long *rflags);
526void kvm_enable_efer_bits(u64);
527int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
528int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
529
530struct x86_emulate_ctxt;
531
532int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
533 int size, unsigned port);
534int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
535 int size, unsigned long count, int down,
536 gva_t address, int rep, unsigned port);
537void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
538int kvm_emulate_halt(struct kvm_vcpu *vcpu);
539int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
540int emulate_clts(struct kvm_vcpu *vcpu);
541int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
542 unsigned long *dest);
543int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
544 unsigned long value);
545
546void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
547int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
548 int type_bits, int seg);
549
550int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
551
552void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
553void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
554void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
555void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
556unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
557void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
558void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
559
560int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
561int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
562
563void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
564void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
565void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
566 u32 error_code);
567
568void kvm_pic_set_irq(void *opaque, int irq, int level);
569
570void kvm_inject_nmi(struct kvm_vcpu *vcpu);
571
572void fx_init(struct kvm_vcpu *vcpu);
573
574int emulator_read_std(unsigned long addr,
575 void *val,
576 unsigned int bytes,
577 struct kvm_vcpu *vcpu);
578int emulator_write_emulated(unsigned long addr,
579 const void *val,
580 unsigned int bytes,
581 struct kvm_vcpu *vcpu);
582
583unsigned long segment_base(u16 selector);
584
585void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
586void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
587 const u8 *new, int bytes);
588int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
589void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
590int kvm_mmu_load(struct kvm_vcpu *vcpu);
591void kvm_mmu_unload(struct kvm_vcpu *vcpu);
592void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
593
594int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
595
596int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
597
598int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
599void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
600
601void kvm_enable_tdp(void);
602void kvm_disable_tdp(void);
603
604int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
605int complete_pio(struct kvm_vcpu *vcpu);
606
607static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
608{
609 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
610
611 return (struct kvm_mmu_page *)page_private(page);
612}
613
614static inline u16 kvm_read_fs(void)
615{
616 u16 seg;
617 asm("mov %%fs, %0" : "=g"(seg));
618 return seg;
619}
620
621static inline u16 kvm_read_gs(void)
622{
623 u16 seg;
624 asm("mov %%gs, %0" : "=g"(seg));
625 return seg;
626}
627
628static inline u16 kvm_read_ldt(void)
629{
630 u16 ldt;
631 asm("sldt %0" : "=g"(ldt));
632 return ldt;
633}
634
635static inline void kvm_load_fs(u16 sel)
636{
637 asm("mov %0, %%fs" : : "rm"(sel));
638}
639
640static inline void kvm_load_gs(u16 sel)
641{
642 asm("mov %0, %%gs" : : "rm"(sel));
643}
644
645static inline void kvm_load_ldt(u16 sel)
646{
647 asm("lldt %0" : : "rm"(sel));
648}
649
650static inline void kvm_get_idt(struct descriptor_table *table)
651{
652 asm("sidt %0" : "=m"(*table));
653}
654
655static inline void kvm_get_gdt(struct descriptor_table *table)
656{
657 asm("sgdt %0" : "=m"(*table));
658}
659
660static inline unsigned long kvm_read_tr_base(void)
661{
662 u16 tr;
663 asm("str %0" : "=g"(tr));
664 return segment_base(tr);
665}
666
667#ifdef CONFIG_X86_64
668static inline unsigned long read_msr(unsigned long msr)
669{
670 u64 value;
671
672 rdmsrl(msr, value);
673 return value;
674}
675#endif
676
677static inline void kvm_fx_save(struct i387_fxsave_struct *image)
678{
679 asm("fxsave (%0)":: "r" (image));
680}
681
682static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
683{
684 asm("fxrstor (%0)":: "r" (image));
685}
686
687static inline void kvm_fx_finit(void)
688{
689 asm("finit");
690}
691
692static inline u32 get_rdx_init_val(void)
693{
694 return 0x600; /* P6 family */
695}
696
697static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
698{
699 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
700}
701
702#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
703#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
704#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
705#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
706#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
707#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
708#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
709#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
710#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
711#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
712#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
713
714#define MSR_IA32_TIME_STAMP_COUNTER 0x010
715
716#define TSS_IOPB_BASE_OFFSET 0x66
717#define TSS_BASE_SIZE 0x68
718#define TSS_IOPB_SIZE (65536 / 8)
719#define TSS_REDIRECTION_SIZE (256 / 8)
720#define RMODE_TSS_SIZE \
721 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
722
723enum {
724 TASK_SWITCH_CALL = 0,
725 TASK_SWITCH_IRET = 1,
726 TASK_SWITCH_JMP = 2,
727 TASK_SWITCH_GATE = 3,
728};
729
730/*
731 * Hardware virtualization extension instructions may fault if a
732 * reboot turns off virtualization while processes are running.
733 * Trap the fault and ignore the instruction if that happens.
734 */
735asmlinkage void kvm_handle_fault_on_reboot(void);
736
737#define __kvm_handle_fault_on_reboot(insn) \
738 "666: " insn "\n\t" \
739 ".pushsection .fixup, \"ax\" \n" \
740 "667: \n\t" \
741 __ASM_SIZE(push) " $666b \n\t" \
742 "jmp kvm_handle_fault_on_reboot \n\t" \
743 ".popsection \n\t" \
744 ".pushsection __ex_table, \"a\" \n\t" \
745 _ASM_PTR " 666b, 667b \n\t" \
746 ".popsection"
747
748#define KVM_ARCH_WANT_MMU_NOTIFIER
749int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
750int kvm_age_hva(struct kvm *kvm, unsigned long hva);
751
752#endif /* ASM_X86__KVM_HOST_H */
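
/*
 * A usage sketch, assumed from how VMX code typically wraps
 * virtualization instructions and not taken from this patch: the
 * fixup macro above turns a fault from, say, a reboot-time VMXOFF
 * into a jump to kvm_handle_fault_on_reboot() instead of an oops.
 */
	asm volatile(__kvm_handle_fault_on_reboot(ASM_VMX_VMXOFF) : : : "cc");
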
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
deleted file mode 100644
index 30054fded4fb..000000000000
--- a/include/asm-x86/kvm_para.h
+++ /dev/null
@@ -1,147 +0,0 @@
1#ifndef ASM_X86__KVM_PARA_H
2#define ASM_X86__KVM_PARA_H
3
4/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
5 * should be used to determine that a VM is running under KVM.
6 */
7#define KVM_CPUID_SIGNATURE 0x40000000
8
9/* This CPUID returns a feature bitmap in eax. Before enabling a particular
10 * paravirtualization, the appropriate feature bit should be checked.
11 */
12#define KVM_CPUID_FEATURES 0x40000001
13#define KVM_FEATURE_CLOCKSOURCE 0
14#define KVM_FEATURE_NOP_IO_DELAY 1
15#define KVM_FEATURE_MMU_OP 2
16
17#define MSR_KVM_WALL_CLOCK 0x11
18#define MSR_KVM_SYSTEM_TIME 0x12
19
20#define KVM_MAX_MMU_OP_BATCH 32
21
22/* Operations for KVM_HC_MMU_OP */
23#define KVM_MMU_OP_WRITE_PTE 1
24#define KVM_MMU_OP_FLUSH_TLB 2
25#define KVM_MMU_OP_RELEASE_PT 3
26
27/* Payload for KVM_HC_MMU_OP */
28struct kvm_mmu_op_header {
29 __u32 op;
30 __u32 pad;
31};
32
33struct kvm_mmu_op_write_pte {
34 struct kvm_mmu_op_header header;
35 __u64 pte_phys;
36 __u64 pte_val;
37};
38
39struct kvm_mmu_op_flush_tlb {
40 struct kvm_mmu_op_header header;
41};
42
43struct kvm_mmu_op_release_pt {
44 struct kvm_mmu_op_header header;
45 __u64 pt_phys;
46};
47
48#ifdef __KERNEL__
49#include <asm/processor.h>
50
51extern void kvmclock_init(void);
52
53
54/* This instruction is vmcall. On non-VT architectures, it will generate a
55 * trap that we will then rewrite to the appropriate instruction.
56 */
57#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
58
59/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
60 * instruction. The hypervisor may replace it with something else, but only these
61 * instructions are guaranteed to be supported.
62 *
63 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
64 * The hypercall number should be placed in rax and the return value will be
65 * placed in rax. No other registers will be clobbered unless explicitly
66 * noted by the particular hypercall.
67 */
68
69static inline long kvm_hypercall0(unsigned int nr)
70{
71 long ret;
72 asm volatile(KVM_HYPERCALL
73 : "=a"(ret)
74 : "a"(nr)
75 : "memory");
76 return ret;
77}
78
79static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
80{
81 long ret;
82 asm volatile(KVM_HYPERCALL
83 : "=a"(ret)
84 : "a"(nr), "b"(p1)
85 : "memory");
86 return ret;
87}
88
89static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
90 unsigned long p2)
91{
92 long ret;
93 asm volatile(KVM_HYPERCALL
94 : "=a"(ret)
95 : "a"(nr), "b"(p1), "c"(p2)
96 : "memory");
97 return ret;
98}
99
100static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
101 unsigned long p2, unsigned long p3)
102{
103 long ret;
104 asm volatile(KVM_HYPERCALL
105 : "=a"(ret)
106 : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
107 : "memory");
108 return ret;
109}
110
111static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
112 unsigned long p2, unsigned long p3,
113 unsigned long p4)
114{
115 long ret;
116 asm volatile(KVM_HYPERCALL
117 : "=a"(ret)
118 : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
119 : "memory");
120 return ret;
121}
122
123static inline int kvm_para_available(void)
124{
125 unsigned int eax, ebx, ecx, edx;
126 char signature[13];
127
128 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
129 memcpy(signature + 0, &ebx, 4);
130 memcpy(signature + 4, &ecx, 4);
131 memcpy(signature + 8, &edx, 4);
132 signature[12] = 0;
133
134 if (strcmp(signature, "KVMKVMKVM") == 0)
135 return 1;
136
137 return 0;
138}
139
140static inline unsigned int kvm_arch_para_features(void)
141{
142 return cpuid_eax(KVM_CPUID_FEATURES);
143}
144
145#endif
146
147#endif /* ASM_X86__KVM_PARA_H */
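
/*
 * A guest-side sketch, assumed and not from this patch: probe for the
 * KVM signature, then for a specific paravirt feature, before relying
 * on the corresponding MSRs.
 */
	if (kvm_para_available() &&
	    (kvm_arch_para_features() & (1 << KVM_FEATURE_CLOCKSOURCE)))
		kvmclock_init();
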
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
deleted file mode 100644
index e2d9b030c1ac..000000000000
--- a/include/asm-x86/kvm_x86_emulate.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/******************************************************************************
2 * x86_emulate.h
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
9 */
10
11#ifndef ASM_X86__KVM_X86_EMULATE_H
12#define ASM_X86__KVM_X86_EMULATE_H
13
14struct x86_emulate_ctxt;
15
16/*
17 * x86_emulate_ops:
18 *
19 * These operations represent the instruction emulator's interface to memory.
20 * There are two categories of operation: those that act on ordinary memory
21 * regions (*_std), and those that act on memory regions known to require
22 * special treatment or emulation (*_emulated).
23 *
24 * The emulator assumes that an instruction accesses only one 'emulated memory'
25 * location, that this location is the given linear faulting address (cr2), and
26 * that this is one of the instruction's data operands. Instruction fetches and
27 * stack operations are assumed never to access emulated memory. The emulator
28 * automatically deduces which operand of a string-move operation is accessing
29 * emulated memory, and assumes that the other operand accesses normal memory.
30 *
31 * NOTES:
32 * 1. The emulator isn't very smart about emulated vs. standard memory.
33 * 'Emulated memory' access addresses should be checked for sanity.
34 * 'Normal memory' accesses may fault, and the caller must arrange to
35 * detect and handle reentrancy into the emulator via recursive faults.
36 * Accesses may be unaligned and may cross page boundaries.
37 * 2. If the access fails (cannot emulate, or a standard access faults) then
38 * it is up to the memop to propagate the fault to the guest VM via
39 * some out-of-band mechanism, unknown to the emulator. The memop signals
40 * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
41 * then immediately bail.
42 * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
43 * cmpxchg8b_emulated need support 8-byte accesses.
44 * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
45 */
46/* Access completed successfully: continue emulation as normal. */
47#define X86EMUL_CONTINUE 0
48/* Access is unhandleable: bail from emulation and return error to caller. */
49#define X86EMUL_UNHANDLEABLE 1
50/* Terminate emulation but return success to the caller. */
51#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
52#define X86EMUL_RETRY_INSTR 2 /* retry the instruction for some reason */
53#define X86EMUL_CMPXCHG_FAILED 2 /* cmpxchg did not see expected value */
54struct x86_emulate_ops {
55 /*
56 * read_std: Read bytes of standard (non-emulated/special) memory.
57 * Used for instruction fetch, stack operations, and others.
58 * @addr: [IN ] Linear address from which to read.
59 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
60 * @bytes: [IN ] Number of bytes to read from memory.
61 */
62 int (*read_std)(unsigned long addr, void *val,
63 unsigned int bytes, struct kvm_vcpu *vcpu);
64
65 /*
66 * read_emulated: Read bytes from emulated/special memory area.
67 * @addr: [IN ] Linear address from which to read.
68 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
69 * @bytes: [IN ] Number of bytes to read from memory.
70 */
71 int (*read_emulated)(unsigned long addr,
72 void *val,
73 unsigned int bytes,
74 struct kvm_vcpu *vcpu);
75
76 /*
77 * write_emulated: Write bytes to emulated/special memory area.
78 * @addr: [IN ] Linear address to which to write.
79 * @val: [IN ] Value to write to memory (low-order bytes used as
80 * required).
81 * @bytes: [IN ] Number of bytes to write to memory.
82 */
83 int (*write_emulated)(unsigned long addr,
84 const void *val,
85 unsigned int bytes,
86 struct kvm_vcpu *vcpu);
87
88 /*
89 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
90 * emulated/special memory area.
91 * @addr: [IN ] Linear address to access.
92 * @old: [IN ] Value expected to be current at @addr.
93 * @new: [IN ] Value to write to @addr.
94 * @bytes: [IN ] Number of bytes to access using CMPXCHG.
95 */
96 int (*cmpxchg_emulated)(unsigned long addr,
97 const void *old,
98 const void *new,
99 unsigned int bytes,
100 struct kvm_vcpu *vcpu);
101
102};
103
104/* Type, address-of, and value of an instruction's operand. */
105struct operand {
106 enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
107 unsigned int bytes;
108 unsigned long val, orig_val, *ptr;
109};
110
111struct fetch_cache {
112 u8 data[15];
113 unsigned long start;
114 unsigned long end;
115};
116
117struct decode_cache {
118 u8 twobyte;
119 u8 b;
120 u8 lock_prefix;
121 u8 rep_prefix;
122 u8 op_bytes;
123 u8 ad_bytes;
124 u8 rex_prefix;
125 struct operand src;
126 struct operand dst;
127 bool has_seg_override;
128 u8 seg_override;
129 unsigned int d;
130 unsigned long regs[NR_VCPU_REGS];
131 unsigned long eip;
132 /* modrm */
133 u8 modrm;
134 u8 modrm_mod;
135 u8 modrm_reg;
136 u8 modrm_rm;
137 u8 use_modrm_ea;
138 bool rip_relative;
139 unsigned long modrm_ea;
140 void *modrm_ptr;
141 unsigned long modrm_val;
142 struct fetch_cache fetch;
143};
144
145struct x86_emulate_ctxt {
146 /* Register state before/after emulation. */
147 struct kvm_vcpu *vcpu;
148
149 /* Emulated EFLAGS state before/after emulation. */
150 unsigned long eflags;
151
152 /* Emulated execution mode, represented by an X86EMUL_MODE value. */
153 int mode;
154
155 u32 cs_base;
156
157 /* decode cache */
158
159 struct decode_cache decode;
160};
161
162/* Repeat String Operation Prefix */
163#define REPE_PREFIX 1
164#define REPNE_PREFIX 2
165
166/* Execution mode, passed to the emulator. */
167#define X86EMUL_MODE_REAL 0 /* Real mode. */
168#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
169#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
170#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
171
172/* Host execution mode. */
173#if defined(__i386__)
174#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
175#elif defined(CONFIG_X86_64)
176#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
177#endif
178
179int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
180 struct x86_emulate_ops *ops);
181int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
182 struct x86_emulate_ops *ops);
183
184#endif /* ASM_X86__KVM_X86_EMULATE_H */
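
/*
 * A wiring sketch, assumed and not from this patch: KVM fills the ops
 * table above with its own memory helpers. emulator_read_std() and
 * emulator_write_emulated() are declared in kvm_host.h; the other two
 * names are illustrative stand-ins for static helpers elsewhere.
 */
static struct x86_emulate_ops sketch_emulate_ops = {
	.read_std	  = emulator_read_std,
	.read_emulated	  = emulator_read_emulated,	/* assumed name */
	.write_emulated	  = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,	/* assumed name */
};
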
diff --git a/include/asm-x86/ldt.h b/include/asm-x86/ldt.h
deleted file mode 100644
index a5228504d867..000000000000
--- a/include/asm-x86/ldt.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * ldt.h
3 *
4 * Definitions of structures used with the modify_ldt system call.
5 */
6#ifndef ASM_X86__LDT_H
7#define ASM_X86__LDT_H
8
9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192
11/* The size of each LDT entry. */
12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15/*
16 * Note: on 64-bit, base and limit are ignored and you cannot set DS/ES/CS
17 * to anything but the default values if you still want to do syscalls.
18 * This call is therefore mostly useful for 32-bit mode.
19 */
20struct user_desc {
21 unsigned int entry_number;
22 unsigned int base_addr;
23 unsigned int limit;
24 unsigned int seg_32bit:1;
25 unsigned int contents:2;
26 unsigned int read_exec_only:1;
27 unsigned int limit_in_pages:1;
28 unsigned int seg_not_present:1;
29 unsigned int useable:1;
30#ifdef __x86_64__
31 unsigned int lm:1;
32#endif
33};
34
35#define MODIFY_LDT_CONTENTS_DATA 0
36#define MODIFY_LDT_CONTENTS_STACK 1
37#define MODIFY_LDT_CONTENTS_CODE 2
38
39#endif /* !__ASSEMBLY__ */
40#endif /* ASM_X86__LDT_H */
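
/*
 * A userspace sketch, assumed and not from this patch: installing a
 * flat 32-bit data segment through modify_ldt(2). glibc provides no
 * wrapper, so the raw syscall is used (1 == write an entry); needs
 * <asm/ldt.h>, <sys/syscall.h>, <unistd.h> and <stdio.h>.
 */
	struct user_desc desc = {
		.entry_number	= 0,
		.base_addr	= 0,
		.limit		= 0xfffff,
		.seg_32bit	= 1,
		.contents	= MODIFY_LDT_CONTENTS_DATA,
		.limit_in_pages	= 1,
		.useable	= 1,
	};

	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0)
		perror("modify_ldt");
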
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
deleted file mode 100644
index 7505e947ed27..000000000000
--- a/include/asm-x86/lguest.h
+++ /dev/null
@@ -1,94 +0,0 @@
1#ifndef ASM_X86__LGUEST_H
2#define ASM_X86__LGUEST_H
3
4#define GDT_ENTRY_LGUEST_CS 10
5#define GDT_ENTRY_LGUEST_DS 11
6#define LGUEST_CS (GDT_ENTRY_LGUEST_CS * 8)
7#define LGUEST_DS (GDT_ENTRY_LGUEST_DS * 8)
8
9#ifndef __ASSEMBLY__
10#include <asm/desc.h>
11
12#define GUEST_PL 1
13
14/* Every guest maps the core switcher code. */
15#define SHARED_SWITCHER_PAGES \
16 DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
17/* Pages for switcher itself, then two pages per cpu */
18#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
19
20/* We map at -4M for ease of mapping into the guest (one PTE page). */
21#define SWITCHER_ADDR 0xFFC00000
22
23/* Found in switcher.S */
24extern unsigned long default_idt_entries[];
25
26/* Declarations for definitions in lguest_guest.S */
27extern char lguest_noirq_start[], lguest_noirq_end[];
28extern const char lgstart_cli[], lgend_cli[];
29extern const char lgstart_sti[], lgend_sti[];
30extern const char lgstart_popf[], lgend_popf[];
31extern const char lgstart_pushf[], lgend_pushf[];
32extern const char lgstart_iret[], lgend_iret[];
33
34extern void lguest_iret(void);
35extern void lguest_init(void);
36
37struct lguest_regs {
38 /* Manually saved part. */
39 unsigned long eax, ebx, ecx, edx;
40 unsigned long esi, edi, ebp;
41 unsigned long gs;
42 unsigned long fs, ds, es;
43 unsigned long trapnum, errcode;
44 /* Trap pushed part */
45 unsigned long eip;
46 unsigned long cs;
47 unsigned long eflags;
48 unsigned long esp;
49 unsigned long ss;
50};
51
52/* This is a guest-specific page mapped read-only into the guest. */
53struct lguest_ro_state {
54 /* Host information we need to restore when we switch back. */
55 u32 host_cr3;
56 struct desc_ptr host_idt_desc;
57 struct desc_ptr host_gdt_desc;
58 u32 host_sp;
59
60 /* Fields which are used when guest is running. */
61 struct desc_ptr guest_idt_desc;
62 struct desc_ptr guest_gdt_desc;
63 struct x86_hw_tss guest_tss;
64 struct desc_struct guest_idt[IDT_ENTRIES];
65 struct desc_struct guest_gdt[GDT_ENTRIES];
66};
67
68struct lg_cpu_arch {
69 /* The GDT entries copied into lguest_ro_state when running. */
70 struct desc_struct gdt[GDT_ENTRIES];
71
72 /* The IDT entries: some copied into lguest_ro_state when running. */
73 struct desc_struct idt[IDT_ENTRIES];
74
75 /* The address of the last guest-visible pagefault (ie. cr2). */
76 unsigned long last_pagefault;
77};
78
79static inline void lguest_set_ts(void)
80{
81 u32 cr0;
82
83 cr0 = read_cr0();
84 if (!(cr0 & 8))
85 write_cr0(cr0 | 8);
86}
87
88/* Full 4G segment descriptors, suitable for CS and DS. */
89#define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } })
90#define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } })
91
92#endif /* __ASSEMBLY__ */
93
94#endif /* ASM_X86__LGUEST_H */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
deleted file mode 100644
index 8f034ba4b53e..000000000000
--- a/include/asm-x86/lguest_hcall.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/* Architecture specific portion of the lguest hypercalls */
2#ifndef ASM_X86__LGUEST_HCALL_H
3#define ASM_X86__LGUEST_HCALL_H
4
5#define LHCALL_FLUSH_ASYNC 0
6#define LHCALL_LGUEST_INIT 1
7#define LHCALL_SHUTDOWN 2
8#define LHCALL_LOAD_GDT 3
9#define LHCALL_NEW_PGTABLE 4
10#define LHCALL_FLUSH_TLB 5
11#define LHCALL_LOAD_IDT_ENTRY 6
12#define LHCALL_SET_STACK 7
13#define LHCALL_TS 8
14#define LHCALL_SET_CLOCKEVENT 9
15#define LHCALL_HALT 10
16#define LHCALL_SET_PTE 14
17#define LHCALL_SET_PMD 15
18#define LHCALL_LOAD_TLS 16
19#define LHCALL_NOTIFY 17
20
21#define LGUEST_TRAP_ENTRY 0x1F
22
23/* Argument number 3 to LHCALL_SHUTDOWN */
24#define LGUEST_SHUTDOWN_POWEROFF 1
25#define LGUEST_SHUTDOWN_RESTART 2
26
27#ifndef __ASSEMBLY__
28#include <asm/hw_irq.h>
29
30/*G:031 But first, how does our Guest contact the Host to ask for privileged
31 * operations? There are two ways: the direct way is to make a "hypercall",
32 * that is, a request made of the Host Itself.
33 *
34 * Our hypercall mechanism uses the highest unused trap code (traps 32 and
35 * above are used by real hardware interrupts). Fifteen hypercalls are
36 * available: the hypercall number is put in the %eax register, and the
37 * arguments (when required) are placed in %edx, %ebx and %ecx. If a return
38 * value makes sense, it's returned in %eax.
39 *
40 * Grossly invalid calls result in Sudden Death at the hands of the vengeful
41 * Host, rather than returning failure. This reflects Winston Churchill's
42 * definition of a gentleman: "someone who is only rude intentionally". */
43static inline unsigned long
44hcall(unsigned long call,
45 unsigned long arg1, unsigned long arg2, unsigned long arg3)
46{
47 /* "int" is the Intel instruction to trigger a trap. */
48 asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
49 /* The call in %eax (aka "a") might be overwritten */
50 : "=a"(call)
51 /* The arguments are in %eax, %edx, %ebx & %ecx */
52 : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
53 /* "memory" means this might write somewhere in memory.
54 * This isn't true for all calls, but it's safe to tell
55 * gcc that it might happen so it doesn't get clever. */
56 : "memory");
57 return call;
58}
59/*:*/
60
61/* Can't use our min() macro here: needs to be a constant */
62#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS : 32)
63
64#define LHCALL_RING_SIZE 64
65struct hcall_args {
66 /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */
67 unsigned long arg0, arg2, arg3, arg1;
68};
69
70#endif /* !__ASSEMBLY__ */
71#endif /* ASM_X86__LGUEST_HCALL_H */
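
/*
 * Guest-side sketches, assumed from typical lguest usage rather than
 * taken from this patch: setting the TS flag, and telling the Host
 * about a new top-level page table (pgdir is illustrative).
 */
	hcall(LHCALL_TS, 1, 0, 0);
	hcall(LHCALL_NEW_PGTABLE, __pa(pgdir), 0, 0);
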
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
deleted file mode 100644
index 42d8b62ee8ab..000000000000
--- a/include/asm-x86/linkage.h
+++ /dev/null
@@ -1,61 +0,0 @@
1#ifndef ASM_X86__LINKAGE_H
2#define ASM_X86__LINKAGE_H
3
4#undef notrace
5#define notrace __attribute__((no_instrument_function))
6
7#ifdef CONFIG_X86_64
8#define __ALIGN .p2align 4,,15
9#define __ALIGN_STR ".p2align 4,,15"
10#endif
11
12#ifdef CONFIG_X86_32
13#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
14/*
15 * For 32-bit UML - mark functions implemented in assembly that use
16 * regparm input parameters:
17 */
18#define asmregparm __attribute__((regparm(3)))
19
20/*
21 * Make sure the compiler doesn't do anything stupid with the
22 * arguments on the stack - they are owned by the *caller*, not
23 * the callee. This just fools gcc into not spilling into them,
24 * and keeps it from doing tailcall recursion and/or using the
25 * stack slots for temporaries, since they are live and "used"
26 * all the way to the end of the function.
27 *
28 * NOTE! On x86-64, all the arguments are in registers, so this
29 * only matters on a 32-bit kernel.
30 */
31#define asmlinkage_protect(n, ret, args...) \
32 __asmlinkage_protect##n(ret, ##args)
33#define __asmlinkage_protect_n(ret, args...) \
34 __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
35#define __asmlinkage_protect0(ret) \
36 __asmlinkage_protect_n(ret)
37#define __asmlinkage_protect1(ret, arg1) \
38 __asmlinkage_protect_n(ret, "g" (arg1))
39#define __asmlinkage_protect2(ret, arg1, arg2) \
40 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
41#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
42 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
43#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
44 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
45 "g" (arg4))
46#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
47 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
48 "g" (arg4), "g" (arg5))
49#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
50 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
51 "g" (arg4), "g" (arg5), "g" (arg6))
52
53#endif
54
55#ifdef CONFIG_X86_ALIGNMENT_16
56#define __ALIGN .align 16,0x90
57#define __ALIGN_STR ".align 16,0x90"
58#endif
59
60#endif /* ASM_X86__LINKAGE_H */
61
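
/*
 * A usage sketch, assumed (mirroring the classic sys_waitpid() case,
 * not part of this patch): a 32-bit asmlinkage function that calls
 * another function must protect its stacked arguments, since the
 * caller owns those slots.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	long ret = sys_wait4(pid, stat_addr, options, NULL);

	asmlinkage_protect(3, ret, pid, stat_addr, options);
	return ret;
}
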
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
deleted file mode 100644
index ae91994fd6c9..000000000000
--- a/include/asm-x86/local.h
+++ /dev/null
@@ -1,235 +0,0 @@
1#ifndef ASM_X86__LOCAL_H
2#define ASM_X86__LOCAL_H
3
4#include <linux/percpu.h>
5
6#include <asm/system.h>
7#include <asm/atomic.h>
8#include <asm/asm.h>
9
10typedef struct {
11 atomic_long_t a;
12} local_t;
13
14#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
15
16#define local_read(l) atomic_long_read(&(l)->a)
17#define local_set(l, i) atomic_long_set(&(l)->a, (i))
18
19static inline void local_inc(local_t *l)
20{
21 asm volatile(_ASM_INC "%0"
22 : "+m" (l->a.counter));
23}
24
25static inline void local_dec(local_t *l)
26{
27 asm volatile(_ASM_DEC "%0"
28 : "+m" (l->a.counter));
29}
30
31static inline void local_add(long i, local_t *l)
32{
33 asm volatile(_ASM_ADD "%1,%0"
34 : "+m" (l->a.counter)
35 : "ir" (i));
36}
37
38static inline void local_sub(long i, local_t *l)
39{
40 asm volatile(_ASM_SUB "%1,%0"
41 : "+m" (l->a.counter)
42 : "ir" (i));
43}
44
45/**
46 * local_sub_and_test - subtract value from variable and test result
47 * @i: integer value to subtract
48 * @l: pointer to type local_t
49 *
50 * Atomically subtracts @i from @l and returns
51 * true if the result is zero, or false for all
52 * other cases.
53 */
54static inline int local_sub_and_test(long i, local_t *l)
55{
56 unsigned char c;
57
58 asm volatile(_ASM_SUB "%2,%0; sete %1"
59 : "+m" (l->a.counter), "=qm" (c)
60 : "ir" (i) : "memory");
61 return c;
62}
63
64/**
65 * local_dec_and_test - decrement and test
66 * @l: pointer to type local_t
67 *
68 * Atomically decrements @l by 1 and
69 * returns true if the result is 0, or false for all other
70 * cases.
71 */
72static inline int local_dec_and_test(local_t *l)
73{
74 unsigned char c;
75
76 asm volatile(_ASM_DEC "%0; sete %1"
77 : "+m" (l->a.counter), "=qm" (c)
78 : : "memory");
79 return c != 0;
80}
81
82/**
83 * local_inc_and_test - increment and test
84 * @l: pointer to type local_t
85 *
86 * Atomically increments @l by 1
87 * and returns true if the result is zero, or false for all
88 * other cases.
89 */
90static inline int local_inc_and_test(local_t *l)
91{
92 unsigned char c;
93
94 asm volatile(_ASM_INC "%0; sete %1"
95 : "+m" (l->a.counter), "=qm" (c)
96 : : "memory");
97 return c != 0;
98}
99
100/**
101 * local_add_negative - add and test if negative
102 * @i: integer value to add
103 * @l: pointer to type local_t
104 *
105 * Atomically adds @i to @l and returns true
106 * if the result is negative, or false when
107 * result is greater than or equal to zero.
108 */
109static inline int local_add_negative(long i, local_t *l)
110{
111 unsigned char c;
112
113 asm volatile(_ASM_ADD "%2,%0; sets %1"
114 : "+m" (l->a.counter), "=qm" (c)
115 : "ir" (i) : "memory");
116 return c;
117}
118
119/**
120 * local_add_return - add and return
121 * @i: integer value to add
122 * @l: pointer to type local_t
123 *
124 * Atomically adds @i to @l and returns @i + @l
125 */
126static inline long local_add_return(long i, local_t *l)
127{
128 long __i;
129#ifdef CONFIG_M386
130 unsigned long flags;
131 if (unlikely(boot_cpu_data.x86 <= 3))
132 goto no_xadd;
133#endif
134 /* Modern 486+ processor */
135 __i = i;
136 asm volatile(_ASM_XADD "%0, %1;"
137 : "+r" (i), "+m" (l->a.counter)
138 : : "memory");
139 return i + __i;
140
141#ifdef CONFIG_M386
142no_xadd: /* Legacy 386 processor */
143 local_irq_save(flags);
144 __i = local_read(l);
145 local_set(l, i + __i);
146 local_irq_restore(flags);
147 return i + __i;
148#endif
149}
150
151static inline long local_sub_return(long i, local_t *l)
152{
153 return local_add_return(-i, l);
154}
155
156#define local_inc_return(l) (local_add_return(1, l))
157#define local_dec_return(l) (local_sub_return(1, l))
158
159#define local_cmpxchg(l, o, n) \
160 (cmpxchg_local(&((l)->a.counter), (o), (n)))
161/* Always has a lock prefix */
162#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
163
164/**
165 * local_add_unless - add unless the number is a given value
166 * @l: pointer of type local_t
167 * @a: the amount to add to l...
168 * @u: ...unless l is equal to u.
169 *
170 * Atomically adds @a to @l, so long as it was not @u.
171 * Returns non-zero if @l was not @u, and zero otherwise.
172 */
173#define local_add_unless(l, a, u) \
174({ \
175 long c, old; \
176 c = local_read((l)); \
177 for (;;) { \
178 if (unlikely(c == (u))) \
179 break; \
180 old = local_cmpxchg((l), c, c + (a)); \
181 if (likely(old == c)) \
182 break; \
183 c = old; \
184 } \
185 c != (u); \
186})
187#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
188
189/* On x86_32, these are no better than the atomic variants.
190 * On x86-64 these are better than the atomic variants on SMP kernels
191 * because they don't use a lock prefix.
192 */
193#define __local_inc(l) local_inc(l)
194#define __local_dec(l) local_dec(l)
195#define __local_add(i, l) local_add((i), (l))
196#define __local_sub(i, l) local_sub((i), (l))
197
198/* Use these for per-cpu local_t variables: on some archs they are
199 * much more efficient than these naive implementations. Note they take
200 * a variable, not an address.
201 *
202 * X86_64: This could be done better if we moved the per cpu data directly
203 * after GS.
204 */
205
206/* Need to disable preemption for the cpu-local counters, otherwise we could
207 still access a variable of a previous CPU in a non-atomic way. */
208#define cpu_local_wrap_v(l) \
209({ \
210 local_t res__; \
211 preempt_disable(); \
212 res__ = (l); \
213 preempt_enable(); \
214 res__; \
215})
216#define cpu_local_wrap(l) \
217({ \
218 preempt_disable(); \
219 (l); \
220 preempt_enable(); \
221})
222
223#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
224#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
225#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
226#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
227#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
228#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
229
230#define __cpu_local_inc(l) cpu_local_inc((l))
231#define __cpu_local_dec(l) cpu_local_dec((l))
232#define __cpu_local_add(i, l) cpu_local_add((i), (l))
233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
234
235#endif /* ASM_X86__LOCAL_H */
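
/*
 * A usage sketch, assumed and not from this patch: a cheap per-cpu
 * event counter built on local_t. Note cpu_local_inc() takes the
 * per-cpu variable itself, not its address, and wraps the increment
 * in the preempt_disable()/preempt_enable() pair shown above.
 */
static DEFINE_PER_CPU(local_t, sketch_events) = LOCAL_INIT(0);

static void sketch_count_event(void)
{
	cpu_local_inc(sketch_events);
}
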
diff --git a/include/asm-x86/mach-default/apm.h b/include/asm-x86/mach-default/apm.h
deleted file mode 100644
index 2aa61b54fbd5..000000000000
--- a/include/asm-x86/mach-default/apm.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Machine specific APM BIOS functions for generic.
3 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
4 */
5
6#ifndef ASM_X86__MACH_DEFAULT__APM_H
7#define ASM_X86__MACH_DEFAULT__APM_H
8
9#ifdef APM_ZERO_SEGS
10# define APM_DO_ZERO_SEGS \
11 "pushl %%ds\n\t" \
12 "pushl %%es\n\t" \
13 "xorl %%edx, %%edx\n\t" \
14 "mov %%dx, %%ds\n\t" \
15 "mov %%dx, %%es\n\t" \
16 "mov %%dx, %%fs\n\t" \
17 "mov %%dx, %%gs\n\t"
18# define APM_DO_POP_SEGS \
19 "popl %%es\n\t" \
20 "popl %%ds\n\t"
21#else
22# define APM_DO_ZERO_SEGS
23# define APM_DO_POP_SEGS
24#endif
25
26static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
27 u32 *eax, u32 *ebx, u32 *ecx,
28 u32 *edx, u32 *esi)
29{
30 /*
31 * N.B. We do NOT need a cld after the BIOS call
32 * because we always save and restore the flags.
33 */
34 __asm__ __volatile__(APM_DO_ZERO_SEGS
35 "pushl %%edi\n\t"
36 "pushl %%ebp\n\t"
37 "lcall *%%cs:apm_bios_entry\n\t"
38 "setc %%al\n\t"
39 "popl %%ebp\n\t"
40 "popl %%edi\n\t"
41 APM_DO_POP_SEGS
42 : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx),
43 "=S" (*esi)
44 : "a" (func), "b" (ebx_in), "c" (ecx_in)
45 : "memory", "cc");
46}
47
48static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
49 u32 ecx_in, u32 *eax)
50{
51 int cx, dx, si;
52 u8 error;
53
54 /*
55 * N.B. We do NOT need a cld after the BIOS call
56 * because we always save and restore the flags.
57 */
58 __asm__ __volatile__(APM_DO_ZERO_SEGS
59 "pushl %%edi\n\t"
60 "pushl %%ebp\n\t"
61 "lcall *%%cs:apm_bios_entry\n\t"
62 "setc %%bl\n\t"
63 "popl %%ebp\n\t"
64 "popl %%edi\n\t"
65 APM_DO_POP_SEGS
66 : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx),
67 "=S" (si)
68 : "a" (func), "b" (ebx_in), "c" (ecx_in)
69 : "memory", "cc");
70 return error;
71}
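/* A hypothetical caller sketch (the function code 0x5300, the APM
 * installation check, comes from the APM specification and is an
 * assumption here, not something defined by this header): */
static inline u8 apm_installation_check_sketch(void)
{
	u32 eax;

	/* non-zero return means the BIOS set the carry flag (error) */
	return apm_bios_call_simple_asm(0x5300, 0, 0, &eax);
}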
72
73#endif /* ASM_X86__MACH_DEFAULT__APM_H */
diff --git a/include/asm-x86/mach-default/do_timer.h b/include/asm-x86/mach-default/do_timer.h
deleted file mode 100644
index 23ecda0b28a0..000000000000
--- a/include/asm-x86/mach-default/do_timer.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
3
4#include <asm/i8259.h>
5#include <asm/i8253.h>
6
7/**
8 * do_timer_interrupt_hook - hook into timer tick
9 *
10 * Call the PIT clock event handler; see asm/i8253.h.
11 **/
12
13static inline void do_timer_interrupt_hook(void)
14{
15 global_clock_event->event_handler(global_clock_event);
16}
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
deleted file mode 100644
index 6b1add8e31dd..000000000000
--- a/include/asm-x86/mach-default/entry_arch.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * This file is designed to contain the BUILD_INTERRUPT specifications for
3 * all of the extra named interrupt vectors used by the architecture.
4 * Usually these are the Inter-Processor Interrupts (IPIs).
5 */
6
7/*
8 * The following vectors are part of the Linux architecture; there
9 * is no hardware IRQ pin equivalent for them. They are triggered
10 * through the ICC by us (IPIs).
11 */
12#ifdef CONFIG_X86_SMP
13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
14BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
15BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
16BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
17BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
18#endif
19
20/*
21 * every pentium local APIC has two 'local interrupts', with a
22 * soft-definable vector attached to both interrupts, one of
23 * which is a timer interrupt and the other an error counter
24 * overflow. Linux uses the local APIC timer interrupt to get
25 * a much simpler SMP time architecture:
26 */
27#ifdef CONFIG_X86_LOCAL_APIC
28BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
29BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
30BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
31
32#ifdef CONFIG_X86_MCE_P4THERMAL
33BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
34#endif
35
36#endif
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
deleted file mode 100644
index 3c66f2cdaec1..000000000000
--- a/include/asm-x86/mach-default/mach_apic.h
+++ /dev/null
@@ -1,156 +0,0 @@
1#ifndef ASM_X86__MACH_DEFAULT__MACH_APIC_H
2#define ASM_X86__MACH_DEFAULT__MACH_APIC_H
3
4#ifdef CONFIG_X86_LOCAL_APIC
5
6#include <mach_apicdef.h>
7#include <asm/smp.h>
8
9#define APIC_DFR_VALUE (APIC_DFR_FLAT)
10
11static inline cpumask_t target_cpus(void)
12{
13#ifdef CONFIG_SMP
14 return cpu_online_map;
15#else
16 return cpumask_of_cpu(0);
17#endif
18}
19
20#define NO_BALANCE_IRQ (0)
21#define esr_disable (0)
22
23#ifdef CONFIG_X86_64
24#include <asm/genapic.h>
25#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
26#define INT_DEST_MODE (genapic->int_dest_mode)
27#define TARGET_CPUS (genapic->target_cpus())
28#define apic_id_registered (genapic->apic_id_registered)
29#define init_apic_ldr (genapic->init_apic_ldr)
30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
31#define phys_pkg_id (genapic->phys_pkg_id)
32#define vector_allocation_domain (genapic->vector_allocation_domain)
33#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
34#define send_IPI_self (genapic->send_IPI_self)
35extern void setup_apic_routing(void);
36#else
37#define INT_DELIVERY_MODE dest_LowestPrio
38#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
39#define TARGET_CPUS (target_cpus())
40/*
41 * Set up the logical destination ID.
42 *
43 * Intel recommends setting DFR, LDR and TPR before enabling
44 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
45 * document number 292116). So here it goes...
46 */
47static inline void init_apic_ldr(void)
48{
49 unsigned long val;
50
51 apic_write(APIC_DFR, APIC_DFR_VALUE);
52 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
53 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
54 apic_write(APIC_LDR, val);
55}
56
57static inline int apic_id_registered(void)
58{
59 return physid_isset(read_apic_id(), phys_cpu_present_map);
60}
61
62static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
63{
64 return cpus_addr(cpumask)[0];
65}
66
67static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
68{
69 return cpuid_apic >> index_msb;
70}
71
72static inline void setup_apic_routing(void)
73{
74#ifdef CONFIG_X86_IO_APIC
75 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
76 "Flat", nr_ioapics);
77#endif
78}
79
80static inline int apicid_to_node(int logical_apicid)
81{
82#ifdef CONFIG_SMP
83 return apicid_2_node[hard_smp_processor_id()];
84#else
85 return 0;
86#endif
87}
88
89static inline cpumask_t vector_allocation_domain(int cpu)
90{
91 /* Careful. Some cpus do not strictly honor the set of cpus
92 * specified in the interrupt destination when using lowest
93 * priority interrupt delivery mode.
94 *
95 * In particular there was a hyperthreading cpu observed to
96 * deliver interrupts to the wrong hyperthread when only one
97 * hyperthread was specified in the interrupt destination.
98 */
99 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
100 return domain;
101}
102#endif
103
104static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
105{
106 return physid_isset(apicid, bitmap);
107}
108
109static inline unsigned long check_apicid_present(int bit)
110{
111 return physid_isset(bit, phys_cpu_present_map);
112}
113
114static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
115{
116 return phys_map;
117}
118
119static inline int multi_timer_check(int apic, int irq)
120{
121 return 0;
122}
123
124/* Mapping from cpu number to logical apicid */
125static inline int cpu_to_logical_apicid(int cpu)
126{
127 return 1 << cpu;
128}
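/* Worked example (illustrative): cpu 3 maps to logical APIC ID
 * 1 << 3 == 0x08, i.e. one bit per cpu, matching the flat LDR
 * programmed in init_apic_ldr() above. */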
129
130static inline int cpu_present_to_apicid(int mps_cpu)
131{
132 if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
133 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
134 else
135 return BAD_APICID;
136}
137
138static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
139{
140 return physid_mask_of_physid(phys_apicid);
141}
142
143static inline void setup_portio_remap(void)
144{
145}
146
147static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
148{
149 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
150}
151
152static inline void enable_apic_mode(void)
153{
154}
155#endif /* CONFIG_X86_LOCAL_APIC */
156#endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
deleted file mode 100644
index 0c2d41c41b20..000000000000
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
2#define ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
3
4#include <asm/apic.h>
5
6#ifdef CONFIG_X86_64
7#define APIC_ID_MASK (genapic->apic_id_mask)
8#define GET_APIC_ID(x) (genapic->get_apic_id(x))
9#define SET_APIC_ID(x) (genapic->set_apic_id(x))
10#else
11#define APIC_ID_MASK (0xF<<24)
12static inline unsigned get_apic_id(unsigned long x)
13{
14 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
15 if (APIC_XAPIC(ver))
16 return (((x)>>24)&0xFF);
17 else
18 return (((x)>>24)&0xF);
19}
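/* Worked example (the register value is illustrative): for an xAPIC,
 * apic_read(APIC_ID) == 0x0a000000 yields get_apic_id() == 0x0a; an
 * older 4-bit APIC would keep only the low nibble, 0xa. */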
20
21#define GET_APIC_ID(x) get_apic_id(x)
22#endif
23
24#endif /* ASM_X86__MACH_DEFAULT__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h
deleted file mode 100644
index 674bc7e50c35..000000000000
--- a/include/asm-x86/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef ASM_X86__MACH_DEFAULT__MACH_IPI_H
2#define ASM_X86__MACH_DEFAULT__MACH_IPI_H
3
4/* Avoid include hell */
5#define NMI_VECTOR 0x02
6
7void send_IPI_mask_bitmask(cpumask_t mask, int vector);
8void __send_IPI_shortcut(unsigned int shortcut, int vector);
9
10extern int no_broadcast;
11
12#ifdef CONFIG_X86_64
13#include <asm/genapic.h>
14#define send_IPI_mask (genapic->send_IPI_mask)
15#else
16static inline void send_IPI_mask(cpumask_t mask, int vector)
17{
18 send_IPI_mask_bitmask(mask, vector);
19}
20#endif
21
22static inline void __local_send_IPI_allbutself(int vector)
23{
24 if (no_broadcast || vector == NMI_VECTOR) {
25 cpumask_t mask = cpu_online_map;
26
27 cpu_clear(smp_processor_id(), mask);
28 send_IPI_mask(mask, vector);
29 } else
30 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
31}
32
33static inline void __local_send_IPI_all(int vector)
34{
35 if (no_broadcast || vector == NMI_VECTOR)
36 send_IPI_mask(cpu_online_map, vector);
37 else
38 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
39}
40
41#ifdef CONFIG_X86_64
42#define send_IPI_allbutself (genapic->send_IPI_allbutself)
43#define send_IPI_all (genapic->send_IPI_all)
44#else
45static inline void send_IPI_allbutself(int vector)
46{
47 /*
48 * if there are no other CPUs in the system then we get an APIC send
49 * error if we try to broadcast, thus avoid sending IPIs in this case.
50 */
51 if (!(num_online_cpus() > 1))
52 return;
53
54 __local_send_IPI_allbutself(vector);
55 return;
56}
57
58static inline void send_IPI_all(int vector)
59{
60 __local_send_IPI_all(vector);
61}
62#endif
63
64#endif /* ASM_X86__MACH_DEFAULT__MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h
deleted file mode 100644
index 9c381f2815ac..000000000000
--- a/include/asm-x86/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
2#define ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
3
4static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid)
6{
7 return 0;
8}
9
10/* Hook from generic ACPI tables.c */
11static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
12{
13 return 0;
14}
15
16
17#endif /* ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-default/mach_mpspec.h b/include/asm-x86/mach-default/mach_mpspec.h
deleted file mode 100644
index d77646f011f1..000000000000
--- a/include/asm-x86/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
2#define ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#if CONFIG_BASE_SMALL == 0
7#define MAX_MP_BUSSES 256
8#else
9#define MAX_MP_BUSSES 32
10#endif
11
12#endif /* ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-default/mach_timer.h b/include/asm-x86/mach-default/mach_timer.h
deleted file mode 100644
index 990b15833834..000000000000
--- a/include/asm-x86/mach-default/mach_timer.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Machine specific calibrate_tsc() for generic.
3 * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
4 */
5/* ------ Calibrate the TSC -------
6 * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
7 * Too much 64-bit arithmetic here to do this cleanly in C, and for
8 * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
9 * output busy loop as low as possible. We avoid reading the CTC registers
10 * directly because of the awkward 8-bit access mechanism of the 82C54
11 * device.
12 */
13#ifndef ASM_X86__MACH_DEFAULT__MACH_TIMER_H
14#define ASM_X86__MACH_DEFAULT__MACH_TIMER_H
15
16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
17#define CALIBRATE_LATCH \
18 ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
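/* Worked numbers (assuming the standard PC PIT clock, CLOCK_TICK_RATE ==
 * 1193182 Hz): (1193182 * 30 + 500) / 1000 == 35795, so channel 2 counts
 * down 35795 ticks, i.e. 30 ms, before raising its output. */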
19
20static inline void mach_prepare_counter(void)
21{
22 /* Set the Gate high, disable speaker */
23 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
24
25 /*
26 * Now let's take care of CTC channel 2
27 *
28 * Set the Gate high, program CTC channel 2 for mode 0,
29 * (interrupt on terminal count mode), binary count,
30 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
31 *
32 * Some devices need a delay here.
33 */
34 outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
35 outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
36 outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
37}
38
39static inline void mach_countup(unsigned long *count_p)
40{
41 unsigned long count = 0;
42 do {
43 count++;
44 } while ((inb_p(0x61) & 0x20) == 0);
45 *count_p = count;
46}
47
48#endif /* ASM_X86__MACH_DEFAULT__MACH_TIMER_H */
diff --git a/include/asm-x86/mach-default/mach_traps.h b/include/asm-x86/mach-default/mach_traps.h
deleted file mode 100644
index ff8778f26b84..000000000000
--- a/include/asm-x86/mach-default/mach_traps.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Machine specific NMI handling for generic.
3 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
4 */
5#ifndef ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
6#define ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
7
8#include <asm/mc146818rtc.h>
9
10static inline unsigned char get_nmi_reason(void)
11{
12 return inb(0x61);
13}
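/* A sketch of decoding the reason byte (bit meanings follow the classic
 * PC/AT port 0x61 layout and are an assumption here, not defined by
 * this header): bit 7 set means memory parity error (SERR#), bit 6 set
 * means I/O channel check (IOCHK#). */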
14
15static inline void reassert_nmi(void)
16{
17 int old_reg = -1;
18
19 if (do_i_have_lock_cmos())
20 old_reg = current_lock_cmos_reg();
21 else
22 lock_cmos(0); /* register doesn't matter here */
23 outb(0x8f, 0x70);
24 inb(0x71); /* dummy */
25 outb(0x0f, 0x70);
26 inb(0x71); /* dummy */
27 if (old_reg >= 0)
28 outb(old_reg, 0x70);
29 else
30 unlock_cmos();
31}
32
33#endif /* ASM_X86__MACH_DEFAULT__MACH_TRAPS_H */
diff --git a/include/asm-x86/mach-default/mach_wakecpu.h b/include/asm-x86/mach-default/mach_wakecpu.h
deleted file mode 100644
index 361b810f5160..000000000000
--- a/include/asm-x86/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
2#define ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
3
4/*
5 * This file copes with machines that wake up secondary CPUs by the
6 * INIT, INIT, STARTUP sequence.
7 */
8
9#define WAKE_SECONDARY_VIA_INIT
10
11#define TRAMPOLINE_LOW phys_to_virt(0x467)
12#define TRAMPOLINE_HIGH phys_to_virt(0x469)
13
14#define boot_cpu_apicid boot_cpu_physical_apicid
15
16static inline void wait_for_init_deassert(atomic_t *deassert)
17{
18 while (!atomic_read(deassert))
19 cpu_relax();
20 return;
21}
22
23/* Nothing to do for most platforms, since cleared by the INIT cycle */
24static inline void smp_callin_clear_local_apic(void)
25{
26}
27
28static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
29{
30}
31
32static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
33{
34}
35
36#if APIC_DEBUG
37 #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
38#else
39 #define inquire_remote_apic(apicid) {}
40#endif
41
42#endif /* ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-default/pci-functions.h b/include/asm-x86/mach-default/pci-functions.h
deleted file mode 100644
index ed0bab427354..000000000000
--- a/include/asm-x86/mach-default/pci-functions.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * PCI BIOS function numbering for conventional PCI BIOS
3 * systems
4 */
5
6#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX
7#define PCIBIOS_PCI_BIOS_PRESENT 0xb101
8#define PCIBIOS_FIND_PCI_DEVICE 0xb102
9#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103
10#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
11#define PCIBIOS_READ_CONFIG_BYTE 0xb108
12#define PCIBIOS_READ_CONFIG_WORD 0xb109
13#define PCIBIOS_READ_CONFIG_DWORD 0xb10a
14#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b
15#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c
16#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d
17#define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e
18#define PCIBIOS_SET_PCI_HW_INT 0xb10f
19
diff --git a/include/asm-x86/mach-default/setup_arch.h b/include/asm-x86/mach-default/setup_arch.h
deleted file mode 100644
index 38846208b548..000000000000
--- a/include/asm-x86/mach-default/setup_arch.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/* Hook to call BIOS initialisation function */
2
3/* no action for generic */
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
deleted file mode 100644
index dbab36d64d48..000000000000
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
2 * which needs to alter them. */
3
4static inline void smpboot_clear_io_apic_irqs(void)
5{
6#ifdef CONFIG_X86_IO_APIC
7 io_apic_irqs = 0;
8#endif
9}
10
11static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
12{
13 CMOS_WRITE(0xa, 0xf);
14 local_flush_tlb();
15 pr_debug("1.\n");
16 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
17 pr_debug("2.\n");
18 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
19 pr_debug("3.\n");
20}
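/* Worked example (the start_eip value is illustrative): for start_eip ==
 * 0x9f000, segment 0x9f00 is stored at TRAMPOLINE_HIGH (0x469) and
 * offset 0x0 at TRAMPOLINE_LOW (0x467), so the warm-boot jump lands at
 * 0x9f00:0x0000 == 0x9f000. */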
21
22static inline void smpboot_restore_warm_reset_vector(void)
23{
24 /*
25 * Install writable page 0 entry to set BIOS data area.
26 */
27 local_flush_tlb();
28
29 /*
30 * Paranoid: Set warm reset code and vector here back
31 * to default values.
32 */
33 CMOS_WRITE(0, 0xf);
34
35 *((volatile long *) phys_to_virt(0x467)) = 0;
36}
37
38static inline void __init smpboot_setup_io_apic(void)
39{
40#ifdef CONFIG_X86_IO_APIC
41 /*
42 * Here we can be sure that there is an IO-APIC in the system. Let's
43 * go and set it up:
44 */
45 if (!skip_ioapic_setup && nr_ioapics)
46 setup_IO_APIC();
47 else {
48 nr_ioapics = 0;
49 localise_nmi_watchdog();
50 }
51#endif
52}
53
54static inline void smpboot_clear_io_apic(void)
55{
56#ifdef CONFIG_X86_IO_APIC
57 nr_ioapics = 0;
58#endif
59}
diff --git a/include/asm-x86/mach-generic/gpio.h b/include/asm-x86/mach-generic/gpio.h
deleted file mode 100644
index 6ce0f7786ef8..000000000000
--- a/include/asm-x86/mach-generic/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef ASM_X86__MACH_GENERIC__GPIO_H
2#define ASM_X86__MACH_GENERIC__GPIO_H
3
4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio);
6int gpio_direction_input(unsigned gpio);
7int gpio_direction_output(unsigned gpio, int value);
8int gpio_get_value(unsigned gpio);
9void gpio_set_value(unsigned gpio, int value);
10int gpio_to_irq(unsigned gpio);
11int irq_to_gpio(unsigned irq);
12
13#include <asm-generic/gpio.h> /* cansleep wrappers */
14
15#endif /* ASM_X86__MACH_GENERIC__GPIO_H */
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h
deleted file mode 100644
index 5085b52da301..000000000000
--- a/include/asm-x86/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef ASM_X86__MACH_GENERIC__MACH_APIC_H
2#define ASM_X86__MACH_GENERIC__MACH_APIC_H
3
4#include <asm/genapic.h>
5
6#define esr_disable (genapic->ESR_DISABLE)
7#define NO_BALANCE_IRQ (genapic->no_balance_irq)
8#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
9#define INT_DEST_MODE (genapic->int_dest_mode)
10#undef APIC_DEST_LOGICAL
11#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
12#define TARGET_CPUS (genapic->target_cpus())
13#define apic_id_registered (genapic->apic_id_registered)
14#define init_apic_ldr (genapic->init_apic_ldr)
15#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
16#define setup_apic_routing (genapic->setup_apic_routing)
17#define multi_timer_check (genapic->multi_timer_check)
18#define apicid_to_node (genapic->apicid_to_node)
19#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
20#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
21#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
22#define setup_portio_remap (genapic->setup_portio_remap)
23#define check_apicid_present (genapic->check_apicid_present)
24#define check_phys_apicid_present (genapic->check_phys_apicid_present)
25#define check_apicid_used (genapic->check_apicid_used)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define vector_allocation_domain (genapic->vector_allocation_domain)
28#define enable_apic_mode (genapic->enable_apic_mode)
29#define phys_pkg_id (genapic->phys_pkg_id)
30
31extern void generic_bigsmp_probe(void);
32
33#endif /* ASM_X86__MACH_GENERIC__MACH_APIC_H */
diff --git a/include/asm-x86/mach-generic/mach_apicdef.h b/include/asm-x86/mach-generic/mach_apicdef.h
deleted file mode 100644
index 1657f38b8f27..000000000000
--- a/include/asm-x86/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef ASM_X86__MACH_GENERIC__MACH_APICDEF_H
2#define ASM_X86__MACH_GENERIC__MACH_APICDEF_H
3
4#ifndef APIC_DEFINITION
5#include <asm/genapic.h>
6
7#define GET_APIC_ID (genapic->get_apic_id)
8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif
10
11#endif /* ASM_X86__MACH_GENERIC__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-generic/mach_ipi.h b/include/asm-x86/mach-generic/mach_ipi.h
deleted file mode 100644
index f67433dbd65f..000000000000
--- a/include/asm-x86/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef ASM_X86__MACH_GENERIC__MACH_IPI_H
2#define ASM_X86__MACH_GENERIC__MACH_IPI_H
3
4#include <asm/genapic.h>
5
6#define send_IPI_mask (genapic->send_IPI_mask)
7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all)
9
10#endif /* ASM_X86__MACH_GENERIC__MACH_IPI_H */
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h
deleted file mode 100644
index 3115564e557c..000000000000
--- a/include/asm-x86/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
2#define ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
3
4
5extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
6 char *productid);
7
8extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
9
10#endif /* ASM_X86__MACH_GENERIC__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-generic/mach_mpspec.h b/include/asm-x86/mach-generic/mach_mpspec.h
deleted file mode 100644
index 6061b153613e..000000000000
--- a/include/asm-x86/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
2#define ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
8#define MAX_MP_BUSSES 260
9
10extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
11 char *productid);
12#endif /* ASM_X86__MACH_GENERIC__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
deleted file mode 100644
index 94b6cdf532e2..000000000000
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef ASM_X86__MACH_RDC321X__GPIO_H
2#define ASM_X86__MACH_RDC321X__GPIO_H
3
4#include <linux/kernel.h>
5
6extern int rdc_gpio_get_value(unsigned gpio);
7extern void rdc_gpio_set_value(unsigned gpio, int value);
8extern int rdc_gpio_direction_input(unsigned gpio);
9extern int rdc_gpio_direction_output(unsigned gpio, int value);
10extern int rdc_gpio_request(unsigned gpio, const char *label);
11extern void rdc_gpio_free(unsigned gpio);
12extern void __init rdc321x_gpio_setup(void);
13
14/* Wrappers for the arch-neutral GPIO API */
15
16static inline int gpio_request(unsigned gpio, const char *label)
17{
18 return rdc_gpio_request(gpio, label);
19}
20
21static inline void gpio_free(unsigned gpio)
22{
23 might_sleep();
24 rdc_gpio_free(gpio);
25}
26
27static inline int gpio_direction_input(unsigned gpio)
28{
29 return rdc_gpio_direction_input(gpio);
30}
31
32static inline int gpio_direction_output(unsigned gpio, int value)
33{
34 return rdc_gpio_direction_output(gpio, value);
35}
36
37static inline int gpio_get_value(unsigned gpio)
38{
39 return rdc_gpio_get_value(gpio);
40}
41
42static inline void gpio_set_value(unsigned gpio, int value)
43{
44 rdc_gpio_set_value(gpio, value);
45}
46
47static inline int gpio_to_irq(unsigned gpio)
48{
49 return gpio;
50}
51
52static inline int irq_to_gpio(unsigned irq)
53{
54 return irq;
55}
56
57/* For cansleep */
58#include <asm-generic/gpio.h>
59
60#endif /* ASM_X86__MACH_RDC321X__GPIO_H */
diff --git a/include/asm-x86/mach-rdc321x/rdc321x_defs.h b/include/asm-x86/mach-rdc321x/rdc321x_defs.h
deleted file mode 100644
index c8e9c8bed3d0..000000000000
--- a/include/asm-x86/mach-rdc321x/rdc321x_defs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#define PFX "rdc321x: "
2
3/* General purpose configuration and data registers */
4#define RDC3210_CFGREG_ADDR 0x0CF8
5#define RDC3210_CFGREG_DATA 0x0CFC
6
7#define RDC321X_GPIO_CTRL_REG1 0x48
8#define RDC321X_GPIO_CTRL_REG2 0x84
9#define RDC321X_GPIO_DATA_REG1 0x4c
10#define RDC321X_GPIO_DATA_REG2 0x88
11
12#define RDC321X_MAX_GPIO 58
diff --git a/include/asm-x86/mach-voyager/do_timer.h b/include/asm-x86/mach-voyager/do_timer.h
deleted file mode 100644
index 9e5a459fd15b..000000000000
--- a/include/asm-x86/mach-voyager/do_timer.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
3
4#include <asm/voyager.h>
5#include <asm/i8253.h>
6
7/**
8 * do_timer_interrupt_hook - hook into timer tick
9 *
10 * Call the PIT clock event handler; see asm/i8253.h.
11 **/
12static inline void do_timer_interrupt_hook(void)
13{
14 global_clock_event->event_handler(global_clock_event);
15 voyager_timer_interrupt();
16}
17
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
deleted file mode 100644
index ae52624b5937..000000000000
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2002
4 *
5 * Author: James.Bottomley@HansenPartnership.com
6 *
7 * linux/arch/i386/voyager/entry_arch.h
8 *
9 * This file builds the VIC and QIC CPI gates
10 */
11
12/* initialise the voyager interrupt gates
13 *
14 * This uses the macros in irq.h to set up assembly jump gates. The
15 * calls are then redirected to the same routine with smp_ prefixed */
16BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
17BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
18BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
19
20/* do all the QIC interrupts */
21BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
22BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
23BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
24BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
25BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
26BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/mach-voyager/setup_arch.h b/include/asm-x86/mach-voyager/setup_arch.h
deleted file mode 100644
index 71729ca05cd7..000000000000
--- a/include/asm-x86/mach-voyager/setup_arch.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#include <asm/voyager.h>
2#include <asm/setup.h>
3#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \
4 (&boot_params.apm_bios_info))
5
6/* Hook to call BIOS initialisation function */
7
8/* for voyager, pass the voyager BIOS/SUS info area to the detection
9 * routines */
10
11#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO);
12
diff --git a/include/asm-x86/math_emu.h b/include/asm-x86/math_emu.h
deleted file mode 100644
index 5768d8e95c8c..000000000000
--- a/include/asm-x86/math_emu.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef ASM_X86__MATH_EMU_H
2#define ASM_X86__MATH_EMU_H
3
4/* This structure matches the layout of the data saved to the stack
5 following a device-not-present interrupt, part of which is saved
6 automatically by the 80386/80486.
7 */
8struct info {
9 long ___orig_eip;
10 long ___ebx;
11 long ___ecx;
12 long ___edx;
13 long ___esi;
14 long ___edi;
15 long ___ebp;
16 long ___eax;
17 long ___ds;
18 long ___es;
19 long ___fs;
20 long ___orig_eax;
21 long ___eip;
22 long ___cs;
23 long ___eflags;
24 long ___esp;
25 long ___ss;
26 long ___vm86_es; /* This and the following only in vm86 mode */
27 long ___vm86_ds;
28 long ___vm86_fs;
29 long ___vm86_gs;
30};
31#endif /* ASM_X86__MATH_EMU_H */
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
deleted file mode 100644
index a995f33176cd..000000000000
--- a/include/asm-x86/mc146818rtc.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef ASM_X86__MC146818RTC_H
5#define ASM_X86__MC146818RTC_H
6
7#include <asm/io.h>
8#include <asm/system.h>
9#include <asm/processor.h>
10#include <linux/mc146818rtc.h>
11
12#ifndef RTC_PORT
13#define RTC_PORT(x) (0x70 + (x))
14#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
15#endif
16
17#if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG)
18/*
19 * This lock provides nmi access to the CMOS/RTC registers. It has some
20 * special properties. It is owned by a CPU and stores the index register
21 * currently being accessed (if owned). The idea here is that it works
22 * like a normal lock (normally). However, in an NMI, the NMI code will
23 * first check to see if its CPU owns the lock, meaning that the NMI
24 * interrupted a read/write of the device. If it does, it goes ahead
25 * and performs the access and then restores the index register. If it does
26 * not, it locks normally.
27 *
28 * Note that since we are working with NMIs, we need this lock even in
29 * a non-SMP machine just to mark that the lock is owned.
30 *
31 * This only works with compare-and-swap. There is no other way to
32 * atomically claim the lock and set the owner.
33 */
34#include <linux/smp.h>
35extern volatile unsigned long cmos_lock;
36
37/*
38 * All of these below must be called with interrupts off, preempt
39 * disabled, etc.
40 */
41
42static inline void lock_cmos(unsigned char reg)
43{
44 unsigned long new;
45 new = ((smp_processor_id() + 1) << 8) | reg;
46 for (;;) {
47 if (cmos_lock) {
48 cpu_relax();
49 continue;
50 }
51 if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0)
52 return;
53 }
54}
55
56static inline void unlock_cmos(void)
57{
58 cmos_lock = 0;
59}
60
61static inline int do_i_have_lock_cmos(void)
62{
63 return (cmos_lock >> 8) == (smp_processor_id() + 1);
64}
65
66static inline unsigned char current_lock_cmos_reg(void)
67{
68 return cmos_lock & 0xff;
69}
70
71#define lock_cmos_prefix(reg) \
72 do { \
73 unsigned long cmos_flags; \
74 local_irq_save(cmos_flags); \
75 lock_cmos(reg)
76
77#define lock_cmos_suffix(reg) \
78 unlock_cmos(); \
79 local_irq_restore(cmos_flags); \
80 } while (0)
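/* A minimal usage sketch of the prefix/suffix pair (the register number
 * 0x0a, RTC register A, is an illustrative choice): */
static inline unsigned char cmos_read_rega_sketch(void)
{
	unsigned char val;

	lock_cmos_prefix(0x0a);
	outb(0x0a, RTC_PORT(0));	/* select index register A */
	val = inb(RTC_PORT(1));		/* read its data */
	lock_cmos_suffix(0x0a);
	return val;
}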
81#else
82#define lock_cmos_prefix(reg) do {} while (0)
83#define lock_cmos_suffix(reg) do {} while (0)
84#define lock_cmos(reg)
85#define unlock_cmos()
86#define do_i_have_lock_cmos() 0
87#define current_lock_cmos_reg() 0
88#endif
89
90/*
91 * All machines supported so far access the RTC index register via
92 * an ISA port access, but the way to access the data register differs ...
93 */
94#define CMOS_READ(addr) rtc_cmos_read(addr)
95#define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr)
96unsigned char rtc_cmos_read(unsigned char addr);
97void rtc_cmos_write(unsigned char val, unsigned char addr);
98
99extern int mach_set_rtc_mmss(unsigned long nowtime);
100extern unsigned long mach_get_cmos_time(void);
101
102#define RTC_IRQ 8
103
104#endif /* ASM_X86__MC146818RTC_H */
diff --git a/include/asm-x86/mca.h b/include/asm-x86/mca.h
deleted file mode 100644
index 60d1ed287b13..000000000000
--- a/include/asm-x86/mca.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Platform specific MCA defines */
4#ifndef ASM_X86__MCA_H
5#define ASM_X86__MCA_H
6
7/* Maximal number of MCA slots - actually, some machines have fewer, but
8 * they all have a sufficient number of POS registers to cover 8.
9 */
10#define MCA_MAX_SLOT_NR 8
11
12/* Most machines have only one MCA bus. The only multiple bus machines
13 * I know of have at most two. */
14#define MAX_MCA_BUSSES 2
15
16#define MCA_PRIMARY_BUS 0
17#define MCA_SECONDARY_BUS 1
18
19/* Dummy slot numbers on primary MCA for integrated functions */
20#define MCA_INTEGSCSI (MCA_MAX_SLOT_NR)
21#define MCA_INTEGVIDEO (MCA_MAX_SLOT_NR+1)
22#define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2)
23
24/* Dummy POS values for integrated functions */
25#define MCA_DUMMY_POS_START 0x10000
26#define MCA_INTEGSCSI_POS (MCA_DUMMY_POS_START+1)
27#define MCA_INTEGVIDEO_POS (MCA_DUMMY_POS_START+2)
28#define MCA_MOTHERBOARD_POS (MCA_DUMMY_POS_START+3)
29
30/* MCA registers */
31
32#define MCA_MOTHERBOARD_SETUP_REG 0x94
33#define MCA_ADAPTER_SETUP_REG 0x96
34#define MCA_POS_REG(n) (0x100+(n))
35
36#define MCA_ENABLED 0x01 /* POS 2, set if adapter enabled */
37
38/* Max number of adapters, including both slots and various integrated
39 * things.
40 */
41#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3)
42
43#endif /* ASM_X86__MCA_H */
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
deleted file mode 100644
index 49f22be237d2..000000000000
--- a/include/asm-x86/mca_dma.h
+++ /dev/null
@@ -1,201 +0,0 @@
1#ifndef ASM_X86__MCA_DMA_H
2#define ASM_X86__MCA_DMA_H
3
4#include <asm/io.h>
5#include <linux/ioport.h>
6
7/*
8 * Microchannel specific DMA stuff. DMA on an MCA machine is fairly similar to
9 * standard PC dma, but it certainly has its quirks. DMA register addresses
10 * are in a different place and there are some added functions. Most of this
11 * should be pretty obvious on inspection. Note that the user must divide
12 * count by 2 when using 16-bit dma; that is not handled by these functions.
13 *
14 * Ramen Noodles are yummy.
15 *
16 * 1998 Tymm Twillman <tymm@computer.org>
17 */
18
19/*
20 * Registers that are used by the DMA controller; FN is the function register
21 * (tell the controller what to do) and EXE is the execution register (how
22 * to do it)
23 */
24
25#define MCA_DMA_REG_FN 0x18
26#define MCA_DMA_REG_EXE 0x1A
27
28/*
29 * Functions that the DMA controller can do
30 */
31
32#define MCA_DMA_FN_SET_IO 0x00
33#define MCA_DMA_FN_SET_ADDR 0x20
34#define MCA_DMA_FN_GET_ADDR 0x30
35#define MCA_DMA_FN_SET_COUNT 0x40
36#define MCA_DMA_FN_GET_COUNT 0x50
37#define MCA_DMA_FN_GET_STATUS 0x60
38#define MCA_DMA_FN_SET_MODE 0x70
39#define MCA_DMA_FN_SET_ARBUS 0x80
40#define MCA_DMA_FN_MASK 0x90
41#define MCA_DMA_FN_RESET_MASK 0xA0
42#define MCA_DMA_FN_MASTER_CLEAR 0xD0
43
44/*
45 * Modes (used by setting MCA_DMA_FN_MODE in the function register)
46 *
47 * Note that the MODE_READ is read from memory (write to device), and
48 * MODE_WRITE is vice-versa.
49 */
50
51#define MCA_DMA_MODE_XFER 0x04 /* read by default */
52#define MCA_DMA_MODE_READ 0x04 /* same as XFER */
53#define MCA_DMA_MODE_WRITE 0x08 /* OR with MODE_XFER to use */
54#define MCA_DMA_MODE_IO 0x01 /* DMA from IO register */
55#define MCA_DMA_MODE_16 0x40 /* 16 bit xfers */
56
57
58/**
59 * mca_enable_dma - channel to enable DMA on
60 * @dmanr: DMA channel
61 *
62 * Enable the MCA bus DMA on a channel. This can be called from
63 * IRQ context.
64 */
65
66static inline void mca_enable_dma(unsigned int dmanr)
67{
68 outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
69}
70
71/**
72 * mca_disable_dma - channel to disable DMA on
73 * @dmanr: DMA channel
74 *
75 * Disable the MCA bus DMA on a channel. This can be called from
76 * IRQ context.
77 */
78
79static inline void mca_disable_dma(unsigned int dmanr)
80{
81 outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
82}
83
84/**
85 * mca_set_dma_addr - load a 24bit DMA address
86 * @dmanr: DMA channel
87 * @a: 24bit bus address
88 *
89 * Load the address register in the DMA controller. This has a 24bit
90 * limitation (16Mb).
91 */
92
93static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
94{
95 outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
96 outb(a & 0xff, MCA_DMA_REG_EXE);
97 outb((a >> 8) & 0xff, MCA_DMA_REG_EXE);
98 outb((a >> 16) & 0xff, MCA_DMA_REG_EXE);
99}
100
101/**
102 * mca_get_dma_addr - read a 24bit DMA address
103 * @dmanr: DMA channel
104 *
105 * Read the address register in the DMA controller. This has a 24bit
106 * limitation (16Mb). The return is a bus address.
107 */
108
109static inline unsigned int mca_get_dma_addr(unsigned int dmanr)
110{
111 unsigned int addr;
112
113 outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
114 addr = inb(MCA_DMA_REG_EXE);
115 addr |= inb(MCA_DMA_REG_EXE) << 8;
116 addr |= inb(MCA_DMA_REG_EXE) << 16;
117
118 return addr;
119}
120
121/**
122 * mca_set_dma_count - load a 16bit transfer count
123 * @dmanr: DMA channel
124 * @count: count
125 *
126 * Set the DMA count for this channel. This can be up to 64Kbytes.
127 * Setting a count of zero will not do what you expect.
128 */
129
130static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count)
131{
132 count--; /* transfers one more than count -- correct for this */
133
134 outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN);
135 outb(count & 0xff, MCA_DMA_REG_EXE);
136 outb((count >> 8) & 0xff, MCA_DMA_REG_EXE);
137}
138
139/**
140 * mca_get_dma_residue - get the remaining bytes to transfer
141 * @dmanr: DMA channel
142 *
143 * This function returns the number of bytes left to transfer
144 * on this DMA channel.
145 */
146
147static inline unsigned int mca_get_dma_residue(unsigned int dmanr)
148{
149 unsigned short count;
150
151 outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN);
152 count = 1 + inb(MCA_DMA_REG_EXE);
153 count += inb(MCA_DMA_REG_EXE) << 8;
154
155 return count;
156}
157
158/**
159 * mca_set_dma_io - set the port for an I/O transfer
160 * @dmanr: DMA channel
161 * @io_addr: an I/O port number
162 *
163 * Unlike the ISA bus DMA controllers, DMA on the MCA bus can transfer
164 * with an I/O port target.
165 */
166
167static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
168{
169 /*
170 * DMA from a port address -- set the io address
171 */
172
173 outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
174 outb(io_addr & 0xff, MCA_DMA_REG_EXE);
175 outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE);
176}
177
178/**
179 * mca_set_dma_mode - set the DMA mode
180 * @dmanr: DMA channel
181 * @mode: mode to set
182 *
183 * The DMA controller supports several modes. The mode values you can
184 * set are:
185 *
186 * %MCA_DMA_MODE_READ when reading from the DMA device.
187 *
188 * %MCA_DMA_MODE_WRITE when writing to the DMA device.
189 *
190 * %MCA_DMA_MODE_IO to do DMA to or from an I/O port.
191 *
192 * %MCA_DMA_MODE_16 to do 16bit transfers.
193 */
194
195static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
196{
197 outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
198 outb(mode, MCA_DMA_REG_EXE);
199}
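/* A sketch combining the helpers above into one transfer setup (the
 * channel, address and length parameters are illustrative): */
static inline void mca_dma_setup_sketch(unsigned int dmanr,
					unsigned int bus_addr,
					unsigned int bytes)
{
	mca_disable_dma(dmanr);
	mca_set_dma_addr(dmanr, bus_addr);	/* 24-bit bus address */
	mca_set_dma_count(dmanr, bytes);	/* halve for 16-bit mode */
	mca_set_dma_mode(dmanr, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE);
	mca_enable_dma(dmanr);
}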
200
201#endif /* ASM_X86__MCA_DMA_H */
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h
deleted file mode 100644
index 036133eaf744..000000000000
--- a/include/asm-x86/mce.h
+++ /dev/null
@@ -1,130 +0,0 @@
1#ifndef ASM_X86__MCE_H
2#define ASM_X86__MCE_H
3
4#ifdef __x86_64__
5
6#include <asm/ioctls.h>
7#include <asm/types.h>
8
9/*
10 * Machine Check support for x86
11 */
12
13#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
14
15#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
16#define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */
17#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
18
19#define MCI_STATUS_VAL (1UL<<63) /* valid error */
20#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
21#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
22#define MCI_STATUS_EN (1UL<<60) /* error enabled */
23#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
24#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
25#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
26
27/* Fields are zero when not available */
28struct mce {
29 __u64 status;
30 __u64 misc;
31 __u64 addr;
32 __u64 mcgstatus;
33 __u64 ip;
34 __u64 tsc; /* cpu time stamp counter */
35 __u64 res1; /* for future extension */
36 __u64 res2; /* ditto. */
37 __u8 cs; /* code segment */
38 __u8 bank; /* machine check bank */
39 __u8 cpu; /* cpu that raised the error */
40 __u8 finished; /* entry is valid */
41 __u32 pad;
42};
43
44/*
45 * This structure contains all data related to the MCE log. Also
46 * carries a signature to make it easier to find from external
47 * debugging tools. Each entry is only valid when its finished flag
48 * is set.
49 */
50
51#define MCE_LOG_LEN 32
52
53struct mce_log {
54 char signature[12]; /* "MACHINECHECK" */
55 unsigned len; /* = MCE_LOG_LEN */
56 unsigned next;
57 unsigned flags;
58 unsigned pad0;
59 struct mce entry[MCE_LOG_LEN];
60};
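/* A consumer sketch under the invariant stated above (entries are valid
 * only when their finished flag is set; the log pointer is an
 * illustrative parameter): */
static inline void mce_log_scan_sketch(struct mce_log *log)
{
	unsigned int i;

	for (i = 0; i < log->len; i++)
		if (log->entry[i].finished)
			log->entry[i].finished = 0;	/* consume the slot */
}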
61
62#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
63
64#define MCE_LOG_SIGNATURE "MACHINECHECK"
65
66#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
67#define MCE_GET_LOG_LEN _IOR('M', 2, int)
68#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
69
70/* Software defined banks */
71#define MCE_EXTENDED_BANK 128
72#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
73
74#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
76#define K8_MCE_THRESHOLD_BANK_0 (K8_MCE_THRESHOLD_BASE + 0 * 9)
77#define K8_MCE_THRESHOLD_BANK_1 (K8_MCE_THRESHOLD_BASE + 1 * 9)
78#define K8_MCE_THRESHOLD_BANK_2 (K8_MCE_THRESHOLD_BASE + 2 * 9)
79#define K8_MCE_THRESHOLD_BANK_3 (K8_MCE_THRESHOLD_BASE + 3 * 9)
80#define K8_MCE_THRESHOLD_BANK_4 (K8_MCE_THRESHOLD_BASE + 4 * 9)
81#define K8_MCE_THRESHOLD_BANK_5 (K8_MCE_THRESHOLD_BASE + 5 * 9)
82#define K8_MCE_THRESHOLD_DRAM_ECC (K8_MCE_THRESHOLD_BANK_4 + 0)
82
83#endif /* __x86_64__ */
84
85#ifdef __KERNEL__
86
87#ifdef CONFIG_X86_32
88extern int mce_disabled;
89#else /* CONFIG_X86_32 */
90
91#include <asm/atomic.h>
92
93void mce_log(struct mce *m);
94DECLARE_PER_CPU(struct sys_device, device_mce);
95extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
96
97#ifdef CONFIG_X86_MCE_INTEL
98void mce_intel_feature_init(struct cpuinfo_x86 *c);
99#else
100static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
101#endif
102
103#ifdef CONFIG_X86_MCE_AMD
104void mce_amd_feature_init(struct cpuinfo_x86 *c);
105#else
106static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
107#endif
108
109void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
110
111extern atomic_t mce_entry;
112
113extern void do_machine_check(struct pt_regs *, long);
114extern int mce_notify_user(void);
115
116#endif /* !CONFIG_X86_32 */
117
118
119
120#ifdef CONFIG_X86_MCE
121extern void mcheck_init(struct cpuinfo_x86 *c);
122#else
123#define mcheck_init(c) do { } while (0)
124#endif
125extern void stop_mce(void);
126extern void restart_mce(void);
127
128#endif /* __KERNEL__ */
129
130#endif /* ASM_X86__MCE_H */
diff --git a/include/asm-x86/microcode.h b/include/asm-x86/microcode.h
deleted file mode 100644
index 62c793bb70ca..000000000000
--- a/include/asm-x86/microcode.h
+++ /dev/null
@@ -1,47 +0,0 @@
1#ifndef ASM_X86__MICROCODE_H
2#define ASM_X86__MICROCODE_H
3
4struct cpu_signature {
5 unsigned int sig;
6 unsigned int pf;
7 unsigned int rev;
8};
9
10struct device;
11
12struct microcode_ops {
13 int (*request_microcode_user) (int cpu, const void __user *buf, size_t size);
14 int (*request_microcode_fw) (int cpu, struct device *device);
15
16 void (*apply_microcode) (int cpu);
17
18 int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
19 void (*microcode_fini_cpu) (int cpu);
20};
21
22struct ucode_cpu_info {
23 struct cpu_signature cpu_sig;
24 int valid;
25 void *mc;
26};
27extern struct ucode_cpu_info ucode_cpu_info[];
28
29#ifdef CONFIG_MICROCODE_INTEL
30extern struct microcode_ops * __init init_intel_microcode(void);
31#else
32static inline struct microcode_ops * __init init_intel_microcode(void)
33{
34 return NULL;
35}
36#endif /* CONFIG_MICROCODE_INTEL */
37
38#ifdef CONFIG_MICROCODE_AMD
39extern struct microcode_ops * __init init_amd_microcode(void);
40#else
41static inline struct microcode_ops * __init init_amd_microcode(void)
42{
43 return NULL;
44}
45#endif
46
47#endif /* ASM_X86__MICROCODE_H */
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
deleted file mode 100644
index 4ef28e6de383..000000000000
--- a/include/asm-x86/mman.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef ASM_X86__MMAN_H
2#define ASM_X86__MMAN_H
3
4#include <asm-generic/mman.h>
5
6#define MAP_32BIT 0x40 /* only give out 32bit addresses */
7
8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
10#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
11#define MAP_LOCKED 0x2000 /* pages are locked */
12#define MAP_NORESERVE 0x4000 /* don't check for reservations */
13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
15#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
16
17#define MCL_CURRENT 1 /* lock all current mappings */
18#define MCL_FUTURE 2 /* lock all future mappings */
19
20#endif /* ASM_X86__MMAN_H */
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
deleted file mode 100644
index fb79b1cf5d07..000000000000
--- a/include/asm-x86/mmconfig.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef ASM_X86__MMCONFIG_H
2#define ASM_X86__MMCONFIG_H
3
4#ifdef CONFIG_PCI_MMCONFIG
5extern void __cpuinit fam10h_check_enable_mmcfg(void);
6extern void __cpuinit check_enable_amd_mmconf_dmi(void);
7#else
8static inline void fam10h_check_enable_mmcfg(void) { }
9static inline void check_enable_amd_mmconf_dmi(void) { }
10#endif
11
12#endif /* ASM_X86__MMCONFIG_H */
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
deleted file mode 100644
index 9d5aff14334a..000000000000
--- a/include/asm-x86/mmu.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef ASM_X86__MMU_H
2#define ASM_X86__MMU_H
3
4#include <linux/spinlock.h>
5#include <linux/mutex.h>
6
7/*
8 * The x86 doesn't have an MMU context, but
9 * we put the segment information here.
10 */
11typedef struct {
12 void *ldt;
13 int size;
14 struct mutex lock;
15 void *vdso;
16} mm_context_t;
17
18#ifdef CONFIG_SMP
19void leave_mm(int cpu);
20#else
21static inline void leave_mm(int cpu)
22{
23}
24#endif
25
26#endif /* ASM_X86__MMU_H */
diff --git a/include/asm-x86/mmu_context.h b/include/asm-x86/mmu_context.h
deleted file mode 100644
index 8ec940bfd079..000000000000
--- a/include/asm-x86/mmu_context.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef ASM_X86__MMU_CONTEXT_H
2#define ASM_X86__MMU_CONTEXT_H
3
4#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/paravirt.h>
9#ifndef CONFIG_PARAVIRT
10#include <asm-generic/mm_hooks.h>
11
12static inline void paravirt_activate_mm(struct mm_struct *prev,
13 struct mm_struct *next)
14{
15}
16#endif /* !CONFIG_PARAVIRT */
17
18/*
19 * Used for LDT copy/destruction.
20 */
21int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
22void destroy_context(struct mm_struct *mm);
23
24#ifdef CONFIG_X86_32
25# include "mmu_context_32.h"
26#else
27# include "mmu_context_64.h"
28#endif
29
30#define activate_mm(prev, next) \
31do { \
32 paravirt_activate_mm((prev), (next)); \
33 switch_mm((prev), (next), NULL); \
34} while (0);
35
36
37#endif /* ASM_X86__MMU_CONTEXT_H */
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
deleted file mode 100644
index cce6f6e4afd6..000000000000
--- a/include/asm-x86/mmu_context_32.h
+++ /dev/null
@@ -1,56 +0,0 @@
1#ifndef ASM_X86__MMU_CONTEXT_32_H
2#define ASM_X86__MMU_CONTEXT_32_H
3
4static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
5{
6#ifdef CONFIG_SMP
7 unsigned cpu = smp_processor_id();
8 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
9 per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
10#endif
11}
12
13static inline void switch_mm(struct mm_struct *prev,
14 struct mm_struct *next,
15 struct task_struct *tsk)
16{
17 int cpu = smp_processor_id();
18
19 if (likely(prev != next)) {
20 /* stop flush ipis for the previous mm */
21 cpu_clear(cpu, prev->cpu_vm_mask);
22#ifdef CONFIG_SMP
23 per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
24 per_cpu(cpu_tlbstate, cpu).active_mm = next;
25#endif
26 cpu_set(cpu, next->cpu_vm_mask);
27
28 /* Re-load page tables */
29 load_cr3(next->pgd);
30
31 /*
32 * load the LDT, if the LDT is different:
33 */
34 if (unlikely(prev->context.ldt != next->context.ldt))
35 load_LDT_nolock(&next->context);
36 }
37#ifdef CONFIG_SMP
38 else {
39 per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
40 BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
41
42 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
43 /* We were in lazy tlb mode and leave_mm disabled
44 * tlb flush IPI delivery. We must reload %cr3.
45 */
46 load_cr3(next->pgd);
47 load_LDT_nolock(&next->context);
48 }
49 }
50#endif
51}
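/* Scenario sketch (illustrative): a CPU sitting in lazy TLB mode had
 * its bit cleared from next->cpu_vm_mask by leave_mm(), so it missed
 * the flush IPIs; the cpu_test_and_set() above detects that and reloads
 * %cr3 before any possibly-freed page table can be used. */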
52
53#define deactivate_mm(tsk, mm) \
54 asm("movl %0,%%gs": :"r" (0));
55
56#endif /* ASM_X86__MMU_CONTEXT_32_H */
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
deleted file mode 100644
index 26758673c828..000000000000
--- a/include/asm-x86/mmu_context_64.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef ASM_X86__MMU_CONTEXT_64_H
2#define ASM_X86__MMU_CONTEXT_64_H
3
4#include <asm/pda.h>
5
6static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7{
8#ifdef CONFIG_SMP
9 if (read_pda(mmu_state) == TLBSTATE_OK)
10 write_pda(mmu_state, TLBSTATE_LAZY);
11#endif
12}
13
14static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15 struct task_struct *tsk)
16{
17 unsigned cpu = smp_processor_id();
18 if (likely(prev != next)) {
19 /* stop flush ipis for the previous mm */
20 cpu_clear(cpu, prev->cpu_vm_mask);
21#ifdef CONFIG_SMP
22 write_pda(mmu_state, TLBSTATE_OK);
23 write_pda(active_mm, next);
24#endif
25 cpu_set(cpu, next->cpu_vm_mask);
26 load_cr3(next->pgd);
27
28 if (unlikely(next->context.ldt != prev->context.ldt))
29 load_LDT_nolock(&next->context);
30 }
31#ifdef CONFIG_SMP
32 else {
33 write_pda(mmu_state, TLBSTATE_OK);
34 if (read_pda(active_mm) != next)
35 BUG();
36 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
37 /* We were in lazy tlb mode and leave_mm disabled
38 * tlb flush IPI delivery. We must reload CR3
39 * to make sure to use no freed page tables.
40 */
41 load_cr3(next->pgd);
42 load_LDT_nolock(&next->context);
43 }
44 }
45#endif
46}
47
48#define deactivate_mm(tsk, mm) \
49do { \
50 load_gs_index(0); \
51 asm volatile("movl %0,%%fs"::"r"(0)); \
52} while (0)
53
54#endif /* ASM_X86__MMU_CONTEXT_64_H */
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
deleted file mode 100644
index 2e7299bb3653..000000000000
--- a/include/asm-x86/mmx.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef ASM_X86__MMX_H
2#define ASM_X86__MMX_H
3
4/*
5 * MMX 3Dnow! helper operations
6 */
7
8#include <linux/types.h>
9
10extern void *_mmx_memcpy(void *to, const void *from, size_t size);
11extern void mmx_clear_page(void *page);
12extern void mmx_copy_page(void *to, void *from);
13
14#endif /* ASM_X86__MMX_H */
diff --git a/include/asm-x86/mmzone.h b/include/asm-x86/mmzone.h
deleted file mode 100644
index 64217ea16a36..000000000000
--- a/include/asm-x86/mmzone.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "mmzone_32.h"
3#else
4# include "mmzone_64.h"
5#endif
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
deleted file mode 100644
index 121b65d61d86..000000000000
--- a/include/asm-x86/mmzone_32.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
3 *
4 */
5
6#ifndef ASM_X86__MMZONE_32_H
7#define ASM_X86__MMZONE_32_H
8
9#include <asm/smp.h>
10
11#ifdef CONFIG_NUMA
12extern struct pglist_data *node_data[];
13#define NODE_DATA(nid) (node_data[nid])
14
15#include <asm/numaq.h>
16/* summit or generic arch */
17#include <asm/srat.h>
18
19extern int get_memcfg_numa_flat(void);
20/*
21 * This allows the kernel to be compiled for any one NUMA
22 * architecture and still fall back to the flat function if it
23 * fails.
24 */
25static inline void get_memcfg_numa(void)
26{
27
28 if (get_memcfg_numaq())
29 return;
30 if (get_memcfg_from_srat())
31 return;
32 get_memcfg_numa_flat();
33}
34
35extern int early_pfn_to_nid(unsigned long pfn);
36
37#else /* !CONFIG_NUMA */
38
39#define get_memcfg_numa get_memcfg_numa_flat
40
41#endif /* CONFIG_NUMA */
42
43#ifdef CONFIG_DISCONTIGMEM
44
45/*
46 * generic node memory support, the following assumptions apply:
47 *
48 * 1) memory comes in 64Mb contiguous chunks which are either present or not
49 * 2) we will not have more than 64Gb in total
50 *
51 * for now, assume that 64Gb is the maximum amount of RAM for the whole system
52 * 64Gb / 4096bytes/page = 16777216 pages
53 */
54#define MAX_NR_PAGES 16777216
55#define MAX_ELEMENTS 1024
56#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
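/* Worked numbers from the assumptions above: 64Gb / 4096 bytes per page
 * == 16777216 pages; split over 1024 elements that is 16384 pages, i.e.
 * 64Mb, per physnode_map element -- matching the 64Mb chunk size in
 * assumption 1. */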
57
58extern s8 physnode_map[];
59
60static inline int pfn_to_nid(unsigned long pfn)
61{
62#ifdef CONFIG_NUMA
63 return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]);
64#else
65 return 0;
66#endif
67}
68
69/*
70 * The following are macros that each NUMA implementation must define.
71 */
72
73#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
74#define node_end_pfn(nid) \
75({ \
76 pg_data_t *__pgdat = NODE_DATA(nid); \
77 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
78})
79
80static inline int pfn_valid(int pfn)
81{
82 int nid = pfn_to_nid(pfn);
83
84 if (nid >= 0)
85 return (pfn < node_end_pfn(nid));
86 return 0;
87}
88
89#endif /* CONFIG_DISCONTIGMEM */
90
91#ifdef CONFIG_NEED_MULTIPLE_NODES
92
93/*
94 * The following macros are specific to this NUMA platform.
95 */
96#define reserve_bootmem(addr, size, flags) \
97 reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
98#define alloc_bootmem(x) \
99 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
100#define alloc_bootmem_nopanic(x) \
101 __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
102 __pa(MAX_DMA_ADDRESS))
103#define alloc_bootmem_low(x) \
104 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
105#define alloc_bootmem_pages(x) \
106 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
107#define alloc_bootmem_pages_nopanic(x) \
108 __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
109 __pa(MAX_DMA_ADDRESS))
110#define alloc_bootmem_low_pages(x) \
111 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
112#define alloc_bootmem_node(pgdat, x) \
113({ \
114 struct pglist_data __maybe_unused \
115 *__alloc_bootmem_node__pgdat = (pgdat); \
116 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
117 __pa(MAX_DMA_ADDRESS)); \
118})
119#define alloc_bootmem_pages_node(pgdat, x) \
120({ \
121 struct pglist_data __maybe_unused \
122 *__alloc_bootmem_node__pgdat = (pgdat); \
123 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
124 __pa(MAX_DMA_ADDRESS)); \
125})
126#define alloc_bootmem_low_pages_node(pgdat, x) \
127({ \
128 struct pglist_data __maybe_unused \
129 *__alloc_bootmem_node__pgdat = (pgdat); \
130 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
131})
132#endif /* CONFIG_NEED_MULTIPLE_NODES */
133
134#endif /* ASM_X86__MMZONE_32_H */
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
deleted file mode 100644
index 6480f3333b2a..000000000000
--- a/include/asm-x86/mmzone_64.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/* K8 NUMA support */
2/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
3/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
4#ifndef ASM_X86__MMZONE_64_H
5#define ASM_X86__MMZONE_64_H
6
7
8#ifdef CONFIG_NUMA
9
10#include <linux/mmdebug.h>
11
12#include <asm/smp.h>
13
14/* Simple perfect hash to map physical addresses to node numbers */
15struct memnode {
16 int shift;
17 unsigned int mapsize;
18 s16 *map;
19 s16 embedded_map[64 - 8];
20} ____cacheline_aligned; /* total size = 128 bytes */
21extern struct memnode memnode;
22#define memnode_shift memnode.shift
23#define memnodemap memnode.map
24#define memnodemapsize memnode.mapsize
25
26extern struct pglist_data *node_data[];
27
28static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
29{
30 unsigned nid;
31 VIRTUAL_BUG_ON(!memnodemap);
32 nid = memnodemap[addr >> memnode_shift];
33 VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
34 return nid;
35}
36
37#define NODE_DATA(nid) (node_data[nid])
38
39#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
40#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
41 NODE_DATA(nid)->node_spanned_pages)
42
43extern int early_pfn_to_nid(unsigned long pfn);
44
45#ifdef CONFIG_NUMA_EMU
46#define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024)
47#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
48#endif
49
50#endif
51#endif /* ASM_X86__MMZONE_64_H */
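
The "perfect hash" above is really a shift-based table: compute_hash_shift() picks a shift such that every 2^shift-byte block of physical memory falls entirely within one node, memnodemap then holds one s16 node id per block, and phys_to_nid() is a single indexed load. A toy userspace rendering, with an invented shift and map:

/* Toy illustration of the memnode lookup: one entry per 2^shift
 * bytes of physical address space.  shift = 30 (1GB granularity)
 * and the map contents are invented for the example. */
#include <stdio.h>

static short map[] = { 0, 0, 1, 1 };	/* covers 4GB of address space */
static int   shift = 30;

static int phys_to_nid(unsigned long long addr)
{
	return map[addr >> shift];
}

int main(void)
{
	printf("%d\n", phys_to_nid(0x20000000ULL));	/* 512MB -> node 0 */
	printf("%d\n", phys_to_nid(0x80000000ULL));	/*   2GB -> node 1 */
	return 0;
}
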
diff --git a/include/asm-x86/module.h b/include/asm-x86/module.h
deleted file mode 100644
index 864f2005fc1d..000000000000
--- a/include/asm-x86/module.h
+++ /dev/null
@@ -1,80 +0,0 @@
1#ifndef ASM_X86__MODULE_H
2#define ASM_X86__MODULE_H
3
4/* x86_32/64 are simple */
5struct mod_arch_specific {};
6
7#ifdef CONFIG_X86_32
8# define Elf_Shdr Elf32_Shdr
9# define Elf_Sym Elf32_Sym
10# define Elf_Ehdr Elf32_Ehdr
11#else
12# define Elf_Shdr Elf64_Shdr
13# define Elf_Sym Elf64_Sym
14# define Elf_Ehdr Elf64_Ehdr
15#endif
16
17#ifdef CONFIG_X86_64
18/* X86_64 does not define MODULE_PROC_FAMILY */
19#elif defined CONFIG_M386
20#define MODULE_PROC_FAMILY "386 "
21#elif defined CONFIG_M486
22#define MODULE_PROC_FAMILY "486 "
23#elif defined CONFIG_M586
24#define MODULE_PROC_FAMILY "586 "
25#elif defined CONFIG_M586TSC
26#define MODULE_PROC_FAMILY "586TSC "
27#elif defined CONFIG_M586MMX
28#define MODULE_PROC_FAMILY "586MMX "
29#elif defined CONFIG_MCORE2
30#define MODULE_PROC_FAMILY "CORE2 "
31#elif defined CONFIG_M686
32#define MODULE_PROC_FAMILY "686 "
33#elif defined CONFIG_MPENTIUMII
34#define MODULE_PROC_FAMILY "PENTIUMII "
35#elif defined CONFIG_MPENTIUMIII
36#define MODULE_PROC_FAMILY "PENTIUMIII "
37#elif defined CONFIG_MPENTIUMM
38#define MODULE_PROC_FAMILY "PENTIUMM "
39#elif defined CONFIG_MPENTIUM4
40#define MODULE_PROC_FAMILY "PENTIUM4 "
41#elif defined CONFIG_MK6
42#define MODULE_PROC_FAMILY "K6 "
43#elif defined CONFIG_MK7
44#define MODULE_PROC_FAMILY "K7 "
45#elif defined CONFIG_MK8
46#define MODULE_PROC_FAMILY "K8 "
47#elif defined CONFIG_X86_ELAN
48#define MODULE_PROC_FAMILY "ELAN "
49#elif defined CONFIG_MCRUSOE
50#define MODULE_PROC_FAMILY "CRUSOE "
51#elif defined CONFIG_MEFFICEON
52#define MODULE_PROC_FAMILY "EFFICEON "
53#elif defined CONFIG_MWINCHIPC6
54#define MODULE_PROC_FAMILY "WINCHIPC6 "
55#elif defined CONFIG_MWINCHIP3D
56#define MODULE_PROC_FAMILY "WINCHIP3D "
57#elif defined CONFIG_MCYRIXIII
58#define MODULE_PROC_FAMILY "CYRIXIII "
59#elif defined CONFIG_MVIAC3_2
60#define MODULE_PROC_FAMILY "VIAC3-2 "
61#elif defined CONFIG_MVIAC7
62#define MODULE_PROC_FAMILY "VIAC7 "
63#elif defined CONFIG_MGEODEGX1
64#define MODULE_PROC_FAMILY "GEODEGX1 "
65#elif defined CONFIG_MGEODE_LX
66#define MODULE_PROC_FAMILY "GEODE "
67#else
68#error unknown processor family
69#endif
70
71#ifdef CONFIG_X86_32
72# ifdef CONFIG_4KSTACKS
73# define MODULE_STACKSIZE "4KSTACKS "
74# else
75# define MODULE_STACKSIZE ""
76# endif
77# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
78#endif
79
80#endif /* ASM_X86__MODULE_H */
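
MODULE_ARCH_VERMAGIC relies on nothing more exotic than C string-literal concatenation: the two macros expand to adjacent literals, the compiler merges them, and the result becomes part of each module's vermagic string so that module loading can refuse a module built for an incompatible processor family or stack size. A sketch, assuming CONFIG_M686 and CONFIG_4KSTACKS are the selected options:

/* Sketch of the 32-bit vermagic fragment, assuming CONFIG_M686
 * and CONFIG_4KSTACKS; adjacent string literals concatenate. */
#include <stdio.h>

#define MODULE_PROC_FAMILY   "686 "
#define MODULE_STACKSIZE     "4KSTACKS "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE

int main(void)
{
	printf("arch vermagic part: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;	/* prints: arch vermagic part: "686 4KSTACKS " */
}
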
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
deleted file mode 100644
index be2241a818f1..000000000000
--- a/include/asm-x86/mpspec.h
+++ /dev/null
@@ -1,145 +0,0 @@
1#ifndef ASM_X86__MPSPEC_H
2#define ASM_X86__MPSPEC_H
3
4#include <linux/init.h>
5
6#include <asm/mpspec_def.h>
7
8extern int apic_version[MAX_APICS];
9
10#ifdef CONFIG_X86_32
11#include <mach_mpspec.h>
12
13extern unsigned int def_to_bigsmp;
14extern u8 apicid_2_node[];
15extern int pic_mode;
16
17#ifdef CONFIG_X86_NUMAQ
18extern int mp_bus_id_to_node[MAX_MP_BUSSES];
19extern int mp_bus_id_to_local[MAX_MP_BUSSES];
20extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
21#endif
22
23#define MAX_APICID 256
24
25#else
26
27#define MAX_MP_BUSSES 256
28/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
29#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
30
31#endif
32
33extern void early_find_smp_config(void);
34extern void early_get_smp_config(void);
35
36#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
37extern int mp_bus_id_to_type[MAX_MP_BUSSES];
38#endif
39
40extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
41
42extern unsigned int boot_cpu_physical_apicid;
43extern unsigned int max_physical_apicid;
44extern int smp_found_config;
45extern int mpc_default_type;
46extern unsigned long mp_lapic_addr;
47
48extern void find_smp_config(void);
49extern void get_smp_config(void);
50#ifdef CONFIG_X86_MPPARSE
51extern void early_reserve_e820_mpc_new(void);
52#else
53static inline void early_reserve_e820_mpc_new(void) { }
54#endif
55
56void __cpuinit generic_processor_info(int apicid, int version);
57#ifdef CONFIG_ACPI
58extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
59extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
60 u32 gsi);
61extern void mp_config_acpi_legacy_irqs(void);
62extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
63#ifdef CONFIG_X86_IO_APIC
64extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
65 u32 gsi, int triggering, int polarity);
66#else
67static inline int
68mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
69 u32 gsi, int triggering, int polarity)
70{
71 return 0;
72}
73#endif
74#endif /* CONFIG_ACPI */
75
76#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
77
78struct physid_mask {
79 unsigned long mask[PHYSID_ARRAY_SIZE];
80};
81
82typedef struct physid_mask physid_mask_t;
83
84#define physid_set(physid, map) set_bit(physid, (map).mask)
85#define physid_clear(physid, map) clear_bit(physid, (map).mask)
86#define physid_isset(physid, map) test_bit(physid, (map).mask)
87#define physid_test_and_set(physid, map) \
88 test_and_set_bit(physid, (map).mask)
89
90#define physids_and(dst, src1, src2) \
91 bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
92
93#define physids_or(dst, src1, src2) \
94 bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
95
96#define physids_clear(map) \
97 bitmap_zero((map).mask, MAX_APICS)
98
99#define physids_complement(dst, src) \
100 bitmap_complement((dst).mask, (src).mask, MAX_APICS)
101
102#define physids_empty(map) \
103 bitmap_empty((map).mask, MAX_APICS)
104
105#define physids_equal(map1, map2) \
106 bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
107
108#define physids_weight(map) \
109 bitmap_weight((map).mask, MAX_APICS)
110
111#define physids_shift_right(d, s, n) \
112 bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
113
114#define physids_shift_left(d, s, n) \
115 bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
116
117#define physids_coerce(map) ((map).mask[0])
118
119#define physids_promote(physids) \
120 ({ \
121 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
122 __physid_mask.mask[0] = physids; \
123 __physid_mask; \
124 })
125
126/* Note: will create very large stack frames if physid_mask_t is big */
127#define physid_mask_of_physid(physid) \
128 ({ \
129 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
130 physid_set(physid, __physid_mask); \
131 __physid_mask; \
132 })
133
134static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
135{
136 physids_clear(*map);
137 physid_set(physid, *map);
138}
139
140#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
141#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
142
143extern physid_mask_t phys_cpu_present_map;
144
145#endif /* ASM_X86__MPSPEC_H */
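
The physid_* macros above are thin wrappers around the generic bitmap helpers, treating an APIC id as a bit index into mask[]. A self-contained userspace rendering of the same idea, with plain bit arithmetic standing in for the kernel's set_bit()/test_bit():

/* Userspace sketch of physid_mask: a fixed-size bitmap of APIC
 * ids.  MAX_APICS = 256 as on 32-bit above. */
#include <stdio.h>

#define MAX_APICS     256
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define PHYSID_WORDS  ((MAX_APICS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long mask[PHYSID_WORDS]; } physid_mask_t;

static void physid_set(int id, physid_mask_t *m)
{
	m->mask[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
}

static int physid_isset(int id, const physid_mask_t *m)
{
	return (m->mask[id / BITS_PER_LONG] >> (id % BITS_PER_LONG)) & 1;
}

int main(void)
{
	physid_mask_t present = { { 0 } };

	physid_set(0, &present);
	physid_set(65, &present);
	printf("%d %d %d\n", physid_isset(0, &present),
	       physid_isset(1, &present), physid_isset(65, &present));
	return 0;	/* prints: 1 0 1 */
}
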
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
deleted file mode 100644
index 79166b048012..000000000000
--- a/include/asm-x86/mpspec_def.h
+++ /dev/null
@@ -1,180 +0,0 @@
1#ifndef ASM_X86__MPSPEC_DEF_H
2#define ASM_X86__MPSPEC_DEF_H
3
4/*
5 * Structure definitions for SMP machines following the
6 * Intel MultiProcessor Specification 1.1 and 1.4.
7 */
8
9/*
10 * This tag identifies where the SMP configuration
11 * information is.
12 */
13
14#define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_')
15
16#ifdef CONFIG_X86_32
17# define MAX_MPC_ENTRY 1024
18# define MAX_APICS 256
19#else
20# if NR_CPUS <= 255
21# define MAX_APICS 255
22# else
23# define MAX_APICS 32768
24# endif
25#endif
26
27struct intel_mp_floating {
28 char mpf_signature[4]; /* "_MP_" */
29 unsigned int mpf_physptr; /* Configuration table address */
30 unsigned char mpf_length; /* Our length (paragraphs) */
31 unsigned char mpf_specification;/* Specification version */
32 unsigned char mpf_checksum; /* Checksum (makes sum 0) */
33 unsigned char mpf_feature1; /* Standard or configuration? */
34 unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
35 unsigned char mpf_feature3; /* Unused (0) */
36 unsigned char mpf_feature4; /* Unused (0) */
37 unsigned char mpf_feature5; /* Unused (0) */
38};
39
40#define MPC_SIGNATURE "PCMP"
41
42struct mp_config_table {
43 char mpc_signature[4];
44 unsigned short mpc_length; /* Size of table */
45 char mpc_spec; /* 0x01 */
46 char mpc_checksum;
47 char mpc_oem[8];
48 char mpc_productid[12];
49 unsigned int mpc_oemptr; /* 0 if not present */
50 unsigned short mpc_oemsize; /* 0 if not present */
51 unsigned short mpc_oemcount;
52 unsigned int mpc_lapic; /* APIC address */
53 unsigned int reserved;
54};
55
56/* Followed by entries */
57
58#define MP_PROCESSOR 0
59#define MP_BUS 1
60#define MP_IOAPIC 2
61#define MP_INTSRC 3
62#define MP_LINTSRC 4
63/* Used by IBM NUMA-Q to describe node locality */
64#define MP_TRANSLATION 192
65
66#define CPU_ENABLED 1 /* Processor is available */
67#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
68
69#define CPU_STEPPING_MASK 0x000F
70#define CPU_MODEL_MASK 0x00F0
71#define CPU_FAMILY_MASK 0x0F00
72
73struct mpc_config_processor {
74 unsigned char mpc_type;
75 unsigned char mpc_apicid; /* Local APIC number */
76 unsigned char mpc_apicver; /* Its version */
77 unsigned char mpc_cpuflag;
78 unsigned int mpc_cpufeature;
79 unsigned int mpc_featureflag; /* CPUID feature value */
80 unsigned int mpc_reserved[2];
81};
82
83struct mpc_config_bus {
84 unsigned char mpc_type;
85 unsigned char mpc_busid;
86 unsigned char mpc_bustype[6];
87};
88
89/* List of Bus Type string values, Intel MP Spec. */
90#define BUSTYPE_EISA "EISA"
91#define BUSTYPE_ISA "ISA"
92#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
93#define BUSTYPE_MCA "MCA"
94#define BUSTYPE_VL "VL" /* Local bus */
95#define BUSTYPE_PCI "PCI"
96#define BUSTYPE_PCMCIA "PCMCIA"
97#define BUSTYPE_CBUS "CBUS"
98#define BUSTYPE_CBUSII "CBUSII"
99#define BUSTYPE_FUTURE "FUTURE"
100#define BUSTYPE_MBI "MBI"
101#define BUSTYPE_MBII "MBII"
102#define BUSTYPE_MPI "MPI"
103#define BUSTYPE_MPSA "MPSA"
104#define BUSTYPE_NUBUS "NUBUS"
105#define BUSTYPE_TC "TC"
106#define BUSTYPE_VME "VME"
107#define BUSTYPE_XPRESS "XPRESS"
108
109#define MPC_APIC_USABLE 0x01
110
111struct mpc_config_ioapic {
112 unsigned char mpc_type;
113 unsigned char mpc_apicid;
114 unsigned char mpc_apicver;
115 unsigned char mpc_flags;
116 unsigned int mpc_apicaddr;
117};
118
119struct mpc_config_intsrc {
120 unsigned char mpc_type;
121 unsigned char mpc_irqtype;
122 unsigned short mpc_irqflag;
123 unsigned char mpc_srcbus;
124 unsigned char mpc_srcbusirq;
125 unsigned char mpc_dstapic;
126 unsigned char mpc_dstirq;
127};
128
129enum mp_irq_source_types {
130 mp_INT = 0,
131 mp_NMI = 1,
132 mp_SMI = 2,
133 mp_ExtINT = 3
134};
135
136#define MP_IRQDIR_DEFAULT 0
137#define MP_IRQDIR_HIGH 1
138#define MP_IRQDIR_LOW 3
139
140#define MP_APIC_ALL 0xFF
141
142struct mpc_config_lintsrc {
143 unsigned char mpc_type;
144 unsigned char mpc_irqtype;
145 unsigned short mpc_irqflag;
146 unsigned char mpc_srcbusid;
147 unsigned char mpc_srcbusirq;
148 unsigned char mpc_destapic;
149 unsigned char mpc_destapiclint;
150};
151
152#define MPC_OEM_SIGNATURE "_OEM"
153
154struct mp_config_oemtable {
155 char oem_signature[4];
156 unsigned short oem_length; /* Size of table */
157 char oem_rev; /* 0x01 */
158 char oem_checksum;
159 char mpc_oem[8];
160};
161
162/*
163 * Default configurations
164 *
165 * 1 2 CPU ISA 82489DX
166 * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
167 * 3 2 CPU EISA 82489DX
168 * 4 2 CPU MCA 82489DX
169 * 5 2 CPU ISA+PCI
170 * 6 2 CPU EISA+PCI
171 * 7 2 CPU MCA+PCI
172 */
173
174enum mp_bustype {
175 MP_BUS_ISA = 1,
176 MP_BUS_EISA,
177 MP_BUS_PCI,
178 MP_BUS_MCA,
179};
180#endif /* ASM_X86__MPSPEC_DEF_H */
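
A floating pointer candidate is accepted only if the 4-byte signature matches SMP_MAGIC_IDENT and every byte of the mpf_length*16-byte structure sums to zero mod 256; mpf_checksum is the byte chosen to make that true. Below is a sketch of that validation under those assumptions; the real code additionally scans low physical memory (e.g. 0xF0000-0xFFFFF) in 16-byte steps, which is omitted here, and the candidate buffer is fabricated for the demo.

/* Sketch of MP floating pointer validation.  Offsets follow
 * struct intel_mp_floating above: mpf_length at byte 8,
 * mpf_checksum at byte 10. */
#include <string.h>

#define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_')

static int mpf_sum(const unsigned char *p, int len)
{
	int sum = 0;

	while (len--)
		sum += *p++;
	return sum & 0xff;		/* 0 means the checksum holds */
}

static int looks_like_mpf(const unsigned char *p)
{
	unsigned int sig;

	memcpy(&sig, p, 4);		/* little-endian: reads "_MP_" */
	return sig == SMP_MAGIC_IDENT &&
	       p[8] == 1 &&		/* mpf_length: 1 paragraph */
	       mpf_sum(p, 16) == 0;
}

int main(void)
{
	unsigned char buf[16] = { '_', 'M', 'P', '_' };

	buf[8]  = 1;				/* mpf_length */
	buf[9]  = 4;				/* mpf_specification: rev 1.4 */
	buf[10] = (unsigned char)-mpf_sum(buf, 16);	/* fix up checksum */
	return !looks_like_mpf(buf);	/* exits 0: candidate validates */
}
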
diff --git a/include/asm-x86/msgbuf.h b/include/asm-x86/msgbuf.h
deleted file mode 100644
index 1b538c907a3d..000000000000
--- a/include/asm-x86/msgbuf.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef ASM_X86__MSGBUF_H
2#define ASM_X86__MSGBUF_H
3
4/*
5 * The msqid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space on i386 is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 *
13 * Pad space on x86-64 is left for:
14 * - 2 miscellaneous 64-bit values
15 */
16struct msqid64_ds {
17 struct ipc64_perm msg_perm;
18 __kernel_time_t msg_stime; /* last msgsnd time */
19#ifdef __i386__
20 unsigned long __unused1;
21#endif
22 __kernel_time_t msg_rtime; /* last msgrcv time */
23#ifdef __i386__
24 unsigned long __unused2;
25#endif
26 __kernel_time_t msg_ctime; /* last change time */
27#ifdef __i386__
28 unsigned long __unused3;
29#endif
30 unsigned long msg_cbytes; /* current number of bytes on queue */
31 unsigned long msg_qnum; /* number of messages in queue */
32 unsigned long msg_qbytes; /* max number of bytes on queue */
33 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
34 __kernel_pid_t msg_lrpid; /* last receive pid */
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39#endif /* ASM_X86__MSGBUF_H */
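
The padding comment is easy to verify: on i386 each 32-bit __kernel_time_t is followed by a 32-bit __unused slot, so the pair occupies the same eight bytes a future 64-bit time_t would. A quick userspace check of that layout claim, with the field types spelled out since __kernel_time_t is just a long here:

/* Layout check for the i386 variant: time_t + pad = 8 bytes, so
 * the struct can grow a 64-bit time_t without moving later fields. */
#include <stdio.h>
#include <stddef.h>

struct msq_head32 {		/* i386 time fields, types spelled out */
	unsigned int msg_stime;
	unsigned int __unused1;
	unsigned int msg_rtime;
	unsigned int __unused2;
};

int main(void)
{
	printf("msg_rtime at %zu, total %zu\n",
	       offsetof(struct msq_head32, msg_rtime),
	       sizeof(struct msq_head32));	/* msg_rtime at 8, total 16 */
	return 0;
}
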
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
deleted file mode 100644
index ed9190246876..000000000000
--- a/include/asm-x86/msidef.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef ASM_X86__MSIDEF_H
2#define ASM_X86__MSIDEF_H
3
4/*
5 * Constants for Intel APIC based MSI messages.
6 */
7
8/*
9 * Shifts for MSI data
10 */
11
12#define MSI_DATA_VECTOR_SHIFT 0
13#define MSI_DATA_VECTOR_MASK 0x000000ff
14#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
15 MSI_DATA_VECTOR_MASK)
16
17#define MSI_DATA_DELIVERY_MODE_SHIFT 8
18#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
19#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
20
21#define MSI_DATA_LEVEL_SHIFT 14
22#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
23#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
24
25#define MSI_DATA_TRIGGER_SHIFT 15
26#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
27#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
28
29/*
30 * Shift/mask fields for msi address
31 */
32
33#define MSI_ADDR_BASE_HI 0
34#define MSI_ADDR_BASE_LO 0xfee00000
35
36#define MSI_ADDR_DEST_MODE_SHIFT 2
37#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
38#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
39
40#define MSI_ADDR_REDIRECTION_SHIFT 3
41#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
42 /* dedicated cpu */
43#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
44 /* lowest priority */
45
46#define MSI_ADDR_DEST_ID_SHIFT 12
47#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
48#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
49 MSI_ADDR_DEST_ID_MASK)
50
51#define MSI_ADDR_IR_EXT_INT (1 << 4)
52#define MSI_ADDR_IR_SHV (1 << 3)
53#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13)
54#define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5)
55#endif /* ASM_X86__MSIDEF_H */
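
Putting the shifts and masks together: an MSI message is an address/data pair, where the address selects the destination APIC and the data selects vector, delivery mode, and trigger. A sketch composing one such pair from the definitions above; it only prints the values, whereas the kernel writes them into a device's MSI capability registers.

/* Compose an MSI address/data pair: fixed delivery, edge trigger,
 * vector 0x41, physical destination APIC id 3. */
#include <stdio.h>

#define MSI_ADDR_BASE_LO            0xfee00000
#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << 2)
#define MSI_ADDR_DEST_ID(dest)      (((dest) << 12) & 0x00ffff0)
#define MSI_DATA_DELIVERY_FIXED     (0 << 8)
#define MSI_DATA_TRIGGER_EDGE       (0 << 15)
#define MSI_DATA_VECTOR(v)          ((v) & 0x000000ff)

int main(void)
{
	unsigned int addr = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_MODE_PHYSICAL |
			    MSI_ADDR_DEST_ID(3);
	unsigned int data = MSI_DATA_DELIVERY_FIXED | MSI_DATA_TRIGGER_EDGE |
			    MSI_DATA_VECTOR(0x41);

	printf("address=0x%08x data=0x%04x\n", addr, data);
	return 0;	/* address=0xfee03000 data=0x0041 */
}
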
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
deleted file mode 100644
index dabd10f0bbee..000000000000
--- a/include/asm-x86/msr-index.h
+++ /dev/null
@@ -1,332 +0,0 @@
1#ifndef ASM_X86__MSR_INDEX_H
2#define ASM_X86__MSR_INDEX_H
3
4/* CPU model specific register (MSR) numbers */
5
6/* x86-64 specific MSRs */
7#define MSR_EFER 0xc0000080 /* extended feature register */
8#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
9#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
10#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
11#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
12#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
13#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
14#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
15
16/* EFER bits: */
17#define _EFER_SCE 0 /* SYSCALL/SYSRET */
18#define _EFER_LME 8 /* Long mode enable */
19#define _EFER_LMA 10 /* Long mode active (read-only) */
20#define _EFER_NX 11 /* No execute enable */
21
22#define EFER_SCE (1<<_EFER_SCE)
23#define EFER_LME (1<<_EFER_LME)
24#define EFER_LMA (1<<_EFER_LMA)
25#define EFER_NX (1<<_EFER_NX)
26
27/* Intel MSRs. Some also available on other CPUs */
28#define MSR_IA32_PERFCTR0 0x000000c1
29#define MSR_IA32_PERFCTR1 0x000000c2
30#define MSR_FSB_FREQ 0x000000cd
31
32#define MSR_MTRRcap 0x000000fe
33#define MSR_IA32_BBL_CR_CTL 0x00000119
34
35#define MSR_IA32_SYSENTER_CS 0x00000174
36#define MSR_IA32_SYSENTER_ESP 0x00000175
37#define MSR_IA32_SYSENTER_EIP 0x00000176
38
39#define MSR_IA32_MCG_CAP 0x00000179
40#define MSR_IA32_MCG_STATUS 0x0000017a
41#define MSR_IA32_MCG_CTL 0x0000017b
42
43#define MSR_IA32_PEBS_ENABLE 0x000003f1
44#define MSR_IA32_DS_AREA 0x00000600
45#define MSR_IA32_PERF_CAPABILITIES 0x00000345
46
47#define MSR_MTRRfix64K_00000 0x00000250
48#define MSR_MTRRfix16K_80000 0x00000258
49#define MSR_MTRRfix16K_A0000 0x00000259
50#define MSR_MTRRfix4K_C0000 0x00000268
51#define MSR_MTRRfix4K_C8000 0x00000269
52#define MSR_MTRRfix4K_D0000 0x0000026a
53#define MSR_MTRRfix4K_D8000 0x0000026b
54#define MSR_MTRRfix4K_E0000 0x0000026c
55#define MSR_MTRRfix4K_E8000 0x0000026d
56#define MSR_MTRRfix4K_F0000 0x0000026e
57#define MSR_MTRRfix4K_F8000 0x0000026f
58#define MSR_MTRRdefType 0x000002ff
59
60#define MSR_IA32_CR_PAT 0x00000277
61
62#define MSR_IA32_DEBUGCTLMSR 0x000001d9
63#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
64#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
65#define MSR_IA32_LASTINTFROMIP 0x000001dd
66#define MSR_IA32_LASTINTTOIP 0x000001de
67
68/* DEBUGCTLMSR bits (others vary by model): */
69#define _DEBUGCTLMSR_LBR 0 /* last branch recording */
70#define _DEBUGCTLMSR_BTF 1 /* single-step on branches */
71
72#define DEBUGCTLMSR_LBR (1UL << _DEBUGCTLMSR_LBR)
73#define DEBUGCTLMSR_BTF (1UL << _DEBUGCTLMSR_BTF)
74
75#define MSR_IA32_MC0_CTL 0x00000400
76#define MSR_IA32_MC0_STATUS 0x00000401
77#define MSR_IA32_MC0_ADDR 0x00000402
78#define MSR_IA32_MC0_MISC 0x00000403
79
80#define MSR_P6_PERFCTR0 0x000000c1
81#define MSR_P6_PERFCTR1 0x000000c2
82#define MSR_P6_EVNTSEL0 0x00000186
83#define MSR_P6_EVNTSEL1 0x00000187
84
85/* AMD64 MSRs. Not complete. See the architecture manual for a more
86 complete list. */
87
88#define MSR_AMD64_NB_CFG 0xc001001f
89#define MSR_AMD64_IBSFETCHCTL 0xc0011030
90#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
91#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
92#define MSR_AMD64_IBSOPCTL 0xc0011033
93#define MSR_AMD64_IBSOPRIP 0xc0011034
94#define MSR_AMD64_IBSOPDATA 0xc0011035
95#define MSR_AMD64_IBSOPDATA2 0xc0011036
96#define MSR_AMD64_IBSOPDATA3 0xc0011037
97#define MSR_AMD64_IBSDCLINAD 0xc0011038
98#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
99#define MSR_AMD64_IBSCTL 0xc001103a
100
101/* Fam 10h MSRs */
102#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
103#define FAM10H_MMIO_CONF_ENABLE (1<<0)
104#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
105#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
106#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
107#define FAM10H_MMIO_CONF_BASE_SHIFT 20
108
109/* K8 MSRs */
110#define MSR_K8_TOP_MEM1 0xc001001a
111#define MSR_K8_TOP_MEM2 0xc001001d
112#define MSR_K8_SYSCFG 0xc0010010
113#define MSR_K8_HWCR 0xc0010015
114#define MSR_K8_INT_PENDING_MSG 0xc0010055
115/* C1E active bits in int pending message */
116#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
117#define MSR_K8_TSEG_ADDR 0xc0010112
118#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
119#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
120#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
121
122/* K7 MSRs */
123#define MSR_K7_EVNTSEL0 0xc0010000
124#define MSR_K7_PERFCTR0 0xc0010004
125#define MSR_K7_EVNTSEL1 0xc0010001
126#define MSR_K7_PERFCTR1 0xc0010005
127#define MSR_K7_EVNTSEL2 0xc0010002
128#define MSR_K7_PERFCTR2 0xc0010006
129#define MSR_K7_EVNTSEL3 0xc0010003
130#define MSR_K7_PERFCTR3 0xc0010007
131#define MSR_K7_CLK_CTL 0xc001001b
132#define MSR_K7_HWCR 0xc0010015
133#define MSR_K7_FID_VID_CTL 0xc0010041
134#define MSR_K7_FID_VID_STATUS 0xc0010042
135
136/* K6 MSRs */
137#define MSR_K6_EFER 0xc0000080
138#define MSR_K6_STAR 0xc0000081
139#define MSR_K6_WHCR 0xc0000082
140#define MSR_K6_UWCCR 0xc0000085
141#define MSR_K6_EPMR 0xc0000086
142#define MSR_K6_PSOR 0xc0000087
143#define MSR_K6_PFIR 0xc0000088
144
145/* Centaur-Hauls/IDT defined MSRs. */
146#define MSR_IDT_FCR1 0x00000107
147#define MSR_IDT_FCR2 0x00000108
148#define MSR_IDT_FCR3 0x00000109
149#define MSR_IDT_FCR4 0x0000010a
150
151#define MSR_IDT_MCR0 0x00000110
152#define MSR_IDT_MCR1 0x00000111
153#define MSR_IDT_MCR2 0x00000112
154#define MSR_IDT_MCR3 0x00000113
155#define MSR_IDT_MCR4 0x00000114
156#define MSR_IDT_MCR5 0x00000115
157#define MSR_IDT_MCR6 0x00000116
158#define MSR_IDT_MCR7 0x00000117
159#define MSR_IDT_MCR_CTRL 0x00000120
160
161/* VIA Cyrix defined MSRs*/
162#define MSR_VIA_FCR 0x00001107
163#define MSR_VIA_LONGHAUL 0x0000110a
164#define MSR_VIA_RNG 0x0000110b
165#define MSR_VIA_BCR2 0x00001147
166
167/* Transmeta defined MSRs */
168#define MSR_TMTA_LONGRUN_CTRL 0x80868010
169#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
170#define MSR_TMTA_LRTI_READOUT 0x80868018
171#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
172
173/* Intel defined MSRs. */
174#define MSR_IA32_P5_MC_ADDR 0x00000000
175#define MSR_IA32_P5_MC_TYPE 0x00000001
176#define MSR_IA32_TSC 0x00000010
177#define MSR_IA32_PLATFORM_ID 0x00000017
178#define MSR_IA32_EBL_CR_POWERON 0x0000002a
179#define MSR_IA32_FEATURE_CONTROL 0x0000003a
180
181#define FEATURE_CONTROL_LOCKED (1<<0)
182#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
183
184#define MSR_IA32_APICBASE 0x0000001b
185#define MSR_IA32_APICBASE_BSP (1<<8)
186#define MSR_IA32_APICBASE_ENABLE (1<<11)
187#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
188
189#define MSR_IA32_UCODE_WRITE 0x00000079
190#define MSR_IA32_UCODE_REV 0x0000008b
191
192#define MSR_IA32_PERF_STATUS 0x00000198
193#define MSR_IA32_PERF_CTL 0x00000199
194
195#define MSR_IA32_MPERF 0x000000e7
196#define MSR_IA32_APERF 0x000000e8
197
198#define MSR_IA32_THERM_CONTROL 0x0000019a
199#define MSR_IA32_THERM_INTERRUPT 0x0000019b
200#define MSR_IA32_THERM_STATUS 0x0000019c
201#define MSR_IA32_MISC_ENABLE 0x000001a0
202
203/* Intel Model 6 */
204#define MSR_P6_EVNTSEL0 0x00000186
205#define MSR_P6_EVNTSEL1 0x00000187
206
207/* P4/Xeon+ specific */
208#define MSR_IA32_MCG_EAX 0x00000180
209#define MSR_IA32_MCG_EBX 0x00000181
210#define MSR_IA32_MCG_ECX 0x00000182
211#define MSR_IA32_MCG_EDX 0x00000183
212#define MSR_IA32_MCG_ESI 0x00000184
213#define MSR_IA32_MCG_EDI 0x00000185
214#define MSR_IA32_MCG_EBP 0x00000186
215#define MSR_IA32_MCG_ESP 0x00000187
216#define MSR_IA32_MCG_EFLAGS 0x00000188
217#define MSR_IA32_MCG_EIP 0x00000189
218#define MSR_IA32_MCG_RESERVED 0x0000018a
219
220/* Pentium IV performance counter MSRs */
221#define MSR_P4_BPU_PERFCTR0 0x00000300
222#define MSR_P4_BPU_PERFCTR1 0x00000301
223#define MSR_P4_BPU_PERFCTR2 0x00000302
224#define MSR_P4_BPU_PERFCTR3 0x00000303
225#define MSR_P4_MS_PERFCTR0 0x00000304
226#define MSR_P4_MS_PERFCTR1 0x00000305
227#define MSR_P4_MS_PERFCTR2 0x00000306
228#define MSR_P4_MS_PERFCTR3 0x00000307
229#define MSR_P4_FLAME_PERFCTR0 0x00000308
230#define MSR_P4_FLAME_PERFCTR1 0x00000309
231#define MSR_P4_FLAME_PERFCTR2 0x0000030a
232#define MSR_P4_FLAME_PERFCTR3 0x0000030b
233#define MSR_P4_IQ_PERFCTR0 0x0000030c
234#define MSR_P4_IQ_PERFCTR1 0x0000030d
235#define MSR_P4_IQ_PERFCTR2 0x0000030e
236#define MSR_P4_IQ_PERFCTR3 0x0000030f
237#define MSR_P4_IQ_PERFCTR4 0x00000310
238#define MSR_P4_IQ_PERFCTR5 0x00000311
239#define MSR_P4_BPU_CCCR0 0x00000360
240#define MSR_P4_BPU_CCCR1 0x00000361
241#define MSR_P4_BPU_CCCR2 0x00000362
242#define MSR_P4_BPU_CCCR3 0x00000363
243#define MSR_P4_MS_CCCR0 0x00000364
244#define MSR_P4_MS_CCCR1 0x00000365
245#define MSR_P4_MS_CCCR2 0x00000366
246#define MSR_P4_MS_CCCR3 0x00000367
247#define MSR_P4_FLAME_CCCR0 0x00000368
248#define MSR_P4_FLAME_CCCR1 0x00000369
249#define MSR_P4_FLAME_CCCR2 0x0000036a
250#define MSR_P4_FLAME_CCCR3 0x0000036b
251#define MSR_P4_IQ_CCCR0 0x0000036c
252#define MSR_P4_IQ_CCCR1 0x0000036d
253#define MSR_P4_IQ_CCCR2 0x0000036e
254#define MSR_P4_IQ_CCCR3 0x0000036f
255#define MSR_P4_IQ_CCCR4 0x00000370
256#define MSR_P4_IQ_CCCR5 0x00000371
257#define MSR_P4_ALF_ESCR0 0x000003ca
258#define MSR_P4_ALF_ESCR1 0x000003cb
259#define MSR_P4_BPU_ESCR0 0x000003b2
260#define MSR_P4_BPU_ESCR1 0x000003b3
261#define MSR_P4_BSU_ESCR0 0x000003a0
262#define MSR_P4_BSU_ESCR1 0x000003a1
263#define MSR_P4_CRU_ESCR0 0x000003b8
264#define MSR_P4_CRU_ESCR1 0x000003b9
265#define MSR_P4_CRU_ESCR2 0x000003cc
266#define MSR_P4_CRU_ESCR3 0x000003cd
267#define MSR_P4_CRU_ESCR4 0x000003e0
268#define MSR_P4_CRU_ESCR5 0x000003e1
269#define MSR_P4_DAC_ESCR0 0x000003a8
270#define MSR_P4_DAC_ESCR1 0x000003a9
271#define MSR_P4_FIRM_ESCR0 0x000003a4
272#define MSR_P4_FIRM_ESCR1 0x000003a5
273#define MSR_P4_FLAME_ESCR0 0x000003a6
274#define MSR_P4_FLAME_ESCR1 0x000003a7
275#define MSR_P4_FSB_ESCR0 0x000003a2
276#define MSR_P4_FSB_ESCR1 0x000003a3
277#define MSR_P4_IQ_ESCR0 0x000003ba
278#define MSR_P4_IQ_ESCR1 0x000003bb
279#define MSR_P4_IS_ESCR0 0x000003b4
280#define MSR_P4_IS_ESCR1 0x000003b5
281#define MSR_P4_ITLB_ESCR0 0x000003b6
282#define MSR_P4_ITLB_ESCR1 0x000003b7
283#define MSR_P4_IX_ESCR0 0x000003c8
284#define MSR_P4_IX_ESCR1 0x000003c9
285#define MSR_P4_MOB_ESCR0 0x000003aa
286#define MSR_P4_MOB_ESCR1 0x000003ab
287#define MSR_P4_MS_ESCR0 0x000003c0
288#define MSR_P4_MS_ESCR1 0x000003c1
289#define MSR_P4_PMH_ESCR0 0x000003ac
290#define MSR_P4_PMH_ESCR1 0x000003ad
291#define MSR_P4_RAT_ESCR0 0x000003bc
292#define MSR_P4_RAT_ESCR1 0x000003bd
293#define MSR_P4_SAAT_ESCR0 0x000003ae
294#define MSR_P4_SAAT_ESCR1 0x000003af
295#define MSR_P4_SSU_ESCR0 0x000003be
296#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
297
298#define MSR_P4_TBPU_ESCR0 0x000003c2
299#define MSR_P4_TBPU_ESCR1 0x000003c3
300#define MSR_P4_TC_ESCR0 0x000003c4
301#define MSR_P4_TC_ESCR1 0x000003c5
302#define MSR_P4_U2L_ESCR0 0x000003b0
303#define MSR_P4_U2L_ESCR1 0x000003b1
304
305/* Intel Core-based CPU performance counters */
306#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
307#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
308#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
309#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
310#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
311#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
312#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
313
314/* Geode defined MSRs */
315#define MSR_GEODE_BUSCONT_CONF0 0x00001900
316
317/* Intel VT MSRs */
318#define MSR_IA32_VMX_BASIC 0x00000480
319#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
320#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
321#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
322#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
323#define MSR_IA32_VMX_MISC 0x00000485
324#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
325#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
326#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
327#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
328#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
329#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
330#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
331
332#endif /* ASM_X86__MSR_INDEX_H */
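
These register numbers are usable from userspace too: with the msr driver loaded, /dev/cpu/N/msr maps rdmsr/wrmsr onto pread/pwrite with the file offset equal to the MSR number. A sketch reading MSR_EFER on CPU 0 (needs root, and "modprobe msr" if the device node is absent):

/* Read MSR_EFER (0xc0000080) on CPU 0 via the msr driver. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_EFER 0xc0000080

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_EFER) != sizeof(val)) {
		perror("rdmsr");
		return 1;
	}
	printf("EFER = 0x%llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}
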
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
deleted file mode 100644
index 530af1f6389e..000000000000
--- a/include/asm-x86/msr.h
+++ /dev/null
@@ -1,247 +0,0 @@
1#ifndef ASM_X86__MSR_H
2#define ASM_X86__MSR_H
3
4#include <asm/msr-index.h>
5
6#ifndef __ASSEMBLY__
7# include <linux/types.h>
8#endif
9
10#ifdef __KERNEL__
11#ifndef __ASSEMBLY__
12
13#include <asm/asm.h>
14#include <asm/errno.h>
15
16static inline unsigned long long native_read_tscp(unsigned int *aux)
17{
18 unsigned long low, high;
19 asm volatile(".byte 0x0f,0x01,0xf9"
20 : "=a" (low), "=d" (high), "=c" (*aux));
21 return low | ((u64)high << 32);
22}
23
24/*
25 * i386 calling convention returns 64-bit value in edx:eax, while
26 * x86_64 returns at rax. Also, the "A" constraint does not really
27 * mean rdx:rax in x86_64, so we need specialized behaviour for each
28 * architecture
29 */
30#ifdef CONFIG_X86_64
31#define DECLARE_ARGS(val, low, high) unsigned low, high
32#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32))
33#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high)
34#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
35#else
36#define DECLARE_ARGS(val, low, high) unsigned long long val
37#define EAX_EDX_VAL(val, low, high) (val)
38#define EAX_EDX_ARGS(val, low, high) "A" (val)
39#define EAX_EDX_RET(val, low, high) "=A" (val)
40#endif
41
42static inline unsigned long long native_read_msr(unsigned int msr)
43{
44 DECLARE_ARGS(val, low, high);
45
46 asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
47 return EAX_EDX_VAL(val, low, high);
48}
49
50static inline unsigned long long native_read_msr_safe(unsigned int msr,
51 int *err)
52{
53 DECLARE_ARGS(val, low, high);
54
55 asm volatile("2: rdmsr ; xor %[err],%[err]\n"
56 "1:\n\t"
57 ".section .fixup,\"ax\"\n\t"
58 "3: mov %[fault],%[err] ; jmp 1b\n\t"
59 ".previous\n\t"
60 _ASM_EXTABLE(2b, 3b)
61 : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
62 : "c" (msr), [fault] "i" (-EFAULT));
63 return EAX_EDX_VAL(val, low, high);
64}
65
66static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
67 int *err)
68{
69 DECLARE_ARGS(val, low, high);
70
71 asm volatile("2: rdmsr ; xor %0,%0\n"
72 "1:\n\t"
73 ".section .fixup,\"ax\"\n\t"
74 "3: mov %3,%0 ; jmp 1b\n\t"
75 ".previous\n\t"
76 _ASM_EXTABLE(2b, 3b)
77 : "=r" (*err), EAX_EDX_RET(val, low, high)
78 : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
79 return EAX_EDX_VAL(val, low, high);
80}
81
82static inline void native_write_msr(unsigned int msr,
83 unsigned low, unsigned high)
84{
85 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
86}
87
88static inline int native_write_msr_safe(unsigned int msr,
89 unsigned low, unsigned high)
90{
91 int err;
92 asm volatile("2: wrmsr ; xor %[err],%[err]\n"
93 "1:\n\t"
94 ".section .fixup,\"ax\"\n\t"
95 "3: mov %[fault],%[err] ; jmp 1b\n\t"
96 ".previous\n\t"
97 _ASM_EXTABLE(2b, 3b)
98 : [err] "=a" (err)
99 : "c" (msr), "0" (low), "d" (high),
100 [fault] "i" (-EFAULT)
101 : "memory");
102 return err;
103}
104
105extern unsigned long long native_read_tsc(void);
106
107static __always_inline unsigned long long __native_read_tsc(void)
108{
109 DECLARE_ARGS(val, low, high);
110
111 rdtsc_barrier();
112 asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
113 rdtsc_barrier();
114
115 return EAX_EDX_VAL(val, low, high);
116}
117
118static inline unsigned long long native_read_pmc(int counter)
119{
120 DECLARE_ARGS(val, low, high);
121
122 asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
123 return EAX_EDX_VAL(val, low, high);
124}
125
126#ifdef CONFIG_PARAVIRT
127#include <asm/paravirt.h>
128#else
129#include <linux/errno.h>
130/*
131 * Access to model-specific registers (available on 586 and better only)
132 * Note: the rd* operations modify the parameters directly (without using
133 * pointer indirection); this allows gcc to optimize better
134 */
135
136#define rdmsr(msr, val1, val2) \
137do { \
138 u64 __val = native_read_msr((msr)); \
139 (val1) = (u32)__val; \
140 (val2) = (u32)(__val >> 32); \
141} while (0)
142
143static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
144{
145 native_write_msr(msr, low, high);
146}
147
148#define rdmsrl(msr, val) \
149 ((val) = native_read_msr((msr)))
150
151#define wrmsrl(msr, val) \
152 native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
153
154/* wrmsr with exception handling */
155static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
156{
157 return native_write_msr_safe(msr, low, high);
158}
159
160/* rdmsr with exception handling */
161#define rdmsr_safe(msr, p1, p2) \
162({ \
163 int __err; \
164 u64 __val = native_read_msr_safe((msr), &__err); \
165 (*p1) = (u32)__val; \
166 (*p2) = (u32)(__val >> 32); \
167 __err; \
168})
169
170static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
171{
172 int err;
173
174 *p = native_read_msr_safe(msr, &err);
175 return err;
176}
177static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
178{
179 int err;
180
181 *p = native_read_msr_amd_safe(msr, &err);
182 return err;
183}
184
185#define rdtscl(low) \
186 ((low) = (u32)native_read_tsc())
187
188#define rdtscll(val) \
189 ((val) = native_read_tsc())
190
191#define rdpmc(counter, low, high) \
192do { \
193 u64 _l = native_read_pmc((counter)); \
194 (low) = (u32)_l; \
195 (high) = (u32)(_l >> 32); \
196} while (0)
197
198#define rdtscp(low, high, aux) \
199do { \
200 unsigned long long _val = native_read_tscp(&(aux)); \
201 (low) = (u32)_val; \
202 (high) = (u32)(_val >> 32); \
203} while (0)
204
205#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
206
207#endif /* !CONFIG_PARAVIRT */
208
209
210#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
211 (u32)((val) >> 32))
212
213#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2))
214
215#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
216
217#ifdef CONFIG_SMP
218int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
219int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
220int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
221int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
222#else /* CONFIG_SMP */
223static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
224{
225 rdmsr(msr_no, *l, *h);
226 return 0;
227}
228static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
229{
230 wrmsr(msr_no, l, h);
231 return 0;
232}
233static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
234 u32 *l, u32 *h)
235{
236 return rdmsr_safe(msr_no, l, h);
237}
238static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
239{
240 return wrmsr_safe(msr_no, l, h);
241}
242#endif /* CONFIG_SMP */
243#endif /* __ASSEMBLY__ */
244#endif /* __KERNEL__ */
245
246
247#endif /* ASM_X86__MSR_H */
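
The DECLARE_ARGS/EAX_EDX_* machinery exists because rdmsr, rdtsc and rdpmc all return their result in the edx:eax register pair: on 32-bit the "A" constraint captures that pair directly, while on 64-bit the halves are read separately and recombined as low | (high << 32). A userspace sketch of the same pattern around rdtsc (x86 only, GCC inline asm, and without the kernel's rdtsc_barrier() fences):

/* Userspace analogue of __native_read_tsc(): read the edx:eax
 * halves and recombine.  No serializing barriers, so back-to-back
 * reads may be reordered by the CPU. */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t read_tsc(void)
{
	uint32_t low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));
	return (uint64_t)low | ((uint64_t)high << 32);
}

int main(void)
{
	uint64_t a = read_tsc();
	uint64_t b = read_tsc();

	printf("tsc=%llu delta=%llu\n",
	       (unsigned long long)a, (unsigned long long)(b - a));
	return 0;
}
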
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
deleted file mode 100644
index 23a7f83da953..000000000000
--- a/include/asm-x86/mtrr.h
+++ /dev/null
@@ -1,173 +0,0 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef ASM_X86__MTRR_H
24#define ASM_X86__MTRR_H
25
26#include <linux/ioctl.h>
27#include <linux/errno.h>
28
29#define MTRR_IOCTL_BASE 'M'
30
31struct mtrr_sentry {
32 unsigned long base; /* Base address */
33 unsigned int size; /* Size of region */
34 unsigned int type; /* Type of region */
35};
36
37/* Warning: this structure has a different field order on x86-64
38 than on i386. The 32-bit emulation code takes care of that.
39 But you need to use this layout for 64-bit, otherwise your X
40 server will break. */
41
42#ifdef __i386__
43struct mtrr_gentry {
44 unsigned int regnum; /* Register number */
45 unsigned long base; /* Base address */
46 unsigned int size; /* Size of region */
47 unsigned int type; /* Type of region */
48};
49
50#else /* __i386__ */
51
52struct mtrr_gentry {
53 unsigned long base; /* Base address */
54 unsigned int size; /* Size of region */
55 unsigned int regnum; /* Register number */
56 unsigned int type; /* Type of region */
57};
58#endif /* !__i386__ */
59
60/* These are the various ioctls */
61#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
62#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
63#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
64#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
65#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
66#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
67#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
68#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
69#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
70#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
71
72/* These are the region types */
73#define MTRR_TYPE_UNCACHABLE 0
74#define MTRR_TYPE_WRCOMB 1
75/*#define MTRR_TYPE_ 2*/
76/*#define MTRR_TYPE_ 3*/
77#define MTRR_TYPE_WRTHROUGH 4
78#define MTRR_TYPE_WRPROT 5
79#define MTRR_TYPE_WRBACK 6
80#define MTRR_NUM_TYPES 7
81
82#ifdef __KERNEL__
83
84/* The following functions are for use by other drivers */
85# ifdef CONFIG_MTRR
86extern u8 mtrr_type_lookup(u64 addr, u64 end);
87extern void mtrr_save_fixed_ranges(void *);
88extern void mtrr_save_state(void);
89extern int mtrr_add(unsigned long base, unsigned long size,
90 unsigned int type, bool increment);
91extern int mtrr_add_page(unsigned long base, unsigned long size,
92 unsigned int type, bool increment);
93extern int mtrr_del(int reg, unsigned long base, unsigned long size);
94extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
95extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
96extern void mtrr_ap_init(void);
97extern void mtrr_bp_init(void);
98extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
99extern int amd_special_default_mtrr(void);
100# else
101static inline u8 mtrr_type_lookup(u64 addr, u64 end)
102{
103 /*
104 * Return no-MTRRs:
105 */
106 return 0xff;
107}
108#define mtrr_save_fixed_ranges(arg) do {} while (0)
109#define mtrr_save_state() do {} while (0)
110static inline int mtrr_add(unsigned long base, unsigned long size,
111 unsigned int type, bool increment)
112{
113 return -ENODEV;
114}
115static inline int mtrr_add_page(unsigned long base, unsigned long size,
116 unsigned int type, bool increment)
117{
118 return -ENODEV;
119}
120static inline int mtrr_del(int reg, unsigned long base, unsigned long size)
121{
122 return -ENODEV;
123}
124static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size)
125{
126 return -ENODEV;
127}
128static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
129{
130 return 0;
131}
132static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
133{
134}
135
136#define mtrr_ap_init() do {} while (0)
137#define mtrr_bp_init() do {} while (0)
138# endif
139
140#ifdef CONFIG_COMPAT
141#include <linux/compat.h>
142
143struct mtrr_sentry32 {
144 compat_ulong_t base; /* Base address */
145 compat_uint_t size; /* Size of region */
146 compat_uint_t type; /* Type of region */
147};
148
149struct mtrr_gentry32 {
150 compat_ulong_t regnum; /* Register number */
151 compat_uint_t base; /* Base address */
152 compat_uint_t size; /* Size of region */
153 compat_uint_t type; /* Type of region */
154};
155
156#define MTRR_IOCTL_BASE 'M'
157
158#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
159#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
160#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
161#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
162#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
163#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
164#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
165#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
166#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
167#define MTRRIOC32_KILL_PAGE_ENTRY \
168 _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
169#endif /* CONFIG_COMPAT */
170
171#endif /* __KERNEL__ */
172
173#endif /* ASM_X86__MTRR_H */
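
From userspace these ioctls are issued against /proc/mtrr (which also supports a plain-text read/write interface). Below is a sketch that walks the first few registers with MTRRIOC_GET_ENTRY; an entry whose size is 0 is unused. It assumes root privileges and a kernel built with CONFIG_MTRR.

/* List the first MTRRs via MTRRIOC_GET_ENTRY on /proc/mtrr. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mtrr.h>		/* the definitions shown above */

int main(void)
{
	struct mtrr_gentry g;
	int fd = open("/proc/mtrr", O_RDONLY);

	if (fd < 0)
		return 1;
	for (g.regnum = 0; g.regnum < 8; g.regnum++)
		if (ioctl(fd, MTRRIOC_GET_ENTRY, &g) == 0 && g.size)
			printf("reg %u: base=0x%lx size=0x%x type=%u\n",
			       g.regnum, g.base, g.size, g.type);
	close(fd);
	return 0;
}
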
diff --git a/include/asm-x86/mutex.h b/include/asm-x86/mutex.h
deleted file mode 100644
index a731b9c573a6..000000000000
--- a/include/asm-x86/mutex.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "mutex_32.h"
3#else
4# include "mutex_64.h"
5#endif
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
deleted file mode 100644
index 25c16d8ba3c7..000000000000
--- a/include/asm-x86/mutex_32.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 * Assembly implementation of the mutex fastpath, based on atomic
3 * decrement/increment.
4 *
5 * started by Ingo Molnar:
6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */
9#ifndef ASM_X86__MUTEX_32_H
10#define ASM_X86__MUTEX_32_H
11
12#include <asm/alternative.h>
13
14/**
15 * __mutex_fastpath_lock - try to take the lock by moving the count
16 * from 1 to a 0 value
17 * @count: pointer of type atomic_t
18 * @fn: function to call if the original value was not 1
19 *
20 * Change the count from 1 to a value lower than 1, and call <fn> if it
21 * wasn't 1 originally. This function MUST leave the value lower than 1
22 * even when the "1" assertion wasn't true.
23 */
24#define __mutex_fastpath_lock(count, fail_fn) \
25do { \
26 unsigned int dummy; \
27 \
28 typecheck(atomic_t *, count); \
29 typecheck_fn(void (*)(atomic_t *), fail_fn); \
30 \
31 asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
32 " jns 1f \n" \
33 " call " #fail_fn "\n" \
34 "1:\n" \
35 : "=a" (dummy) \
36 : "a" (count) \
37 : "memory", "ecx", "edx"); \
38} while (0)
39
40
41/**
42 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
43 * from 1 to a 0 value
44 * @count: pointer of type atomic_t
45 * @fail_fn: function to call if the original value was not 1
46 *
47 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
48 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
49 * or anything the slow path function returns
50 */
51static inline int __mutex_fastpath_lock_retval(atomic_t *count,
52 int (*fail_fn)(atomic_t *))
53{
54 if (unlikely(atomic_dec_return(count) < 0))
55 return fail_fn(count);
56 else
57 return 0;
58}
59
60/**
61 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
62 * @count: pointer of type atomic_t
63 * @fail_fn: function to call if the original value was not 0
64 *
65 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
66 * In the failure case, this function is allowed to either set the value
67 * to 1, or to set it to a value lower than 1.
68 *
69 * If the implementation sets it to a value of lower than 1, the
70 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
71 * to return 0 otherwise.
72 */
73#define __mutex_fastpath_unlock(count, fail_fn) \
74do { \
75 unsigned int dummy; \
76 \
77 typecheck(atomic_t *, count); \
78 typecheck_fn(void (*)(atomic_t *), fail_fn); \
79 \
80 asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
81 " jg 1f\n" \
82 " call " #fail_fn "\n" \
83 "1:\n" \
84 : "=a" (dummy) \
85 : "a" (count) \
86 : "memory", "ecx", "edx"); \
87} while (0)
88
89#define __mutex_slowpath_needs_to_unlock() 1
90
91/**
92 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
93 *
94 * @count: pointer of type atomic_t
95 * @fail_fn: fallback function
96 *
97 * Change the count from 1 to a value lower than 1, and return 0 (failure)
98 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
99 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
100 * Additionally, if the value was < 0 originally, this function must not leave
101 * it at 0 on failure.
102 */
103static inline int __mutex_fastpath_trylock(atomic_t *count,
104 int (*fail_fn)(atomic_t *))
105{
106 /*
107 * We have two variants here. The cmpxchg based one is the best one
108 * because it never induces a false contention state. It is included
109 * here because architectures using the inc/dec algorithms over the
110 * xchg ones are much more likely to support cmpxchg natively.
111 *
112 * If not, we fall back to the spinlock-based variant - that is
113 * just as efficient (and simpler) as a 'destructive' probing of
114 * the mutex state would be.
115 */
116#ifdef __HAVE_ARCH_CMPXCHG
117 if (likely(atomic_cmpxchg(count, 1, 0) == 1))
118 return 1;
119 return 0;
120#else
121 return fail_fn(count);
122#endif
123}
124
125#endif /* ASM_X86__MUTEX_32_H */
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
deleted file mode 100644
index 918ba21ab9d9..000000000000
--- a/include/asm-x86/mutex_64.h
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * Assembly implementation of the mutex fastpath, based on atomic
3 * decrement/increment.
4 *
5 * started by Ingo Molnar:
6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */
9#ifndef ASM_X86__MUTEX_64_H
10#define ASM_X86__MUTEX_64_H
11
12/**
13 * __mutex_fastpath_lock - decrement and call function if negative
14 * @v: pointer of type atomic_t
15 * @fail_fn: function to call if the result is negative
16 *
17 * Atomically decrements @v and calls <fail_fn> if the result is negative.
18 */
19#define __mutex_fastpath_lock(v, fail_fn) \
20do { \
21 unsigned long dummy; \
22 \
23 typecheck(atomic_t *, v); \
24 typecheck_fn(void (*)(atomic_t *), fail_fn); \
25 \
26 asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
27 " jns 1f \n" \
28 " call " #fail_fn "\n" \
29 "1:" \
30 : "=D" (dummy) \
31 : "D" (v) \
32 : "rax", "rsi", "rdx", "rcx", \
33 "r8", "r9", "r10", "r11", "memory"); \
34} while (0)
35
36/**
37 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
38 * from 1 to a 0 value
39 * @count: pointer of type atomic_t
40 * @fail_fn: function to call if the original value was not 1
41 *
42 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
43 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
44 * or anything the slow path function returns
45 */
46static inline int __mutex_fastpath_lock_retval(atomic_t *count,
47 int (*fail_fn)(atomic_t *))
48{
49 if (unlikely(atomic_dec_return(count) < 0))
50 return fail_fn(count);
51 else
52 return 0;
53}
54
55/**
56 * __mutex_fastpath_unlock - increment and call function if nonpositive
57 * @v: pointer of type atomic_t
58 * @fail_fn: function to call if the result is nonpositive
59 *
60 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
61 */
62#define __mutex_fastpath_unlock(v, fail_fn) \
63do { \
64 unsigned long dummy; \
65 \
66 typecheck(atomic_t *, v); \
67 typecheck_fn(void (*)(atomic_t *), fail_fn); \
68 \
69 asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
70 " jg 1f\n" \
71 " call " #fail_fn "\n" \
72 "1:" \
73 : "=D" (dummy) \
74 : "D" (v) \
75 : "rax", "rsi", "rdx", "rcx", \
76 "r8", "r9", "r10", "r11", "memory"); \
77} while (0)
78
79#define __mutex_slowpath_needs_to_unlock() 1
80
81/**
82 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
83 *
84 * @count: pointer of type atomic_t
85 * @fail_fn: fallback function
86 *
87 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
88 * if it wasn't 1 originally. [the fallback function is never used on
89 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
90 */
91static inline int __mutex_fastpath_trylock(atomic_t *count,
92 int (*fail_fn)(atomic_t *))
93{
94 if (likely(atomic_cmpxchg(count, 1, 0) == 1))
95 return 1;
96 else
97 return 0;
98}
99
100#endif /* ASM_X86__MUTEX_64_H */
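
Both fastpaths above implement the same counting protocol: 1 means unlocked, 0 locked, negative locked with possible waiters. Lock is an atomic decrement that calls out only if the result went negative (the decl + jns pair), and unlock is an atomic increment that calls out unless the result went positive (incl + jg). A portable sketch of that protocol, with GCC atomic builtins standing in for the inline asm and stub slowpaths standing in for the real kernel functions:

/* Mutex fastpath counting protocol, portable sketch.  The kernel
 * versions call real slowpath functions under a fixed register
 * convention; here the slowpaths are just stubs. */
#include <stdio.h>

static int count = 1;		/* 1 unlocked, 0 locked, <0 waiters */

static void slow_lock(void)   { puts("contended: would block"); }
static void slow_unlock(void) { puts("waiters: would wake one"); }

static void fast_lock(void)
{
	if (__atomic_sub_fetch(&count, 1, __ATOMIC_ACQUIRE) < 0)
		slow_lock();		/* decl + jns in the asm above */
}

static void fast_unlock(void)
{
	if (__atomic_add_fetch(&count, 1, __ATOMIC_RELEASE) <= 0)
		slow_unlock();		/* incl + jg in the asm above */
}

int main(void)
{
	fast_lock();	/* 1 -> 0: fastpath only */
	fast_lock();	/* 0 -> -1: takes the slowpath */
	fast_unlock();	/* -1 -> 0: still nonpositive, slowpath */
	fast_unlock();	/* 0 -> 1: fastpath only */
	return 0;
}
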
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
deleted file mode 100644
index a53f829a97c5..000000000000
--- a/include/asm-x86/nmi.h
+++ /dev/null
@@ -1,81 +0,0 @@
1#ifndef ASM_X86__NMI_H
2#define ASM_X86__NMI_H
3
4#include <linux/pm.h>
5#include <asm/irq.h>
6#include <asm/io.h>
7
8#ifdef ARCH_HAS_NMI_WATCHDOG
9
10/**
11 * do_nmi_callback
12 *
13 * Check to see if a callback exists and execute it. Return 1
14 * if the handler exists and was handled successfully.
15 */
16int do_nmi_callback(struct pt_regs *regs, int cpu);
17
18extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
19extern int check_nmi_watchdog(void);
20extern int nmi_watchdog_enabled;
21extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
22extern int avail_to_resrv_perfctr_nmi(unsigned int);
23extern int reserve_perfctr_nmi(unsigned int);
24extern void release_perfctr_nmi(unsigned int);
25extern int reserve_evntsel_nmi(unsigned int);
26extern void release_evntsel_nmi(unsigned int);
27
28extern void setup_apic_nmi_watchdog(void *);
29extern void stop_apic_nmi_watchdog(void *);
30extern void disable_timer_nmi_watchdog(void);
31extern void enable_timer_nmi_watchdog(void);
32extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
33extern void cpu_nmi_set_wd_enabled(void);
34
35extern atomic_t nmi_active;
36extern unsigned int nmi_watchdog;
37#define NMI_NONE 0
38#define NMI_IO_APIC 1
39#define NMI_LOCAL_APIC 2
40#define NMI_INVALID 3
41
42struct ctl_table;
43struct file;
44extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
45 void __user *, size_t *, loff_t *);
46extern int unknown_nmi_panic;
47
48void __trigger_all_cpu_backtrace(void);
49#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
50
51static inline void localise_nmi_watchdog(void)
52{
53 if (nmi_watchdog == NMI_IO_APIC)
54 nmi_watchdog = NMI_LOCAL_APIC;
55}
56
57/* check if nmi_watchdog is active (ie was specified at boot) */
58static inline int nmi_watchdog_active(void)
59{
60 /*
61 * actually it should be:
62 * return (nmi_watchdog == NMI_LOCAL_APIC ||
63 * nmi_watchdog == NMI_IO_APIC)
64 * but since they are powers of two we can use a
65 * cheaper way --cvg
66 */
67 return nmi_watchdog & 0x3;
68}
69#endif
70
71void lapic_watchdog_stop(void);
72int lapic_watchdog_init(unsigned nmi_hz);
73int lapic_wd_event(unsigned nmi_hz);
74unsigned lapic_adjust_nmi_hz(unsigned hz);
75int lapic_watchdog_ok(void);
76void disable_lapic_nmi_watchdog(void);
77void enable_lapic_nmi_watchdog(void);
78void stop_nmi(void);
79void restart_nmi(void);
80
81#endif /* ASM_X86__NMI_H */
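
One caveat the nmi_watchdog_active() comment leaves implicit: (v & 0x3) is also nonzero for NMI_INVALID (3), so the shortcut presumably relies on nmi_watchdog never holding that value once boot-time parsing is done. The truth table is quick to print:

/* Truth table for the (nmi_watchdog & 0x3) shortcut. */
#include <stdio.h>

int main(void)
{
	const char *name[] = { "NMI_NONE", "NMI_IO_APIC",
			       "NMI_LOCAL_APIC", "NMI_INVALID" };
	int v;

	for (v = 0; v < 4; v++)
		printf("%-14s -> active=%d\n", name[v], !!(v & 0x3));
	return 0;
}
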
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
deleted file mode 100644
index ae742721ae73..000000000000
--- a/include/asm-x86/nops.h
+++ /dev/null
@@ -1,118 +0,0 @@
1#ifndef ASM_X86__NOPS_H
2#define ASM_X86__NOPS_H
3
4/* Define nops for use with alternative() */
5
6/* generic versions from gas
7 1: nop
8 the following instructions are NOT nops in 64-bit mode;
9 for 64-bit mode use K8 or P6 nops instead
10 2: movl %esi,%esi
11 3: leal 0x00(%esi),%esi
12 4: leal 0x00(,%esi,1),%esi
13 6: leal 0x00000000(%esi),%esi
14 7: leal 0x00000000(,%esi,1),%esi
15*/
16#define GENERIC_NOP1 ".byte 0x90\n"
17#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
18#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
19#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
20#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
21#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
22#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
23#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
24
25/* Opteron 64bit nops
26 1: nop
27 2: osp nop
28 3: osp osp nop
29 4: osp osp osp nop
30*/
31#define K8_NOP1 GENERIC_NOP1
32#define K8_NOP2 ".byte 0x66,0x90\n"
33#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
34#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
35#define K8_NOP5 K8_NOP3 K8_NOP2
36#define K8_NOP6 K8_NOP3 K8_NOP3
37#define K8_NOP7 K8_NOP4 K8_NOP3
38#define K8_NOP8 K8_NOP4 K8_NOP4
39
40/* K7 nops
41 uses eax dependencies (arbitrary choice)
42 1: nop
43 2: movl %eax,%eax
44 3: leal (,%eax,1),%eax
45 4: leal 0x00(,%eax,1),%eax
46 6: leal 0x00000000(%eax),%eax
47 7: leal 0x00000000(,%eax,1),%eax
48*/
49#define K7_NOP1 GENERIC_NOP1
50#define K7_NOP2 ".byte 0x8b,0xc0\n"
51#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
52#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
53#define K7_NOP5 K7_NOP4 ASM_NOP1
54#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
55#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
56#define K7_NOP8 K7_NOP7 ASM_NOP1
57
58/* P6 nops
59 uses eax dependencies (Intel-recommended choice)
60 1: nop
61 2: osp nop
62 3: nopl (%eax)
63 4: nopl 0x00(%eax)
64 5: nopl 0x00(%eax,%eax,1)
65 6: osp nopl 0x00(%eax,%eax,1)
66 7: nopl 0x00000000(%eax)
67 8: nopl 0x00000000(%eax,%eax,1)
68*/
69#define P6_NOP1 GENERIC_NOP1
70#define P6_NOP2 ".byte 0x66,0x90\n"
71#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
72#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
73#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
74#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
75#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
76#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
77
78#if defined(CONFIG_MK7)
79#define ASM_NOP1 K7_NOP1
80#define ASM_NOP2 K7_NOP2
81#define ASM_NOP3 K7_NOP3
82#define ASM_NOP4 K7_NOP4
83#define ASM_NOP5 K7_NOP5
84#define ASM_NOP6 K7_NOP6
85#define ASM_NOP7 K7_NOP7
86#define ASM_NOP8 K7_NOP8
87#elif defined(CONFIG_X86_P6_NOP)
88#define ASM_NOP1 P6_NOP1
89#define ASM_NOP2 P6_NOP2
90#define ASM_NOP3 P6_NOP3
91#define ASM_NOP4 P6_NOP4
92#define ASM_NOP5 P6_NOP5
93#define ASM_NOP6 P6_NOP6
94#define ASM_NOP7 P6_NOP7
95#define ASM_NOP8 P6_NOP8
96#elif defined(CONFIG_X86_64)
97#define ASM_NOP1 K8_NOP1
98#define ASM_NOP2 K8_NOP2
99#define ASM_NOP3 K8_NOP3
100#define ASM_NOP4 K8_NOP4
101#define ASM_NOP5 K8_NOP5
102#define ASM_NOP6 K8_NOP6
103#define ASM_NOP7 K8_NOP7
104#define ASM_NOP8 K8_NOP8
105#else
106#define ASM_NOP1 GENERIC_NOP1
107#define ASM_NOP2 GENERIC_NOP2
108#define ASM_NOP3 GENERIC_NOP3
109#define ASM_NOP4 GENERIC_NOP4
110#define ASM_NOP5 GENERIC_NOP5
111#define ASM_NOP6 GENERIC_NOP6
112#define ASM_NOP7 GENERIC_NOP7
113#define ASM_NOP8 GENERIC_NOP8
114#endif
115
116#define ASM_NOP_MAX 8
117
118#endif /* ASM_X86__NOPS_H */
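
These macros are strings of .byte directives, so they drop straight into asm() statements; the kernel's alternative() machinery patches between the variants at boot. A minimal x86 usage sketch that just executes one five-byte P6 long nop:

/* Emit one P6 long nop (0f 1f 44 00 00) inline; x86 + GCC only. */
#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"

int main(void)
{
	asm volatile(P6_NOP5);
	return 0;
}
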
diff --git a/include/asm-x86/numa.h b/include/asm-x86/numa.h
deleted file mode 100644
index 27da400d3138..000000000000
--- a/include/asm-x86/numa.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "numa_32.h"
3#else
4# include "numa_64.h"
5#endif
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h
deleted file mode 100644
index 44cb07855c5b..000000000000
--- a/include/asm-x86/numa_32.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef ASM_X86__NUMA_32_H
2#define ASM_X86__NUMA_32_H
3
4extern int pxm_to_nid(int pxm);
5extern void numa_remove_cpu(int cpu);
6
7#ifdef CONFIG_NUMA
8extern void set_highmem_pages_init(void);
9#endif
10
11#endif /* ASM_X86__NUMA_32_H */
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
deleted file mode 100644
index 15c990395b02..000000000000
--- a/include/asm-x86/numa_64.h
+++ /dev/null
@@ -1,43 +0,0 @@
1#ifndef ASM_X86__NUMA_64_H
2#define ASM_X86__NUMA_64_H
3
4#include <linux/nodemask.h>
5#include <asm/apicdef.h>
6
7struct bootnode {
8 u64 start;
9 u64 end;
10};
11
12extern int compute_hash_shift(struct bootnode *nodes, int numblks,
13 int *nodeids);
14
15#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
16
17extern void numa_init_array(void);
18extern int numa_off;
19
20extern void srat_reserve_add_area(int nodeid);
21extern int hotadd_percent;
22
23extern s16 apicid_to_node[MAX_LOCAL_APIC];
24
25extern unsigned long numa_free_all_bootmem(void);
26extern void setup_node_bootmem(int nodeid, unsigned long start,
27 unsigned long end);
28
29#ifdef CONFIG_NUMA
30extern void __init init_cpu_to_node(void);
31extern void __cpuinit numa_set_node(int cpu, int node);
32extern void __cpuinit numa_clear_node(int cpu);
33extern void __cpuinit numa_add_cpu(int cpu);
34extern void __cpuinit numa_remove_cpu(int cpu);
35#else
36static inline void init_cpu_to_node(void) { }
37static inline void numa_set_node(int cpu, int node) { }
38static inline void numa_clear_node(int cpu) { }
39static inline void numa_add_cpu(int cpu) { }
40static inline void numa_remove_cpu(int cpu) { }
41#endif
42
43#endif /* ASM_X86__NUMA_64_H */
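As a hedged sketch of how these hooks compose at CPU bring-up (an assumed caller, not part of this diff; NUMA_NO_NODE comes from <linux/numa.h>):

	/* Bind a freshly onlined CPU to the node recorded for its APIC ID. */
	static void map_cpu_to_node(int cpu, int apicid)
	{
		int node = apicid_to_node[apicid];

		if (node == NUMA_NO_NODE)
			node = 0;	/* fall back to node 0 */
		numa_set_node(cpu, node);
		numa_add_cpu(cpu);
	}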
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
deleted file mode 100644
index 124bf7d4b70a..000000000000
--- a/include/asm-x86/numaq.h
+++ /dev/null
@@ -1,169 +0,0 @@
1/*
2 * Written by: Patricia Gaughen, IBM Corporation
3 *
4 * Copyright (C) 2002, IBM Corp.
5 *
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16 * NON INFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * Send feedback to <gone@us.ibm.com>
24 */
25
26#ifndef ASM_X86__NUMAQ_H
27#define ASM_X86__NUMAQ_H
28
29#ifdef CONFIG_X86_NUMAQ
30
31extern int found_numaq;
32extern int get_memcfg_numaq(void);
33
34/*
35 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data describe the per-quad configuration area laid down by the NUMA-Q firmware.
36 */
37#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private
38 quad space */
39
40/*
41 * Communication area for each processor on lynxer-processor tests.
42 *
43 * NOTE: If you change the size of this eachquadmem structure you need
44 * to change the definition for EACH_QUAD_SIZE.
45 */
46struct eachquadmem {
47 unsigned int priv_mem_start; /* Starting address of this */
48 /* quad's private memory. */
49 /* This is always 0. */
50 /* In MB. */
51 unsigned int priv_mem_size; /* Size of this quad's */
52 /* private memory. */
53 /* In MB. */
54 unsigned int low_shrd_mem_strp_start;/* Starting address of this */
55 /* quad's low shared block */
56 /* (untranslated). */
57 /* In MB. */
58 unsigned int low_shrd_mem_start; /* Starting address of this */
59 /* quad's low shared memory */
60 /* (untranslated). */
61 /* In MB. */
62 unsigned int low_shrd_mem_size; /* Size of this quad's low */
63 /* shared memory. */
64 /* In MB. */
65 unsigned int lmmio_copb_start; /* Starting address of this */
66 /* quad's local memory */
67 /* mapped I/O in the */
68 /* compatibility OPB. */
69 /* In MB. */
70 unsigned int lmmio_copb_size; /* Size of this quad's local */
71 /* memory mapped I/O in the */
72 /* compatibility OPB. */
73 /* In MB. */
74 unsigned int lmmio_nopb_start; /* Starting address of this */
75 /* quad's local memory */
76 /* mapped I/O in the */
77 /* non-compatibility OPB. */
78 /* In MB. */
79 unsigned int lmmio_nopb_size; /* Size of this quad's local */
80 /* memory mapped I/O in the */
81 /* non-compatibility OPB. */
82 /* In MB. */
83 unsigned int io_apic_0_start; /* Starting address of I/O */
84 /* APIC 0. */
85 unsigned int io_apic_0_sz; /* Size I/O APIC 0. */
86 unsigned int io_apic_1_start; /* Starting address of I/O */
87 /* APIC 1. */
88 unsigned int io_apic_1_sz; /* Size I/O APIC 1. */
89 unsigned int hi_shrd_mem_start; /* Starting address of this */
90 /* quad's high shared memory.*/
91 /* In MB. */
92 unsigned int hi_shrd_mem_size; /* Size of this quad's high */
93 /* shared memory. */
94 /* In MB. */
95 unsigned int mps_table_addr; /* Address of this quad's */
96 /* MPS tables from BIOS, */
97 /* in system space.*/
98 unsigned int lcl_MDC_pio_addr; /* Port-I/O address for */
99 /* local access of MDC. */
100 unsigned int rmt_MDC_mmpio_addr; /* MM-Port-I/O address for */
101 /* remote access of MDC. */
102 unsigned int mm_port_io_start; /* Starting address of this */
103 /* quad's memory mapped Port */
104 /* I/O space. */
105 unsigned int mm_port_io_size; /* Size of this quad's memory*/
106 /* mapped Port I/O space. */
107 unsigned int mm_rmt_io_apic_start; /* Starting address of this */
108 /* quad's memory mapped */
109 /* remote I/O APIC space. */
110 unsigned int mm_rmt_io_apic_size; /* Size of this quad's memory*/
111 /* mapped remote I/O APIC */
112 /* space. */
113 unsigned int mm_isa_start; /* Starting address of this */
114 /* quad's memory mapped ISA */
115 /* space (contains MDC */
116 /* memory space). */
117 unsigned int mm_isa_size; /* Size of this quad's memory*/
118 /* mapped ISA space (contains*/
119 /* MDC memory space). */
120 unsigned int rmt_qmi_addr; /* Remote addr to access QMI.*/
121 unsigned int lcl_qmi_addr; /* Local addr to access QMI. */
122};
123
124/*
125 * Note: This structure must NOT be changed unless the multiproc and
126 * OS are changed to reflect the new structure.
127 */
128struct sys_cfg_data {
129 unsigned int quad_id;
130 unsigned int bsp_proc_id; /* Boot Strap Processor in this quad. */
131 unsigned int scd_version; /* Version number of this table. */
132 unsigned int first_quad_id;
133 unsigned int quads_present31_0; /* 1 bit for each quad */
134 unsigned int quads_present63_32; /* 1 bit for each quad */
135 unsigned int config_flags;
136 unsigned int boot_flags;
137 unsigned int csr_start_addr; /* Absolute value (not in MB) */
138 unsigned int csr_size; /* Absolute value (not in MB) */
139 unsigned int lcl_apic_start_addr; /* Absolute value (not in MB) */
140 unsigned int lcl_apic_size; /* Absolute value (not in MB) */
141 unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */
142 unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
143 /* may not be totally populated */
144 unsigned int split_mem_enbl; /* 0 for no low shared memory */
145 unsigned int mmio_sz; /* Size of total system memory mapped I/O */
146 /* (in MB). */
147 unsigned int quad_spin_lock; /* Spare location used for quad */
148 /* bringup. */
149 unsigned int nonzero55; /* For checksumming. */
150 unsigned int nonzeroaa; /* For checksumming. */
151 unsigned int scd_magic_number;
152 unsigned int system_type;
153 unsigned int checksum;
154 /*
155 * memory configuration area for each quad
156 */
157 struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
158};
159
160void numaq_tsc_disable(void);
161
162#else
163static inline int get_memcfg_numaq(void)
164{
165 return 0;
166}
167#endif /* CONFIG_X86_NUMAQ */
168#endif /* ASM_X86__NUMAQ_H */
169
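For context, a hedged sketch of how this layout was consumed (modeled loosely on arch/x86/kernel/numaq_32.c; mapping the table via __va() is an assumption of the sketch):

	/* Walk the firmware's per-quad memory table. */
	static void dump_quad_mem(void)
	{
		struct sys_cfg_data *scd =
			(struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR);
		int quad;

		for_each_online_node(quad)
			printk("quad %d: %u MB private memory\n",
			       quad, scd->eq[quad].priv_mem_size);
	}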
diff --git a/include/asm-x86/numaq/apic.h b/include/asm-x86/numaq/apic.h
deleted file mode 100644
index 0bf2a06b7a4e..000000000000
--- a/include/asm-x86/numaq/apic.h
+++ /dev/null
@@ -1,136 +0,0 @@
1#ifndef __ASM_NUMAQ_APIC_H
2#define __ASM_NUMAQ_APIC_H
3
4#include <asm/io.h>
5#include <linux/mmzone.h>
6#include <linux/nodemask.h>
7
8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
9
10static inline cpumask_t target_cpus(void)
11{
12 return CPU_MASK_ALL;
13}
14
15#define NO_BALANCE_IRQ (1)
16#define esr_disable (1)
17
18#define INT_DELIVERY_MODE dest_LowestPrio
19#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
20
21static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
22{
23 return physid_isset(apicid, bitmap);
24}
25static inline unsigned long check_apicid_present(int bit)
26{
27 return physid_isset(bit, phys_cpu_present_map);
28}
29#define apicid_cluster(apicid) (apicid & 0xF0)
30
31static inline int apic_id_registered(void)
32{
33 return 1;
34}
35
36static inline void init_apic_ldr(void)
37{
38 /* Already done in NUMA-Q firmware */
39}
40
41static inline void setup_apic_routing(void)
42{
43 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
44 "NUMA-Q", nr_ioapics);
45}
46
47/*
48 * Skip adding the timer int on secondary nodes, which causes
49 * a small but painful rift in the time-space continuum.
50 */
51static inline int multi_timer_check(int apic, int irq)
52{
53 return apic != 0 && irq == 0;
54}
55
56static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
57{
58 /* We don't have a good way to do this yet - hack */
59 return physids_promote(0xFUL);
60}
61
62/* Mapping from cpu number to logical apicid */
63extern u8 cpu_2_logical_apicid[];
64static inline int cpu_to_logical_apicid(int cpu)
65{
66 if (cpu >= NR_CPUS)
67 return BAD_APICID;
68 return (int)cpu_2_logical_apicid[cpu];
69}
70
71/*
72 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
73 * cpu to APIC ID relation to properly interact with the intelligent
74 * mode of the cluster controller.
75 */
76static inline int cpu_present_to_apicid(int mps_cpu)
77{
78 if (mps_cpu < 60)
79 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
80 else
81 return BAD_APICID;
82}
83
84static inline int apicid_to_node(int logical_apicid)
85{
86 return logical_apicid >> 4;
87}
88
89static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
90{
91 int node = apicid_to_node(logical_apicid);
92 int cpu = __ffs(logical_apicid & 0xf);
93
94 return physid_mask_of_physid(cpu + 4*node);
95}
96
97extern void *xquad_portio;
98
99static inline void setup_portio_remap(void)
100{
101 int num_quads = num_online_nodes();
102
103 if (num_quads <= 1)
104 return;
105
106 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
107 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
108 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
109 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
110}
111
112static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
113{
114 return (1);
115}
116
117static inline void enable_apic_mode(void)
118{
119}
120
121/*
122 * We use physical apicids here, not logical, so just return the default
123 * physical broadcast to stop people from breaking us
124 */
125static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
126{
127 return (int) 0xF;
128}
129
130/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
131static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
132{
133 return cpuid_apic >> index_msb;
134}
135
136#endif /* __ASM_NUMAQ_APIC_H */
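To make the ID layout concrete, a worked example (illustrative, not from the diff): MPS CPU 5 sits in quad 1 as the second processor, so:

	int apicid = cpu_present_to_apicid(5);	/* ((5>>2)<<4) | (1<<(5&3)) = 0x12 */
	int node   = apicid_to_node(apicid);	/* 0x12 >> 4 = 1  (quad 1) */
	int slot   = __ffs(apicid & 0xf);	/* __ffs(0x2) = 1  (second CPU) */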
diff --git a/include/asm-x86/numaq/apicdef.h b/include/asm-x86/numaq/apicdef.h
deleted file mode 100644
index e012a46cc22a..000000000000
--- a/include/asm-x86/numaq/apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_NUMAQ_APICDEF_H
2#define __ASM_NUMAQ_APICDEF_H
3
4
5#define APIC_ID_MASK (0xF<<24)
6
7static inline unsigned get_apic_id(unsigned long x)
8{
9 return (((x)>>24)&0x0F);
10}
11
12#define GET_APIC_ID(x) get_apic_id(x)
13
14#endif /* __ASM_NUMAQ_APICDEF_H */
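Worked example (illustrative): the APIC ID occupies bits 27:24 of the raw register value, so:

	unsigned id = get_apic_id(0x03000000);	/* (0x03000000 >> 24) & 0x0F == 3 */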
diff --git a/include/asm-x86/numaq/ipi.h b/include/asm-x86/numaq/ipi.h
deleted file mode 100644
index 935588d286cf..000000000000
--- a/include/asm-x86/numaq/ipi.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __ASM_NUMAQ_IPI_H
2#define __ASM_NUMAQ_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18}
19
20static inline void send_IPI_all(int vector)
21{
22 send_IPI_mask(cpu_online_map, vector);
23}
24
25#endif /* __ASM_NUMAQ_IPI_H */
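Usage follows the generic x86 IPI pattern; a minimal sketch (RESCHEDULE_VECTOR is assumed from <asm/irq_vectors.h>):

	/* Ask every other online CPU to reschedule. */
	static void kick_other_cpus(void)
	{
		send_IPI_allbutself(RESCHEDULE_VECTOR);
	}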
diff --git a/include/asm-x86/numaq/mpparse.h b/include/asm-x86/numaq/mpparse.h
deleted file mode 100644
index 252292e077b6..000000000000
--- a/include/asm-x86/numaq/mpparse.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef __ASM_NUMAQ_MPPARSE_H
2#define __ASM_NUMAQ_MPPARSE_H
3
4extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid);
6
7#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/include/asm-x86/numaq/wakecpu.h b/include/asm-x86/numaq/wakecpu.h
deleted file mode 100644
index c577bda5b1c5..000000000000
--- a/include/asm-x86/numaq/wakecpu.h
+++ /dev/null
@@ -1,43 +0,0 @@
1#ifndef __ASM_NUMAQ_WAKECPU_H
2#define __ASM_NUMAQ_WAKECPU_H
3
4/* This file copes with machines that wake up secondary CPUs by NMI */
5
6#define WAKE_SECONDARY_VIA_NMI
7
8#define TRAMPOLINE_LOW phys_to_virt(0x8)
9#define TRAMPOLINE_HIGH phys_to_virt(0xa)
10
11#define boot_cpu_apicid boot_cpu_logical_apicid
12
13/* We don't do anything here because we use NMIs to boot instead */
14static inline void wait_for_init_deassert(atomic_t *deassert)
15{
16}
17
18/*
19 * Because we use NMIs rather than the INIT-STARTUP sequence to
20 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
21 */
22static inline void smp_callin_clear_local_apic(void)
23{
24 clear_local_APIC();
25}
26
27static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
28{
29 printk("Storing NMI vector\n");
30 *high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
31 *low = *((volatile unsigned short *) TRAMPOLINE_LOW);
32}
33
34static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
35{
36 printk("Restoring NMI vector\n");
37 *((volatile unsigned short *) TRAMPOLINE_HIGH) = *high;
38 *((volatile unsigned short *) TRAMPOLINE_LOW) = *low;
39}
40
41#define inquire_remote_apic(apicid) {}
42
43#endif /* __ASM_NUMAQ_WAKECPU_H */
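A hedged sketch of the wake sequence these helpers support (an assumed caller shape; the real logic lives in the smpboot code):

	unsigned short nmi_high, nmi_low;

	store_NMI_vector(&nmi_high, &nmi_low);
	/* ... point TRAMPOLINE_HIGH/TRAMPOLINE_LOW at the startup
	 * trampoline, NMI the target CPU, wait for it to call in ... */
	restore_NMI_vector(&nmi_high, &nmi_low);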
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h
deleted file mode 100644
index d7328b1a05c1..000000000000
--- a/include/asm-x86/olpc.h
+++ /dev/null
@@ -1,132 +0,0 @@
1/* OLPC machine specific definitions */
2
3#ifndef ASM_X86__OLPC_H
4#define ASM_X86__OLPC_H
5
6#include <asm/geode.h>
7
8struct olpc_platform_t {
9 int flags;
10 uint32_t boardrev;
11 int ecver;
12};
13
14#define OLPC_F_PRESENT 0x01
15#define OLPC_F_DCON 0x02
16#define OLPC_F_VSA 0x04
17
18#ifdef CONFIG_OLPC
19
20extern struct olpc_platform_t olpc_platform_info;
21
22/*
23 * OLPC board IDs contain the major build number within the mask 0x0ff0,
24 * and the minor build number within 0x000f. Pre-builds have a minor
25 * number less than 8, and normal builds start at 8. For example, 0x0B10
26 * is a PreB1, and 0x0C18 is a C1.
27 */
28
29static inline uint32_t olpc_board(uint8_t id)
30{
31 return (id << 4) | 0x8;
32}
33
34static inline uint32_t olpc_board_pre(uint8_t id)
35{
36 return id << 4;
37}
38
39static inline int machine_is_olpc(void)
40{
41 return (olpc_platform_info.flags & OLPC_F_PRESENT) ? 1 : 0;
42}
43
44/*
45 * The DCON is OLPC's Display Controller. It has a number of unique
46 * features that we might want to take advantage of..
47 */
48static inline int olpc_has_dcon(void)
49{
50 return (olpc_platform_info.flags & OLPC_F_DCON) ? 1 : 0;
51}
52
53/*
54 * The VSA is software from AMD that typical Geode BIOSes will include.
55 * It is used to emulate the PCI bus, VGA, etc. OLPC's Open Firmware does
56 * not include the VSA; instead, PCI is emulated by the kernel.
57 *
58 * The VSA is described further in arch/x86/pci/olpc.c.
59 */
60static inline int olpc_has_vsa(void)
61{
62 return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0;
63}
64
65/*
66 * The "Mass Production" version of OLPC's XO is identified as being model
67 * C2. During the prototype phase, the following models (in chronological
68 * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models
69 * were based on Geode GX CPUs, and models after that were based upon
70 * Geode LX CPUs. There were also some hand-assembled models floating
71 * around, referred to as PreB1, PreB2, etc.
72 */
73static inline int olpc_board_at_least(uint32_t rev)
74{
75 return olpc_platform_info.boardrev >= rev;
76}
77
78#else
79
80static inline int machine_is_olpc(void)
81{
82 return 0;
83}
84
85static inline int olpc_has_dcon(void)
86{
87 return 0;
88}
89
90static inline int olpc_has_vsa(void)
91{
92 return 0;
93}
94
95#endif
96
97/* EC related functions */
98
99extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
100 unsigned char *outbuf, size_t outlen);
101
102extern int olpc_ec_mask_set(uint8_t bits);
103extern int olpc_ec_mask_unset(uint8_t bits);
104
105/* EC commands */
106
107#define EC_FIRMWARE_REV 0x08
108
109/* SCI source values */
110
111#define EC_SCI_SRC_EMPTY 0x00
112#define EC_SCI_SRC_GAME 0x01
113#define EC_SCI_SRC_BATTERY 0x02
114#define EC_SCI_SRC_BATSOC 0x04
115#define EC_SCI_SRC_BATERR 0x08
116#define EC_SCI_SRC_EBOOK 0x10
117#define EC_SCI_SRC_WLAN 0x20
118#define EC_SCI_SRC_ACPWR 0x40
119#define EC_SCI_SRC_ALL 0x7F
120
121/* GPIO assignments */
122
123#define OLPC_GPIO_MIC_AC geode_gpio(1)
124#define OLPC_GPIO_DCON_IRQ geode_gpio(7)
125#define OLPC_GPIO_THRM_ALRM geode_gpio(10)
126#define OLPC_GPIO_SMB_CLK geode_gpio(14)
127#define OLPC_GPIO_SMB_DATA geode_gpio(15)
128#define OLPC_GPIO_WORKAUX geode_gpio(24)
129#define OLPC_GPIO_LID geode_gpio(26)
130#define OLPC_GPIO_ECSCI geode_gpio(27)
131
132#endif /* ASM_X86__OLPC_H */
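Putting the revision helpers together, a minimal sketch (enable_dcon_quirk() is a hypothetical helper, not a real function):

	/* Apply a DCON quirk only on OLPC boards at build B2 or newer. */
	if (machine_is_olpc() && olpc_has_dcon() &&
	    olpc_board_at_least(olpc_board(0xb2)))
		enable_dcon_quirk();	/* hypothetical */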
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
deleted file mode 100644
index d4f1d5791fc1..000000000000
--- a/include/asm-x86/page.h
+++ /dev/null
@@ -1,209 +0,0 @@
1#ifndef ASM_X86__PAGE_H
2#define ASM_X86__PAGE_H
3
4#include <linux/const.h>
5
6/* PAGE_SHIFT determines the page size */
7#define PAGE_SHIFT 12
8#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
9#define PAGE_MASK (~(PAGE_SIZE-1))
10
11#ifdef __KERNEL__
12
13#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
14#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
15
16/* Cast PAGE_MASK to a signed type so that it is sign-extended if
17 virtual addresses are 32-bits but physical addresses are larger
18 (i.e., 32-bit PAE). */
19#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
20
21/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
22#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
23
24/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
25#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
26
27#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
28#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
29
30#define HPAGE_SHIFT PMD_SHIFT
31#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
32#define HPAGE_MASK (~(HPAGE_SIZE - 1))
33#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
34
35#define HUGE_MAX_HSTATE 2
36
37#ifndef __ASSEMBLY__
38#include <linux/types.h>
39#endif
40
41#ifdef CONFIG_X86_64
42#include <asm/page_64.h>
43#else
44#include <asm/page_32.h>
45#endif /* CONFIG_X86_64 */
46
47#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
48
49#define VM_DATA_DEFAULT_FLAGS \
50 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
51 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
52
53
54#ifndef __ASSEMBLY__
55
56typedef struct { pgdval_t pgd; } pgd_t;
57typedef struct { pgprotval_t pgprot; } pgprot_t;
58
59extern int page_is_ram(unsigned long pagenr);
60extern int pagerange_is_ram(unsigned long start, unsigned long end);
61extern int devmem_is_allowed(unsigned long pagenr);
62extern void map_devmem(unsigned long pfn, unsigned long size,
63 pgprot_t vma_prot);
64extern void unmap_devmem(unsigned long pfn, unsigned long size,
65 pgprot_t vma_prot);
66
67extern unsigned long max_low_pfn_mapped;
68extern unsigned long max_pfn_mapped;
69
70struct page;
71
72static inline void clear_user_page(void *page, unsigned long vaddr,
73 struct page *pg)
74{
75 clear_page(page);
76}
77
78static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
79 struct page *topage)
80{
81 copy_page(to, from);
82}
83
84#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
85 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
86#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
87
88static inline pgd_t native_make_pgd(pgdval_t val)
89{
90 return (pgd_t) { val };
91}
92
93static inline pgdval_t native_pgd_val(pgd_t pgd)
94{
95 return pgd.pgd;
96}
97
98#if PAGETABLE_LEVELS >= 3
99#if PAGETABLE_LEVELS == 4
100typedef struct { pudval_t pud; } pud_t;
101
102static inline pud_t native_make_pud(pmdval_t val)
103{
104 return (pud_t) { val };
105}
106
107static inline pudval_t native_pud_val(pud_t pud)
108{
109 return pud.pud;
110}
111#else /* PAGETABLE_LEVELS == 3 */
112#include <asm-generic/pgtable-nopud.h>
113
114static inline pudval_t native_pud_val(pud_t pud)
115{
116 return native_pgd_val(pud.pgd);
117}
118#endif /* PAGETABLE_LEVELS == 4 */
119
120typedef struct { pmdval_t pmd; } pmd_t;
121
122static inline pmd_t native_make_pmd(pmdval_t val)
123{
124 return (pmd_t) { val };
125}
126
127static inline pmdval_t native_pmd_val(pmd_t pmd)
128{
129 return pmd.pmd;
130}
131#else /* PAGETABLE_LEVELS == 2 */
132#include <asm-generic/pgtable-nopmd.h>
133
134static inline pmdval_t native_pmd_val(pmd_t pmd)
135{
136 return native_pgd_val(pmd.pud.pgd);
137}
138#endif /* PAGETABLE_LEVELS >= 3 */
139
140static inline pte_t native_make_pte(pteval_t val)
141{
142 return (pte_t) { .pte = val };
143}
144
145static inline pteval_t native_pte_val(pte_t pte)
146{
147 return pte.pte;
148}
149
150static inline pteval_t native_pte_flags(pte_t pte)
151{
152 return native_pte_val(pte) & PTE_FLAGS_MASK;
153}
154
155#define pgprot_val(x) ((x).pgprot)
156#define __pgprot(x) ((pgprot_t) { (x) } )
157
158#ifdef CONFIG_PARAVIRT
159#include <asm/paravirt.h>
160#else /* !CONFIG_PARAVIRT */
161
162#define pgd_val(x) native_pgd_val(x)
163#define __pgd(x) native_make_pgd(x)
164
165#ifndef __PAGETABLE_PUD_FOLDED
166#define pud_val(x) native_pud_val(x)
167#define __pud(x) native_make_pud(x)
168#endif
169
170#ifndef __PAGETABLE_PMD_FOLDED
171#define pmd_val(x) native_pmd_val(x)
172#define __pmd(x) native_make_pmd(x)
173#endif
174
175#define pte_val(x) native_pte_val(x)
176#define pte_flags(x) native_pte_flags(x)
177#define __pte(x) native_make_pte(x)
178
179#endif /* CONFIG_PARAVIRT */
180
181#define __pa(x) __phys_addr((unsigned long)(x))
182#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
183/* __pa_symbol should be used for C visible symbols.
184 This seems to be the official gcc blessed way to do such arithmetic. */
185#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
186
187#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
188
189#define __boot_va(x) __va(x)
190#define __boot_pa(x) __pa(x)
191
192/*
193 * virt_to_page(kaddr) returns a valid pointer if and only if
194 * virt_addr_valid(kaddr) returns true.
195 */
196#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
197#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
198extern bool __virt_addr_valid(unsigned long kaddr);
199#define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
200
201#endif /* __ASSEMBLY__ */
202
203#include <asm-generic/memory_model.h>
204#include <asm-generic/page.h>
205
206#define __HAVE_ARCH_GATE_AREA 1
207
208#endif /* __KERNEL__ */
209#endif /* ASM_X86__PAGE_H */
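A short sketch of the accessor pattern above (illustrative; pfn and prot are assumed locals):

	/* Compose a pte from a pfn plus protection bits, then split it. */
	pte_t pte = native_make_pte(((pteval_t)pfn << PAGE_SHIFT) | pgprot_val(prot));
	unsigned long back = (native_pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
	pteval_t flags = native_pte_flags(pte);	/* val & PTE_FLAGS_MASK */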
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
deleted file mode 100644
index bdf5dba4cfb0..000000000000
--- a/include/asm-x86/page_32.h
+++ /dev/null
@@ -1,136 +0,0 @@
1#ifndef ASM_X86__PAGE_32_H
2#define ASM_X86__PAGE_32_H
3
4/*
5 * This handles the memory map.
6 *
7 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
8 * a virtual address space of one gigabyte, which limits the
9 * amount of physical memory you can use to about 950MB.
10 *
11 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
12 * and CONFIG_HIGHMEM64G options in the kernel configuration.
13 */
14#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
15
16#ifdef CONFIG_4KSTACKS
17#define THREAD_ORDER 0
18#else
19#define THREAD_ORDER 1
20#endif
21#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
22
23#define STACKFAULT_STACK 0
24#define DOUBLEFAULT_STACK 1
25#define NMI_STACK 0
26#define DEBUG_STACK 0
27#define MCE_STACK 0
28#define N_EXCEPTION_STACKS 1
29
30#ifdef CONFIG_X86_PAE
31/* 44=32+12, the limit we can fit into an unsigned long pfn */
32#define __PHYSICAL_MASK_SHIFT 44
33#define __VIRTUAL_MASK_SHIFT 32
34#define PAGETABLE_LEVELS 3
35
36#ifndef __ASSEMBLY__
37typedef u64 pteval_t;
38typedef u64 pmdval_t;
39typedef u64 pudval_t;
40typedef u64 pgdval_t;
41typedef u64 pgprotval_t;
42
43typedef union {
44 struct {
45 unsigned long pte_low, pte_high;
46 };
47 pteval_t pte;
48} pte_t;
49#endif /* __ASSEMBLY__ */
50
51#else /* !CONFIG_X86_PAE */
52#define __PHYSICAL_MASK_SHIFT 32
53#define __VIRTUAL_MASK_SHIFT 32
54#define PAGETABLE_LEVELS 2
55
56#ifndef __ASSEMBLY__
57typedef unsigned long pteval_t;
58typedef unsigned long pmdval_t;
59typedef unsigned long pudval_t;
60typedef unsigned long pgdval_t;
61typedef unsigned long pgprotval_t;
62
63typedef union {
64 pteval_t pte;
65 pteval_t pte_low;
66} pte_t;
67
68#endif /* __ASSEMBLY__ */
69#endif /* CONFIG_X86_PAE */
70
71#ifndef __ASSEMBLY__
72typedef struct page *pgtable_t;
73#endif
74
75#ifdef CONFIG_HUGETLB_PAGE
76#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
77#endif
78
79#ifndef __ASSEMBLY__
80#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
81#ifdef CONFIG_DEBUG_VIRTUAL
82extern unsigned long __phys_addr(unsigned long);
83#else
84#define __phys_addr(x) __phys_addr_nodebug(x)
85#endif
86#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
87
88#ifdef CONFIG_FLATMEM
89#define pfn_valid(pfn) ((pfn) < max_mapnr)
90#endif /* CONFIG_FLATMEM */
91
92extern int nx_enabled;
93
94/*
95 * This much address space is reserved for vmalloc() and iomap()
96 * as well as fixmap mappings.
97 */
98extern unsigned int __VMALLOC_RESERVE;
99extern int sysctl_legacy_va_layout;
100
101extern void find_low_pfn_range(void);
102extern unsigned long init_memory_mapping(unsigned long start,
103 unsigned long end);
104extern void initmem_init(unsigned long, unsigned long);
105extern void free_initmem(void);
106extern void setup_bootmem_allocator(void);
107
108
109#ifdef CONFIG_X86_USE_3DNOW
110#include <asm/mmx.h>
111
112static inline void clear_page(void *page)
113{
114 mmx_clear_page(page);
115}
116
117static inline void copy_page(void *to, void *from)
118{
119 mmx_copy_page(to, from);
120}
121#else /* !CONFIG_X86_USE_3DNOW */
122#include <linux/string.h>
123
124static inline void clear_page(void *page)
125{
126 memset(page, 0, PAGE_SIZE);
127}
128
129static inline void copy_page(void *to, void *from)
130{
131 memcpy(to, from, PAGE_SIZE);
132}
133#endif /* CONFIG_X86_USE_3DNOW */
134#endif /* !__ASSEMBLY__ */
135
136#endif /* ASM_X86__PAGE_32_H */
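Because the 32-bit direct map is a fixed offset, a __pa()/__va() round-trip is pure arithmetic; a sketch assuming the default 3G/1G split (PAGE_OFFSET == 0xC0000000):

	void *virt = __va(0x100000);		/* 1 MB physical -> 0xC0100000 */
	unsigned long phys = __pa(virt);	/* back to 0x100000 */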
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
deleted file mode 100644
index 49380b8c7e25..000000000000
--- a/include/asm-x86/page_64.h
+++ /dev/null
@@ -1,105 +0,0 @@
1#ifndef ASM_X86__PAGE_64_H
2#define ASM_X86__PAGE_64_H
3
4#define PAGETABLE_LEVELS 4
5
6#define THREAD_ORDER 1
7#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
8#define CURRENT_MASK (~(THREAD_SIZE - 1))
9
10#define EXCEPTION_STACK_ORDER 0
11#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
12
13#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
14#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
15
16#define IRQSTACK_ORDER 2
17#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
18
19#define STACKFAULT_STACK 1
20#define DOUBLEFAULT_STACK 2
21#define NMI_STACK 3
22#define DEBUG_STACK 4
23#define MCE_STACK 5
24#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
25
26#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
27#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
28
29/*
30 * Set __PAGE_OFFSET to the most negative possible address +
31 * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
32 * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
33 * what Xen requires.
34 */
35#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
36
37#define __PHYSICAL_START CONFIG_PHYSICAL_START
38#define __KERNEL_ALIGN 0x200000
39
40/*
41 * Make sure kernel is aligned to 2MB address. Catching it at compile
42 * time is better. Change your config file and compile the kernel
43 * for a 2MB aligned address (CONFIG_PHYSICAL_START)
44 */
45#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
46#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
47#endif
48
49#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
50#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
51
52/* See Documentation/x86_64/mm.txt for a description of the memory map. */
53#define __PHYSICAL_MASK_SHIFT 46
54#define __VIRTUAL_MASK_SHIFT 48
55
56/*
57 * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
58 * arch/x86/kernel/head_64.S), and it is mapped here:
59 */
60#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
61#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
62
63#ifndef __ASSEMBLY__
64void clear_page(void *page);
65void copy_page(void *to, void *from);
66
67/* duplicate of the declaration in bootmem.h */
68extern unsigned long max_pfn;
69extern unsigned long phys_base;
70
71extern unsigned long __phys_addr(unsigned long);
72#define __phys_reloc_hide(x) (x)
73
74/*
75 * These are used to make use of C type-checking..
76 */
77typedef unsigned long pteval_t;
78typedef unsigned long pmdval_t;
79typedef unsigned long pudval_t;
80typedef unsigned long pgdval_t;
81typedef unsigned long pgprotval_t;
82
83typedef struct page *pgtable_t;
84
85typedef struct { pteval_t pte; } pte_t;
86
87#define vmemmap ((struct page *)VMEMMAP_START)
88
89extern unsigned long init_memory_mapping(unsigned long start,
90 unsigned long end);
91
92extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
93extern void free_initmem(void);
94
95extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
96extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
97
98#endif /* !__ASSEMBLY__ */
99
100#ifdef CONFIG_FLATMEM
101#define pfn_valid(pfn) ((pfn) < max_pfn)
102#endif
103
104
105#endif /* ASM_X86__PAGE_64_H */
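The stack-order constants above resolve to concrete sizes with 4 KB pages (worked arithmetic, assuming PAGE_SIZE == 4096):

	/* THREAD_SIZE     = 4096 << 1 =  8 KB  (kernel stacks)
	 * EXCEPTION_STKSZ = 4096 << 0 =  4 KB  (per exception stack)
	 * DEBUG_STKSZ     = 4096 << 1 =  8 KB  (debug stack)
	 * IRQSTACKSIZE    = 4096 << 2 = 16 KB  (per-CPU IRQ stack)
	 */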
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
deleted file mode 100644
index 0009cfb11a5f..000000000000
--- a/include/asm-x86/param.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef ASM_X86__PARAM_H
2#define ASM_X86__PARAM_H
3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* some user interfaces are */
7# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif /* ASM_X86__PARAM_H */
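The HZ/USER_HZ split matters when exporting tick counts: the kernel ticks at CONFIG_HZ internally, but times()-style interfaces report USER_HZ ticks. A sketch (assumes HZ is a multiple of USER_HZ, as the usual 100/250/1000 configurations are):

	/* Convert an internal jiffies delta to user-visible clock ticks. */
	unsigned long user_ticks = jiffies_delta / (HZ / USER_HZ);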
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
deleted file mode 100644
index 8d6ae2f760d0..000000000000
--- a/include/asm-x86/paravirt.h
+++ /dev/null
@@ -1,1650 +0,0 @@
1#ifndef ASM_X86__PARAVIRT_H
2#define ASM_X86__PARAVIRT_H
3/* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */
5
6#ifdef CONFIG_PARAVIRT
7#include <asm/page.h>
8#include <asm/asm.h>
9
10/* Bitmask of what can be clobbered: usually at least eax. */
11#define CLBR_NONE 0
12#define CLBR_EAX (1 << 0)
13#define CLBR_ECX (1 << 1)
14#define CLBR_EDX (1 << 2)
15
16#ifdef CONFIG_X86_64
17#define CLBR_RSI (1 << 3)
18#define CLBR_RDI (1 << 4)
19#define CLBR_R8 (1 << 5)
20#define CLBR_R9 (1 << 6)
21#define CLBR_R10 (1 << 7)
22#define CLBR_R11 (1 << 8)
23#define CLBR_ANY ((1 << 9) - 1)
24#include <asm/desc_defs.h>
25#else
26/* CLBR_ANY should match all regs platform has. For i386, that's just it */
27#define CLBR_ANY ((1 << 3) - 1)
28#endif /* X86_64 */
29
30#ifndef __ASSEMBLY__
31#include <linux/types.h>
32#include <linux/cpumask.h>
33#include <asm/kmap_types.h>
34#include <asm/desc_defs.h>
35
36struct page;
37struct thread_struct;
38struct desc_ptr;
39struct tss_struct;
40struct mm_struct;
41struct desc_struct;
42
43/* general info */
44struct pv_info {
45 unsigned int kernel_rpl;
46 int shared_kernel_pmd;
47 int paravirt_enabled;
48 const char *name;
49};
50
51struct pv_init_ops {
52 /*
53 * Patch may replace one of the defined code sequences with
54 * arbitrary code, subject to the same register constraints.
55 * This generally means the code is not free to clobber any
56 * registers other than EAX. The patch function should return
57 * the number of bytes of code generated, as we nop pad the
58 * rest in generic code.
59 */
60 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
61 unsigned long addr, unsigned len);
62
63 /* Basic arch-specific setup */
64 void (*arch_setup)(void);
65 char *(*memory_setup)(void);
66 void (*post_allocator_init)(void);
67
68 /* Print a banner to identify the environment */
69 void (*banner)(void);
70};
71
72
73struct pv_lazy_ops {
74 /* Set deferred update mode, used for batching operations. */
75 void (*enter)(void);
76 void (*leave)(void);
77};
78
79struct pv_time_ops {
80 void (*time_init)(void);
81
82 /* Get and set time of day */
83 unsigned long (*get_wallclock)(void);
84 int (*set_wallclock)(unsigned long);
85
86 unsigned long long (*sched_clock)(void);
87 unsigned long (*get_tsc_khz)(void);
88};
89
90struct pv_cpu_ops {
91 /* hooks for various privileged instructions */
92 unsigned long (*get_debugreg)(int regno);
93 void (*set_debugreg)(int regno, unsigned long value);
94
95 void (*clts)(void);
96
97 unsigned long (*read_cr0)(void);
98 void (*write_cr0)(unsigned long);
99
100 unsigned long (*read_cr4_safe)(void);
101 unsigned long (*read_cr4)(void);
102 void (*write_cr4)(unsigned long);
103
104#ifdef CONFIG_X86_64
105 unsigned long (*read_cr8)(void);
106 void (*write_cr8)(unsigned long);
107#endif
108
109 /* Segment descriptor handling */
110 void (*load_tr_desc)(void);
111 void (*load_gdt)(const struct desc_ptr *);
112 void (*load_idt)(const struct desc_ptr *);
113 void (*store_gdt)(struct desc_ptr *);
114 void (*store_idt)(struct desc_ptr *);
115 void (*set_ldt)(const void *desc, unsigned entries);
116 unsigned long (*store_tr)(void);
117 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
118#ifdef CONFIG_X86_64
119 void (*load_gs_index)(unsigned int idx);
120#endif
121 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
122 const void *desc);
123 void (*write_gdt_entry)(struct desc_struct *,
124 int entrynum, const void *desc, int size);
125 void (*write_idt_entry)(gate_desc *,
126 int entrynum, const gate_desc *gate);
127 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
128 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
129
130 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
131
132 void (*set_iopl_mask)(unsigned mask);
133
134 void (*wbinvd)(void);
135 void (*io_delay)(void);
136
137 /* cpuid emulation, mostly so that caps bits can be disabled */
138 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
139 unsigned int *ecx, unsigned int *edx);
140
141 /* MSR, PMC and TSC operations.
142 rdmsr sets err to 0/-EFAULT; wrmsr returns 0/-EFAULT. */
143 u64 (*read_msr_amd)(unsigned int msr, int *err);
144 u64 (*read_msr)(unsigned int msr, int *err);
145 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
146
147 u64 (*read_tsc)(void);
148 u64 (*read_pmc)(int counter);
149 unsigned long long (*read_tscp)(unsigned int *aux);
150
151 /*
152 * Atomically enable interrupts and return to userspace. This
153 * is only ever used to return to 32-bit processes; in a
154 * 64-bit kernel, it's used for 32-on-64 compat processes, but
155 * never native 64-bit processes. (Jump, not call.)
156 */
157 void (*irq_enable_sysexit)(void);
158
159 /*
160 * Switch to usermode gs and return to 64-bit usermode using
161 * sysret. Only used in 64-bit kernels to return to 64-bit
162 * processes. Usermode register state, including %rsp, must
163 * already be restored.
164 */
165 void (*usergs_sysret64)(void);
166
167 /*
168 * Switch to usermode gs and return to 32-bit usermode using
169 * sysret. Used to return to 32-on-64 compat processes.
170 * Other usermode register state, including %esp, must already
171 * be restored.
172 */
173 void (*usergs_sysret32)(void);
174
175 /* Normal iret. Jump to this with the standard iret stack
176 frame set up. */
177 void (*iret)(void);
178
179 void (*swapgs)(void);
180
181 struct pv_lazy_ops lazy_mode;
182};
183
184struct pv_irq_ops {
185 void (*init_IRQ)(void);
186
187 /*
188 * Get/set interrupt state. save_fl and restore_fl are only
189 * expected to use X86_EFLAGS_IF; all other bits
190 * returned from save_fl are undefined, and may be ignored by
191 * restore_fl.
192 */
193 unsigned long (*save_fl)(void);
194 void (*restore_fl)(unsigned long);
195 void (*irq_disable)(void);
196 void (*irq_enable)(void);
197 void (*safe_halt)(void);
198 void (*halt)(void);
199
200#ifdef CONFIG_X86_64
201 void (*adjust_exception_frame)(void);
202#endif
203};
204
205struct pv_apic_ops {
206#ifdef CONFIG_X86_LOCAL_APIC
207 void (*setup_boot_clock)(void);
208 void (*setup_secondary_clock)(void);
209
210 void (*startup_ipi_hook)(int phys_apicid,
211 unsigned long start_eip,
212 unsigned long start_esp);
213#endif
214};
215
216struct pv_mmu_ops {
217 /*
218 * Called before/after init_mm pagetable setup. setup_start
219 * may reset %cr3, and may pre-install parts of the pagetable;
220 * pagetable setup is expected to preserve any existing
221 * mapping.
222 */
223 void (*pagetable_setup_start)(pgd_t *pgd_base);
224 void (*pagetable_setup_done)(pgd_t *pgd_base);
225
226 unsigned long (*read_cr2)(void);
227 void (*write_cr2)(unsigned long);
228
229 unsigned long (*read_cr3)(void);
230 void (*write_cr3)(unsigned long);
231
232 /*
233 * Hooks for intercepting the creation/use/destruction of an
234 * mm_struct.
235 */
236 void (*activate_mm)(struct mm_struct *prev,
237 struct mm_struct *next);
238 void (*dup_mmap)(struct mm_struct *oldmm,
239 struct mm_struct *mm);
240 void (*exit_mmap)(struct mm_struct *mm);
241
242
243 /* TLB operations */
244 void (*flush_tlb_user)(void);
245 void (*flush_tlb_kernel)(void);
246 void (*flush_tlb_single)(unsigned long addr);
247 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
248 unsigned long va);
249
250 /* Hooks for allocating and freeing a pagetable top-level */
251 int (*pgd_alloc)(struct mm_struct *mm);
252 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
253
254 /*
255 * Hooks for allocating/releasing pagetable pages when they're
256 * attached to a pagetable
257 */
258 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
259 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
260 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
261 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
262 void (*release_pte)(unsigned long pfn);
263 void (*release_pmd)(unsigned long pfn);
264 void (*release_pud)(unsigned long pfn);
265
266 /* Pagetable manipulation functions */
267 void (*set_pte)(pte_t *ptep, pte_t pteval);
268 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
269 pte_t *ptep, pte_t pteval);
270 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
271 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
272 pte_t *ptep);
273 void (*pte_update_defer)(struct mm_struct *mm,
274 unsigned long addr, pte_t *ptep);
275
276 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
277 pte_t *ptep);
278 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
279 pte_t *ptep, pte_t pte);
280
281 pteval_t (*pte_val)(pte_t);
282 pteval_t (*pte_flags)(pte_t);
283 pte_t (*make_pte)(pteval_t pte);
284
285 pgdval_t (*pgd_val)(pgd_t);
286 pgd_t (*make_pgd)(pgdval_t pgd);
287
288#if PAGETABLE_LEVELS >= 3
289#ifdef CONFIG_X86_PAE
290 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
291 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
292 pte_t *ptep, pte_t pte);
293 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
294 pte_t *ptep);
295 void (*pmd_clear)(pmd_t *pmdp);
296
297#endif /* CONFIG_X86_PAE */
298
299 void (*set_pud)(pud_t *pudp, pud_t pudval);
300
301 pmdval_t (*pmd_val)(pmd_t);
302 pmd_t (*make_pmd)(pmdval_t pmd);
303
304#if PAGETABLE_LEVELS == 4
305 pudval_t (*pud_val)(pud_t);
306 pud_t (*make_pud)(pudval_t pud);
307
308 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
309#endif /* PAGETABLE_LEVELS == 4 */
310#endif /* PAGETABLE_LEVELS >= 3 */
311
312#ifdef CONFIG_HIGHPTE
313 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
314#endif
315
316 struct pv_lazy_ops lazy_mode;
317
318 /* dom0 ops */
319
320 /* Sometimes the physical address is a pfn, and sometimes it's
321 an mfn. We can tell which is which from the index. */
322 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
323 unsigned long phys, pgprot_t flags);
324};
325
326struct raw_spinlock;
327struct pv_lock_ops {
328 int (*spin_is_locked)(struct raw_spinlock *lock);
329 int (*spin_is_contended)(struct raw_spinlock *lock);
330 void (*spin_lock)(struct raw_spinlock *lock);
331 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
332 int (*spin_trylock)(struct raw_spinlock *lock);
333 void (*spin_unlock)(struct raw_spinlock *lock);
334};
335
336/* This contains all the paravirt structures: we derive a convenient
337 * number for each function from its offset, which we use to indicate
338 * what to patch. */
339struct paravirt_patch_template {
340 struct pv_init_ops pv_init_ops;
341 struct pv_time_ops pv_time_ops;
342 struct pv_cpu_ops pv_cpu_ops;
343 struct pv_irq_ops pv_irq_ops;
344 struct pv_apic_ops pv_apic_ops;
345 struct pv_mmu_ops pv_mmu_ops;
346 struct pv_lock_ops pv_lock_ops;
347};
348
349extern struct pv_info pv_info;
350extern struct pv_init_ops pv_init_ops;
351extern struct pv_time_ops pv_time_ops;
352extern struct pv_cpu_ops pv_cpu_ops;
353extern struct pv_irq_ops pv_irq_ops;
354extern struct pv_apic_ops pv_apic_ops;
355extern struct pv_mmu_ops pv_mmu_ops;
356extern struct pv_lock_ops pv_lock_ops;
357
358#define PARAVIRT_PATCH(x) \
359 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
360
361#define paravirt_type(op) \
362 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
363 [paravirt_opptr] "m" (op)
364#define paravirt_clobber(clobber) \
365 [paravirt_clobber] "i" (clobber)
366
367/*
368 * Generate some code, and mark it as patchable by the
369 * apply_paravirt() alternate instruction patcher.
370 */
371#define _paravirt_alt(insn_string, type, clobber) \
372 "771:\n\t" insn_string "\n" "772:\n" \
373 ".pushsection .parainstructions,\"a\"\n" \
374 _ASM_ALIGN "\n" \
375 _ASM_PTR " 771b\n" \
376 " .byte " type "\n" \
377 " .byte 772b-771b\n" \
378 " .short " clobber "\n" \
379 ".popsection\n"
380
381/* Generate patchable code, with the default asm parameters. */
382#define paravirt_alt(insn_string) \
383 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
384
385/* Simple instruction patching code. */
386#define DEF_NATIVE(ops, name, code) \
387 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
388 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
389
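/* Illustrative usage (not part of this header): the native patcher
 * declares its replacement sequences with DEF_NATIVE, e.g. in
 * arch/x86/kernel/paravirt_patch_32.c:
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *	DEF_NATIVE(pv_cpu_ops, iret, "iret");
 */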
390unsigned paravirt_patch_nop(void);
391unsigned paravirt_patch_ignore(unsigned len);
392unsigned paravirt_patch_call(void *insnbuf,
393 const void *target, u16 tgt_clobbers,
394 unsigned long addr, u16 site_clobbers,
395 unsigned len);
396unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
397 unsigned long addr, unsigned len);
398unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
399 unsigned long addr, unsigned len);
400
401unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
402 const char *start, const char *end);
403
404unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
405 unsigned long addr, unsigned len);
406
407int paravirt_disable_iospace(void);
408
409/*
410 * This generates an indirect call based on the operation type number.
411 * The type number, computed in PARAVIRT_PATCH, is derived from the
412 * offset into the paravirt_patch_template structure, and can therefore be
413 * freely converted back into a structure offset.
414 */
415#define PARAVIRT_CALL "call *%[paravirt_opptr];"
416
417/*
418 * These macros are intended to wrap calls through one of the paravirt
419 * ops structs, so that they can be later identified and patched at
420 * runtime.
421 *
422 * Normally, a call to a pv_op function is a simple indirect call:
423 * (pv_op_struct.operations)(args...).
424 *
425 * Unfortunately, this is a relatively slow operation for modern CPUs,
426 * because it cannot necessarily determine what the destination
427 * address is. In this case, the address is a runtime constant, so at
428 * the very least we can patch the call to be a simple direct call, or
429 * ideally, patch an inline implementation into the callsite. (Direct
430 * calls are essentially free, because the call and return addresses
431 * are completely predictable.)
432 *
433 * For i386, these macros rely on the standard gcc "regparm(3)" calling
434 * convention, in which the first three arguments are placed in %eax,
435 * %edx, %ecx (in that order), and the remaining arguments are placed
436 * on the stack. All caller-save registers (eax,edx,ecx) are expected
437 * to be modified (either clobbered or used for return values).
438 * X86_64, on the other hand, already specifies a register-based calling
439 * convention, returning in %rax, with parameters going in %rdi, %rsi,
440 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
441 * special handling for dealing with 4 arguments, unlike i386.
442 * However, x86_64 also has to clobber all caller-saved registers, which
443 * unfortunately is quite a few of them (r8 - r11).
444 *
445 * The call instruction itself is marked by placing its start address
446 * and size into the .parainstructions section, so that
447 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
448 * appropriate patching under the control of the backend pv_init_ops
449 * implementation.
450 *
451 * Unfortunately there's no way to get gcc to generate the args setup
452 * for the call, and then allow the call itself to be generated by an
453 * inline asm. Because of this, we must do the complete arg setup and
454 * return value handling from within these macros. This is fairly
455 * cumbersome.
456 *
457 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
458 * It could be extended to more arguments, but there would be little
459 * to be gained from that. For each number of arguments, there are
460 * the two VCALL and CALL variants for void and non-void functions.
461 *
462 * When there is a return value, the invoker of the macro must specify
463 * the return type. The macro then uses sizeof() on that type to
464 * determine whether it's a 32- or 64-bit value, and places the return
465 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
466 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
467 * the return value size.
468 *
469 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
470 * arguments, in low,high order.
471 *
472 *
473 * Small structures are passed and returned in registers. The macro
474 * calling convention can't directly deal with this, so the wrapper
475 * functions must do this.
476 *
477 * These PVOP_* macros are only defined within this header. This
478 * means that all uses must be wrapped in inline functions. This also
479 * makes sure the incoming and outgoing types are always correct.
480 */
481#ifdef CONFIG_X86_32
482#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx
483#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
484#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
485 "=c" (__ecx)
486#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
487#define EXTRA_CLOBBERS
488#define VEXTRA_CLOBBERS
489#else
490#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx
491#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
492#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
493 "=S" (__esi), "=d" (__edx), \
494 "=c" (__ecx)
495
496#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
497
498#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
499#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
500#endif
501
502#ifdef CONFIG_PARAVIRT_DEBUG
503#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
504#else
505#define PVOP_TEST_NULL(op) ((void)op)
506#endif
507
508#define __PVOP_CALL(rettype, op, pre, post, ...) \
509 ({ \
510 rettype __ret; \
511 PVOP_CALL_ARGS; \
512 PVOP_TEST_NULL(op); \
513 /* This is 32-bit specific, but is okay in 64-bit */ \
514 /* since this condition will never hold */ \
515 if (sizeof(rettype) > sizeof(unsigned long)) { \
516 asm volatile(pre \
517 paravirt_alt(PARAVIRT_CALL) \
518 post \
519 : PVOP_CALL_CLOBBERS \
520 : paravirt_type(op), \
521 paravirt_clobber(CLBR_ANY), \
522 ##__VA_ARGS__ \
523 : "memory", "cc" EXTRA_CLOBBERS); \
524 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
525 } else { \
526 asm volatile(pre \
527 paravirt_alt(PARAVIRT_CALL) \
528 post \
529 : PVOP_CALL_CLOBBERS \
530 : paravirt_type(op), \
531 paravirt_clobber(CLBR_ANY), \
532 ##__VA_ARGS__ \
533 : "memory", "cc" EXTRA_CLOBBERS); \
534 __ret = (rettype)__eax; \
535 } \
536 __ret; \
537 })
538#define __PVOP_VCALL(op, pre, post, ...) \
539 ({ \
540 PVOP_VCALL_ARGS; \
541 PVOP_TEST_NULL(op); \
542 asm volatile(pre \
543 paravirt_alt(PARAVIRT_CALL) \
544 post \
545 : PVOP_VCALL_CLOBBERS \
546 : paravirt_type(op), \
547 paravirt_clobber(CLBR_ANY), \
548 ##__VA_ARGS__ \
549 : "memory", "cc" VEXTRA_CLOBBERS); \
550 })
551
552#define PVOP_CALL0(rettype, op) \
553 __PVOP_CALL(rettype, op, "", "")
554#define PVOP_VCALL0(op) \
555 __PVOP_VCALL(op, "", "")
556
557#define PVOP_CALL1(rettype, op, arg1) \
558 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
559#define PVOP_VCALL1(op, arg1) \
560 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
561
562#define PVOP_CALL2(rettype, op, arg1, arg2) \
563 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
564 "1" ((unsigned long)(arg2)))
565#define PVOP_VCALL2(op, arg1, arg2) \
566 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
567 "1" ((unsigned long)(arg2)))
568
569#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
570 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
571 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
572#define PVOP_VCALL3(op, arg1, arg2, arg3) \
573 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
574 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
575
576/* The 4-argument case is the only place x86_64 differs: it can be much simpler */
577#ifdef CONFIG_X86_32
578#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
579 __PVOP_CALL(rettype, op, \
580 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
581 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
582 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
583#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
584 __PVOP_VCALL(op, \
585 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
586 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
587 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
588#else
589#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
590 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
591 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
592 "3"((unsigned long)(arg4)))
593#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
594 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
595 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
596 "3"((unsigned long)(arg4)))
597#endif
598
599static inline int paravirt_enabled(void)
600{
601 return pv_info.paravirt_enabled;
602}
603
604static inline void load_sp0(struct tss_struct *tss,
605 struct thread_struct *thread)
606{
607 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
608}
609
610#define ARCH_SETUP pv_init_ops.arch_setup();
611static inline unsigned long get_wallclock(void)
612{
613 return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
614}
615
616static inline int set_wallclock(unsigned long nowtime)
617{
618 return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
619}
620
621static inline void (*choose_time_init(void))(void)
622{
623 return pv_time_ops.time_init;
624}
625
626/* The paravirtualized CPUID instruction. */
627static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
628 unsigned int *ecx, unsigned int *edx)
629{
630 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
631}
632
633/*
634 * These special macros can be used to get or set a debugging register
635 */
636static inline unsigned long paravirt_get_debugreg(int reg)
637{
638 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
639}
640#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
641static inline void set_debugreg(unsigned long val, int reg)
642{
643 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
644}
645
646static inline void clts(void)
647{
648 PVOP_VCALL0(pv_cpu_ops.clts);
649}
650
651static inline unsigned long read_cr0(void)
652{
653 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
654}
655
656static inline void write_cr0(unsigned long x)
657{
658 PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
659}
660
661static inline unsigned long read_cr2(void)
662{
663 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
664}
665
666static inline void write_cr2(unsigned long x)
667{
668 PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
669}
670
671static inline unsigned long read_cr3(void)
672{
673 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
674}
675
676static inline void write_cr3(unsigned long x)
677{
678 PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
679}
680
681static inline unsigned long read_cr4(void)
682{
683 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
684}
685static inline unsigned long read_cr4_safe(void)
686{
687 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
688}
689
690static inline void write_cr4(unsigned long x)
691{
692 PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
693}
694
695#ifdef CONFIG_X86_64
696static inline unsigned long read_cr8(void)
697{
698 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
699}
700
701static inline void write_cr8(unsigned long x)
702{
703 PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
704}
705#endif
706
707static inline void raw_safe_halt(void)
708{
709 PVOP_VCALL0(pv_irq_ops.safe_halt);
710}
711
712static inline void halt(void)
713{
714 PVOP_VCALL0(pv_irq_ops.safe_halt);
715}
716
717static inline void wbinvd(void)
718{
719 PVOP_VCALL0(pv_cpu_ops.wbinvd);
720}
721
722#define get_kernel_rpl() (pv_info.kernel_rpl)
723
724static inline u64 paravirt_read_msr(unsigned msr, int *err)
725{
726 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
727}
728static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
729{
730 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
731}
732static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
733{
734 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
735}
736
737/* These should all do BUG_ON(_err), but our headers are too tangled. */
738#define rdmsr(msr, val1, val2) \
739do { \
740 int _err; \
741 u64 _l = paravirt_read_msr(msr, &_err); \
742 val1 = (u32)_l; \
743 val2 = _l >> 32; \
744} while (0)
745
746#define wrmsr(msr, val1, val2) \
747do { \
748 paravirt_write_msr(msr, val1, val2); \
749} while (0)
750
751#define rdmsrl(msr, val) \
752do { \
753 int _err; \
754 val = paravirt_read_msr(msr, &_err); \
755} while (0)
756
757#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
758#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
759
760/* rdmsr with exception handling */
761#define rdmsr_safe(msr, a, b) \
762({ \
763 int _err; \
764 u64 _l = paravirt_read_msr(msr, &_err); \
765 (*a) = (u32)_l; \
766 (*b) = _l >> 32; \
767 _err; \
768})
769
770static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
771{
772 int err;
773
774 *p = paravirt_read_msr(msr, &err);
775 return err;
776}
777static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
778{
779 int err;
780
781 *p = paravirt_read_msr_amd(msr, &err);
782 return err;
783}
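
/*
 * Hedged usage sketch (hypothetical helper, not part of the original
 * header): rdmsr_safe() returns the fault status instead of BUG()ing,
 * so callers can probe MSRs that may not exist on the current CPU.
 */
static inline int msr_readable_example(unsigned msr)
{
	u32 lo, hi;

	return rdmsr_safe(msr, &lo, &hi) == 0;	/* 1 if the read succeeded */
}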
784
785static inline u64 paravirt_read_tsc(void)
786{
787 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
788}
789
790#define rdtscl(low) \
791do { \
792 u64 _l = paravirt_read_tsc(); \
793 low = (int)_l; \
794} while (0)
795
796#define rdtscll(val) (val = paravirt_read_tsc())
797
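/*
 * Minimal sketch (hypothetical helper): rdtscll() lends itself to crude
 * cycle accounting around a region of code.
 */
static inline unsigned long long tsc_delta_example(void)
{
	unsigned long long t0, t1;

	rdtscll(t0);
	/* ... region under measurement ... */
	rdtscll(t1);
	return t1 - t0;
}
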
798static inline unsigned long long paravirt_sched_clock(void)
799{
800 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
801}
802#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
803
804static inline unsigned long long paravirt_read_pmc(int counter)
805{
806 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
807}
808
809#define rdpmc(counter, low, high) \
810do { \
811 u64 _l = paravirt_read_pmc(counter); \
812 low = (u32)_l; \
813 high = _l >> 32; \
814} while (0)
815
816static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
817{
818 return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
819}
820
821#define rdtscp(low, high, aux) \
822do { \
823	unsigned int __aux;					\
824 unsigned long __val = paravirt_rdtscp(&__aux); \
825 (low) = (u32)__val; \
826 (high) = (u32)(__val >> 32); \
827 (aux) = __aux; \
828} while (0)
829
830#define rdtscpll(val, aux) \
831do { \
832 unsigned long __aux; \
833 val = paravirt_rdtscp(&__aux); \
834 (aux) = __aux; \
835} while (0)
836
837static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
838{
839 PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
840}
841
842static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
843{
844 PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
845}
846
847static inline void load_TR_desc(void)
848{
849 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
850}
851static inline void load_gdt(const struct desc_ptr *dtr)
852{
853 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
854}
855static inline void load_idt(const struct desc_ptr *dtr)
856{
857 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
858}
859static inline void set_ldt(const void *addr, unsigned entries)
860{
861 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
862}
863static inline void store_gdt(struct desc_ptr *dtr)
864{
865 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
866}
867static inline void store_idt(struct desc_ptr *dtr)
868{
869 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
870}
871static inline unsigned long paravirt_store_tr(void)
872{
873 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
874}
875#define store_tr(tr) ((tr) = paravirt_store_tr())
876static inline void load_TLS(struct thread_struct *t, unsigned cpu)
877{
878 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
879}
880
881#ifdef CONFIG_X86_64
882static inline void load_gs_index(unsigned int gs)
883{
884 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
885}
886#endif
887
888static inline void write_ldt_entry(struct desc_struct *dt, int entry,
889 const void *desc)
890{
891 PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
892}
893
894static inline void write_gdt_entry(struct desc_struct *dt, int entry,
895 void *desc, int type)
896{
897 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
898}
899
900static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
901{
902 PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
903}
904static inline void set_iopl_mask(unsigned mask)
905{
906 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
907}
908
909/* The paravirtualized I/O functions */
910static inline void slow_down_io(void)
911{
912 pv_cpu_ops.io_delay();
913#ifdef REALLY_SLOW_IO
914 pv_cpu_ops.io_delay();
915 pv_cpu_ops.io_delay();
916 pv_cpu_ops.io_delay();
917#endif
918}
919
920#ifdef CONFIG_X86_LOCAL_APIC
921static inline void setup_boot_clock(void)
922{
923 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
924}
925
926static inline void setup_secondary_clock(void)
927{
928 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
929}
930#endif
931
932static inline void paravirt_post_allocator_init(void)
933{
934 if (pv_init_ops.post_allocator_init)
935 (*pv_init_ops.post_allocator_init)();
936}
937
938static inline void paravirt_pagetable_setup_start(pgd_t *base)
939{
940 (*pv_mmu_ops.pagetable_setup_start)(base);
941}
942
943static inline void paravirt_pagetable_setup_done(pgd_t *base)
944{
945 (*pv_mmu_ops.pagetable_setup_done)(base);
946}
947
948#ifdef CONFIG_SMP
949static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
950 unsigned long start_esp)
951{
952 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
953 phys_apicid, start_eip, start_esp);
954}
955#endif
956
957static inline void paravirt_activate_mm(struct mm_struct *prev,
958 struct mm_struct *next)
959{
960 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
961}
962
963static inline void arch_dup_mmap(struct mm_struct *oldmm,
964 struct mm_struct *mm)
965{
966 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
967}
968
969static inline void arch_exit_mmap(struct mm_struct *mm)
970{
971 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
972}
973
974static inline void __flush_tlb(void)
975{
976 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
977}
978static inline void __flush_tlb_global(void)
979{
980 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
981}
982static inline void __flush_tlb_single(unsigned long addr)
983{
984 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
985}
986
987static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
988 unsigned long va)
989{
990 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
991}
992
993static inline int paravirt_pgd_alloc(struct mm_struct *mm)
994{
995 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
996}
997
998static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
999{
1000 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1001}
1002
1003static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1004{
1005 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
1006}
1007static inline void paravirt_release_pte(unsigned long pfn)
1008{
1009 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1010}
1011
1012static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1013{
1014 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1015}
1016
1017static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
1018 unsigned long start, unsigned long count)
1019{
1020 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1021}
1022static inline void paravirt_release_pmd(unsigned long pfn)
1023{
1024 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1025}
1026
1027static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1028{
1029 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1030}
1031static inline void paravirt_release_pud(unsigned long pfn)
1032{
1033 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1034}
1035
1036#ifdef CONFIG_HIGHPTE
1037static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1038{
1039 unsigned long ret;
1040 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
1041 return (void *)ret;
1042}
1043#endif
1044
1045static inline void pte_update(struct mm_struct *mm, unsigned long addr,
1046 pte_t *ptep)
1047{
1048 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
1049}
1050
1051static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
1052 pte_t *ptep)
1053{
1054 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
1055}
1056
1057static inline pte_t __pte(pteval_t val)
1058{
1059 pteval_t ret;
1060
1061 if (sizeof(pteval_t) > sizeof(long))
1062 ret = PVOP_CALL2(pteval_t,
1063 pv_mmu_ops.make_pte,
1064 val, (u64)val >> 32);
1065 else
1066 ret = PVOP_CALL1(pteval_t,
1067 pv_mmu_ops.make_pte,
1068 val);
1069
1070 return (pte_t) { .pte = ret };
1071}
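
/*
 * Note on the pattern above: with PAE, pteval_t is 64 bits while long is
 * 32 bits, so the value must be split across two argument words for the
 * call; otherwise a single word suffices.  The same sizeof() test recurs
 * in the pte/pgd/pmd/pud helpers below.
 */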
1072
1073static inline pteval_t pte_val(pte_t pte)
1074{
1075 pteval_t ret;
1076
1077 if (sizeof(pteval_t) > sizeof(long))
1078 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
1079 pte.pte, (u64)pte.pte >> 32);
1080 else
1081 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1082 pte.pte);
1083
1084 return ret;
1085}
1086
1087static inline pteval_t pte_flags(pte_t pte)
1088{
1089 pteval_t ret;
1090
1091 if (sizeof(pteval_t) > sizeof(long))
1092 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1093 pte.pte, (u64)pte.pte >> 32);
1094 else
1095 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
1096 pte.pte);
1097
1098#ifdef CONFIG_PARAVIRT_DEBUG
1099 BUG_ON(ret & PTE_PFN_MASK);
1100#endif
1101 return ret;
1102}
1103
1104static inline pgd_t __pgd(pgdval_t val)
1105{
1106 pgdval_t ret;
1107
1108 if (sizeof(pgdval_t) > sizeof(long))
1109 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
1110 val, (u64)val >> 32);
1111 else
1112 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
1113 val);
1114
1115 return (pgd_t) { ret };
1116}
1117
1118static inline pgdval_t pgd_val(pgd_t pgd)
1119{
1120 pgdval_t ret;
1121
1122 if (sizeof(pgdval_t) > sizeof(long))
1123 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
1124 pgd.pgd, (u64)pgd.pgd >> 32);
1125 else
1126 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
1127 pgd.pgd);
1128
1129 return ret;
1130}
1131
1132#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1133static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1134 pte_t *ptep)
1135{
1136 pteval_t ret;
1137
1138 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1139 mm, addr, ptep);
1140
1141 return (pte_t) { .pte = ret };
1142}
1143
1144static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1145 pte_t *ptep, pte_t pte)
1146{
1147 if (sizeof(pteval_t) > sizeof(long))
1148 /* 5 arg words */
1149 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1150 else
1151 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1152 mm, addr, ptep, pte.pte);
1153}
1154
1155static inline void set_pte(pte_t *ptep, pte_t pte)
1156{
1157 if (sizeof(pteval_t) > sizeof(long))
1158 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1159 pte.pte, (u64)pte.pte >> 32);
1160 else
1161 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1162 pte.pte);
1163}
1164
1165static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1166 pte_t *ptep, pte_t pte)
1167{
1168 if (sizeof(pteval_t) > sizeof(long))
1169 /* 5 arg words */
1170 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1171 else
1172 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1173}
1174
1175static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1176{
1177 pmdval_t val = native_pmd_val(pmd);
1178
1179 if (sizeof(pmdval_t) > sizeof(long))
1180 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1181 else
1182 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1183}
1184
1185#if PAGETABLE_LEVELS >= 3
1186static inline pmd_t __pmd(pmdval_t val)
1187{
1188 pmdval_t ret;
1189
1190 if (sizeof(pmdval_t) > sizeof(long))
1191 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
1192 val, (u64)val >> 32);
1193 else
1194 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
1195 val);
1196
1197 return (pmd_t) { ret };
1198}
1199
1200static inline pmdval_t pmd_val(pmd_t pmd)
1201{
1202 pmdval_t ret;
1203
1204 if (sizeof(pmdval_t) > sizeof(long))
1205 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
1206 pmd.pmd, (u64)pmd.pmd >> 32);
1207 else
1208 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
1209 pmd.pmd);
1210
1211 return ret;
1212}
1213
1214static inline void set_pud(pud_t *pudp, pud_t pud)
1215{
1216 pudval_t val = native_pud_val(pud);
1217
1218 if (sizeof(pudval_t) > sizeof(long))
1219 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1220 val, (u64)val >> 32);
1221 else
1222 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1223 val);
1224}
1225#if PAGETABLE_LEVELS == 4
1226static inline pud_t __pud(pudval_t val)
1227{
1228 pudval_t ret;
1229
1230 if (sizeof(pudval_t) > sizeof(long))
1231 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
1232 val, (u64)val >> 32);
1233 else
1234 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
1235 val);
1236
1237 return (pud_t) { ret };
1238}
1239
1240static inline pudval_t pud_val(pud_t pud)
1241{
1242 pudval_t ret;
1243
1244 if (sizeof(pudval_t) > sizeof(long))
1245 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
1246 pud.pud, (u64)pud.pud >> 32);
1247 else
1248 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
1249 pud.pud);
1250
1251 return ret;
1252}
1253
1254static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1255{
1256 pgdval_t val = native_pgd_val(pgd);
1257
1258 if (sizeof(pgdval_t) > sizeof(long))
1259 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1260 val, (u64)val >> 32);
1261 else
1262 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1263 val);
1264}
1265
1266static inline void pgd_clear(pgd_t *pgdp)
1267{
1268 set_pgd(pgdp, __pgd(0));
1269}
1270
1271static inline void pud_clear(pud_t *pudp)
1272{
1273 set_pud(pudp, __pud(0));
1274}
1275
1276#endif /* PAGETABLE_LEVELS == 4 */
1277
1278#endif /* PAGETABLE_LEVELS >= 3 */
1279
1280#ifdef CONFIG_X86_PAE
1281/* Special-case pte-setting operations for PAE, which can't update a
1282 64-bit pte atomically */
1283static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1284{
1285 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1286 pte.pte, pte.pte >> 32);
1287}
1288
1289static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1290 pte_t *ptep, pte_t pte)
1291{
1292 /* 5 arg words */
1293 pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
1294}
1295
1296static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1297 pte_t *ptep)
1298{
1299 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1300}
1301
1302static inline void pmd_clear(pmd_t *pmdp)
1303{
1304 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1305}
1306#else /* !CONFIG_X86_PAE */
1307static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1308{
1309 set_pte(ptep, pte);
1310}
1311
1312static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1313 pte_t *ptep, pte_t pte)
1314{
1315 set_pte(ptep, pte);
1316}
1317
1318static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1319 pte_t *ptep)
1320{
1321 set_pte_at(mm, addr, ptep, __pte(0));
1322}
1323
1324static inline void pmd_clear(pmd_t *pmdp)
1325{
1326 set_pmd(pmdp, __pmd(0));
1327}
1328#endif /* CONFIG_X86_PAE */
1329
1330/* Lazy mode for batching updates / context switch */
1331enum paravirt_lazy_mode {
1332 PARAVIRT_LAZY_NONE,
1333 PARAVIRT_LAZY_MMU,
1334 PARAVIRT_LAZY_CPU,
1335};
1336
1337enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1338void paravirt_enter_lazy_cpu(void);
1339void paravirt_leave_lazy_cpu(void);
1340void paravirt_enter_lazy_mmu(void);
1341void paravirt_leave_lazy_mmu(void);
1342void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
1343
1344#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
1345static inline void arch_enter_lazy_cpu_mode(void)
1346{
1347 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
1348}
1349
1350static inline void arch_leave_lazy_cpu_mode(void)
1351{
1352 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
1353}
1354
1355static inline void arch_flush_lazy_cpu_mode(void)
1356{
1357 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
1358 arch_leave_lazy_cpu_mode();
1359 arch_enter_lazy_cpu_mode();
1360 }
1361}
1362
1363
1364#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1365static inline void arch_enter_lazy_mmu_mode(void)
1366{
1367 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
1368}
1369
1370static inline void arch_leave_lazy_mmu_mode(void)
1371{
1372 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
1373}
1374
1375static inline void arch_flush_lazy_mmu_mode(void)
1376{
1377 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
1378 arch_leave_lazy_mmu_mode();
1379 arch_enter_lazy_mmu_mode();
1380 }
1381}
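
/*
 * Minimal usage sketch (hypothetical helper): batching a run of PTE
 * writes lets a hypervisor back end coalesce them into fewer hypercalls.
 */
static inline void set_ptes_batched_example(pte_t *ptep, pte_t pte, int n)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < n; i++)
		set_pte(ptep + i, pte);
	arch_leave_lazy_mmu_mode();	/* queued updates are issued here */
}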
1382
1383static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1384 unsigned long phys, pgprot_t flags)
1385{
1386 pv_mmu_ops.set_fixmap(idx, phys, flags);
1387}
1388
1389void _paravirt_nop(void);
1390#define paravirt_nop ((void *)_paravirt_nop)
1391
1392void paravirt_use_bytelocks(void);
1393
1394#ifdef CONFIG_SMP
1395
1396static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1397{
1398 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1399}
1400
1401static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1402{
1403 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1404}
1405
1406static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1407{
1408 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1409}
1410
1411static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
1412 unsigned long flags)
1413{
1414 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
1415}
1416
1417static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1418{
1419 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1420}
1421
1422static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1423{
1424 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1425}
1426
1427#endif
1428
1429/* These all sit in the .parainstructions section to tell us what to patch. */
1430struct paravirt_patch_site {
1431 u8 *instr; /* original instructions */
1432 u8 instrtype; /* type of this instruction */
1433 u8 len; /* length of original instruction */
1434 u16 clobbers; /* what registers you may clobber */
1435};
1436
1437extern struct paravirt_patch_site __parainstructions[],
1438 __parainstructions_end[];
1439
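/*
 * Sketch of how a patcher consumes these records (modeled loosely on
 * apply_paravirt(); hypothetical helper, details omitted):
 */
static inline void walk_patch_sites_example(void)
{
	struct paravirt_patch_site *p;

	for (p = __parainstructions; p < __parainstructions_end; p++) {
		/* pv_init_ops.patch() would rewrite p->len bytes at
		   p->instr, honouring p->clobbers */
		;
	}
}
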
1440#ifdef CONFIG_X86_32
1441#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
1442#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
1443#define PV_FLAGS_ARG "0"
1444#define PV_EXTRA_CLOBBERS
1445#define PV_VEXTRA_CLOBBERS
1446#else
1447/* Saving every register would be too costly; we save only the argument
1448 * register and declare the remaining caller-saved registers as clobbers. */
1449#define PV_SAVE_REGS "pushq %%rdi;"
1450#define PV_RESTORE_REGS "popq %%rdi;"
1451#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1452#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
1453#define PV_FLAGS_ARG "D"
1454#endif
1455
1456static inline unsigned long __raw_local_save_flags(void)
1457{
1458 unsigned long f;
1459
1460 asm volatile(paravirt_alt(PV_SAVE_REGS
1461 PARAVIRT_CALL
1462 PV_RESTORE_REGS)
1463 : "=a"(f)
1464 : paravirt_type(pv_irq_ops.save_fl),
1465 paravirt_clobber(CLBR_EAX)
1466 : "memory", "cc" PV_VEXTRA_CLOBBERS);
1467 return f;
1468}
1469
1470static inline void raw_local_irq_restore(unsigned long f)
1471{
1472 asm volatile(paravirt_alt(PV_SAVE_REGS
1473 PARAVIRT_CALL
1474 PV_RESTORE_REGS)
1475 : "=a"(f)
1476 : PV_FLAGS_ARG(f),
1477 paravirt_type(pv_irq_ops.restore_fl),
1478 paravirt_clobber(CLBR_EAX)
1479 : "memory", "cc" PV_EXTRA_CLOBBERS);
1480}
1481
1482static inline void raw_local_irq_disable(void)
1483{
1484 asm volatile(paravirt_alt(PV_SAVE_REGS
1485 PARAVIRT_CALL
1486 PV_RESTORE_REGS)
1487 :
1488 : paravirt_type(pv_irq_ops.irq_disable),
1489 paravirt_clobber(CLBR_EAX)
1490 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1491}
1492
1493static inline void raw_local_irq_enable(void)
1494{
1495 asm volatile(paravirt_alt(PV_SAVE_REGS
1496 PARAVIRT_CALL
1497 PV_RESTORE_REGS)
1498 :
1499 : paravirt_type(pv_irq_ops.irq_enable),
1500 paravirt_clobber(CLBR_EAX)
1501 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1502}
1503
1504static inline unsigned long __raw_local_irq_save(void)
1505{
1506 unsigned long f;
1507
1508 f = __raw_local_save_flags();
1509 raw_local_irq_disable();
1510 return f;
1511}
1512
1513
1514/* Make sure as little as possible of this mess escapes. */
1515#undef PARAVIRT_CALL
1516#undef __PVOP_CALL
1517#undef __PVOP_VCALL
1518#undef PVOP_VCALL0
1519#undef PVOP_CALL0
1520#undef PVOP_VCALL1
1521#undef PVOP_CALL1
1522#undef PVOP_VCALL2
1523#undef PVOP_CALL2
1524#undef PVOP_VCALL3
1525#undef PVOP_CALL3
1526#undef PVOP_VCALL4
1527#undef PVOP_CALL4
1528
1529#else /* __ASSEMBLY__ */
1530
1531#define _PVSITE(ptype, clobbers, ops, word, algn) \
1532771:; \
1533 ops; \
1534772:; \
1535 .pushsection .parainstructions,"a"; \
1536 .align algn; \
1537 word 771b; \
1538 .byte ptype; \
1539 .byte 772b-771b; \
1540 .short clobbers; \
1541 .popsection
1542
1543
1544#ifdef CONFIG_X86_64
1545#define PV_SAVE_REGS \
1546 push %rax; \
1547 push %rcx; \
1548 push %rdx; \
1549 push %rsi; \
1550 push %rdi; \
1551 push %r8; \
1552 push %r9; \
1553 push %r10; \
1554 push %r11
1555#define PV_RESTORE_REGS \
1556 pop %r11; \
1557 pop %r10; \
1558 pop %r9; \
1559 pop %r8; \
1560 pop %rdi; \
1561 pop %rsi; \
1562 pop %rdx; \
1563 pop %rcx; \
1564 pop %rax
1565#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1566#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1567#define PARA_INDIRECT(addr) *addr(%rip)
1568#else
1569#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
1570#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
1571#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1572#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1573#define PARA_INDIRECT(addr) *%cs:addr
1574#endif
1575
1576#define INTERRUPT_RETURN \
1577 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
1578 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1579
1580#define DISABLE_INTERRUPTS(clobbers) \
1581 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1582 PV_SAVE_REGS; \
1583 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1584		  PV_RESTORE_REGS;)
1585
1586#define ENABLE_INTERRUPTS(clobbers) \
1587 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1588 PV_SAVE_REGS; \
1589 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1590 PV_RESTORE_REGS;)
1591
1592#define USERGS_SYSRET32 \
1593 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
1594 CLBR_NONE, \
1595 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1596
1597#ifdef CONFIG_X86_32
1598#define GET_CR0_INTO_EAX \
1599 push %ecx; push %edx; \
1600 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1601 pop %edx; pop %ecx
1602
1603#define ENABLE_INTERRUPTS_SYSEXIT \
1604 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1605 CLBR_NONE, \
1606 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1607
1608
1609#else /* !CONFIG_X86_32 */
1610
1611/*
1612 * If swapgs is used while the userspace stack is still current,
1613 * there's no way to call a pvop. The PV replacement *must* be
1614 * inlined, or the swapgs instruction must be trapped and emulated.
1615 */
1616#define SWAPGS_UNSAFE_STACK \
1617 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1618 swapgs)
1619
1620#define SWAPGS \
1621 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1622 PV_SAVE_REGS; \
1623 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1624 PV_RESTORE_REGS \
1625 )
1626
1627#define GET_CR2_INTO_RCX \
1628 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1629 movq %rax, %rcx; \
1630 xorq %rax, %rax;
1631
1632#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
1633 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1634 CLBR_NONE, \
1635 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1636
1637#define USERGS_SYSRET64 \
1638 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
1639 CLBR_NONE, \
1640 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1641
1642#define ENABLE_INTERRUPTS_SYSEXIT32 \
1643 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1644 CLBR_NONE, \
1645 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1646#endif /* CONFIG_X86_32 */
1647
1648#endif /* __ASSEMBLY__ */
1649#endif /* CONFIG_PARAVIRT */
1650#endif /* ASM_X86__PARAVIRT_H */
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
deleted file mode 100644
index 2e3dda4dc3d9..000000000000
--- a/include/asm-x86/parport.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef ASM_X86__PARPORT_H
2#define ASM_X86__PARPORT_H
3
4static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
5static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
6{
7 return parport_pc_find_isa_ports(autoirq, autodma);
8}
9
10#endif /* ASM_X86__PARPORT_H */
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
deleted file mode 100644
index 482c3e3f9879..000000000000
--- a/include/asm-x86/pat.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef ASM_X86__PAT_H
2#define ASM_X86__PAT_H
3
4#include <linux/types.h>
5
6#ifdef CONFIG_X86_PAT
7extern int pat_enabled;
8extern void validate_pat_support(struct cpuinfo_x86 *c);
9#else
10static const int pat_enabled;
11static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
12#endif
13
14extern void pat_init(void);
15
16extern int reserve_memtype(u64 start, u64 end,
17 unsigned long req_type, unsigned long *ret_type);
18extern int free_memtype(u64 start, u64 end);
19
20extern void pat_disable(char *reason);
21
22#endif /* ASM_X86__PAT_H */
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
deleted file mode 100644
index da42be07b690..000000000000
--- a/include/asm-x86/pci-direct.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef ASM_X86__PCI_DIRECT_H
2#define ASM_X86__PCI_DIRECT_H
3
4#include <linux/types.h>
5
6/* Direct PCI access. This is used for PCI accesses in early boot before
7 the PCI subsystem works. */
8
9extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
10extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
13extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
14extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
15
16extern int early_pci_allowed(void);
17
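/*
 * Hedged usage sketch (hypothetical helper): read a device's vendor and
 * device IDs with the early accessors; only valid once early_pci_allowed()
 * says the mechanism is usable.
 */
static inline u32 early_read_ids_example(u8 bus, u8 slot, u8 func)
{
	if (!early_pci_allowed())
		return 0;
	return read_pci_config(bus, slot, func, 0x00);	/* device<<16 | vendor */
}
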
18extern unsigned int pci_early_dump_regs;
19extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
20extern void early_dump_pci_devices(void);
21#endif /* ASM_X86__PCI_DIRECT_H */
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
deleted file mode 100644
index 602583192991..000000000000
--- a/include/asm-x86/pci.h
+++ /dev/null
@@ -1,114 +0,0 @@
1#ifndef ASM_X86__PCI_H
2#define ASM_X86__PCI_H
3
4#include <linux/mm.h> /* for struct page */
5#include <linux/types.h>
6#include <linux/slab.h>
7#include <linux/string.h>
8#include <asm/scatterlist.h>
9#include <asm/io.h>
10
11#ifdef __KERNEL__
12
13struct pci_sysdata {
14 int domain; /* PCI domain */
15 int node; /* NUMA node */
16#ifdef CONFIG_X86_64
17 void *iommu; /* IOMMU private data */
18#endif
19};
20
21extern int pci_routeirq;
22
23/* scan a bus after allocating a pci_sysdata for it */
24extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
25 int node);
26extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
27
28static inline int pci_domain_nr(struct pci_bus *bus)
29{
30 struct pci_sysdata *sd = bus->sysdata;
31 return sd->domain;
32}
33
34static inline int pci_proc_domain(struct pci_bus *bus)
35{
36 return pci_domain_nr(bus);
37}
38
39
40/* Can be used to override the logic in pci_scan_bus for skipping
41 already-configured bus numbers - to be used for buggy BIOSes
42 or architectures with incomplete PCI setup by the loader */
43
44#ifdef CONFIG_PCI
45extern unsigned int pcibios_assign_all_busses(void);
46#else
47#define pcibios_assign_all_busses() 0
48#endif
49#define pcibios_scan_all_fns(a, b) 0
50
51extern unsigned long pci_mem_start;
52#define PCIBIOS_MIN_IO 0x1000
53#define PCIBIOS_MIN_MEM (pci_mem_start)
54
55#define PCIBIOS_MIN_CARDBUS_IO 0x4000
56
57void pcibios_config_init(void);
58struct pci_bus *pcibios_scan_root(int bus);
59
60void pcibios_set_master(struct pci_dev *dev);
61void pcibios_penalize_isa_irq(int irq, int active);
62struct irq_routing_table *pcibios_get_irq_routing_table(void);
63int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
64
65
66#define HAVE_PCI_MMAP
67extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
68 enum pci_mmap_state mmap_state,
69 int write_combine);
70
71
72#ifdef CONFIG_PCI
73extern void early_quirks(void);
74static inline void pci_dma_burst_advice(struct pci_dev *pdev,
75 enum pci_dma_burst_strategy *strat,
76 unsigned long *strategy_parameter)
77{
78 *strat = PCI_DMA_BURST_INFINITY;
79 *strategy_parameter = ~0UL;
80}
81#else
82static inline void early_quirks(void) { }
83#endif
84
85#endif /* __KERNEL__ */
86
87#ifdef CONFIG_X86_32
88# include "pci_32.h"
89#else
90# include "pci_64.h"
91#endif
92
93/* implement the pci_ DMA API in terms of the generic device dma_ one */
94#include <asm-generic/pci-dma-compat.h>
95
96/* generic pci stuff */
97#include <asm-generic/pci.h>
98
99#ifdef CONFIG_NUMA
100/* Returns the node based on pci bus */
101static inline int __pcibus_to_node(struct pci_bus *bus)
102{
103 struct pci_sysdata *sd = bus->sysdata;
104
105 return sd->node;
106}
107
108static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
109{
110 return node_to_cpumask(__pcibus_to_node(bus));
111}
112#endif
113
114#endif /* ASM_X86__PCI_H */
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h
deleted file mode 100644
index 3f2288207c0c..000000000000
--- a/include/asm-x86/pci_32.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef ASM_X86__PCI_32_H
2#define ASM_X86__PCI_32_H
3
4
5#ifdef __KERNEL__
6
7
8/* Dynamic DMA mapping stuff.
9 * i386 has everything mapped statically.
10 */
11
12struct pci_dev;
13
14/* The PCI address space does equal the physical memory
15 * address space. The networking and block device layers use
16 * this boolean for bounce buffer decisions.
17 */
18#define PCI_DMA_BUS_IS_PHYS (1)
19
20/* pci_unmap_{page,single} is a nop so... */
21#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0];
22#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0];
23#define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME)
24#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
25 do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
26#define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME)
27#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
28 do { break; } while (pci_unmap_len(PTR, LEN_NAME))
29
30
31#endif /* __KERNEL__ */
32
33
34#endif /* ASM_X86__PCI_32_H */
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
deleted file mode 100644
index f72e12d5770e..000000000000
--- a/include/asm-x86/pci_64.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef ASM_X86__PCI_64_H
2#define ASM_X86__PCI_64_H
3
4#ifdef __KERNEL__
5
6#ifdef CONFIG_CALGARY_IOMMU
7static inline void *pci_iommu(struct pci_bus *bus)
8{
9 struct pci_sysdata *sd = bus->sysdata;
10 return sd->iommu;
11}
12
13static inline void set_pci_iommu(struct pci_bus *bus, void *val)
14{
15 struct pci_sysdata *sd = bus->sysdata;
16 sd->iommu = val;
17}
18#endif /* CONFIG_CALGARY_IOMMU */
19
20extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
21 int reg, int len, u32 *value);
22extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
23 int reg, int len, u32 value);
24
25extern void dma32_reserve_bootmem(void);
26extern void pci_iommu_alloc(void);
27
28/* The PCI address space does equal the physical memory
29 * address space. The networking and block device layers use
30 * this boolean for bounce buffer decisions
31 *
32 * On AMD64 this mostly holds, but we set it to zero if a hardware
33 * IOMMU (gart) or software IOMMU (swiotlb) is available.
34 */
35#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
36
37#if defined(CONFIG_GART_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
38
39#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
40 dma_addr_t ADDR_NAME;
41#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
42 __u32 LEN_NAME;
43#define pci_unmap_addr(PTR, ADDR_NAME) \
44 ((PTR)->ADDR_NAME)
45#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
46 (((PTR)->ADDR_NAME) = (VAL))
47#define pci_unmap_len(PTR, LEN_NAME) \
48 ((PTR)->LEN_NAME)
49#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
50 (((PTR)->LEN_NAME) = (VAL))
51
52#else
53/* No IOMMU */
54
55#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
56#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
57#define pci_unmap_addr(PTR, ADDR_NAME) (0)
58#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
59#define pci_unmap_len(PTR, LEN_NAME) (0)
60#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
61
62#endif
63
64#endif /* __KERNEL__ */
65
66#endif /* ASM_X86__PCI_64_H */
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
deleted file mode 100644
index 80860afffbdb..000000000000
--- a/include/asm-x86/pda.h
+++ /dev/null
@@ -1,137 +0,0 @@
1#ifndef ASM_X86__PDA_H
2#define ASM_X86__PDA_H
3
4#ifndef __ASSEMBLY__
5#include <linux/stddef.h>
6#include <linux/types.h>
7#include <linux/cache.h>
8#include <asm/page.h>
9
10/* Per-processor data structure.  %gs points to it while the kernel runs */
11struct x8664_pda {
12 struct task_struct *pcurrent; /* 0 Current process */
13 unsigned long data_offset; /* 8 Per cpu data offset from linker
14 address */
15 unsigned long kernelstack; /* 16 top of kernel stack for current */
16 unsigned long oldrsp; /* 24 user rsp for system call */
17 int irqcount; /* 32 Irq nesting counter. Starts -1 */
18 unsigned int cpunumber; /* 36 Logical CPU number */
19#ifdef CONFIG_CC_STACKPROTECTOR
20 unsigned long stack_canary; /* 40 stack canary value */
21 /* gcc-ABI: this canary MUST be at
22 offset 40!!! */
23#endif
24 char *irqstackptr;
25 short nodenumber; /* number of current node (32k max) */
26 short in_bootmem; /* pda lives in bootmem */
27 unsigned int __softirq_pending;
28	unsigned int __nmi_count;	/* number of NMIs on this CPU */
29 short mmu_state;
30 short isidle;
31 struct mm_struct *active_mm;
32 unsigned apic_timer_irqs;
33 unsigned irq0_irqs;
34 unsigned irq_resched_count;
35 unsigned irq_call_count;
36 unsigned irq_tlb_count;
37 unsigned irq_thermal_count;
38 unsigned irq_threshold_count;
39 unsigned irq_spurious_count;
40} ____cacheline_aligned_in_smp;
41
42extern struct x8664_pda **_cpu_pda;
43extern void pda_init(int);
44
45#define cpu_pda(i) (_cpu_pda[i])
46
47/*
48 * There is no fast way to get the base address of the PDA, all the accesses
49 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
50 */
51extern void __bad_pda_field(void) __attribute__((noreturn));
52
53/*
54 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
55 * all PDA accesses so it gets read/write dependencies right.
56 */
57extern struct x8664_pda _proxy_pda;
58
59#define pda_offset(field) offsetof(struct x8664_pda, field)
60
61#define pda_to_op(op, field, val) \
62do { \
63 typedef typeof(_proxy_pda.field) T__; \
64 if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
65 switch (sizeof(_proxy_pda.field)) { \
66 case 2: \
67 asm(op "w %1,%%gs:%c2" : \
68 "+m" (_proxy_pda.field) : \
69 "ri" ((T__)val), \
70 "i"(pda_offset(field))); \
71 break; \
72 case 4: \
73 asm(op "l %1,%%gs:%c2" : \
74 "+m" (_proxy_pda.field) : \
75 "ri" ((T__)val), \
76 "i" (pda_offset(field))); \
77 break; \
78 case 8: \
79 asm(op "q %1,%%gs:%c2": \
80 "+m" (_proxy_pda.field) : \
81 "ri" ((T__)val), \
82 "i"(pda_offset(field))); \
83 break; \
84 default: \
85 __bad_pda_field(); \
86 } \
87} while (0)
88
89#define pda_from_op(op, field) \
90({ \
91 typeof(_proxy_pda.field) ret__; \
92 switch (sizeof(_proxy_pda.field)) { \
93 case 2: \
94 asm(op "w %%gs:%c1,%0" : \
95 "=r" (ret__) : \
96 "i" (pda_offset(field)), \
97 "m" (_proxy_pda.field)); \
98 break; \
99 case 4: \
100 asm(op "l %%gs:%c1,%0": \
101 "=r" (ret__): \
102 "i" (pda_offset(field)), \
103 "m" (_proxy_pda.field)); \
104 break; \
105 case 8: \
106 asm(op "q %%gs:%c1,%0": \
107 "=r" (ret__) : \
108 "i" (pda_offset(field)), \
109 "m" (_proxy_pda.field)); \
110 break; \
111 default: \
112 __bad_pda_field(); \
113 } \
114 ret__; \
115})
116
117#define read_pda(field) pda_from_op("mov", field)
118#define write_pda(field, val) pda_to_op("mov", field, val)
119#define add_pda(field, val) pda_to_op("add", field, val)
120#define sub_pda(field, val) pda_to_op("sub", field, val)
121#define or_pda(field, val) pda_to_op("or", field, val)
122
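/*
 * Minimal sketch (hypothetical helper): each accessor above compiles to a
 * single %gs-relative instruction, e.g. fetching the logical CPU number:
 */
static inline unsigned int example_cpu_number(void)
{
	return read_pda(cpunumber);	/* e.g. movl %gs:36,%eax */
}
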
123/* This is not atomic against other CPUs -- CPU preemption needs to be off */
124#define test_and_clear_bit_pda(bit, field) \
125({ \
126 int old__; \
127 asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
128 : "=r" (old__), "+m" (_proxy_pda.field) \
129 : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
130 old__; \
131})
132
133#endif
134
135#define PDA_STACKOFFSET (5*8)
136
137#endif /* ASM_X86__PDA_H */
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
deleted file mode 100644
index e10a1d0678cf..000000000000
--- a/include/asm-x86/percpu.h
+++ /dev/null
@@ -1,218 +0,0 @@
1#ifndef ASM_X86__PERCPU_H
2#define ASM_X86__PERCPU_H
3
4#ifdef CONFIG_X86_64
5#include <linux/compiler.h>
6
7/* Same as asm-generic/percpu.h, except that we store the per-cpu offset
8 in the PDA.  Longer term, the PDA and every per-cpu variable should
9 just be put into a single section and referenced directly
10 from %gs */
11
12#ifdef CONFIG_SMP
13#include <asm/pda.h>
14
15#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
16#define __my_cpu_offset read_pda(data_offset)
17
18#define per_cpu_offset(x) (__per_cpu_offset(x))
19
20#endif
21#include <asm-generic/percpu.h>
22
23DECLARE_PER_CPU(struct x8664_pda, pda);
24
25/*
26 * These are supposed to be implemented as a single instruction which
27 * operates on the per-cpu data base segment. x86-64 doesn't have
28 * that yet, so this is a fairly inefficient workaround for the
29 * meantime. The single instruction is atomic with respect to
30 * preemption and interrupts, so we need to explicitly disable
31 * interrupts here to achieve the same effect. However, because it
32 * can be used from within interrupt-disable/enable, we can't actually
33 * disable interrupts; disabling preemption is enough.
34 */
35#define x86_read_percpu(var) \
36 ({ \
37 typeof(per_cpu_var(var)) __tmp; \
38 preempt_disable(); \
39 __tmp = __get_cpu_var(var); \
40 preempt_enable(); \
41 __tmp; \
42 })
43
44#define x86_write_percpu(var, val) \
45 do { \
46 preempt_disable(); \
47 __get_cpu_var(var) = (val); \
48 preempt_enable(); \
49	} while (0)
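
/*
 * Usage sketch (hypothetical per-CPU variable).  Each accessor disables
 * preemption internally, but note that a read-modify-write like the one
 * below is still not atomic as a whole.
 */
DECLARE_PER_CPU(unsigned long, example_count);

static inline void bump_example_count(void)
{
	x86_write_percpu(example_count, x86_read_percpu(example_count) + 1);
}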
50
51#else /* CONFIG_X86_64 */
52
53#ifdef __ASSEMBLY__
54
55/*
56 * PER_CPU finds an address of a per-cpu variable.
57 *
58 * Args:
59 * var - variable name
60 * reg - 32bit register
61 *
62 * The resulting address is stored in the "reg" argument.
63 *
64 * Example:
65 * PER_CPU(cpu_gdt_descr, %ebx)
66 */
67#ifdef CONFIG_SMP
68#define PER_CPU(var, reg) \
69 movl %fs:per_cpu__##this_cpu_off, reg; \
70 lea per_cpu__##var(reg), reg
71#define PER_CPU_VAR(var) %fs:per_cpu__##var
72#else /* ! SMP */
73#define PER_CPU(var, reg) \
74 movl $per_cpu__##var, reg
75#define PER_CPU_VAR(var) per_cpu__##var
76#endif /* SMP */
77
78#else /* ...!ASSEMBLY */
79
80/*
81 * PER_CPU finds an address of a per-cpu variable.
82 *
83 * Args:
84 * var - variable name
85 * cpu - 32bit register containing the current CPU number
86 *
87 * The resulting address is stored in the "cpu" argument.
88 *
89 * Example:
90 * PER_CPU(cpu_gdt_descr, %ebx)
91 */
92#ifdef CONFIG_SMP
93
94#define __my_cpu_offset x86_read_percpu(this_cpu_off)
95
96/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
97#define __percpu_seg "%%fs:"
98
99#else /* !SMP */
100
101#define __percpu_seg ""
102
103#endif /* SMP */
104
105#include <asm-generic/percpu.h>
106
107/* We can use this directly for local CPU (faster). */
108DECLARE_PER_CPU(unsigned long, this_cpu_off);
109
110/* For arch-specific code, we can use direct single-insn ops (they
111 * don't give an lvalue though). */
112extern void __bad_percpu_size(void);
113
114#define percpu_to_op(op, var, val) \
115do { \
116 typedef typeof(var) T__; \
117 if (0) { \
118 T__ tmp__; \
119 tmp__ = (val); \
120 } \
121 switch (sizeof(var)) { \
122 case 1: \
123 asm(op "b %1,"__percpu_seg"%0" \
124 : "+m" (var) \
125 : "ri" ((T__)val)); \
126 break; \
127 case 2: \
128 asm(op "w %1,"__percpu_seg"%0" \
129 : "+m" (var) \
130 : "ri" ((T__)val)); \
131 break; \
132 case 4: \
133 asm(op "l %1,"__percpu_seg"%0" \
134 : "+m" (var) \
135 : "ri" ((T__)val)); \
136 break; \
137 default: __bad_percpu_size(); \
138 } \
139} while (0)
140
141#define percpu_from_op(op, var) \
142({ \
143 typeof(var) ret__; \
144 switch (sizeof(var)) { \
145 case 1: \
146 asm(op "b "__percpu_seg"%1,%0" \
147 : "=r" (ret__) \
148 : "m" (var)); \
149 break; \
150 case 2: \
151 asm(op "w "__percpu_seg"%1,%0" \
152 : "=r" (ret__) \
153 : "m" (var)); \
154 break; \
155 case 4: \
156 asm(op "l "__percpu_seg"%1,%0" \
157 : "=r" (ret__) \
158 : "m" (var)); \
159 break; \
160 default: __bad_percpu_size(); \
161 } \
162 ret__; \
163})
164
165#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
166#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
167#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
168#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
169#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
170#endif /* !__ASSEMBLY__ */
171#endif /* !CONFIG_X86_64 */
172
173#ifdef CONFIG_SMP
174
175/*
176 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
177 * variables that are initialized and accessed before there are per_cpu
178 * areas allocated.
179 */
180
181#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
182 DEFINE_PER_CPU(_type, _name) = _initvalue; \
183 __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
184 { [0 ... NR_CPUS-1] = _initvalue }; \
185 __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
186
187#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
188 EXPORT_PER_CPU_SYMBOL(_name)
189
190#define DECLARE_EARLY_PER_CPU(_type, _name) \
191 DECLARE_PER_CPU(_type, _name); \
192 extern __typeof__(_type) *_name##_early_ptr; \
193 extern __typeof__(_type) _name##_early_map[]
194
195#define early_per_cpu_ptr(_name) (_name##_early_ptr)
196#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
197#define early_per_cpu(_name, _cpu) \
198 (early_per_cpu_ptr(_name) ? \
199 early_per_cpu_ptr(_name)[_cpu] : \
200 per_cpu(_name, _cpu))
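
/*
 * Hedged sketch (hypothetical variable): values declared this way are
 * usable before the per-CPU areas exist, transparently falling back to
 * the early map until then.
 */
DECLARE_EARLY_PER_CPU(u16, example_apicid);

static inline u16 example_apicid_of(int cpu)
{
	return early_per_cpu(example_apicid, cpu);
}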
201
202#else /* !CONFIG_SMP */
203#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
204 DEFINE_PER_CPU(_type, _name) = _initvalue
205
206#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
207 EXPORT_PER_CPU_SYMBOL(_name)
208
209#define DECLARE_EARLY_PER_CPU(_type, _name) \
210 DECLARE_PER_CPU(_type, _name)
211
212#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
213#define early_per_cpu_ptr(_name) NULL
214/* no early_per_cpu_map() */
215
216#endif /* !CONFIG_SMP */
217
218#endif /* ASM_X86__PERCPU_H */
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
deleted file mode 100644
index 3cd23adedae8..000000000000
--- a/include/asm-x86/pgalloc.h
+++ /dev/null
@@ -1,114 +0,0 @@
1#ifndef ASM_X86__PGALLOC_H
2#define ASM_X86__PGALLOC_H
3
4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */
6#include <linux/pagemap.h>
7
8static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
9
10#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h>
12#else
13#define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)
14static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
15static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
16static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
17static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
18 unsigned long start, unsigned long count) {}
19static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
20static inline void paravirt_release_pte(unsigned long pfn) {}
21static inline void paravirt_release_pmd(unsigned long pfn) {}
22static inline void paravirt_release_pud(unsigned long pfn) {}
23#endif
24
25/*
26 * Allocate and free page tables.
27 */
28extern pgd_t *pgd_alloc(struct mm_struct *);
29extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
30
31extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
32extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
33
34/* Should really implement gc for free page table pages. This could be
35 done with a reference count in struct page. */
36
37static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
38{
39 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
40 free_page((unsigned long)pte);
41}
42
43static inline void pte_free(struct mm_struct *mm, struct page *pte)
44{
45 __free_page(pte);
46}
47
48extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
49
50static inline void pmd_populate_kernel(struct mm_struct *mm,
51 pmd_t *pmd, pte_t *pte)
52{
53 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
54 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
55}
56
57static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
58 struct page *pte)
59{
60 unsigned long pfn = page_to_pfn(pte);
61
62 paravirt_alloc_pte(mm, pfn);
63 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
64}
65
66#define pmd_pgtable(pmd) pmd_page(pmd)
67
68#if PAGETABLE_LEVELS > 2
69static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
70{
71 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
72}
73
74static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
75{
76 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
77 free_page((unsigned long)pmd);
78}
79
80extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
81
82#ifdef CONFIG_X86_PAE
83extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
84#else /* !CONFIG_X86_PAE */
85static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
86{
87 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
88 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
89}
90#endif /* CONFIG_X86_PAE */
91
92#if PAGETABLE_LEVELS > 3
93static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
94{
95 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
96 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
97}
98
99static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
100{
101 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
102}
103
104static inline void pud_free(struct mm_struct *mm, pud_t *pud)
105{
106 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
107 free_page((unsigned long)pud);
108}
109
110extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
111#endif /* PAGETABLE_LEVELS > 3 */
112#endif /* PAGETABLE_LEVELS > 2 */
113
114#endif /* ASM_X86__PGALLOC_H */
diff --git a/include/asm-x86/pgtable-2level-defs.h b/include/asm-x86/pgtable-2level-defs.h
deleted file mode 100644
index 7ec48f4e5347..000000000000
--- a/include/asm-x86/pgtable-2level-defs.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef ASM_X86__PGTABLE_2LEVEL_DEFS_H
2#define ASM_X86__PGTABLE_2LEVEL_DEFS_H
3
4#define SHARED_KERNEL_PMD 0
5
6/*
7 * traditional i386 two-level paging structure:
8 */
9
10#define PGDIR_SHIFT 22
11#define PTRS_PER_PGD 1024
12
13/*
14 * the i386 is two-level, so we don't really have any
15 * PMD directory physically.
16 */
17
18#define PTRS_PER_PTE 1024
19
20#endif /* ASM_X86__PGTABLE_2LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
deleted file mode 100644
index 81762081dcd8..000000000000
--- a/include/asm-x86/pgtable-2level.h
+++ /dev/null
@@ -1,79 +0,0 @@
1#ifndef ASM_X86__PGTABLE_2LEVEL_H
2#define ASM_X86__PGTABLE_2LEVEL_H
3
4#define pte_ERROR(e) \
5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
6#define pgd_ERROR(e) \
7 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
8
9/*
10 * Certain architectures need to do special things when PTEs
11 * within a page table are directly modified. Thus, the following
12 * hook is made available.
13 */
14static inline void native_set_pte(pte_t *ptep, pte_t pte)
15{
16 *ptep = pte;
17}
18
19static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
20{
21 *pmdp = pmd;
22}
23
24static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
25{
26 native_set_pte(ptep, pte);
27}
28
29static inline void native_set_pte_present(struct mm_struct *mm,
30 unsigned long addr,
31 pte_t *ptep, pte_t pte)
32{
33 native_set_pte(ptep, pte);
34}
35
36static inline void native_pmd_clear(pmd_t *pmdp)
37{
38 native_set_pmd(pmdp, __pmd(0));
39}
40
41static inline void native_pte_clear(struct mm_struct *mm,
42 unsigned long addr, pte_t *xp)
43{
44 *xp = native_make_pte(0);
45}
46
47#ifdef CONFIG_SMP
48static inline pte_t native_ptep_get_and_clear(pte_t *xp)
49{
50 return __pte(xchg(&xp->pte_low, 0));
51}
52#else
53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
54#endif
55
56#define pte_none(x) (!(x).pte_low)
57
58/*
59 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
60 * into this range:
61 */
62#define PTE_FILE_MAX_BITS 29
63
64#define pte_to_pgoff(pte) \
65 ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))
66
67#define pgoff_to_pte(off) \
68 ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \
69 (((off) >> 5) << 8) + _PAGE_FILE })
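
/*
 * Sanity sketch: the two macros above are inverses for any offset that
 * fits in PTE_FILE_MAX_BITS, e.g. (hypothetical check)
 *
 *	BUG_ON(pte_to_pgoff(pgoff_to_pte(0x1234567)) != 0x1234567);
 */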
70
71/* Encode and de-code a swap entry */
72#define __swp_type(x) (((x).val >> 1) & 0x1f)
73#define __swp_offset(x) ((x).val >> 8)
74#define __swp_entry(type, offset) \
75 ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
76#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
77#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
78
79#endif /* ASM_X86__PGTABLE_2LEVEL_H */
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h
deleted file mode 100644
index c05fe6ff3720..000000000000
--- a/include/asm-x86/pgtable-3level-defs.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef ASM_X86__PGTABLE_3LEVEL_DEFS_H
2#define ASM_X86__PGTABLE_3LEVEL_DEFS_H
3
4#ifdef CONFIG_PARAVIRT
5#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
6#else
7#define SHARED_KERNEL_PMD 1
8#endif
9
10/*
11 * PGDIR_SHIFT determines what a top-level page table entry can map
12 */
13#define PGDIR_SHIFT 30
14#define PTRS_PER_PGD 4
15
16/*
17 * PMD_SHIFT determines the size of the area a middle-level
18 * page table can map
19 */
20#define PMD_SHIFT 21
21#define PTRS_PER_PMD 512
22
23/*
24 * entries per page directory level
25 */
26#define PTRS_PER_PTE 512
27
28#endif /* ASM_X86__PGTABLE_3LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
deleted file mode 100644
index 75f4276b5ddb..000000000000
--- a/include/asm-x86/pgtable-3level.h
+++ /dev/null
@@ -1,175 +0,0 @@
1#ifndef ASM_X86__PGTABLE_3LEVEL_H
2#define ASM_X86__PGTABLE_3LEVEL_H
3
4/*
5 * Intel Physical Address Extension (PAE) Mode - three-level page
6 * tables on PPro+ CPUs.
7 *
8 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
9 */
10
11#define pte_ERROR(e) \
12 printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
13 __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
14#define pmd_ERROR(e) \
15 printk("%s:%d: bad pmd %p(%016Lx).\n", \
16 __FILE__, __LINE__, &(e), pmd_val(e))
17#define pgd_ERROR(e) \
18 printk("%s:%d: bad pgd %p(%016Lx).\n", \
19 __FILE__, __LINE__, &(e), pgd_val(e))
20
21static inline int pud_none(pud_t pud)
22{
23 return pud_val(pud) == 0;
24}
25
26static inline int pud_bad(pud_t pud)
27{
28 return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
29}
30
31static inline int pud_present(pud_t pud)
32{
33 return pud_val(pud) & _PAGE_PRESENT;
34}
35
36/* Rules for using set_pte: the pte being assigned *must* be
37 * either not present or in a state where the hardware will
38 * not attempt to update the pte. In places where this is
39 * not possible, use ptep_get_and_clear to obtain the old pte
40 * value and then use set_pte to update it. -ben
41 */
42static inline void native_set_pte(pte_t *ptep, pte_t pte)
43{
44 ptep->pte_high = pte.pte_high;
45 smp_wmb();
46 ptep->pte_low = pte.pte_low;
47}
48
49/*
50 * Since this is only called on user PTEs, and the page fault handler
51 * must handle the already racy situation of simultaneous page faults,
52 * we are justified in merely clearing the PTE present bit, followed
53 * by a set. The ordering here is important.
54 */
55static inline void native_set_pte_present(struct mm_struct *mm,
56 unsigned long addr,
57 pte_t *ptep, pte_t pte)
58{
59 ptep->pte_low = 0;
60 smp_wmb();
61 ptep->pte_high = pte.pte_high;
62 smp_wmb();
63 ptep->pte_low = pte.pte_low;
64}
65
66static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
67{
68 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
69}
70
71static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
72{
73 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
74}
75
76static inline void native_set_pud(pud_t *pudp, pud_t pud)
77{
78 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
79}
80
81/*
82 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
83 * entry, so clear the bottom half first and enforce ordering with a compiler
84 * barrier.
85 */
86static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
87 pte_t *ptep)
88{
89 ptep->pte_low = 0;
90 smp_wmb();
91 ptep->pte_high = 0;
92}
93
94static inline void native_pmd_clear(pmd_t *pmd)
95{
96 u32 *tmp = (u32 *)pmd;
97 *tmp = 0;
98 smp_wmb();
99 *(tmp + 1) = 0;
100}
101
102static inline void pud_clear(pud_t *pudp)
103{
104 unsigned long pgd;
105
106 set_pud(pudp, __pud(0));
107
108 /*
109 * According to Intel App note "TLBs, Paging-Structure Caches,
110 * and Their Invalidation", April 2007, document 317080-001,
111 * section 8.1: in PAE mode we explicitly have to flush the
112 * TLB via cr3 if the top-level pgd is changed...
113 *
114 * Make sure the pud entry we're updating is within the
115 * current pgd to avoid unnecessary TLB flushes.
116 */
117 pgd = read_cr3();
118 if (__pa(pudp) >= pgd && __pa(pudp) <
119 (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
120 write_cr3(pgd);
121}
122
123#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
124
125#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
126
127
128/* Find an entry in the second-level page table. */
129#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
130 pmd_index(address))
131
132#ifdef CONFIG_SMP
133static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
134{
135 pte_t res;
136
137 /* xchg acts as a barrier before the setting of the high bits */
138 res.pte_low = xchg(&ptep->pte_low, 0);
139 res.pte_high = ptep->pte_high;
140 ptep->pte_high = 0;
141
142 return res;
143}
144#else
145#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
146#endif
147
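/*
 * Sketch of the "Rules for using set_pte" note above (hypothetical
 * helper): when the old entry may be live, clear it first so the CPU
 * never observes a half-written 64-bit PTE.
 */
static inline void update_live_pte_example(pte_t *ptep, pte_t pte)
{
	(void)native_ptep_get_and_clear(ptep);	/* old value discarded */
	native_set_pte(ptep, pte);
}
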
148#define __HAVE_ARCH_PTE_SAME
149static inline int pte_same(pte_t a, pte_t b)
150{
151 return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
152}
153
154static inline int pte_none(pte_t pte)
155{
156 return !pte.pte_low && !pte.pte_high;
157}
158
159/*
160 * Bits 0, 6 and 7 are taken in the low part of the pte,
161 * put the 32 bits of offset into the high part.
162 */
163#define pte_to_pgoff(pte) ((pte).pte_high)
164#define pgoff_to_pte(off) \
165 ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
166#define PTE_FILE_MAX_BITS 32
167
168/* Encode and de-code a swap entry */
169#define __swp_type(x) (((x).val) & 0x1f)
170#define __swp_offset(x) ((x).val >> 5)
171#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
172#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
173#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
174
175#endif /* ASM_X86__PGTABLE_3LEVEL_H */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
deleted file mode 100644
index 88a53b1a17f0..000000000000
--- a/include/asm-x86/pgtable.h
+++ /dev/null
@@ -1,561 +0,0 @@
1#ifndef ASM_X86__PGTABLE_H
2#define ASM_X86__PGTABLE_H
3
4#define FIRST_USER_ADDRESS 0
5
6#define _PAGE_BIT_PRESENT 0 /* is present */
7#define _PAGE_BIT_RW 1 /* writeable */
8#define _PAGE_BIT_USER 2 /* userspace addressable */
9#define _PAGE_BIT_PWT 3 /* page write through */
10#define _PAGE_BIT_PCD 4 /* page cache disabled */
11#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
12#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
13#define _PAGE_BIT_FILE 6
14#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
15#define _PAGE_BIT_PAT 7 /* on 4KB pages */
16#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
17#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
18#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
19#define _PAGE_BIT_UNUSED3 11
20#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
21#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
22#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
23#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
24
25#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
26#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
27#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
28#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
29#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
30#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
31#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
32#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
33#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
34#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
35#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
36#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
37#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
38#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
39#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
40#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
41#define __HAVE_ARCH_PTE_SPECIAL
42
43#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
44#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
45#else
46#define _PAGE_NX (_AT(pteval_t, 0))
47#endif
48
49/* If _PAGE_PRESENT is clear, we use these: */
50#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping,
51 * saved PTE; unset:swap */
52#define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE;
53 pte_present gives true */
54
55#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
56 _PAGE_ACCESSED | _PAGE_DIRTY)
57#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
58 _PAGE_DIRTY)
59
60/* Set of bits not changed in pte_modify */
61#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
62 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
63
64#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
65#define _PAGE_CACHE_WB (0)
66#define _PAGE_CACHE_WC (_PAGE_PWT)
67#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
68#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
69
70#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
71#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
72 _PAGE_ACCESSED | _PAGE_NX)
73
74#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
75 _PAGE_USER | _PAGE_ACCESSED)
76#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
77 _PAGE_ACCESSED | _PAGE_NX)
78#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
79 _PAGE_ACCESSED)
80#define PAGE_COPY PAGE_COPY_NOEXEC
81#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
82 _PAGE_ACCESSED | _PAGE_NX)
83#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
84 _PAGE_ACCESSED)
85
86#define __PAGE_KERNEL_EXEC \
87 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
88#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
89
90#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
91#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
92#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
93#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
94#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
95#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
96#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
97#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
98#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
99#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
100#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
101
102#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
103#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
104#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
105#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
106
107#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
108#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
109#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
110#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
111#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
112#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
113#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
114#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
115#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
116#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
117#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
118#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
119#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
120
121#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
122#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
123#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
124#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
125
126/* xwr */
127#define __P000 PAGE_NONE
128#define __P001 PAGE_READONLY
129#define __P010 PAGE_COPY
130#define __P011 PAGE_COPY
131#define __P100 PAGE_READONLY_EXEC
132#define __P101 PAGE_READONLY_EXEC
133#define __P110 PAGE_COPY_EXEC
134#define __P111 PAGE_COPY_EXEC
135
136#define __S000 PAGE_NONE
137#define __S001 PAGE_READONLY
138#define __S010 PAGE_SHARED
139#define __S011 PAGE_SHARED
140#define __S100 PAGE_READONLY_EXEC
141#define __S101 PAGE_READONLY_EXEC
142#define __S110 PAGE_SHARED_EXEC
143#define __S111 PAGE_SHARED_EXEC
144
145/*
146 * early identity mapping pte attrib macros.
147 */
148#ifdef CONFIG_X86_64
149#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
150#else
151/*
152 * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
153 * bits are combined, this will allow the user to access the high address mapped
154 * VDSO in the presence of CONFIG_COMPAT_VDSO
155 */
156#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
157#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
158#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
159#endif
160
161#ifndef __ASSEMBLY__
162
163/*
164 * ZERO_PAGE is a global shared page that is always zero: used
165 * for zero-mapped memory areas etc..
166 */
167extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
168#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
169
170extern spinlock_t pgd_lock;
171extern struct list_head pgd_list;
172
173/*
174 * The following only work if pte_present() is true.
175 * Undefined behaviour if not..
176 */
177static inline int pte_dirty(pte_t pte)
178{
179 return pte_flags(pte) & _PAGE_DIRTY;
180}
181
182static inline int pte_young(pte_t pte)
183{
184 return pte_flags(pte) & _PAGE_ACCESSED;
185}
186
187static inline int pte_write(pte_t pte)
188{
189 return pte_flags(pte) & _PAGE_RW;
190}
191
192static inline int pte_file(pte_t pte)
193{
194 return pte_flags(pte) & _PAGE_FILE;
195}
196
197static inline int pte_huge(pte_t pte)
198{
199 return pte_flags(pte) & _PAGE_PSE;
200}
201
202static inline int pte_global(pte_t pte)
203{
204 return pte_flags(pte) & _PAGE_GLOBAL;
205}
206
207static inline int pte_exec(pte_t pte)
208{
209 return !(pte_flags(pte) & _PAGE_NX);
210}
211
212static inline int pte_special(pte_t pte)
213{
214 return pte_flags(pte) & _PAGE_SPECIAL;
215}
216
217static inline unsigned long pte_pfn(pte_t pte)
218{
219 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
220}
221
222#define pte_page(pte) pfn_to_page(pte_pfn(pte))
223
224static inline int pmd_large(pmd_t pte)
225{
226 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
227 (_PAGE_PSE | _PAGE_PRESENT);
228}
229
230static inline pte_t pte_mkclean(pte_t pte)
231{
232 return __pte(pte_val(pte) & ~_PAGE_DIRTY);
233}
234
235static inline pte_t pte_mkold(pte_t pte)
236{
237 return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
238}
239
240static inline pte_t pte_wrprotect(pte_t pte)
241{
242 return __pte(pte_val(pte) & ~_PAGE_RW);
243}
244
245static inline pte_t pte_mkexec(pte_t pte)
246{
247 return __pte(pte_val(pte) & ~_PAGE_NX);
248}
249
250static inline pte_t pte_mkdirty(pte_t pte)
251{
252 return __pte(pte_val(pte) | _PAGE_DIRTY);
253}
254
255static inline pte_t pte_mkyoung(pte_t pte)
256{
257 return __pte(pte_val(pte) | _PAGE_ACCESSED);
258}
259
260static inline pte_t pte_mkwrite(pte_t pte)
261{
262 return __pte(pte_val(pte) | _PAGE_RW);
263}
264
265static inline pte_t pte_mkhuge(pte_t pte)
266{
267 return __pte(pte_val(pte) | _PAGE_PSE);
268}
269
270static inline pte_t pte_clrhuge(pte_t pte)
271{
272 return __pte(pte_val(pte) & ~_PAGE_PSE);
273}
274
275static inline pte_t pte_mkglobal(pte_t pte)
276{
277 return __pte(pte_val(pte) | _PAGE_GLOBAL);
278}
279
280static inline pte_t pte_clrglobal(pte_t pte)
281{
282 return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
283}
284
285static inline pte_t pte_mkspecial(pte_t pte)
286{
287 return __pte(pte_val(pte) | _PAGE_SPECIAL);
288}
289
290extern pteval_t __supported_pte_mask;
291
292static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
293{
294 return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
295 pgprot_val(pgprot)) & __supported_pte_mask);
296}
297
298static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
299{
300 return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
301 pgprot_val(pgprot)) & __supported_pte_mask);
302}
303
304static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
305{
306 pteval_t val = pte_val(pte);
307
308 /*
309 * Keep only the bits in _PAGE_CHG_MASK, then take everything
310 * else (including NX, when supported) from newprot:
311 */
312 val &= _PAGE_CHG_MASK;
313 val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
314
315 return __pte(val);
316}
317
318/* mprotect needs to preserve PAT bits when updating vm_page_prot */
319#define pgprot_modify pgprot_modify
320static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
321{
322 pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
323 pgprotval_t addbits = pgprot_val(newprot);
324 return __pgprot(preservebits | addbits);
325}
326
327#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
328
329#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
330
331#ifndef __ASSEMBLY__
332#define __HAVE_PHYS_MEM_ACCESS_PROT
333struct file;
334pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
335 unsigned long size, pgprot_t vma_prot);
336int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
337 unsigned long size, pgprot_t *vma_prot);
338#endif
339
340/* Install a pte for a particular vaddr in kernel space. */
341void set_pte_vaddr(unsigned long vaddr, pte_t pte);
342
343#ifdef CONFIG_X86_32
344extern void native_pagetable_setup_start(pgd_t *base);
345extern void native_pagetable_setup_done(pgd_t *base);
346#else
347static inline void native_pagetable_setup_start(pgd_t *base) {}
348static inline void native_pagetable_setup_done(pgd_t *base) {}
349#endif
350
351extern int arch_report_meminfo(char *page);
352
353#ifdef CONFIG_PARAVIRT
354#include <asm/paravirt.h>
355#else /* !CONFIG_PARAVIRT */
356#define set_pte(ptep, pte) native_set_pte(ptep, pte)
357#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
358
359#define set_pte_present(mm, addr, ptep, pte) \
360 native_set_pte_present(mm, addr, ptep, pte)
361#define set_pte_atomic(ptep, pte) \
362 native_set_pte_atomic(ptep, pte)
363
364#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
365
366#ifndef __PAGETABLE_PUD_FOLDED
367#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
368#define pgd_clear(pgd) native_pgd_clear(pgd)
369#endif
370
371#ifndef set_pud
372# define set_pud(pudp, pud) native_set_pud(pudp, pud)
373#endif
374
375#ifndef __PAGETABLE_PMD_FOLDED
376#define pud_clear(pud) native_pud_clear(pud)
377#endif
378
379#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
380#define pmd_clear(pmd) native_pmd_clear(pmd)
381
382#define pte_update(mm, addr, ptep) do { } while (0)
383#define pte_update_defer(mm, addr, ptep) do { } while (0)
384
385static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
386{
387 native_pagetable_setup_start(base);
388}
389
390static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
391{
392 native_pagetable_setup_done(base);
393}
394#endif /* CONFIG_PARAVIRT */
395
396#endif /* __ASSEMBLY__ */
397
398#ifdef CONFIG_X86_32
399# include "pgtable_32.h"
400#else
401# include "pgtable_64.h"
402#endif
403
404/*
405 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
406 *
407 * this macro returns the index of the entry in the pgd page which would
408 * control the given virtual address
409 */
410#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
411
412/*
413 * pgd_offset() returns a (pgd_t *)
414 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
415 */
416#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
417/*
418 * a shortcut which implies the use of the kernel's pgd, instead
419 * of a process's
420 */
421#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
422
423
424#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
425#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
426
427#ifndef __ASSEMBLY__
428
429enum {
430 PG_LEVEL_NONE,
431 PG_LEVEL_4K,
432 PG_LEVEL_2M,
433 PG_LEVEL_1G,
434 PG_LEVEL_NUM
435};
436
437#ifdef CONFIG_PROC_FS
438extern void update_page_count(int level, unsigned long pages);
439#else
440static inline void update_page_count(int level, unsigned long pages) { }
441#endif
442
443/*
444 * Helper function that returns the kernel pagetable entry controlling
445 * the virtual address 'address'. NULL means no pagetable entry present.
446 * NOTE: the return type is pte_t *, but if the pmd is PSE (a large page)
447 * we return it as a pte too.
448 */
449extern pte_t *lookup_address(unsigned long address, unsigned int *level);
450
451/* local pte updates need not use xchg for locking */
452static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
453{
454 pte_t res = *ptep;
455
456 /* Pure native function needs no input for mm, addr */
457 native_pte_clear(NULL, 0, ptep);
458 return res;
459}
460
461static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
462 pte_t *ptep , pte_t pte)
463{
464 native_set_pte(ptep, pte);
465}
466
467#ifndef CONFIG_PARAVIRT
468/*
469 * Rules for using pte_update - it must be called after any PTE update which
470 * has not been done using the set_pte / clear_pte interfaces. It is used by
471 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
472 * updates should either be sets, clears, or set_pte_atomic for P->P
473 * transitions, which means this hook should only be called for user PTEs.
474 * This hook implies a P->P protection or access change has taken place, which
475 * requires a subsequent TLB flush. The notification can optionally be delayed
476 * until the TLB flush event by using the pte_update_defer form of the
477 * interface, but care must be taken to assure that the flush happens while
478 * still holding the same page table lock so that the shadow and primary pages
479 * do not become out of sync on SMP.
480 */
481#define pte_update(mm, addr, ptep) do { } while (0)
482#define pte_update_defer(mm, addr, ptep) do { } while (0)
483#endif
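
A minimal sketch of the contract the comment above describes; mark_old() is a hypothetical helper, not a kernel API, shown only to make the update-then-notify pattern concrete (ptep_set_wrprotect() below is a real instance of the same pattern).

	/* Hypothetical helper: raw PTE write, then pte_update(), then a TLB
	 * flush by the caller while the page table lock is still held. */
	static inline void mark_old(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
	{
		clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *)&ptep->pte);
		pte_update(mm, addr, ptep);	/* resync shadow page tables */
		/* caller: flush the TLB for addr before unlocking */
	}
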
484
485/*
486 * We only update the dirty/accessed state if we set
487 * the dirty bit by hand in the kernel, since the hardware
488 * will do the accessed bit for us, and we don't want to
489 * race with other CPUs that might be updating the dirty
490 * bit at the same time.
491 */
492struct vm_area_struct;
493
494#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
495extern int ptep_set_access_flags(struct vm_area_struct *vma,
496 unsigned long address, pte_t *ptep,
497 pte_t entry, int dirty);
498
499#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
500extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
501 unsigned long addr, pte_t *ptep);
502
503#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
504extern int ptep_clear_flush_young(struct vm_area_struct *vma,
505 unsigned long address, pte_t *ptep);
506
507#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
508static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
509 pte_t *ptep)
510{
511 pte_t pte = native_ptep_get_and_clear(ptep);
512 pte_update(mm, addr, ptep);
513 return pte;
514}
515
516#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
517static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
518 unsigned long addr, pte_t *ptep,
519 int full)
520{
521 pte_t pte;
522 if (full) {
523 /*
524 * Full address destruction in progress; paravirt does not
525 * care about updates and native needs no locking
526 */
527 pte = native_local_ptep_get_and_clear(ptep);
528 } else {
529 pte = ptep_get_and_clear(mm, addr, ptep);
530 }
531 return pte;
532}
533
534#define __HAVE_ARCH_PTEP_SET_WRPROTECT
535static inline void ptep_set_wrprotect(struct mm_struct *mm,
536 unsigned long addr, pte_t *ptep)
537{
538 clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
539 pte_update(mm, addr, ptep);
540}
541
542/*
543 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
544 *
545 * dst - pointer to a pgd range anywhere on a pgd page
546 * src - ""
547 * count - the number of pgds to copy.
548 *
549 * dst and src can be on the same page, but the range must not overlap,
550 * and must not cross a page boundary.
551 */
552static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
553{
554 memcpy(dst, src, count * sizeof(pgd_t));
555}
556
557
558#include <asm-generic/pgtable.h>
559#endif /* __ASSEMBLY__ */
560
561#endif /* ASM_X86__PGTABLE_H */
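
A hedged sketch of how the index/offset helpers above compose into a full walk of a kernel address, assuming the x86-64 (non-folded) layout; lookup_address() performs essentially this walk internally.

	static pte_t *walk_kernel_address(unsigned long addr)
	{
		pgd_t *pgd = pgd_offset_k(addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			return NULL;
		/* a PSE pmd would itself be the final entry (see pmd_large()) */
		return pte_offset_kernel(pmd, addr);
	}
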
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
deleted file mode 100644
index 8de702dc7d62..000000000000
--- a/include/asm-x86/pgtable_32.h
+++ /dev/null
@@ -1,191 +0,0 @@
1#ifndef ASM_X86__PGTABLE_32_H
2#define ASM_X86__PGTABLE_32_H
3
4
5/*
6 * The Linux memory management assumes a three-level page table setup. On
7 * the i386, we use that, but "fold" the mid level into the top-level page
8 * table, so that we physically have the same two-level page table as the
9 * i386 mmu expects.
10 *
11 * This file contains the functions and defines necessary to modify and use
12 * the i386 page table tree.
13 */
14#ifndef __ASSEMBLY__
15#include <asm/processor.h>
16#include <asm/fixmap.h>
17#include <linux/threads.h>
18#include <asm/paravirt.h>
19
20#include <linux/bitops.h>
21#include <linux/slab.h>
22#include <linux/list.h>
23#include <linux/spinlock.h>
24
25struct mm_struct;
26struct vm_area_struct;
27
28extern pgd_t swapper_pg_dir[1024];
29
30static inline void pgtable_cache_init(void) { }
31static inline void check_pgt_cache(void) { }
32void paging_init(void);
33
34extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
35
36/*
37 * The Linux x86 paging architecture is 'compile-time dual-mode', it
38 * implements both the traditional 2-level x86 page tables and the
39 * newer 3-level PAE-mode page tables.
40 */
41#ifdef CONFIG_X86_PAE
42# include <asm/pgtable-3level-defs.h>
43# define PMD_SIZE (1UL << PMD_SHIFT)
44# define PMD_MASK (~(PMD_SIZE - 1))
45#else
46# include <asm/pgtable-2level-defs.h>
47#endif
48
49#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
50#define PGDIR_MASK (~(PGDIR_SIZE - 1))
51
52/* Just any arbitrary offset to the start of the vmalloc VM area: the
53 * current 8MB value just means that there will be an 8MB "hole" after the
54 * physical memory until the kernel virtual memory starts. That means that
55 * any out-of-bounds memory accesses will hopefully be caught.
56 * The vmalloc() routines leave a hole of 4kB between each vmalloced
57 * area for the same reason. ;)
58 */
59#define VMALLOC_OFFSET (8 * 1024 * 1024)
60#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
61#ifdef CONFIG_X86_PAE
62#define LAST_PKMAP 512
63#else
64#define LAST_PKMAP 1024
65#endif
66
67#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
68 & PMD_MASK)
69
70#ifdef CONFIG_HIGHMEM
71# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
72#else
73# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
74#endif
75
76#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
77
78/*
79 * Define this if things work differently on an i386 and an i486:
80 * it will (on an i486) warn about kernel memory accesses that are
81 * done without an 'access_ok(VERIFY_WRITE,..)'
82 */
83#undef TEST_ACCESS_OK
84
85/* The boot page tables (all created as a single array) */
86extern unsigned long pg0[];
87
88#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
89
90/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE is enabled */
91#define pmd_none(x) (!(unsigned long)pmd_val((x)))
92#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
93#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
94
95#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
96
97#ifdef CONFIG_X86_PAE
98# include <asm/pgtable-3level.h>
99#else
100# include <asm/pgtable-2level.h>
101#endif
102
103/*
104 * Macro to mark a page protection value as "uncacheable".
105 * On processors which do not support it, this is a no-op.
106 */
107#define pgprot_noncached(prot) \
108 ((boot_cpu_data.x86 > 3) \
109 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \
110 : (prot))
111
112/*
113 * Conversion functions: convert a page and protection to a page entry,
114 * and a page entry and page directory to the page they refer to.
115 */
116#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
117
118
119static inline int pud_large(pud_t pud) { return 0; }
120
121/*
122 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
123 *
124 * this macro returns the index of the entry in the pmd page which would
125 * control the given virtual address
126 */
127#define pmd_index(address) \
128 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
129
130/*
131 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
132 *
133 * this macro returns the index of the entry in the pte page which would
134 * control the given virtual address
135 */
136#define pte_index(address) \
137 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
138#define pte_offset_kernel(dir, address) \
139 ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
140
141#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
142
143#define pmd_page_vaddr(pmd) \
144 ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
145
146#if defined(CONFIG_HIGHPTE)
147#define pte_offset_map(dir, address) \
148 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
149 pte_index((address)))
150#define pte_offset_map_nested(dir, address) \
151 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
152 pte_index((address)))
153#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
154#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
155#else
156#define pte_offset_map(dir, address) \
157 ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
158#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
159#define pte_unmap(pte) do { } while (0)
160#define pte_unmap_nested(pte) do { } while (0)
161#endif
162
163/* Clear a kernel PTE and flush it from the TLB */
164#define kpte_clear_flush(ptep, vaddr) \
165do { \
166 pte_clear(&init_mm, (vaddr), (ptep)); \
167 __flush_tlb_one((vaddr)); \
168} while (0)
169
170/*
171 * The i386 doesn't have any external MMU info: the kernel page
172 * tables contain all the necessary information.
173 */
174#define update_mmu_cache(vma, address, pte) do { } while (0)
175
176#endif /* !__ASSEMBLY__ */
177
178/*
179 * kern_addr_valid() is (1) for FLATMEM and (0) for
180 * SPARSEMEM and DISCONTIGMEM
181 */
182#ifdef CONFIG_FLATMEM
183#define kern_addr_valid(addr) (1)
184#else
185#define kern_addr_valid(kaddr) (0)
186#endif
187
188#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
189 remap_pfn_range(vma, vaddr, pfn, size, prot)
190
191#endif /* ASM_X86__PGTABLE_32_H */
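
Back-of-the-envelope numbers for the 32-bit layout above, assuming the common 3G/1G split (PAGE_OFFSET = 0xC0000000), the default 128MB __VMALLOC_RESERVE and 4KB pages; the actual values are configuration dependent:

	/*
	 * high_memory   ~= PAGE_OFFSET + 896MB           = 0xF8000000
	 * VMALLOC_START  = high_memory + VMALLOC_OFFSET  = 0xF8800000
	 * LAST_PKMAP     = 1024 ptes (non-PAE) -> 1024 * 4KB = 4MB of pkmap
	 * VMALLOC_END    = PKMAP_BASE - 2 * PAGE_SIZE (with CONFIG_HIGHMEM)
	 */
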
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
deleted file mode 100644
index fde9770e53d1..000000000000
--- a/include/asm-x86/pgtable_64.h
+++ /dev/null
@@ -1,285 +0,0 @@
1#ifndef ASM_X86__PGTABLE_64_H
2#define ASM_X86__PGTABLE_64_H
3
4#include <linux/const.h>
5#ifndef __ASSEMBLY__
6
7/*
8 * This file contains the functions and defines necessary to modify and use
9 * the x86-64 page table tree.
10 */
11#include <asm/processor.h>
12#include <linux/bitops.h>
13#include <linux/threads.h>
14#include <asm/pda.h>
15
16extern pud_t level3_kernel_pgt[512];
17extern pud_t level3_ident_pgt[512];
18extern pmd_t level2_kernel_pgt[512];
19extern pmd_t level2_fixmap_pgt[512];
20extern pmd_t level2_ident_pgt[512];
21extern pgd_t init_level4_pgt[];
22
23#define swapper_pg_dir init_level4_pgt
24
25extern void paging_init(void);
26
27#endif /* !__ASSEMBLY__ */
28
29#define SHARED_KERNEL_PMD 0
30
31/*
32 * PGDIR_SHIFT determines what a top-level page table entry can map
33 */
34#define PGDIR_SHIFT 39
35#define PTRS_PER_PGD 512
36
37/*
38 * 3rd level page
39 */
40#define PUD_SHIFT 30
41#define PTRS_PER_PUD 512
42
43/*
44 * PMD_SHIFT determines the size of the area a middle-level
45 * page table can map
46 */
47#define PMD_SHIFT 21
48#define PTRS_PER_PMD 512
49
50/*
51 * entries per page directory level
52 */
53#define PTRS_PER_PTE 512
54
55#ifndef __ASSEMBLY__
56
57#define pte_ERROR(e) \
58 printk("%s:%d: bad pte %p(%016lx).\n", \
59 __FILE__, __LINE__, &(e), pte_val(e))
60#define pmd_ERROR(e) \
61 printk("%s:%d: bad pmd %p(%016lx).\n", \
62 __FILE__, __LINE__, &(e), pmd_val(e))
63#define pud_ERROR(e) \
64 printk("%s:%d: bad pud %p(%016lx).\n", \
65 __FILE__, __LINE__, &(e), pud_val(e))
66#define pgd_ERROR(e) \
67 printk("%s:%d: bad pgd %p(%016lx).\n", \
68 __FILE__, __LINE__, &(e), pgd_val(e))
69
70#define pgd_none(x) (!pgd_val(x))
71#define pud_none(x) (!pud_val(x))
72
73struct mm_struct;
74
75void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
76
77
78static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
79 pte_t *ptep)
80{
81 *ptep = native_make_pte(0);
82}
83
84static inline void native_set_pte(pte_t *ptep, pte_t pte)
85{
86 *ptep = pte;
87}
88
89static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
90{
91 native_set_pte(ptep, pte);
92}
93
94static inline pte_t native_ptep_get_and_clear(pte_t *xp)
95{
96#ifdef CONFIG_SMP
97 return native_make_pte(xchg(&xp->pte, 0));
98#else
99 /* native_local_ptep_get_and_clear,
100 but duplicated because of cyclic dependency */
101 pte_t ret = *xp;
102 native_pte_clear(NULL, 0, xp);
103 return ret;
104#endif
105}
106
107static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
108{
109 *pmdp = pmd;
110}
111
112static inline void native_pmd_clear(pmd_t *pmd)
113{
114 native_set_pmd(pmd, native_make_pmd(0));
115}
116
117static inline void native_set_pud(pud_t *pudp, pud_t pud)
118{
119 *pudp = pud;
120}
121
122static inline void native_pud_clear(pud_t *pud)
123{
124 native_set_pud(pud, native_make_pud(0));
125}
126
127static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
128{
129 *pgdp = pgd;
130}
131
132static inline void native_pgd_clear(pgd_t *pgd)
133{
134 native_set_pgd(pgd, native_make_pgd(0));
135}
136
137#define pte_same(a, b) ((a).pte == (b).pte)
138
139#endif /* !__ASSEMBLY__ */
140
141#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
142#define PMD_MASK (~(PMD_SIZE - 1))
143#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
144#define PUD_MASK (~(PUD_SIZE - 1))
145#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
146#define PGDIR_MASK (~(PGDIR_SIZE - 1))
147
148
149#define MAXMEM _AC(0x00003fffffffffff, UL)
150#define VMALLOC_START _AC(0xffffc20000000000, UL)
151#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
152#define VMEMMAP_START _AC(0xffffe20000000000, UL)
153#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
154#define MODULES_END _AC(0xffffffffff000000, UL)
155#define MODULES_LEN (MODULES_END - MODULES_VADDR)
156
157#ifndef __ASSEMBLY__
158
159static inline int pgd_bad(pgd_t pgd)
160{
161 return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
162}
163
164static inline int pud_bad(pud_t pud)
165{
166 return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
167}
168
169static inline int pmd_bad(pmd_t pmd)
170{
171 return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
172}
173
174#define pte_none(x) (!pte_val((x)))
175#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
176
177#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
178
179/*
180 * Macro to mark a page protection value as "uncacheable".
181 */
182#define pgprot_noncached(prot) \
183 (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))
184
185/*
186 * Conversion functions: convert a page and protection to a page entry,
187 * and a page entry and page directory to the page they refer to.
188 */
189
190/*
191 * Level 4 access.
192 */
193#define pgd_page_vaddr(pgd) \
194 ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
195#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
196#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
197static inline int pgd_large(pgd_t pgd) { return 0; }
198#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
199
200/* PUD - Level3 access */
201/* to find an entry in a page-table-directory. */
202#define pud_page_vaddr(pud) \
203 ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
204#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
205#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
206#define pud_offset(pgd, address) \
207 ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
208#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
209
210static inline int pud_large(pud_t pte)
211{
212 return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
213 (_PAGE_PSE | _PAGE_PRESENT);
214}
215
216/* PMD - Level 2 access */
217#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
218#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
219
220#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
221#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
222 pmd_index(address))
223#define pmd_none(x) (!pmd_val((x)))
224#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
225#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
226#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
227
228#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
229#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
230 _PAGE_FILE })
231#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
232
233/* PTE - Level 1 access. */
234
235/* page, protection -> pte */
236#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot))
237
238#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
239#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
240 pte_index((address)))
241
242/* x86-64 always has all page tables mapped. */
243#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
244#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
245#define pte_unmap(pte) /* NOP */
246#define pte_unmap_nested(pte) /* NOP */
247
248#define update_mmu_cache(vma, address, pte) do { } while (0)
249
250extern int direct_gbpages;
251
252/* Encode and de-code a swap entry */
253#define __swp_type(x) (((x).val >> 1) & 0x3f)
254#define __swp_offset(x) ((x).val >> 8)
255#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \
256 ((offset) << 8) })
257#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
258#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
259
260extern int kern_addr_valid(unsigned long addr);
261extern void cleanup_highmap(void);
262
263#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
264 remap_pfn_range(vma, vaddr, pfn, size, prot)
265
266#define HAVE_ARCH_UNMAPPED_AREA
267#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
268
269#define pgtable_cache_init() do { } while (0)
270#define check_pgt_cache() do { } while (0)
271
272#define PAGE_AGP PAGE_KERNEL_NOCACHE
273#define HAVE_PAGE_AGP 1
274
275/* fs/proc/kcore.c */
276#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
277#define kc_offset_to_vaddr(o) \
278 (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \
279 ? ((o) | ~__VIRTUAL_MASK) \
280 : (o))
281
282#define __HAVE_ARCH_PTE_SAME
283#endif /* !__ASSEMBLY__ */
284
285#endif /* ASM_X86__PGTABLE_64_H */
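
A worked round trip through the kcore helpers above, assuming the 48-bit virtual address width of this era (__VIRTUAL_MASK_SHIFT = 48):

	/*
	 * v = 0xffff880000000000                   (canonical kernel address)
	 * o = kc_vaddr_to_offset(v)
	 *   = v & 0x0000ffffffffffff               = 0x0000880000000000
	 * kc_offset_to_vaddr(o): bit 47 of o is set, so
	 *   = o | 0xffff000000000000               = 0xffff880000000000
	 * and the original address is recovered.
	 */
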
diff --git a/include/asm-x86/poll.h b/include/asm-x86/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/include/asm-x86/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h
deleted file mode 100644
index bb7133dc155d..000000000000
--- a/include/asm-x86/posix_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "posix_types_32.h"
4# else
5# include "posix_types_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "posix_types_32.h"
10# else
11# include "posix_types_64.h"
12# endif
13#endif
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
deleted file mode 100644
index 70cf2bb05939..000000000000
--- a/include/asm-x86/posix_types_32.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef ASM_X86__POSIX_TYPES_32_H
2#define ASM_X86__POSIX_TYPES_32_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t;
12typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef unsigned short __kernel_ipc_pid_t;
16typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t;
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48#define __FD_SET(fd,fdsetp) \
49 asm volatile("btsl %1,%0": \
50 "+m" (*(__kernel_fd_set *)(fdsetp)) \
51 : "r" ((int)(fd)))
52
53#undef __FD_CLR
54#define __FD_CLR(fd,fdsetp) \
55 asm volatile("btrl %1,%0": \
56 "+m" (*(__kernel_fd_set *)(fdsetp)) \
57 : "r" ((int) (fd)))
58
59#undef __FD_ISSET
60#define __FD_ISSET(fd,fdsetp) \
61 (__extension__ \
62 ({ \
63 unsigned char __result; \
64 asm volatile("btl %1,%2 ; setb %0" \
65 : "=q" (__result) \
66 : "r" ((int)(fd)), \
67 "m" (*(__kernel_fd_set *)(fdsetp))); \
68 __result; \
69}))
70
71#undef __FD_ZERO
72#define __FD_ZERO(fdsetp) \
73do { \
74 int __d0, __d1; \
75 asm volatile("cld ; rep ; stosl" \
76 : "=m" (*(__kernel_fd_set *)(fdsetp)), \
77 "=&c" (__d0), "=&D" (__d1) \
78 : "a" (0), "1" (__FDSET_LONGS), \
79 "2" ((__kernel_fd_set *)(fdsetp)) \
80 : "memory"); \
81} while (0)
82
83#endif /* defined(__KERNEL__) */
84
85#endif /* ASM_X86__POSIX_TYPES_32_H */
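
A userspace sketch of the bitmap layout the btsl-based __FD_SET above relies on: fd N simply sets bit N of the fd_set. This is plain C doing what the single-instruction asm does.

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		unsigned long bits[1024 / (8 * sizeof(long))];
		unsigned int fd = 69;

		memset(bits, 0, sizeof(bits));
		bits[fd / (8 * sizeof(long))] |= 1UL << (fd % (8 * sizeof(long)));

		assert((bits[fd / (8 * sizeof(long))] >>
			(fd % (8 * sizeof(long)))) & 1);
		return 0;
	}
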
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
deleted file mode 100644
index 388b4e7f4a44..000000000000
--- a/include/asm-x86/posix_types_64.h
+++ /dev/null
@@ -1,119 +0,0 @@
1#ifndef ASM_X86__POSIX_TYPES_64_H
2#define ASM_X86__POSIX_TYPES_64_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef unsigned long __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef int __kernel_ipc_pid_t;
16typedef unsigned int __kernel_uid_t;
17typedef unsigned int __kernel_gid_t;
18typedef unsigned long __kernel_size_t;
19typedef long __kernel_ssize_t;
20typedef long __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30
31#ifdef __GNUC__
32typedef long long __kernel_loff_t;
33#endif
34
35typedef struct {
36 int val[2];
37} __kernel_fsid_t;
38
39typedef unsigned short __kernel_old_uid_t;
40typedef unsigned short __kernel_old_gid_t;
41typedef __kernel_uid_t __kernel_uid32_t;
42typedef __kernel_gid_t __kernel_gid32_t;
43
44typedef unsigned long __kernel_old_dev_t;
45
46#ifdef __KERNEL__
47
48#undef __FD_SET
49static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
50{
51 unsigned long _tmp = fd / __NFDBITS;
52 unsigned long _rem = fd % __NFDBITS;
53 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
54}
55
56#undef __FD_CLR
57static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
58{
59 unsigned long _tmp = fd / __NFDBITS;
60 unsigned long _rem = fd % __NFDBITS;
61 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
62}
63
64#undef __FD_ISSET
65static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
66{
67 unsigned long _tmp = fd / __NFDBITS;
68 unsigned long _rem = fd % __NFDBITS;
69 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
70}
71
72/*
73 * This will unroll the loop for the normal constant cases (8 or 32 longs,
74 * for 256 and 1024-bit fd_sets respectively)
75 */
76#undef __FD_ZERO
77static inline void __FD_ZERO(__kernel_fd_set *p)
78{
79 unsigned long *tmp = p->fds_bits;
80 int i;
81
82 if (__builtin_constant_p(__FDSET_LONGS)) {
83 switch (__FDSET_LONGS) {
84 case 32:
85 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
86 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
87 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
88 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
89 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
90 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
91 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
92 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
93 return;
94 case 16:
95 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
96 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
97 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
98 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
99 return;
100 case 8:
101 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
102 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
103 return;
104 case 4:
105 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
106 return;
107 }
108 }
109 i = __FDSET_LONGS;
110 while (i) {
111 i--;
112 *tmp = 0;
113 tmp++;
114 }
115}
116
117#endif /* defined(__KERNEL__) */
118
119#endif /* ASM_X86__POSIX_TYPES_64_H */
diff --git a/include/asm-x86/prctl.h b/include/asm-x86/prctl.h
deleted file mode 100644
index e7ae34eb4103..000000000000
--- a/include/asm-x86/prctl.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef ASM_X86__PRCTL_H
2#define ASM_X86__PRCTL_H
3
4#define ARCH_SET_GS 0x1001
5#define ARCH_SET_FS 0x1002
6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004
8
9
10#endif /* ASM_X86__PRCTL_H */
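
The ARCH_* commands above are consumed by the x86-64 arch_prctl(2) syscall. A small userspace sketch reading the current FS base with ARCH_GET_FS:

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define ARCH_GET_FS 0x1003

	int main(void)
	{
		unsigned long fsbase;

		if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
			printf("fs base: %#lx\n", fsbase);
		return 0;
	}
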
diff --git a/include/asm-x86/processor-cyrix.h b/include/asm-x86/processor-cyrix.h
deleted file mode 100644
index 1198f2a0e42c..000000000000
--- a/include/asm-x86/processor-cyrix.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * NSC/Cyrix CPU indexed register access. Must be inline functions rather
3 * than macros to ensure correct access ordering.
4 * Access order is always 0x22 (=offset), 0x23 (=value)
5 *
6 * When using the old macros a line like
7 * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
8 * gets expanded to:
9 * do {
10 * outb((CX86_CCR2), 0x22);
11 * outb((({
12 * outb((CX86_CCR2), 0x22);
13 * inb(0x23);
14 * }) | 0x88), 0x23);
15 * } while (0);
16 *
17 * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
18 */
19
20static inline u8 getCx86(u8 reg)
21{
22 outb(reg, 0x22);
23 return inb(0x23);
24}
25
26static inline void setCx86(u8 reg, u8 data)
27{
28 outb(reg, 0x22);
29 outb(data, 0x23);
30}
31
32#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
33
34#define setCx86_old(reg, data) do { \
35 outb((reg), 0x22); \
36 outb((data), 0x23); \
37} while (0)
38
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
deleted file mode 100644
index dc5f0712f9fa..000000000000
--- a/include/asm-x86/processor-flags.h
+++ /dev/null
@@ -1,100 +0,0 @@
1#ifndef ASM_X86__PROCESSOR_FLAGS_H
2#define ASM_X86__PROCESSOR_FLAGS_H
3/* Various flags defined: can be included from assembler. */
4
5/*
6 * EFLAGS bits
7 */
8#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
9#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
10#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
11#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
12#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
13#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
14#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
15#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
16#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
17#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
18#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
19#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
20#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
21#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
22#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
23#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
24#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
25
26/*
27 * Basic CPU control in CR0
28 */
29#define X86_CR0_PE 0x00000001 /* Protection Enable */
30#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
31#define X86_CR0_EM 0x00000004 /* Emulation */
32#define X86_CR0_TS 0x00000008 /* Task Switched */
33#define X86_CR0_ET 0x00000010 /* Extension Type */
34#define X86_CR0_NE 0x00000020 /* Numeric Error */
35#define X86_CR0_WP 0x00010000 /* Write Protect */
36#define X86_CR0_AM 0x00040000 /* Alignment Mask */
37#define X86_CR0_NW 0x20000000 /* Not Write-through */
38#define X86_CR0_CD 0x40000000 /* Cache Disable */
39#define X86_CR0_PG 0x80000000 /* Paging */
40
41/*
42 * Paging options in CR3
43 */
44#define X86_CR3_PWT 0x00000008 /* Page Write Through */
45#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
46
47/*
48 * Intel CPU features in CR4
49 */
50#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
51#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
52#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
53#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
54#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
55#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
56#define X86_CR4_MCE 0x00000040 /* Machine check enable */
57#define X86_CR4_PGE 0x00000080 /* enable global pages */
58#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
59#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
60#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
61#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
62#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
63
64/*
65 * x86-64 Task Priority Register, CR8
66 */
67#define X86_CR8_TPR 0x0000000F /* task priority register */
68
69/*
70 * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
71 */
72
73/*
74 * NSC/Cyrix CPU configuration register indexes
75 */
76#define CX86_PCR0 0x20
77#define CX86_GCR 0xb8
78#define CX86_CCR0 0xc0
79#define CX86_CCR1 0xc1
80#define CX86_CCR2 0xc2
81#define CX86_CCR3 0xc3
82#define CX86_CCR4 0xe8
83#define CX86_CCR5 0xe9
84#define CX86_CCR6 0xea
85#define CX86_CCR7 0xeb
86#define CX86_PCR1 0xf0
87#define CX86_DIR0 0xfe
88#define CX86_DIR1 0xff
89#define CX86_ARR_BASE 0xc4
90#define CX86_RCR_BASE 0xdc
91
92#ifdef __KERNEL__
93#ifdef CONFIG_VM86
94#define X86_VM_MASK X86_EFLAGS_VM
95#else
96#define X86_VM_MASK 0 /* No VM86 support */
97#endif
98#endif
99
100#endif /* ASM_X86__PROCESSOR_FLAGS_H */
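
A userspace sketch (x86-64, GCC inline asm) that reads EFLAGS and tests a few of the bits defined above; note IF always reads back as set from user mode:

	#include <stdio.h>

	#define X86_EFLAGS_CF 0x00000001
	#define X86_EFLAGS_ZF 0x00000040
	#define X86_EFLAGS_IF 0x00000200

	int main(void)
	{
		unsigned long flags;

		asm volatile("pushf; pop %0" : "=r" (flags));
		printf("CF=%d ZF=%d IF=%d\n",
		       !!(flags & X86_EFLAGS_CF),
		       !!(flags & X86_EFLAGS_ZF),
		       !!(flags & X86_EFLAGS_IF));
		return 0;
	}
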
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
deleted file mode 100644
index ee7cbb30773a..000000000000
--- a/include/asm-x86/processor.h
+++ /dev/null
@@ -1,936 +0,0 @@
1#ifndef ASM_X86__PROCESSOR_H
2#define ASM_X86__PROCESSOR_H
3
4#include <asm/processor-flags.h>
5
6/* Forward declaration, a strange C thing */
7struct task_struct;
8struct mm_struct;
9
10#include <asm/vm86.h>
11#include <asm/math_emu.h>
12#include <asm/segment.h>
13#include <asm/types.h>
14#include <asm/sigcontext.h>
15#include <asm/current.h>
16#include <asm/cpufeature.h>
17#include <asm/system.h>
18#include <asm/page.h>
19#include <asm/percpu.h>
20#include <asm/msr.h>
21#include <asm/desc_defs.h>
22#include <asm/nops.h>
23#include <asm/ds.h>
24
25#include <linux/personality.h>
26#include <linux/cpumask.h>
27#include <linux/cache.h>
28#include <linux/threads.h>
29#include <linux/init.h>
30
31/*
32 * Default implementation of macro that returns current
33 * instruction pointer ("program counter").
34 */
35static inline void *current_text_addr(void)
36{
37 void *pc;
38
39 asm volatile("mov $1f, %0; 1:":"=r" (pc));
40
41 return pc;
42}
43
44#ifdef CONFIG_X86_VSMP
45# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
46# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
47#else
48# define ARCH_MIN_TASKALIGN 16
49# define ARCH_MIN_MMSTRUCT_ALIGN 0
50#endif
51
52/*
53 * CPU type and hardware bug flags. Kept separately for each CPU.
54 * Members of this structure are referenced in head.S, so think twice
55 * before touching them. [mj]
56 */
57
58struct cpuinfo_x86 {
59 __u8 x86; /* CPU family */
60 __u8 x86_vendor; /* CPU vendor */
61 __u8 x86_model;
62 __u8 x86_mask;
63#ifdef CONFIG_X86_32
64 char wp_works_ok; /* It doesn't on 386's */
65
66 /* Problems on some 486Dx4's and old 386's: */
67 char hlt_works_ok;
68 char hard_math;
69 char rfu;
70 char fdiv_bug;
71 char f00f_bug;
72 char coma_bug;
73 char pad0;
74#else
75 /* Number of 4K pages in DTLB/ITLB combined: */
76 int x86_tlbsize;
77 __u8 x86_virt_bits;
78 __u8 x86_phys_bits;
79#endif
80 /* CPUID returned core id bits: */
81 __u8 x86_coreid_bits;
82 /* Max extended CPUID function supported: */
83 __u32 extended_cpuid_level;
84 /* Maximum supported CPUID level, -1=no CPUID: */
85 int cpuid_level;
86 __u32 x86_capability[NCAPINTS];
87 char x86_vendor_id[16];
88 char x86_model_id[64];
89 /* in KB - valid for CPUs which support this call: */
90 int x86_cache_size;
91 int x86_cache_alignment; /* In bytes */
92 int x86_power;
93 unsigned long loops_per_jiffy;
94#ifdef CONFIG_SMP
95 /* cpus sharing the last level cache: */
96 cpumask_t llc_shared_map;
97#endif
98 /* cpuid returned max cores value: */
99 u16 x86_max_cores;
100 u16 apicid;
101 u16 initial_apicid;
102 u16 x86_clflush_size;
103#ifdef CONFIG_SMP
104 /* number of cores as seen by the OS: */
105 u16 booted_cores;
106 /* Physical processor id: */
107 u16 phys_proc_id;
108 /* Core id: */
109 u16 cpu_core_id;
110 /* Index into per_cpu list: */
111 u16 cpu_index;
112#endif
113} __attribute__((__aligned__(SMP_CACHE_BYTES)));
114
115#define X86_VENDOR_INTEL 0
116#define X86_VENDOR_CYRIX 1
117#define X86_VENDOR_AMD 2
118#define X86_VENDOR_UMC 3
119#define X86_VENDOR_CENTAUR 5
120#define X86_VENDOR_TRANSMETA 7
121#define X86_VENDOR_NSC 8
122#define X86_VENDOR_NUM 9
123
124#define X86_VENDOR_UNKNOWN 0xff
125
126/*
127 * capabilities of CPUs
128 */
129extern struct cpuinfo_x86 boot_cpu_data;
130extern struct cpuinfo_x86 new_cpu_data;
131
132extern struct tss_struct doublefault_tss;
133extern __u32 cleared_cpu_caps[NCAPINTS];
134
135#ifdef CONFIG_SMP
136DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
137#define cpu_data(cpu) per_cpu(cpu_info, cpu)
138#define current_cpu_data __get_cpu_var(cpu_info)
139#else
140#define cpu_data(cpu) boot_cpu_data
141#define current_cpu_data boot_cpu_data
142#endif
143
144extern const struct seq_operations cpuinfo_op;
145
146static inline int hlt_works(int cpu)
147{
148#ifdef CONFIG_X86_32
149 return cpu_data(cpu).hlt_works_ok;
150#else
151 return 1;
152#endif
153}
154
155#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
156
157extern void cpu_detect(struct cpuinfo_x86 *c);
158
159extern struct pt_regs *idle_regs(struct pt_regs *);
160
161extern void early_cpu_init(void);
162extern void identify_boot_cpu(void);
163extern void identify_secondary_cpu(struct cpuinfo_x86 *);
164extern void print_cpu_info(struct cpuinfo_x86 *);
165extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
166extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
167extern unsigned short num_cache_leaves;
168
169extern void detect_extended_topology(struct cpuinfo_x86 *c);
170extern void detect_ht(struct cpuinfo_x86 *c);
171
172static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
173 unsigned int *ecx, unsigned int *edx)
174{
175 /* ecx is often an input as well as an output. */
176 asm("cpuid"
177 : "=a" (*eax),
178 "=b" (*ebx),
179 "=c" (*ecx),
180 "=d" (*edx)
181 : "0" (*eax), "2" (*ecx));
182}
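
The same cpuid sequence works unprivileged; a userspace sketch fetching the vendor string from leaf 0 (the string comes back in EBX, EDX, ECX order):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int eax = 0, ebx, ecx, edx;
		char vendor[13];

		asm("cpuid" : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
		memcpy(vendor + 0, &ebx, 4);
		memcpy(vendor + 4, &edx, 4);
		memcpy(vendor + 8, &ecx, 4);
		vendor[12] = '\0';
		printf("%s\n", vendor);	/* e.g. "GenuineIntel" */
		return 0;
	}
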
183
184static inline void load_cr3(pgd_t *pgdir)
185{
186 write_cr3(__pa(pgdir));
187}
188
189#ifdef CONFIG_X86_32
190/* This is the TSS defined by the hardware. */
191struct x86_hw_tss {
192 unsigned short back_link, __blh;
193 unsigned long sp0;
194 unsigned short ss0, __ss0h;
195 unsigned long sp1;
196 /* ss1 caches MSR_IA32_SYSENTER_CS: */
197 unsigned short ss1, __ss1h;
198 unsigned long sp2;
199 unsigned short ss2, __ss2h;
200 unsigned long __cr3;
201 unsigned long ip;
202 unsigned long flags;
203 unsigned long ax;
204 unsigned long cx;
205 unsigned long dx;
206 unsigned long bx;
207 unsigned long sp;
208 unsigned long bp;
209 unsigned long si;
210 unsigned long di;
211 unsigned short es, __esh;
212 unsigned short cs, __csh;
213 unsigned short ss, __ssh;
214 unsigned short ds, __dsh;
215 unsigned short fs, __fsh;
216 unsigned short gs, __gsh;
217 unsigned short ldt, __ldth;
218 unsigned short trace;
219 unsigned short io_bitmap_base;
220
221} __attribute__((packed));
222#else
223struct x86_hw_tss {
224 u32 reserved1;
225 u64 sp0;
226 u64 sp1;
227 u64 sp2;
228 u64 reserved2;
229 u64 ist[7];
230 u32 reserved3;
231 u32 reserved4;
232 u16 reserved5;
233 u16 io_bitmap_base;
234
235} __attribute__((packed)) ____cacheline_aligned;
236#endif
237
238/*
239 * IO-bitmap sizes:
240 */
241#define IO_BITMAP_BITS 65536
242#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
243#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
244#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
245#define INVALID_IO_BITMAP_OFFSET 0x8000
246#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
247
248struct tss_struct {
249 /*
250 * The hardware state:
251 */
252 struct x86_hw_tss x86_tss;
253
254 /*
255 * The extra 1 is there because the CPU will access an
256 * additional byte beyond the end of the IO permission
257 * bitmap. The extra byte must be all 1 bits, and must
258 * be within the limit.
259 */
260 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
261 /*
262 * Cache the current maximum and the last task that used the bitmap:
263 */
264 unsigned long io_bitmap_max;
265 struct thread_struct *io_bitmap_owner;
266
267 /*
268 * .. and then another 0x100 bytes for the emergency kernel stack:
269 */
270 unsigned long stack[64];
271
272} ____cacheline_aligned;
273
274DECLARE_PER_CPU(struct tss_struct, init_tss);
275
276/*
277 * Save the original ist values for checking stack pointers during debugging
278 */
279struct orig_ist {
280 unsigned long ist[7];
281};
282
283#define MXCSR_DEFAULT 0x1f80
284
285struct i387_fsave_struct {
286 u32 cwd; /* FPU Control Word */
287 u32 swd; /* FPU Status Word */
288 u32 twd; /* FPU Tag Word */
289 u32 fip; /* FPU IP Offset */
290 u32 fcs; /* FPU IP Selector */
291 u32 foo; /* FPU Operand Pointer Offset */
292 u32 fos; /* FPU Operand Pointer Selector */
293
294 /* 8*10 bytes for each FP-reg = 80 bytes: */
295 u32 st_space[20];
296
297 /* Software status information [not touched by FSAVE]: */
298 u32 status;
299};
300
301struct i387_fxsave_struct {
302 u16 cwd; /* Control Word */
303 u16 swd; /* Status Word */
304 u16 twd; /* Tag Word */
305 u16 fop; /* Last Instruction Opcode */
306 union {
307 struct {
308 u64 rip; /* Instruction Pointer */
309 u64 rdp; /* Data Pointer */
310 };
311 struct {
312 u32 fip; /* FPU IP Offset */
313 u32 fcs; /* FPU IP Selector */
314 u32 foo; /* FPU Operand Offset */
315 u32 fos; /* FPU Operand Selector */
316 };
317 };
318 u32 mxcsr; /* MXCSR Register State */
319 u32 mxcsr_mask; /* MXCSR Mask */
320
321 /* 8*16 bytes for each FP-reg = 128 bytes: */
322 u32 st_space[32];
323
324 /* 16*16 bytes for each XMM-reg = 256 bytes: */
325 u32 xmm_space[64];
326
327 u32 padding[12];
328
329 union {
330 u32 padding1[12];
331 u32 sw_reserved[12];
332 };
333
334} __attribute__((aligned(16)));
335
336struct i387_soft_struct {
337 u32 cwd;
338 u32 swd;
339 u32 twd;
340 u32 fip;
341 u32 fcs;
342 u32 foo;
343 u32 fos;
344 /* 8*10 bytes for each FP-reg = 80 bytes: */
345 u32 st_space[20];
346 u8 ftop;
347 u8 changed;
348 u8 lookahead;
349 u8 no_update;
350 u8 rm;
351 u8 alimit;
352 struct info *info;
353 u32 entry_eip;
354};
355
356struct xsave_hdr_struct {
357 u64 xstate_bv;
358 u64 reserved1[2];
359 u64 reserved2[5];
360} __attribute__((packed));
361
362struct xsave_struct {
363 struct i387_fxsave_struct i387;
364 struct xsave_hdr_struct xsave_hdr;
365 /* new processor state extensions will go here */
366} __attribute__ ((packed, aligned (64)));
367
368union thread_xstate {
369 struct i387_fsave_struct fsave;
370 struct i387_fxsave_struct fxsave;
371 struct i387_soft_struct soft;
372 struct xsave_struct xsave;
373};
374
375#ifdef CONFIG_X86_64
376DECLARE_PER_CPU(struct orig_ist, orig_ist);
377#endif
378
379extern void print_cpu_info(struct cpuinfo_x86 *);
380extern unsigned int xstate_size;
381extern void free_thread_xstate(struct task_struct *);
382extern struct kmem_cache *task_xstate_cachep;
383extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
384extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
385extern unsigned short num_cache_leaves;
386
387struct thread_struct {
388 /* Cached TLS descriptors: */
389 struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
390 unsigned long sp0;
391 unsigned long sp;
392#ifdef CONFIG_X86_32
393 unsigned long sysenter_cs;
394#else
395 unsigned long usersp; /* Copy from PDA */
396 unsigned short es;
397 unsigned short ds;
398 unsigned short fsindex;
399 unsigned short gsindex;
400#endif
401 unsigned long ip;
402 unsigned long fs;
403 unsigned long gs;
404 /* Hardware debugging registers: */
405 unsigned long debugreg0;
406 unsigned long debugreg1;
407 unsigned long debugreg2;
408 unsigned long debugreg3;
409 unsigned long debugreg6;
410 unsigned long debugreg7;
411 /* Fault info: */
412 unsigned long cr2;
413 unsigned long trap_no;
414 unsigned long error_code;
415 /* floating point and extended processor state */
416 union thread_xstate *xstate;
417#ifdef CONFIG_X86_32
418 /* Virtual 86 mode info */
419 struct vm86_struct __user *vm86_info;
420 unsigned long screen_bitmap;
421 unsigned long v86flags;
422 unsigned long v86mask;
423 unsigned long saved_sp0;
424 unsigned int saved_fs;
425 unsigned int saved_gs;
426#endif
427 /* IO permissions: */
428 unsigned long *io_bitmap_ptr;
429 unsigned long iopl;
430 /* Max allowed port in the bitmap, in bytes: */
431 unsigned io_bitmap_max;
432/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
433 unsigned long debugctlmsr;
434#ifdef CONFIG_X86_DS
435/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
436 struct ds_context *ds_ctx;
437#endif /* CONFIG_X86_DS */
438#ifdef CONFIG_X86_PTRACE_BTS
439/* the signal to send on a bts buffer overflow */
440 unsigned int bts_ovfl_signal;
441#endif /* CONFIG_X86_PTRACE_BTS */
442};
443
444static inline unsigned long native_get_debugreg(int regno)
445{
446 unsigned long val = 0; /* Damn you, gcc! */
447
448 switch (regno) {
449 case 0:
450 asm("mov %%db0, %0" :"=r" (val));
451 break;
452 case 1:
453 asm("mov %%db1, %0" :"=r" (val));
454 break;
455 case 2:
456 asm("mov %%db2, %0" :"=r" (val));
457 break;
458 case 3:
459 asm("mov %%db3, %0" :"=r" (val));
460 break;
461 case 6:
462 asm("mov %%db6, %0" :"=r" (val));
463 break;
464 case 7:
465 asm("mov %%db7, %0" :"=r" (val));
466 break;
467 default:
468 BUG();
469 }
470 return val;
471}
472
473static inline void native_set_debugreg(int regno, unsigned long value)
474{
475 switch (regno) {
476 case 0:
477 asm("mov %0, %%db0" ::"r" (value));
478 break;
479 case 1:
480 asm("mov %0, %%db1" ::"r" (value));
481 break;
482 case 2:
483 asm("mov %0, %%db2" ::"r" (value));
484 break;
485 case 3:
486 asm("mov %0, %%db3" ::"r" (value));
487 break;
488 case 6:
489 asm("mov %0, %%db6" ::"r" (value));
490 break;
491 case 7:
492 asm("mov %0, %%db7" ::"r" (value));
493 break;
494 default:
495 BUG();
496 }
497}
498
499/*
500 * Set IOPL bits in EFLAGS from given mask
501 */
502static inline void native_set_iopl_mask(unsigned mask)
503{
504#ifdef CONFIG_X86_32
505 unsigned int reg;
506
507 asm volatile ("pushfl;"
508 "popl %0;"
509 "andl %1, %0;"
510 "orl %2, %0;"
511 "pushl %0;"
512 "popfl"
513 : "=&r" (reg)
514 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
515#endif
516}
517
518static inline void
519native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
520{
521 tss->x86_tss.sp0 = thread->sp0;
522#ifdef CONFIG_X86_32
523 /* Only happens when SEP is enabled, no need to test "SEP"arately: */
524 if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
525 tss->x86_tss.ss1 = thread->sysenter_cs;
526 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
527 }
528#endif
529}
530
531static inline void native_swapgs(void)
532{
533#ifdef CONFIG_X86_64
534 asm volatile("swapgs" ::: "memory");
535#endif
536}
537
538#ifdef CONFIG_PARAVIRT
539#include <asm/paravirt.h>
540#else
541#define __cpuid native_cpuid
542#define paravirt_enabled() 0
543
544/*
545 * These special macros can be used to get or set a debugging register
546 */
547#define get_debugreg(var, register) \
548 (var) = native_get_debugreg(register)
549#define set_debugreg(value, register) \
550 native_set_debugreg(register, value)
551
552static inline void load_sp0(struct tss_struct *tss,
553 struct thread_struct *thread)
554{
555 native_load_sp0(tss, thread);
556}
557
558#define set_iopl_mask native_set_iopl_mask
559#endif /* CONFIG_PARAVIRT */
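/*
 * Illustrative sketch, not part of the original header: arming a hardware
 * breakpoint with the accessors above.  The DR7 value 0x1 (local-enable
 * slot 0, execute, length 1) and the helper name are assumptions of the
 * example; a real user also needs a debug-exception handler.
 */
static inline void example_arm_hw_breakpoint(unsigned long addr)
{
	set_debugreg(addr, 0);	/* DR0 = linear address to watch */
	set_debugreg(0x1UL, 7);	/* DR7: enable slot 0 as an execute breakpoint */
}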
560
561/*
562 * Save the cr4 feature set we're using (i.e.,
563 * Pentium 4MB enable and PPro Global page
564 * enable), so that any CPUs that boot up
565 * after us can get the correct flags.
566 */
567extern unsigned long mmu_cr4_features;
568
569static inline void set_in_cr4(unsigned long mask)
570{
571 unsigned cr4;
572
573 mmu_cr4_features |= mask;
574 cr4 = read_cr4();
575 cr4 |= mask;
576 write_cr4(cr4);
577}
578
579static inline void clear_in_cr4(unsigned long mask)
580{
581 unsigned cr4;
582
583 mmu_cr4_features &= ~mask;
584 cr4 = read_cr4();
585 cr4 &= ~mask;
586 write_cr4(cr4);
587}
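/*
 * Illustrative sketch, not part of the original header: enabling global
 * pages on the boot CPU.  Because set_in_cr4() records the bit in
 * mmu_cr4_features first, CPUs that boot later inherit the same setting.
 */
static inline void example_enable_global_pages(void)
{
	set_in_cr4(X86_CR4_PGE);	/* set in %cr4 and remember for APs */
}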
588
589typedef struct {
590 unsigned long seg;
591} mm_segment_t;
592
593
594/*
595 * create a kernel thread without removing it from tasklists
596 */
597extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
598
599/* Free all resources held by a thread. */
600extern void release_thread(struct task_struct *);
601
602/* Prepare to copy thread state - unlazy all lazy state */
603extern void prepare_to_copy(struct task_struct *tsk);
604
605unsigned long get_wchan(struct task_struct *p);
606
607/*
608 * Generic CPUID function
609 * clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
610 * resulting in stale register contents being returned.
611 */
612static inline void cpuid(unsigned int op,
613 unsigned int *eax, unsigned int *ebx,
614 unsigned int *ecx, unsigned int *edx)
615{
616 *eax = op;
617 *ecx = 0;
618 __cpuid(eax, ebx, ecx, edx);
619}
620
621/* Some CPUID calls want 'count' to be placed in ecx */
622static inline void cpuid_count(unsigned int op, int count,
623 unsigned int *eax, unsigned int *ebx,
624 unsigned int *ecx, unsigned int *edx)
625{
626 *eax = op;
627 *ecx = count;
628 __cpuid(eax, ebx, ecx, edx);
629}
630
631/*
632 * CPUID functions returning a single datum
633 */
634static inline unsigned int cpuid_eax(unsigned int op)
635{
636 unsigned int eax, ebx, ecx, edx;
637
638 cpuid(op, &eax, &ebx, &ecx, &edx);
639
640 return eax;
641}
642
643static inline unsigned int cpuid_ebx(unsigned int op)
644{
645 unsigned int eax, ebx, ecx, edx;
646
647 cpuid(op, &eax, &ebx, &ecx, &edx);
648
649 return ebx;
650}
651
652static inline unsigned int cpuid_ecx(unsigned int op)
653{
654 unsigned int eax, ebx, ecx, edx;
655
656 cpuid(op, &eax, &ebx, &ecx, &edx);
657
658 return ecx;
659}
660
661static inline unsigned int cpuid_edx(unsigned int op)
662{
663 unsigned int eax, ebx, ecx, edx;
664
665 cpuid(op, &eax, &ebx, &ecx, &edx);
666
667 return edx;
668}
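/*
 * Illustrative sketch, not part of the original header: reading the
 * 12-byte vendor string via the generic cpuid() helper.  Leaf 0 returns
 * the maximum standard leaf in EAX and the vendor id in EBX:EDX:ECX;
 * memcpy() from <linux/string.h> is assumed to be available.
 */
static inline void example_read_vendor_id(char vendor[13])
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* "Genu" / "Auth" / ... */
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
}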
669
670/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
671static inline void rep_nop(void)
672{
673 asm volatile("rep; nop" ::: "memory");
674}
675
676static inline void cpu_relax(void)
677{
678 rep_nop();
679}
680
681/* Stop speculative execution: */
682static inline void sync_core(void)
683{
684 int tmp;
685
686 asm volatile("cpuid" : "=a" (tmp) : "0" (1)
687 : "ebx", "ecx", "edx", "memory");
688}
689
690static inline void __monitor(const void *eax, unsigned long ecx,
691 unsigned long edx)
692{
693 /* "monitor %eax, %ecx, %edx;" */
694 asm volatile(".byte 0x0f, 0x01, 0xc8;"
695 :: "a" (eax), "c" (ecx), "d"(edx));
696}
697
698static inline void __mwait(unsigned long eax, unsigned long ecx)
699{
700 /* "mwait %eax, %ecx;" */
701 asm volatile(".byte 0x0f, 0x01, 0xc9;"
702 :: "a" (eax), "c" (ecx));
703}
704
705static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
706{
707 trace_hardirqs_on();
708 /* "mwait %eax, %ecx;" */
709 asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
710 :: "a" (eax), "c" (ecx));
711}
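/*
 * Illustrative sketch, not part of the original header: the canonical
 * MONITOR/MWAIT wait loop built from the helpers above.  The flag is
 * re-checked after __monitor() because a store may land between the two
 * reads; hint values of 0 request the shallowest C-state.
 */
static inline void example_mwait_on(volatile int *flag)
{
	while (!*flag) {
		__monitor((const void *)flag, 0, 0);	/* arm the address */
		if (*flag)
			break;				/* raced with the store */
		__mwait(0, 0);				/* sleep until write/IRQ */
	}
}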
712
713extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
714
715extern void select_idle_routine(const struct cpuinfo_x86 *c);
716
717extern unsigned long boot_option_idle_override;
718extern unsigned long idle_halt;
719extern unsigned long idle_nomwait;
720
721/*
722 * On systems with caches, caches must be flushed as the absolute
723 * last instruction before going into a suspended halt. Otherwise,
724 * dirty data can linger in the cache and become stale on resume,
725 * leading to strange errors.
726 *
727 * Perform a variety of operations to guarantee that the compiler
728 * will not reorder instructions. wbinvd itself is serializing
729 * so the processor will not reorder.
730 *
731 * Systems without cache can just go into halt.
732 */
733static inline void wbinvd_halt(void)
734{
735 mb();
736 /* check for clflush to determine if wbinvd is legal */
737 if (cpu_has_clflush)
738 asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
739 else
740 while (1)
741 halt();
742}
743
744extern void enable_sep_cpu(void);
745extern int sysenter_setup(void);
746
747/* Defined in head.S */
748extern struct desc_ptr early_gdt_descr;
749
750extern void cpu_set_gdt(int);
751extern void switch_to_new_gdt(void);
752extern void cpu_init(void);
753extern void init_gdt(int cpu);
754
755static inline void update_debugctlmsr(unsigned long debugctlmsr)
756{
757#ifndef CONFIG_X86_DEBUGCTLMSR
758 if (boot_cpu_data.x86 < 6)
759 return;
760#endif
761 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
762}
763
764/*
765 * from system description table in BIOS. Mostly for MCA use, but
766 * others may find it useful:
767 */
768extern unsigned int machine_id;
769extern unsigned int machine_submodel_id;
770extern unsigned int BIOS_revision;
771
772/* Boot loader type from the setup header: */
773extern int bootloader_type;
774
775extern char ignore_fpu_irq;
776
777#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
778#define ARCH_HAS_PREFETCHW
779#define ARCH_HAS_SPINLOCK_PREFETCH
780
781#ifdef CONFIG_X86_32
782# define BASE_PREFETCH ASM_NOP4
783# define ARCH_HAS_PREFETCH
784#else
785# define BASE_PREFETCH "prefetcht0 (%1)"
786#endif
787
788/*
789 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
790 *
791 * It's not worth caring about 3dnow prefetches for the K6
792 * because they are microcoded there and very slow.
793 */
794static inline void prefetch(const void *x)
795{
796 alternative_input(BASE_PREFETCH,
797 "prefetchnta (%1)",
798 X86_FEATURE_XMM,
799 "r" (x));
800}
801
802/*
803 * 3dnow prefetch to get an exclusive cache line.
804 * Useful for spinlocks to avoid one state transition in the
805 * cache coherency protocol:
806 */
807static inline void prefetchw(const void *x)
808{
809 alternative_input(BASE_PREFETCH,
810 "prefetchw (%1)",
811 X86_FEATURE_3DNOW,
812 "r" (x));
813}
814
815static inline void spin_lock_prefetch(const void *x)
816{
817 prefetchw(x);
818}
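/*
 * Illustrative sketch, not part of the original header: the usual
 * consumer of prefetch() -- pulling in the next node one iteration ahead
 * while walking a linked list.  The node type is made up for the example;
 * prefetching a NULL pointer is architecturally harmless, so no
 * end-of-list check is needed.
 */
struct example_node {
	struct example_node *next;
	int payload;
};

static inline int example_sum_list(struct example_node *n)
{
	int sum = 0;

	for (; n; n = n->next) {
		prefetch(n->next);	/* overlap the miss with this body */
		sum += n->payload;
	}
	return sum;
}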
819
820#ifdef CONFIG_X86_32
821/*
822 * User space process size: 3GB (default).
823 */
824#define TASK_SIZE PAGE_OFFSET
825#define STACK_TOP TASK_SIZE
826#define STACK_TOP_MAX STACK_TOP
827
828#define INIT_THREAD { \
829 .sp0 = sizeof(init_stack) + (long)&init_stack, \
830 .vm86_info = NULL, \
831 .sysenter_cs = __KERNEL_CS, \
832 .io_bitmap_ptr = NULL, \
833 .fs = __KERNEL_PERCPU, \
834}
835
836/*
837 * Note that the .io_bitmap member must be extra-big. This is because
838 * the CPU will access an additional byte beyond the end of the IO
839 * permission bitmap. The extra byte must be all 1 bits, and must
840 * be within the limit.
841 */
842#define INIT_TSS { \
843 .x86_tss = { \
844 .sp0 = sizeof(init_stack) + (long)&init_stack, \
845 .ss0 = __KERNEL_DS, \
846 .ss1 = __KERNEL_CS, \
847 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
848 }, \
849 .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
850}
851
852extern unsigned long thread_saved_pc(struct task_struct *tsk);
853
854#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
855#define KSTK_TOP(info) \
856({ \
857 unsigned long *__ptr = (unsigned long *)(info); \
858 (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
859})
860
861/*
862 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
863 * This is necessary to guarantee that the entire "struct pt_regs"
864 * is accessible even if the CPU hasn't stored the SS/ESP registers
865 * on the stack (interrupt gate does not save these registers
866 * when switching to the same priv ring).
867 * Therefore beware: accessing the ss/esp fields of the
868 * "struct pt_regs" is possible, but they may contain the
869 * completely wrong values.
870 */
871#define task_pt_regs(task) \
872({ \
873 struct pt_regs *__regs__; \
874 __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
875 __regs__ - 1; \
876})
877
878#define KSTK_ESP(task) (task_pt_regs(task)->sp)
879
880#else
881/*
882 * User space process size: 47 bits minus one guard page.
883 */
884#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE)
885
886/* This decides where the kernel will search for a free chunk of vm
887 * space during mmaps.
888 */
889#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
890 0xc0000000 : 0xFFFFe000)
891
892#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
893 IA32_PAGE_OFFSET : TASK_SIZE64)
894#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
895 IA32_PAGE_OFFSET : TASK_SIZE64)
896
897#define STACK_TOP TASK_SIZE
898#define STACK_TOP_MAX TASK_SIZE64
899
900#define INIT_THREAD { \
901 .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
902}
903
904#define INIT_TSS { \
905 .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
906}
907
908/*
909 * Return saved PC of a blocked thread.
910 * What is this good for? It will always be the scheduler or ret_from_fork.
911 */
912#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
913
914#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
915#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
916#endif /* CONFIG_X86_64 */
917
918extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
919 unsigned long new_sp);
920
921/*
922 * This decides where the kernel will search for a free chunk of vm
923 * space during mmaps.
924 */
925#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
926
927#define KSTK_EIP(task) (task_pt_regs(task)->ip)
928
929/* Get/set a process' ability to use the timestamp counter instruction */
930#define GET_TSC_CTL(adr) get_tsc_mode((adr))
931#define SET_TSC_CTL(val) set_tsc_mode((val))
932
933extern int get_tsc_mode(unsigned long adr);
934extern int set_tsc_mode(unsigned int val);
935
936#endif /* ASM_X86__PROCESSOR_H */
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
deleted file mode 100644
index 6e89e8b4de0e..000000000000
--- a/include/asm-x86/proto.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef ASM_X86__PROTO_H
2#define ASM_X86__PROTO_H
3
4#include <asm/ldt.h>
5
6/* misc architecture specific prototypes */
7
8extern void early_idt_handler(void);
9
10extern void system_call(void);
11extern void syscall_init(void);
12
13extern void ia32_syscall(void);
14extern void ia32_cstar_target(void);
15extern void ia32_sysenter_target(void);
16
17extern void syscall32_cpu_init(void);
18
19extern void check_efer(void);
20
21#ifdef CONFIG_X86_BIOS_REBOOT
22extern int reboot_force;
23#else
24static const int reboot_force = 0;
25#endif
26
27long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
28
29#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
30#define round_down(x, y) ((x) & ~((y) - 1))
31
32#endif /* ASM_X86__PROTO_H */
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
deleted file mode 100644
index 4298b8882a78..000000000000
--- a/include/asm-x86/ptrace-abi.h
+++ /dev/null
@@ -1,145 +0,0 @@
1#ifndef ASM_X86__PTRACE_ABI_H
2#define ASM_X86__PTRACE_ABI_H
3
4#ifdef __i386__
5
6#define EBX 0
7#define ECX 1
8#define EDX 2
9#define ESI 3
10#define EDI 4
11#define EBP 5
12#define EAX 6
13#define DS 7
14#define ES 8
15#define FS 9
16#define GS 10
17#define ORIG_EAX 11
18#define EIP 12
19#define CS 13
20#define EFL 14
21#define UESP 15
22#define SS 16
23#define FRAME_SIZE 17
24
25#else /* __i386__ */
26
27#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
28#define R15 0
29#define R14 8
30#define R13 16
31#define R12 24
32#define RBP 32
33#define RBX 40
34/* arguments: interrupts/non-tracing syscalls only save up to here */
35#define R11 48
36#define R10 56
37#define R9 64
38#define R8 72
39#define RAX 80
40#define RCX 88
41#define RDX 96
42#define RSI 104
43#define RDI 112
44#define ORIG_RAX 120 /* = ERROR */
45/* end of arguments */
46/* cpu exception frame or undefined in case of fast syscall. */
47#define RIP 128
48#define CS 136
49#define EFLAGS 144
50#define RSP 152
51#define SS 160
52#define ARGOFFSET R11
53#endif /* __ASSEMBLY__ */
54
55/* top of stack page */
56#define FRAME_SIZE 168
57
58#endif /* !__i386__ */
59
60/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
61#define PTRACE_GETREGS 12
62#define PTRACE_SETREGS 13
63#define PTRACE_GETFPREGS 14
64#define PTRACE_SETFPREGS 15
65#define PTRACE_GETFPXREGS 18
66#define PTRACE_SETFPXREGS 19
67
68#define PTRACE_OLDSETOPTIONS 21
69
70/* only useful for accessing 32-bit programs/kernels */
71#define PTRACE_GET_THREAD_AREA 25
72#define PTRACE_SET_THREAD_AREA 26
73
74#ifdef __x86_64__
75# define PTRACE_ARCH_PRCTL 30
76#endif
77
78#define PTRACE_SYSEMU 31
79#define PTRACE_SYSEMU_SINGLESTEP 32
80
81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
82
83#ifdef CONFIG_X86_PTRACE_BTS
84
85#ifndef __ASSEMBLY__
86#include <asm/types.h>
87
88/* configuration/status structure used in PTRACE_BTS_CONFIG and
89 PTRACE_BTS_STATUS commands.
90*/
91struct ptrace_bts_config {
92 /* requested or actual size of BTS buffer in bytes */
93 __u32 size;
94 /* bitmask of below flags */
95 __u32 flags;
96 /* buffer overflow signal */
97 __u32 signal;
98 /* actual size of bts_struct in bytes */
99 __u32 bts_size;
100};
101#endif /* __ASSEMBLY__ */
102
103#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
104#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
105#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
106 instead of wrapping around */
107#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
108
109#define PTRACE_BTS_CONFIG 40
110/* Configure branch trace recording.
111 ADDR points to a struct ptrace_bts_config.
112 DATA gives the size of that buffer.
113 A new buffer is allocated, if requested in the flags.
114 An overflow signal may only be requested for new buffers.
115 Returns the number of bytes read.
116*/
117#define PTRACE_BTS_STATUS 41
118/* Return the current configuration in a struct ptrace_bts_config
119 pointed to by ADDR; DATA gives the size of that buffer.
120 Returns the number of bytes written.
121*/
122#define PTRACE_BTS_SIZE 42
123/* Return the number of available BTS records for draining.
124 DATA and ADDR are ignored.
125*/
126#define PTRACE_BTS_GET 43
127/* Get a single BTS record.
128 DATA defines the index into the BTS array, where 0 is the newest
129 entry, and higher indices refer to older entries.
130 ADDR is pointing to struct bts_struct (see asm/ds.h).
131*/
132#define PTRACE_BTS_CLEAR 44
133/* Clear the BTS buffer.
134 DATA and ADDR are ignored.
135*/
136#define PTRACE_BTS_DRAIN 45
137/* Read all available BTS records and clear the buffer.
138 ADDR points to an array of struct bts_struct.
139 DATA gives the size of that buffer.
140 BTS records are read from oldest to newest.
141 Returns number of BTS records drained.
142*/
143#endif /* CONFIG_X86_PTRACE_BTS */
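/*
 * Illustrative sketch, not part of the original header: how a tracer
 * might request branch tracing for a stopped tracee with the commands
 * documented above.  The 4096-byte size and the argument order follow
 * the "ADDR points to the config, DATA gives its size" description, and
 * are assumptions of the example (userspace code, hence kept in #if 0).
 */
#if 0
	struct ptrace_bts_config cfg = {
		.size  = 4096,				/* request a 4 KB buffer */
		.flags = PTRACE_BTS_O_TRACE | PTRACE_BTS_O_ALLOC,
	};

	if (ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg)) < 0)
		perror("PTRACE_BTS_CONFIG");
#endif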
144
145#endif /* ASM_X86__PTRACE_ABI_H */
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
deleted file mode 100644
index a2025525a15a..000000000000
--- a/include/asm-x86/ptrace.h
+++ /dev/null
@@ -1,280 +0,0 @@
1#ifndef ASM_X86__PTRACE_H
2#define ASM_X86__PTRACE_H
3
4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h>
6#include <asm/processor-flags.h>
7
8#ifdef __KERNEL__
9#include <asm/ds.h> /* the DS BTS struct is used for ptrace too */
10#include <asm/segment.h>
11#endif
12
13#ifndef __ASSEMBLY__
14
15#ifdef __i386__
16/* this struct defines the way the registers are stored on the
17 stack during a system call. */
18
19#ifndef __KERNEL__
20
21struct pt_regs {
22 long ebx;
23 long ecx;
24 long edx;
25 long esi;
26 long edi;
27 long ebp;
28 long eax;
29 int xds;
30 int xes;
31 int xfs;
32 /* int gs; */
33 long orig_eax;
34 long eip;
35 int xcs;
36 long eflags;
37 long esp;
38 int xss;
39};
40
41#else /* __KERNEL__ */
42
43struct pt_regs {
44 unsigned long bx;
45 unsigned long cx;
46 unsigned long dx;
47 unsigned long si;
48 unsigned long di;
49 unsigned long bp;
50 unsigned long ax;
51 unsigned long ds;
52 unsigned long es;
53 unsigned long fs;
54 /* int gs; */
55 unsigned long orig_ax;
56 unsigned long ip;
57 unsigned long cs;
58 unsigned long flags;
59 unsigned long sp;
60 unsigned long ss;
61};
62
63#endif /* __KERNEL__ */
64
65#else /* __i386__ */
66
67#ifndef __KERNEL__
68
69struct pt_regs {
70 unsigned long r15;
71 unsigned long r14;
72 unsigned long r13;
73 unsigned long r12;
74 unsigned long rbp;
75 unsigned long rbx;
76/* arguments: non-interrupt/non-tracing syscalls only save up to here */
77 unsigned long r11;
78 unsigned long r10;
79 unsigned long r9;
80 unsigned long r8;
81 unsigned long rax;
82 unsigned long rcx;
83 unsigned long rdx;
84 unsigned long rsi;
85 unsigned long rdi;
86 unsigned long orig_rax;
87/* end of arguments */
88/* cpu exception frame or undefined */
89 unsigned long rip;
90 unsigned long cs;
91 unsigned long eflags;
92 unsigned long rsp;
93 unsigned long ss;
94/* top of stack page */
95};
96
97#else /* __KERNEL__ */
98
99struct pt_regs {
100 unsigned long r15;
101 unsigned long r14;
102 unsigned long r13;
103 unsigned long r12;
104 unsigned long bp;
105 unsigned long bx;
106/* arguments: non-interrupt/non-tracing syscalls only save up to here */
107 unsigned long r11;
108 unsigned long r10;
109 unsigned long r9;
110 unsigned long r8;
111 unsigned long ax;
112 unsigned long cx;
113 unsigned long dx;
114 unsigned long si;
115 unsigned long di;
116 unsigned long orig_ax;
117/* end of arguments */
118/* cpu exception frame or undefined */
119 unsigned long ip;
120 unsigned long cs;
121 unsigned long flags;
122 unsigned long sp;
123 unsigned long ss;
124/* top of stack page */
125};
126
127#endif /* __KERNEL__ */
128#endif /* !__i386__ */
129
130
131#ifdef CONFIG_X86_PTRACE_BTS
132/* a branch trace record entry
133 *
134 * In order to unify the interface between various processor versions,
135 * we use the below data structure for all processors.
136 */
137enum bts_qualifier {
138 BTS_INVALID = 0,
139 BTS_BRANCH,
140 BTS_TASK_ARRIVES,
141 BTS_TASK_DEPARTS
142};
143
144struct bts_struct {
145 __u64 qualifier;
146 union {
147 /* BTS_BRANCH */
148 struct {
149 __u64 from_ip;
150 __u64 to_ip;
151 } lbr;
152 /* BTS_TASK_ARRIVES or
153 BTS_TASK_DEPARTS */
154 __u64 jiffies;
155 } variant;
156};
157#endif /* CONFIG_X86_PTRACE_BTS */
158
159#ifdef __KERNEL__
160
161#include <linux/init.h>
162
163struct cpuinfo_x86;
164struct task_struct;
165
166#ifdef CONFIG_X86_PTRACE_BTS
167extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
168extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
169#else
170#define ptrace_bts_init_intel(config) do {} while (0)
171#endif /* CONFIG_X86_PTRACE_BTS */
172
173extern unsigned long profile_pc(struct pt_regs *regs);
174
175extern unsigned long
176convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
177extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
178 int error_code, int si_code);
179void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
180
181extern long syscall_trace_enter(struct pt_regs *);
182extern void syscall_trace_leave(struct pt_regs *);
183
184static inline unsigned long regs_return_value(struct pt_regs *regs)
185{
186 return regs->ax;
187}
188
189/*
190 * user_mode_vm(regs) determines whether a register set came from user mode.
191 * This is true if V8086 mode was enabled OR if the register set was from
192 * protected mode with RPL-3 CS value. This tricky test checks that with
193 * one comparison. Many places in the kernel can bypass this full check
194 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
195 */
196static inline int user_mode(struct pt_regs *regs)
197{
198#ifdef CONFIG_X86_32
199 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
200#else
201 return !!(regs->cs & 3);
202#endif
203}
204
205static inline int user_mode_vm(struct pt_regs *regs)
206{
207#ifdef CONFIG_X86_32
208 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
209 USER_RPL;
210#else
211 return user_mode(regs);
212#endif
213}
214
215static inline int v8086_mode(struct pt_regs *regs)
216{
217#ifdef CONFIG_X86_32
218 return (regs->flags & X86_VM_MASK);
219#else
220 return 0; /* No V86 mode support in long mode */
221#endif
222}
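/*
 * Illustrative sketch, not part of the original header: the typical
 * dispatch a fault handler builds from the predicates above.  Handlers
 * that have already excluded V86 mode can use the cheaper user_mode().
 */
static inline void example_classify_trap(struct pt_regs *regs)
{
	if (v8086_mode(regs)) {
		/* virtual-8086 task: emulate or reflect the fault */
	} else if (user_mode_vm(regs)) {
		/* genuine user-mode fault: deliver a signal */
	} else {
		/* kernel-mode fault: try an exception fixup, else oops */
	}
}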
223
224/*
225 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
226 * when it traps. So regs will be the current sp.
227 *
228 * This is valid only for kernel mode traps.
229 */
230static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
231{
232#ifdef CONFIG_X86_32
233 return (unsigned long)regs;
234#else
235 return regs->sp;
236#endif
237}
238
239static inline unsigned long instruction_pointer(struct pt_regs *regs)
240{
241 return regs->ip;
242}
243
244static inline unsigned long frame_pointer(struct pt_regs *regs)
245{
246 return regs->bp;
247}
248
249static inline unsigned long user_stack_pointer(struct pt_regs *regs)
250{
251 return regs->sp;
252}
253
254/*
255 * These are defined as per linux/ptrace.h, which see.
256 */
257#define arch_has_single_step() (1)
258extern void user_enable_single_step(struct task_struct *);
259extern void user_disable_single_step(struct task_struct *);
260
261extern void user_enable_block_step(struct task_struct *);
262#ifdef CONFIG_X86_DEBUGCTLMSR
263#define arch_has_block_step() (1)
264#else
265#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
266#endif
267
268struct user_desc;
269extern int do_get_thread_area(struct task_struct *p, int idx,
270 struct user_desc __user *info);
271extern int do_set_thread_area(struct task_struct *p, int idx,
272 struct user_desc __user *info, int can_allocate);
273
274#define __ARCH_WANT_COMPAT_SYS_PTRACE
275
276#endif /* __KERNEL__ */
277
278#endif /* !__ASSEMBLY__ */
279
280#endif /* ASM_X86__PTRACE_H */
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
deleted file mode 100644
index edb3b4ecfc81..000000000000
--- a/include/asm-x86/pvclock-abi.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef ASM_X86__PVCLOCK_ABI_H
2#define ASM_X86__PVCLOCK_ABI_H
3#ifndef __ASSEMBLY__
4
5/*
6 * These structs MUST NOT be changed.
7 * They are the ABI between hypervisor and guest OS.
8 * Both Xen and KVM are using this.
9 *
10 * pvclock_vcpu_time_info holds the system time and the tsc timestamp
11 * of the last update. So the guest can use the tsc delta to get a
12 * more precise system time. There is one per virtual cpu.
13 *
14 * pvclock_wall_clock references the point in time when the system
15 * time was zero (usually boot time), thus the guest calculates the
16 * current wall clock by adding the system time.
17 *
18 * Protocol for the "version" fields is: the hypervisor raises it (making
19 * it odd) before it starts updating the fields and raises it again
20 * (making it even) when it is done. Thus the guest can make sure the
21 * time values it got are consistent by checking the version before
22 * and after reading them.
23 */
24
25struct pvclock_vcpu_time_info {
26 u32 version;
27 u32 pad0;
28 u64 tsc_timestamp;
29 u64 system_time;
30 u32 tsc_to_system_mul;
31 s8 tsc_shift;
32 u8 pad[3];
33} __attribute__((__packed__)); /* 32 bytes */
34
35struct pvclock_wall_clock {
36 u32 version;
37 u32 sec;
38 u32 nsec;
39} __attribute__((__packed__));
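/*
 * Illustrative sketch, not part of the original ABI header: the lockless
 * read loop implied by the version protocol above.  The reader retries
 * while the version is odd (update in progress) or changed between the
 * two reads; rmb() from <asm/system.h> is an assumption of the sketch.
 */
static inline u64 example_read_system_time(struct pvclock_vcpu_time_info *src)
{
	u32 version;
	u64 time;

	do {
		version = src->version;
		rmb();			/* read the payload after the version */
		time = src->system_time;
		rmb();			/* re-check version after the payload */
	} while ((version & 1) || version != src->version);

	return time;
}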
40
41#endif /* __ASSEMBLY__ */
42#endif /* ASM_X86__PVCLOCK_ABI_H */
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
deleted file mode 100644
index ad29e277fd6d..000000000000
--- a/include/asm-x86/pvclock.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef ASM_X86__PVCLOCK_H
2#define ASM_X86__PVCLOCK_H
3
4#include <linux/clocksource.h>
5#include <asm/pvclock-abi.h>
6
7/* some helper functions for xen and kvm pv clock sources */
8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
9unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
10void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
11 struct pvclock_vcpu_time_info *vcpu,
12 struct timespec *ts);
13
14#endif /* ASM_X86__PVCLOCK_H */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
deleted file mode 100644
index 1c2f0ce9e31e..000000000000
--- a/include/asm-x86/reboot.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef ASM_X86__REBOOT_H
2#define ASM_X86__REBOOT_H
3
4struct pt_regs;
5
6struct machine_ops {
7 void (*restart)(char *cmd);
8 void (*halt)(void);
9 void (*power_off)(void);
10 void (*shutdown)(void);
11 void (*crash_shutdown)(struct pt_regs *);
12 void (*emergency_restart)(void);
13};
14
15extern struct machine_ops machine_ops;
16
17void native_machine_crash_shutdown(struct pt_regs *regs);
18void native_machine_shutdown(void);
19void machine_real_restart(const unsigned char *code, int length);
20
21#endif /* ASM_X86__REBOOT_H */
diff --git a/include/asm-x86/reboot_fixups.h b/include/asm-x86/reboot_fixups.h
deleted file mode 100644
index 2c2987d97570..000000000000
--- a/include/asm-x86/reboot_fixups.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef ASM_X86__REBOOT_FIXUPS_H
2#define ASM_X86__REBOOT_FIXUPS_H
3
4extern void mach_reboot_fixups(void);
5
6#endif /* ASM_X86__REBOOT_FIXUPS_H */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
deleted file mode 100644
index a01c4e376331..000000000000
--- a/include/asm-x86/required-features.h
+++ /dev/null
@@ -1,82 +0,0 @@
1#ifndef ASM_X86__REQUIRED_FEATURES_H
2#define ASM_X86__REQUIRED_FEATURES_H
3
4/* Define the minimum CPUID feature set for the kernel.  These bits are checked
5 really early to actually display a visible error message before the
6 kernel dies. Make sure to assign features to the proper mask!
7
8 Some requirements that are not in CPUID yet are also in the
9 CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
10
11   The real information is in arch/x86/Kconfig.cpu; this just converts
12 the CONFIGs into a bitmask */
13
14#ifndef CONFIG_MATH_EMULATION
15# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
16#else
17# define NEED_FPU 0
18#endif
19
20#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
21# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
22#else
23# define NEED_PAE 0
24#endif
25
26#ifdef CONFIG_X86_CMPXCHG64
27# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
28#else
29# define NEED_CX8 0
30#endif
31
32#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
33# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
34#else
35# define NEED_CMOV 0
36#endif
37
38#ifdef CONFIG_X86_USE_3DNOW
39# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
40#else
41# define NEED_3DNOW 0
42#endif
43
44#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
45# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
46#else
47# define NEED_NOPL 0
48#endif
49
50#ifdef CONFIG_X86_64
51#define NEED_PSE 0
52#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
53#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
54#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
55#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
56#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
57#define NEED_LM (1<<(X86_FEATURE_LM & 31))
58#else
59#define NEED_PSE 0
60#define NEED_MSR 0
61#define NEED_PGE 0
62#define NEED_FXSR 0
63#define NEED_XMM 0
64#define NEED_XMM2 0
65#define NEED_LM 0
66#endif
67
68#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
69 NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
70 NEED_XMM|NEED_XMM2)
71#define SSE_MASK (NEED_XMM|NEED_XMM2)
72
73#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
74
75#define REQUIRED_MASK2 0
76#define REQUIRED_MASK3 (NEED_NOPL)
77#define REQUIRED_MASK4 0
78#define REQUIRED_MASK5 0
79#define REQUIRED_MASK6 0
80#define REQUIRED_MASK7 0
81
82#endif /* ASM_X86__REQUIRED_FEATURES_H */
diff --git a/include/asm-x86/resource.h b/include/asm-x86/resource.h
deleted file mode 100644
index 04bc4db8921b..000000000000
--- a/include/asm-x86/resource.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/resource.h>
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
deleted file mode 100644
index e39376d7de50..000000000000
--- a/include/asm-x86/resume-trace.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef ASM_X86__RESUME_TRACE_H
2#define ASM_X86__RESUME_TRACE_H
3
4#include <asm/asm.h>
5
6#define TRACE_RESUME(user) \
7do { \
8 if (pm_trace_enabled) { \
9 const void *tracedata; \
10 asm volatile(_ASM_MOV " $1f,%0\n" \
11 ".section .tracedata,\"a\"\n" \
12 "1:\t.word %c1\n\t" \
13 _ASM_PTR " %c2\n" \
14 ".previous" \
15 :"=r" (tracedata) \
16 : "i" (__LINE__), "i" (__FILE__)); \
17 generate_resume_trace(tracedata, user); \
18 } \
19} while (0)
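/*
 * Illustrative sketch, not part of the original header: a resume hook
 * dropping a marker into .tracedata, assuming the CONFIG_PM_TRACE
 * machinery (pm_trace_enabled, generate_resume_trace) is configured in.
 */
static inline int example_device_resume(void)
{
	TRACE_RESUME(0);	/* records this file/line for post-hang analysis */
	return 0;
}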
20
21#endif /* ASM_X86__RESUME_TRACE_H */
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
deleted file mode 100644
index 5e1256bdee83..000000000000
--- a/include/asm-x86/rio.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Derived from include/asm-x86/mach-summit/mach_mpparse.h
3 * and include/asm-x86/mach-default/bios_ebda.h
4 *
5 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
6 */
7
8#ifndef ASM_X86__RIO_H
9#define ASM_X86__RIO_H
10
11#define RIO_TABLE_VERSION 3
12
13struct rio_table_hdr {
14 u8 version; /* Version number of this data structure */
15 u8 num_scal_dev; /* # of Scalability devices */
16 u8 num_rio_dev; /* # of RIO I/O devices */
17} __attribute__((packed));
18
19struct scal_detail {
20 u8 node_id; /* Scalability Node ID */
21 u32 CBAR; /* Address of 1MB register space */
22 u8 port0node; /* Node ID port connected to: 0xFF=None */
23 u8 port0port; /* Port num port connected to: 0,1,2, or */
24 /* 0xFF=None */
25 u8 port1node; /* Node ID port connected to: 0xFF = None */
26 u8 port1port; /* Port num port connected to: 0,1,2, or */
27 /* 0xFF=None */
28 u8 port2node; /* Node ID port connected to: 0xFF = None */
29 u8 port2port; /* Port num port connected to: 0,1,2, or */
30 /* 0xFF=None */
31 u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
32} __attribute__((packed));
33
34struct rio_detail {
35 u8 node_id; /* RIO Node ID */
36 u32 BBAR; /* Address of 1MB register space */
37 u8 type; /* Type of device */
38 u8 owner_id; /* Node ID of Hurricane that owns this */
39 /* node */
40 u8 port0node; /* Node ID port connected to: 0xFF=None */
41 u8 port0port; /* Port num port connected to: 0,1,2, or */
42 /* 0xFF=None */
43 u8 port1node; /* Node ID port connected to: 0xFF=None */
44 u8 port1port; /* Port num port connected to: 0,1,2, or */
45 /* 0xFF=None */
46 u8 first_slot; /* Lowest slot number below this Calgary */
47 u8 status; /* Bit 0 = 1 : the XAPIC is used */
48 /* = 0 : the XAPIC is not used, ie: */
49 /* ints fwded to another XAPIC */
50 /* Bits1:7 Reserved */
51 u8 WP_index; /* instance index - lower ones have */
52 /* lower slot numbers/PCI bus numbers */
53 u8 chassis_num; /* 1 based Chassis number */
54} __attribute__((packed));
55
56enum {
57 HURR_SCALABILTY = 0, /* Hurricane Scalability info */
58 HURR_RIOIB = 2, /* Hurricane RIOIB info */
59 COMPAT_CALGARY = 4, /* Compatibility Calgary */
60 ALT_CALGARY = 5, /* Second Planar Calgary */
61};
62
63#endif /* ASM_X86__RIO_H */
diff --git a/include/asm-x86/rtc.h b/include/asm-x86/rtc.h
deleted file mode 100644
index f71c3b0ed360..000000000000
--- a/include/asm-x86/rtc.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/rtc.h>
diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h
deleted file mode 100644
index 48a3109e1a7d..000000000000
--- a/include/asm-x86/rwlock.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef ASM_X86__RWLOCK_H
2#define ASM_X86__RWLOCK_H
3
4#define RW_LOCK_BIAS 0x01000000
5
6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
7
8#endif /* ASM_X86__RWLOCK_H */
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
deleted file mode 100644
index 3ff3015b71a8..000000000000
--- a/include/asm-x86/rwsem.h
+++ /dev/null
@@ -1,265 +0,0 @@
1/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
2 *
3 * Written by David Howells (dhowells@redhat.com).
4 *
5 * Derived from asm-x86/semaphore.h
6 *
7 *
8 * The MSW of the count is the negated number of active writers and waiting
9 * lockers, and the LSW is the total number of active locks
10 *
11 * The lock count is initialized to 0 (no active and no waiting lockers).
12 *
13 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
14 * uncontended lock. This can be determined because XADD returns the old value.
15 * Readers increment by 1 and see a positive value when uncontended, negative
16 * if there are writers (and maybe readers) waiting (in which case it goes
17 * sleep).
18 *
19 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
20 * be extended to 65534 by manually checking the whole MSW rather than relying
21 * on the S flag.
22 *
23 * The value of ACTIVE_BIAS supports up to 65535 active processes.
24 *
25 * This should be totally fair - if anything is waiting, a process that wants a
26 * lock will go to the back of the queue. When the currently active lock is
27 * released, if there's a writer at the front of the queue, then that and only
28 * that will be woken up; if there's a bunch of consecutive readers at the
29 * front, then they'll all be woken up, but no other readers will be.
30 */
31
32#ifndef ASM_X86__RWSEM_H
33#define ASM_X86__RWSEM_H
34
35#ifndef _LINUX_RWSEM_H
36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
37#endif
38
39#ifdef __KERNEL__
40
41#include <linux/list.h>
42#include <linux/spinlock.h>
43#include <linux/lockdep.h>
44
45struct rwsem_waiter;
46
47extern asmregparm struct rw_semaphore *
48 rwsem_down_read_failed(struct rw_semaphore *sem);
49extern asmregparm struct rw_semaphore *
50 rwsem_down_write_failed(struct rw_semaphore *sem);
51extern asmregparm struct rw_semaphore *
52 rwsem_wake(struct rw_semaphore *);
53extern asmregparm struct rw_semaphore *
54 rwsem_downgrade_wake(struct rw_semaphore *sem);
55
56/*
57 * the semaphore definition
58 */
59
60#define RWSEM_UNLOCKED_VALUE 0x00000000
61#define RWSEM_ACTIVE_BIAS 0x00000001
62#define RWSEM_ACTIVE_MASK 0x0000ffff
63#define RWSEM_WAITING_BIAS (-0x00010000)
64#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
65#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
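/*
 * Worked example (not in the original header) of the bias arithmetic:
 *
 *   initial count                              0x00000000
 *   one reader  (+RWSEM_ACTIVE_READ_BIAS)   -> 0x00000001  positive: got it
 *   one writer  (+RWSEM_ACTIVE_WRITE_BIAS)  -> 0xffff0001  old value 0: got it
 *   late reader (+1 on the writer-held lock)-> 0xffff0002  negative: sleep
 *
 * so the sign of the result alone tells a reader whether it raced with a
 * writer, which is exactly what the "jns" in __down_read() below tests.
 */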
66
67struct rw_semaphore {
68 signed long count;
69 spinlock_t wait_lock;
70 struct list_head wait_list;
71#ifdef CONFIG_DEBUG_LOCK_ALLOC
72 struct lockdep_map dep_map;
73#endif
74};
75
76#ifdef CONFIG_DEBUG_LOCK_ALLOC
77# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
78#else
79# define __RWSEM_DEP_MAP_INIT(lockname)
80#endif
81
82
83#define __RWSEM_INITIALIZER(name) \
84{ \
85 RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
86 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
87}
88
89#define DECLARE_RWSEM(name) \
90 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
91
92extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
93 struct lock_class_key *key);
94
95#define init_rwsem(sem) \
96do { \
97 static struct lock_class_key __key; \
98 \
99 __init_rwsem((sem), #sem, &__key); \
100} while (0)
101
102/*
103 * lock for reading
104 */
105static inline void __down_read(struct rw_semaphore *sem)
106{
107 asm volatile("# beginning down_read\n\t"
108 LOCK_PREFIX " incl (%%eax)\n\t"
109 /* adds 0x00000001, returns the old value */
110 " jns 1f\n"
111 " call call_rwsem_down_read_failed\n"
112 "1:\n\t"
113 "# ending down_read\n\t"
114 : "+m" (sem->count)
115 : "a" (sem)
116 : "memory", "cc");
117}
118
119/*
120 * trylock for reading -- returns 1 if successful, 0 if contention
121 */
122static inline int __down_read_trylock(struct rw_semaphore *sem)
123{
124 __s32 result, tmp;
125 asm volatile("# beginning __down_read_trylock\n\t"
126 " movl %0,%1\n\t"
127 "1:\n\t"
128 " movl %1,%2\n\t"
129 " addl %3,%2\n\t"
130 " jle 2f\n\t"
131 LOCK_PREFIX " cmpxchgl %2,%0\n\t"
132 " jnz 1b\n\t"
133 "2:\n\t"
134 "# ending __down_read_trylock\n\t"
135 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
136 : "i" (RWSEM_ACTIVE_READ_BIAS)
137 : "memory", "cc");
138 return result >= 0 ? 1 : 0;
139}
140
141/*
142 * lock for writing
143 */
144static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
145{
146 int tmp;
147
148 tmp = RWSEM_ACTIVE_WRITE_BIAS;
149 asm volatile("# beginning down_write\n\t"
150 LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
151 /* subtract 0x0000ffff, returns the old value */
152 " testl %%edx,%%edx\n\t"
153 /* was the count 0 before? */
154 " jz 1f\n"
155 " call call_rwsem_down_write_failed\n"
156 "1:\n"
157 "# ending down_write"
158 : "+m" (sem->count), "=d" (tmp)
159 : "a" (sem), "1" (tmp)
160 : "memory", "cc");
161}
162
163static inline void __down_write(struct rw_semaphore *sem)
164{
165 __down_write_nested(sem, 0);
166}
167
168/*
169 * trylock for writing -- returns 1 if successful, 0 if contention
170 */
171static inline int __down_write_trylock(struct rw_semaphore *sem)
172{
173 signed long ret = cmpxchg(&sem->count,
174 RWSEM_UNLOCKED_VALUE,
175 RWSEM_ACTIVE_WRITE_BIAS);
176 if (ret == RWSEM_UNLOCKED_VALUE)
177 return 1;
178 return 0;
179}
180
181/*
182 * unlock after reading
183 */
184static inline void __up_read(struct rw_semaphore *sem)
185{
186 __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
187 asm volatile("# beginning __up_read\n\t"
188 LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
189 /* subtracts 1, returns the old value */
190 " jns 1f\n\t"
191 " call call_rwsem_wake\n"
192 "1:\n"
193 "# ending __up_read\n"
194 : "+m" (sem->count), "=d" (tmp)
195 : "a" (sem), "1" (tmp)
196 : "memory", "cc");
197}
198
199/*
200 * unlock after writing
201 */
202static inline void __up_write(struct rw_semaphore *sem)
203{
204 asm volatile("# beginning __up_write\n\t"
205 " movl %2,%%edx\n\t"
206 LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
207 /* tries to transition
208 0xffff0001 -> 0x00000000 */
209 " jz 1f\n"
210 " call call_rwsem_wake\n"
211 "1:\n\t"
212 "# ending __up_write\n"
213 : "+m" (sem->count)
214 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
215 : "memory", "cc", "edx");
216}
217
218/*
219 * downgrade write lock to read lock
220 */
221static inline void __downgrade_write(struct rw_semaphore *sem)
222{
223 asm volatile("# beginning __downgrade_write\n\t"
224 LOCK_PREFIX " addl %2,(%%eax)\n\t"
225 /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
226 " jns 1f\n\t"
227 " call call_rwsem_downgrade_wake\n"
228 "1:\n\t"
229 "# ending __downgrade_write\n"
230 : "+m" (sem->count)
231 : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
232 : "memory", "cc");
233}
234
235/*
236 * implement atomic add functionality
237 */
238static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
239{
240 asm volatile(LOCK_PREFIX "addl %1,%0"
241 : "+m" (sem->count)
242 : "ir" (delta));
243}
244
245/*
246 * implement exchange and add functionality
247 */
248static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
249{
250 int tmp = delta;
251
252 asm volatile(LOCK_PREFIX "xadd %0,%1"
253 : "+r" (tmp), "+m" (sem->count)
254 : : "memory");
255
256 return tmp + delta;
257}
258
259static inline int rwsem_is_locked(struct rw_semaphore *sem)
260{
261 return (sem->count != 0);
262}
263
264#endif /* __KERNEL__ */
265#endif /* ASM_X86__RWSEM_H */
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h
deleted file mode 100644
index ee48f880005d..000000000000
--- a/include/asm-x86/scatterlist.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef ASM_X86__SCATTERLIST_H
2#define ASM_X86__SCATTERLIST_H
3
4#include <asm/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 unsigned int length;
13 dma_addr_t dma_address;
14 unsigned int dma_length;
15};
16
17#define ARCH_HAS_SG_CHAIN
18#define ISA_DMA_THRESHOLD (0x00ffffff)
19
20/*
21 * These macros should be used after a pci_map_sg call has been done
22 * to get bus addresses of each of the SG entries and their lengths.
23 * You should only work with the number of sg entries pci_map_sg
24 * returns.
25 */
26#define sg_dma_address(sg) ((sg)->dma_address)
27#ifdef CONFIG_X86_32
28# define sg_dma_len(sg) ((sg)->length)
29#else
30# define sg_dma_len(sg) ((sg)->dma_length)
31#endif
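/*
 * Illustrative sketch, not part of the original header: consuming mapped
 * entries as the comment above prescribes -- iterate only over the count
 * pci_map_sg() returned, not the original nents.  program_dma_descriptor()
 * is a hypothetical device helper; driver-side code, kept in #if 0 so the
 * header stays self-contained.
 */
#if 0
	int i, mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < mapped; i++)
		program_dma_descriptor(sg_dma_address(&sg[i]),
				       sg_dma_len(&sg[i]));
#endif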
32
33#endif /* ASM_X86__SCATTERLIST_H */
diff --git a/include/asm-x86/seccomp.h b/include/asm-x86/seccomp.h
deleted file mode 100644
index c62e58a5a90d..000000000000
--- a/include/asm-x86/seccomp.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "seccomp_32.h"
3#else
4# include "seccomp_64.h"
5#endif
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h
deleted file mode 100644
index cf9ab2dbcef1..000000000000
--- a/include/asm-x86/seccomp_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef ASM_X86__SECCOMP_32_H
2#define ASM_X86__SECCOMP_32_H
3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on i386"
8#endif
9
10#include <linux/unistd.h>
11
12#define __NR_seccomp_read __NR_read
13#define __NR_seccomp_write __NR_write
14#define __NR_seccomp_exit __NR_exit
15#define __NR_seccomp_sigreturn __NR_sigreturn
16
17#endif /* ASM_X86__SECCOMP_32_H */
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
deleted file mode 100644
index 03274cea751f..000000000000
--- a/include/asm-x86/seccomp_64.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef ASM_X86__SECCOMP_64_H
2#define ASM_X86__SECCOMP_64_H
3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on x86_64"
8#else
9#define TIF_32BIT TIF_IA32
10#endif
11
12#include <linux/unistd.h>
13#include <asm/ia32_unistd.h>
14
15#define __NR_seccomp_read __NR_read
16#define __NR_seccomp_write __NR_write
17#define __NR_seccomp_exit __NR_exit
18#define __NR_seccomp_sigreturn __NR_rt_sigreturn
19
20#define __NR_seccomp_read_32 __NR_ia32_read
21#define __NR_seccomp_write_32 __NR_ia32_write
22#define __NR_seccomp_exit_32 __NR_ia32_exit
23#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
24
25#endif /* ASM_X86__SECCOMP_64_H */
diff --git a/include/asm-x86/sections.h b/include/asm-x86/sections.h
deleted file mode 100644
index 2b8c5160388f..000000000000
--- a/include/asm-x86/sections.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sections.h>
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
deleted file mode 100644
index 5d6e69454891..000000000000
--- a/include/asm-x86/segment.h
+++ /dev/null
@@ -1,209 +0,0 @@
1#ifndef ASM_X86__SEGMENT_H
2#define ASM_X86__SEGMENT_H
3
4/* Constructor for a conventional segment GDT (or LDT) entry */
5/* This is a macro so it can be used in initializers */
6#define GDT_ENTRY(flags, base, limit) \
7 ((((base) & 0xff000000ULL) << (56-24)) | \
8 (((flags) & 0x0000f0ffULL) << 40) | \
9 (((limit) & 0x000f0000ULL) << (48-16)) | \
10 (((base) & 0x00ffffffULL) << 16) | \
11 (((limit) & 0x0000ffffULL)))
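/*
 * Worked example (not in the original header): the classic flat 4 GB
 * ring-0 code segment.  flags 0xc09a = granularity/32-bit, present,
 * DPL 0, code|read; base 0; limit 0xfffff pages:
 *
 *   GDT_ENTRY(0xc09a, 0, 0xfffff) == 0x00cf9a000000ffffULL
 */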
12
13/* Simple and small GDT entries for booting only */
14
15#define GDT_ENTRY_BOOT_CS 2
16#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
17
18#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
19#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
20
21#define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2)
22#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8)
23
24#ifdef CONFIG_X86_32
25/*
26 * The layout of the per-CPU GDT under Linux:
27 *
28 * 0 - null
29 * 1 - reserved
30 * 2 - reserved
31 * 3 - reserved
32 *
33 * 4 - unused <==== new cacheline
34 * 5 - unused
35 *
36 * ------- start of TLS (Thread-Local Storage) segments:
37 *
38 * 6 - TLS segment #1 [ glibc's TLS segment ]
39 * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
40 * 8 - TLS segment #3
41 * 9 - reserved
42 * 10 - reserved
43 * 11 - reserved
44 *
45 * ------- start of kernel segments:
46 *
47 * 12 - kernel code segment <==== new cacheline
48 * 13 - kernel data segment
49 * 14 - default user CS
50 * 15 - default user DS
51 * 16 - TSS
52 * 17 - LDT
53 * 18 - PNPBIOS support (16->32 gate)
54 * 19 - PNPBIOS support
55 * 20 - PNPBIOS support
56 * 21 - PNPBIOS support
57 * 22 - PNPBIOS support
58 * 23 - APM BIOS support
59 * 24 - APM BIOS support
60 * 25 - APM BIOS support
61 *
62 * 26 - ESPFIX small SS
63 * 27 - per-cpu [ offset to per-cpu data area ]
64 * 28 - unused
65 * 29 - unused
66 * 30 - unused
67 * 31 - TSS for double fault handler
68 */
69#define GDT_ENTRY_TLS_MIN 6
70#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
71
72#define GDT_ENTRY_DEFAULT_USER_CS 14
73
74#define GDT_ENTRY_DEFAULT_USER_DS 15
75
76#define GDT_ENTRY_KERNEL_BASE 12
77
78#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
79
80#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
81
82#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
83#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
84
85#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
86#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
87
88#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
89#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
90
91#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
92#ifdef CONFIG_SMP
93#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
94#else
95#define __KERNEL_PERCPU 0
96#endif
97
98#define GDT_ENTRY_DOUBLEFAULT_TSS 31
99
100/*
101 * The GDT has 32 entries
102 */
103#define GDT_ENTRIES 32
104
105/* The PnP BIOS entries in the GDT */
106#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
107#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
108#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
109#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
110#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
111
112/* The PnP BIOS selectors */
113#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
114#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
115#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
116#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
117#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
118
119/* Bottom two bits of selector give the ring privilege level */
120#define SEGMENT_RPL_MASK 0x3
121/* Bit 2 is table indicator (LDT/GDT) */
122#define SEGMENT_TI_MASK 0x4
123
124/* User mode is privilege level 3 */
125#define USER_RPL 0x3
126/* LDT segment has TI set, GDT has it cleared */
127#define SEGMENT_LDT 0x4
128#define SEGMENT_GDT 0x0
129
130/*
131 * Matching rules for certain types of segments.
132 */
133
134/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
135#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
136
137
138#else
139#include <asm/cache.h>
140
141#define GDT_ENTRY_KERNEL32_CS 1
142#define GDT_ENTRY_KERNEL_CS 2
143#define GDT_ENTRY_KERNEL_DS 3
144
145#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS * 8)
146
147/*
148 * we cannot use the same code segment descriptor for user and kernel
149 * -- not even in the long flat mode, because of different DPL /kkeil
150 * The segment offset needs to contain a RPL. Grr. -AK
151 * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
152 */
153#define GDT_ENTRY_DEFAULT_USER32_CS 4
154#define GDT_ENTRY_DEFAULT_USER_DS 5
155#define GDT_ENTRY_DEFAULT_USER_CS 6
156#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
157#define __USER32_DS __USER_DS
158
159#define GDT_ENTRY_TSS 8 /* needs two entries */
160#define GDT_ENTRY_LDT 10 /* needs two entries */
161#define GDT_ENTRY_TLS_MIN 12
162#define GDT_ENTRY_TLS_MAX 14
163
164#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
165#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
166
167/* TLS indexes for 64bit - hardcoded in arch_prctl */
168#define FS_TLS 0
169#define GS_TLS 1
170
171#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
172#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
173
174#define GDT_ENTRIES 16
175
176#endif
177
178#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
179#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
180#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
181#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
182#ifndef CONFIG_PARAVIRT
183#define get_kernel_rpl() 0
184#endif
185
186/* User mode is privilege level 3 */
187#define USER_RPL 0x3
188/* LDT segment has TI set, GDT has it cleared */
189#define SEGMENT_LDT 0x4
190#define SEGMENT_GDT 0x0
191
192/* Bottom two bits of selector give the ring privilege level */
193#define SEGMENT_RPL_MASK 0x3
194/* Bit 2 is table indicator (LDT/GDT) */
195#define SEGMENT_TI_MASK 0x4
196
197#define IDT_ENTRIES 256
198#define NUM_EXCEPTION_VECTORS 32
199#define GDT_SIZE (GDT_ENTRIES * 8)
200#define GDT_ENTRY_TLS_ENTRIES 3
201#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
202
203#ifdef __KERNEL__
204#ifndef __ASSEMBLY__
205extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
206#endif
207#endif
208
209#endif /* ASM_X86__SEGMENT_H */
diff --git a/include/asm-x86/sembuf.h b/include/asm-x86/sembuf.h
deleted file mode 100644
index 81f06b7e5a3f..000000000000
--- a/include/asm-x86/sembuf.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef ASM_X86__SEMBUF_H
2#define ASM_X86__SEMBUF_H
3
4/*
5 * The semid64_ds structure for x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13struct semid64_ds {
14 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
15 __kernel_time_t sem_otime; /* last semop time */
16 unsigned long __unused1;
17 __kernel_time_t sem_ctime; /* last change time */
18 unsigned long __unused2;
19 unsigned long sem_nsems; /* no. of semaphores in array */
20 unsigned long __unused3;
21 unsigned long __unused4;
22};
23
24#endif /* ASM_X86__SEMBUF_H */
diff --git a/include/asm-x86/serial.h b/include/asm-x86/serial.h
deleted file mode 100644
index 303660b671e5..000000000000
--- a/include/asm-x86/serial.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef ASM_X86__SERIAL_H
2#define ASM_X86__SERIAL_H
3
4/*
5 * This assumes you have a 1.8432 MHz clock for your UART.
6 *
7 * It'd be nice if someone built a serial card with a 24.576 MHz
8 * clock, since the 16550A is capable of handling a top speed of 1.5
9 * megabits/second; but this requires the faster clock.
10 */
11#define BASE_BAUD ( 1843200 / 16 )
12
13/* Standard COM flags (except for COM4, because of the 8514 problem) */
14#ifdef CONFIG_SERIAL_DETECT_IRQ
15#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
16#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
17#else
18#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
19#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
20#endif
21
22#define SERIAL_PORT_DFNS \
23 /* UART CLK PORT IRQ FLAGS */ \
24 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
25 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
28
29#endif /* ASM_X86__SERIAL_H */
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
deleted file mode 100644
index 11b6cc14b289..000000000000
--- a/include/asm-x86/setup.h
+++ /dev/null
@@ -1,105 +0,0 @@
1#ifndef ASM_X86__SETUP_H
2#define ASM_X86__SETUP_H
3
4#define COMMAND_LINE_SIZE 2048
5
6#ifndef __ASSEMBLY__
7
8/* Interrupt control for vSMPowered x86_64 systems */
9void vsmp_init(void);
10
11#ifdef CONFIG_X86_VISWS
12extern void visws_early_detect(void);
13extern int is_visws_box(void);
14#else
15static inline void visws_early_detect(void) { }
16static inline int is_visws_box(void) { return 0; }
17#endif
18
19/*
20 * Any setup quirks to be performed?
21 */
22struct mpc_config_processor;
23struct mpc_config_bus;
24struct mp_config_oemtable;
25struct x86_quirks {
26 int (*arch_pre_time_init)(void);
27 int (*arch_time_init)(void);
28 int (*arch_pre_intr_init)(void);
29 int (*arch_intr_init)(void);
30 int (*arch_trap_init)(void);
31 char * (*arch_memory_setup)(void);
32 int (*mach_get_smp_config)(unsigned int early);
33 int (*mach_find_smp_config)(unsigned int reserve);
34
35 int *mpc_record;
36 int (*mpc_apic_id)(struct mpc_config_processor *m);
37 void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name);
38 void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
39 void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
40 unsigned short oemsize);
41 int (*setup_ioapic_ids)(void);
42};
43
44extern struct x86_quirks *x86_quirks;
45extern unsigned long saved_video_mode;
46
47#ifndef CONFIG_PARAVIRT
48#define paravirt_post_allocator_init() do {} while (0)
49#endif
50#endif /* __ASSEMBLY__ */
51
52#ifdef __KERNEL__
53
54#ifdef __i386__
55
56#include <linux/pfn.h>
57/*
58 * Reserved space for vmalloc and iomap - defined in asm/page.h
59 */
60#define MAXMEM_PFN PFN_DOWN(MAXMEM)
61#define MAX_NONPAE_PFN (1 << 20)
62
63#endif /* __i386__ */
64
65#define PARAM_SIZE 4096 /* sizeof(struct boot_params) */
66
67#define OLD_CL_MAGIC 0xA33F
68#define OLD_CL_ADDRESS 0x020 /* Relative to real mode data */
69#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
70
71#ifndef __ASSEMBLY__
72#include <asm/bootparam.h>
73
74#ifndef _SETUP
75
76/*
77 * This is set up by the setup-routine at boot-time
78 */
79extern struct boot_params boot_params;
80
81/*
82 * Do NOT EVER look at the BIOS memory size location.
83 * It does not work on many machines.
84 */
85#define LOWMEMSIZE() (0x9f000)
86
87#ifdef __i386__
88
89void __init i386_start_kernel(void);
90extern void probe_roms(void);
91
92extern unsigned long init_pg_tables_start;
93extern unsigned long init_pg_tables_end;
94
95#else
96void __init x86_64_init_pda(void);
97void __init x86_64_start_kernel(char *real_mode);
98void __init x86_64_start_reservations(char *real_mode_data);
99
100#endif /* __i386__ */
101#endif /* _SETUP */
102#endif /* __ASSEMBLY__ */
103#endif /* __KERNEL__ */
104
105#endif /* ASM_X86__SETUP_H */
diff --git a/include/asm-x86/shmbuf.h b/include/asm-x86/shmbuf.h
deleted file mode 100644
index f51aec2298e9..000000000000
--- a/include/asm-x86/shmbuf.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef ASM_X86__SHMBUF_H
2#define ASM_X86__SHMBUF_H
3
4/*
5 * The shmid64_ds structure for x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space on 32-bit is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 *
13 * Pad space on 64-bit is left for:
14 * - 2 miscellaneous 64-bit values
15 */
16
17struct shmid64_ds {
18 struct ipc64_perm shm_perm; /* operation perms */
19 size_t shm_segsz; /* size of segment (bytes) */
20 __kernel_time_t shm_atime; /* last attach time */
21#ifdef __i386__
22 unsigned long __unused1;
23#endif
24 __kernel_time_t shm_dtime; /* last detach time */
25#ifdef __i386__
26 unsigned long __unused2;
27#endif
28 __kernel_time_t shm_ctime; /* last change time */
29#ifdef __i386__
30 unsigned long __unused3;
31#endif
32 __kernel_pid_t shm_cpid; /* pid of creator */
33 __kernel_pid_t shm_lpid; /* pid of last operator */
34 unsigned long shm_nattch; /* no. of current attaches */
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39struct shminfo64 {
40 unsigned long shmmax;
41 unsigned long shmmin;
42 unsigned long shmmni;
43 unsigned long shmseg;
44 unsigned long shmall;
45 unsigned long __unused1;
46 unsigned long __unused2;
47 unsigned long __unused3;
48 unsigned long __unused4;
49};
50
51#endif /* ASM_X86__SHMBUF_H */
diff --git a/include/asm-x86/shmparam.h b/include/asm-x86/shmparam.h
deleted file mode 100644
index a83a1fd96a0e..000000000000
--- a/include/asm-x86/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef ASM_X86__SHMPARAM_H
2#define ASM_X86__SHMPARAM_H
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* ASM_X86__SHMPARAM_H */
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
deleted file mode 100644
index ee813f4fe5d5..000000000000
--- a/include/asm-x86/sigcontext.h
+++ /dev/null
@@ -1,284 +0,0 @@
1#ifndef ASM_X86__SIGCONTEXT_H
2#define ASM_X86__SIGCONTEXT_H
3
4#include <linux/compiler.h>
5#include <asm/types.h>
6
7#define FP_XSTATE_MAGIC1 0x46505853U
8#define FP_XSTATE_MAGIC2 0x46505845U
9#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
10
11/*
12 * bytes 464..511 in the current 512-byte layout of the fxsave/fxrstor frame
13 * are reserved for SW usage. On CPUs supporting xsave/xrstor, these bytes
14 * are used to extend the fpstate pointer in the sigcontext, which now
15 * includes the extended state information along with fpstate information.
16 *
17 * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
18 * area and FP_XSTATE_MAGIC2 at the end of memory layout
19 * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
20 * extended state information in the memory layout pointed to by the fpstate
21 * pointer in sigcontext.
22 */
23struct _fpx_sw_bytes {
24 __u32 magic1; /* FP_XSTATE_MAGIC1 */
25 __u32 extended_size; /* total size of the layout referred to by
26 * the fpstate pointer in the sigcontext.
27 */
28 __u64 xstate_bv;
29 /* feature bit mask (including fp/sse/extended
30 * state) that is present in the memory
31 * layout.
32 */
33 __u32 xstate_size; /* actual xsave state size, based on the
34 * features saved in the layout.
35 * 'extended_size' will be greater than
36 * 'xstate_size'.
37 */
38 __u32 padding[7]; /* for future use. */
39};
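
To make the handshake above concrete, here is a minimal sketch of how a signal handler might probe a frame for the extended state; it uses only names defined in this header, but the helper itself is hypothetical and elides the bounds checks a real consumer would want:

/* Hypothetical helper: returns 1 if fp carries xsave/xrstor state. */
static int has_extended_state(struct _fpstate *fp)
{
	struct _fpx_sw_bytes *sw = &fp->sw_reserved;
	__u32 *magic2;

	if (sw->magic1 != FP_XSTATE_MAGIC1)
		return 0;	/* legacy frame: fpstate info only */

	/* FP_XSTATE_MAGIC2 occupies the last word of the extended layout. */
	magic2 = (__u32 *)((char *)fp + sw->extended_size
			   - FP_XSTATE_MAGIC2_SIZE);
	return *magic2 == FP_XSTATE_MAGIC2;
}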
40
41#ifdef __i386__
42/*
43 * As documented in the iBCS2 standard..
44 *
45 * The first part of "struct _fpstate" is just the normal i387
46 * hardware setup, the extra "status" word is used to save the
47 * coprocessor status word before entering the handler.
48 *
49 * Pentium III FXSR, SSE support
50 * Gareth Hughes <gareth@valinux.com>, May 2000
51 *
52 * The FPU state data structure has had to grow to accommodate the
53 * extended FPU state required by the Streaming SIMD Extensions.
54 * There is no documented standard to accomplish this at the moment.
55 */
56struct _fpreg {
57 unsigned short significand[4];
58 unsigned short exponent;
59};
60
61struct _fpxreg {
62 unsigned short significand[4];
63 unsigned short exponent;
64 unsigned short padding[3];
65};
66
67struct _xmmreg {
68 unsigned long element[4];
69};
70
71struct _fpstate {
72 /* Regular FPU environment */
73 unsigned long cw;
74 unsigned long sw;
75 unsigned long tag;
76 unsigned long ipoff;
77 unsigned long cssel;
78 unsigned long dataoff;
79 unsigned long datasel;
80 struct _fpreg _st[8];
81 unsigned short status;
82 unsigned short magic; /* 0xffff = regular FPU data only */
83
84 /* FXSR FPU environment */
85 unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
86 unsigned long mxcsr;
87 unsigned long reserved;
88 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
89 struct _xmmreg _xmm[8];
90 unsigned long padding1[44];
91
92 union {
93 unsigned long padding2[12];
94 struct _fpx_sw_bytes sw_reserved; /* represents the extended
95 * state info */
96 };
97};
98
99#define X86_FXSR_MAGIC 0x0000
100
101#ifdef __KERNEL__
102struct sigcontext {
103 unsigned short gs, __gsh;
104 unsigned short fs, __fsh;
105 unsigned short es, __esh;
106 unsigned short ds, __dsh;
107 unsigned long di;
108 unsigned long si;
109 unsigned long bp;
110 unsigned long sp;
111 unsigned long bx;
112 unsigned long dx;
113 unsigned long cx;
114 unsigned long ax;
115 unsigned long trapno;
116 unsigned long err;
117 unsigned long ip;
118 unsigned short cs, __csh;
119 unsigned long flags;
120 unsigned long sp_at_signal;
121 unsigned short ss, __ssh;
122
123 /*
124 * fpstate is really (struct _fpstate *) or (struct _xstate *)
125 * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
126 * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
127 * of extended memory layout. See comments at the definition of
128 * (struct _fpx_sw_bytes)
129 */
130 void __user *fpstate; /* zero when no FPU/extended context */
131 unsigned long oldmask;
132 unsigned long cr2;
133};
134#else /* __KERNEL__ */
135/*
136 * User-space might still rely on the old definition:
137 */
138struct sigcontext {
139 unsigned short gs, __gsh;
140 unsigned short fs, __fsh;
141 unsigned short es, __esh;
142 unsigned short ds, __dsh;
143 unsigned long edi;
144 unsigned long esi;
145 unsigned long ebp;
146 unsigned long esp;
147 unsigned long ebx;
148 unsigned long edx;
149 unsigned long ecx;
150 unsigned long eax;
151 unsigned long trapno;
152 unsigned long err;
153 unsigned long eip;
154 unsigned short cs, __csh;
155 unsigned long eflags;
156 unsigned long esp_at_signal;
157 unsigned short ss, __ssh;
158 struct _fpstate __user *fpstate;
159 unsigned long oldmask;
160 unsigned long cr2;
161};
162#endif /* !__KERNEL__ */
163
164#else /* __i386__ */
165
166/* FXSAVE frame */
167/* Note: reserved1/2 may someday contain valuable data. Always save/restore
168 them when you change signal frames. */
169struct _fpstate {
170 __u16 cwd;
171 __u16 swd;
172 __u16 twd; /* Note this is not the same as the
173 32bit/x87/FSAVE twd */
174 __u16 fop;
175 __u64 rip;
176 __u64 rdp;
177 __u32 mxcsr;
178 __u32 mxcsr_mask;
179 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
180 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
181 __u32 reserved2[12];
182 union {
183 __u32 reserved3[12];
184 struct _fpx_sw_bytes sw_reserved; /* represents the extended
185 * state information */
186 };
187};
188
189#ifdef __KERNEL__
190struct sigcontext {
191 unsigned long r8;
192 unsigned long r9;
193 unsigned long r10;
194 unsigned long r11;
195 unsigned long r12;
196 unsigned long r13;
197 unsigned long r14;
198 unsigned long r15;
199 unsigned long di;
200 unsigned long si;
201 unsigned long bp;
202 unsigned long bx;
203 unsigned long dx;
204 unsigned long ax;
205 unsigned long cx;
206 unsigned long sp;
207 unsigned long ip;
208 unsigned long flags;
209 unsigned short cs;
210 unsigned short gs;
211 unsigned short fs;
212 unsigned short __pad0;
213 unsigned long err;
214 unsigned long trapno;
215 unsigned long oldmask;
216 unsigned long cr2;
217
218 /*
219 * fpstate is really (struct _fpstate *) or (struct _xstate *)
220 * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
221 * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
222 * of extended memory layout. See comments at the definition of
223 * (struct _fpx_sw_bytes)
224 */
225 void __user *fpstate; /* zero when no FPU/extended context */
226 unsigned long reserved1[8];
227};
228#else /* __KERNEL__ */
229/*
230 * User-space might still rely on the old definition:
231 */
232struct sigcontext {
233 unsigned long r8;
234 unsigned long r9;
235 unsigned long r10;
236 unsigned long r11;
237 unsigned long r12;
238 unsigned long r13;
239 unsigned long r14;
240 unsigned long r15;
241 unsigned long rdi;
242 unsigned long rsi;
243 unsigned long rbp;
244 unsigned long rbx;
245 unsigned long rdx;
246 unsigned long rax;
247 unsigned long rcx;
248 unsigned long rsp;
249 unsigned long rip;
250 unsigned long eflags; /* RFLAGS */
251 unsigned short cs;
252 unsigned short gs;
253 unsigned short fs;
254 unsigned short __pad0;
255 unsigned long err;
256 unsigned long trapno;
257 unsigned long oldmask;
258 unsigned long cr2;
259 struct _fpstate __user *fpstate; /* zero when no FPU context */
260 unsigned long reserved1[8];
261};
262#endif /* !__KERNEL__ */
263
264#endif /* !__i386__ */
265
266struct _xsave_hdr {
267 __u64 xstate_bv;
268 __u64 reserved1[2];
269 __u64 reserved2[5];
270};
271
272/*
273 * Extended state pointed to by the fpstate pointer in the sigcontext.
274 * In addition to the fpstate, information encoded in the xstate_hdr
275 * indicates the presence of other extended state information
276 * supported by the processor and OS.
277 */
278struct _xstate {
279 struct _fpstate fpstate;
280 struct _xsave_hdr xstate_hdr;
281 /* new processor state extensions go here */
282};
283
284#endif /* ASM_X86__SIGCONTEXT_H */
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
deleted file mode 100644
index 8c347032c2f2..000000000000
--- a/include/asm-x86/sigcontext32.h
+++ /dev/null
@@ -1,75 +0,0 @@
1#ifndef ASM_X86__SIGCONTEXT32_H
2#define ASM_X86__SIGCONTEXT32_H
3
4/* signal context for 32-bit programs. */
5
6#define X86_FXSR_MAGIC 0x0000
7
8struct _fpreg {
9 unsigned short significand[4];
10 unsigned short exponent;
11};
12
13struct _fpxreg {
14 unsigned short significand[4];
15 unsigned short exponent;
16 unsigned short padding[3];
17};
18
19struct _xmmreg {
20 __u32 element[4];
21};
22
23/* FSAVE frame with extensions */
24struct _fpstate_ia32 {
25 /* Regular FPU environment */
26 __u32 cw;
27 __u32 sw;
28 __u32 tag; /* not compatible with the 64-bit twd */
29 __u32 ipoff;
30 __u32 cssel;
31 __u32 dataoff;
32 __u32 datasel;
33 struct _fpreg _st[8];
34 unsigned short status;
35 unsigned short magic; /* 0xffff = regular FPU data only */
36
37 /* FXSR FPU environment */
38 __u32 _fxsr_env[6];
39 __u32 mxcsr;
40 __u32 reserved;
41 struct _fpxreg _fxsr_st[8];
42 struct _xmmreg _xmm[8]; /* It's actually 16 */
43 __u32 padding[44];
44 union {
45 __u32 padding2[12];
46 struct _fpx_sw_bytes sw_reserved;
47 };
48};
49
50struct sigcontext_ia32 {
51 unsigned short gs, __gsh;
52 unsigned short fs, __fsh;
53 unsigned short es, __esh;
54 unsigned short ds, __dsh;
55 unsigned int di;
56 unsigned int si;
57 unsigned int bp;
58 unsigned int sp;
59 unsigned int bx;
60 unsigned int dx;
61 unsigned int cx;
62 unsigned int ax;
63 unsigned int trapno;
64 unsigned int err;
65 unsigned int ip;
66 unsigned short cs, __csh;
67 unsigned int flags;
68 unsigned int sp_at_signal;
69 unsigned short ss, __ssh;
70 unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
71 unsigned int oldmask;
72 unsigned int cr2;
73};
74
75#endif /* ASM_X86__SIGCONTEXT32_H */
diff --git a/include/asm-x86/siginfo.h b/include/asm-x86/siginfo.h
deleted file mode 100644
index 808bdfb2958c..000000000000
--- a/include/asm-x86/siginfo.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef ASM_X86__SIGINFO_H
2#define ASM_X86__SIGINFO_H
3
4#ifdef __x86_64__
5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
6#endif
7
8#include <asm-generic/siginfo.h>
9
10#endif /* ASM_X86__SIGINFO_H */
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
deleted file mode 100644
index 65acc82d267a..000000000000
--- a/include/asm-x86/signal.h
+++ /dev/null
@@ -1,262 +0,0 @@
1#ifndef ASM_X86__SIGNAL_H
2#define ASM_X86__SIGNAL_H
3
4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#include <linux/time.h>
7#include <linux/compiler.h>
8
9/* Avoid too many header ordering problems. */
10struct siginfo;
11
12#ifdef __KERNEL__
13#include <linux/linkage.h>
14
15/* Most things should be clean enough to redefine this at will, if care
16 is taken to make libc match. */
17
18#define _NSIG 64
19
20#ifdef __i386__
21# define _NSIG_BPW 32
22#else
23# define _NSIG_BPW 64
24#endif
25
26#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
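/* e.g. _NSIG_WORDS = 64/32 = 2 on i386 and 64/64 = 1 on x86_64 */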
27
28typedef unsigned long old_sigset_t; /* at least 32 bits */
29
30typedef struct {
31 unsigned long sig[_NSIG_WORDS];
32} sigset_t;
33
34#else
35/* Here we must cater to libcs that poke about in kernel headers. */
36
37#define NSIG 32
38typedef unsigned long sigset_t;
39
40#endif /* __KERNEL__ */
41#endif /* __ASSEMBLY__ */
42
43#define SIGHUP 1
44#define SIGINT 2
45#define SIGQUIT 3
46#define SIGILL 4
47#define SIGTRAP 5
48#define SIGABRT 6
49#define SIGIOT 6
50#define SIGBUS 7
51#define SIGFPE 8
52#define SIGKILL 9
53#define SIGUSR1 10
54#define SIGSEGV 11
55#define SIGUSR2 12
56#define SIGPIPE 13
57#define SIGALRM 14
58#define SIGTERM 15
59#define SIGSTKFLT 16
60#define SIGCHLD 17
61#define SIGCONT 18
62#define SIGSTOP 19
63#define SIGTSTP 20
64#define SIGTTIN 21
65#define SIGTTOU 22
66#define SIGURG 23
67#define SIGXCPU 24
68#define SIGXFSZ 25
69#define SIGVTALRM 26
70#define SIGPROF 27
71#define SIGWINCH 28
72#define SIGIO 29
73#define SIGPOLL SIGIO
74/*
75#define SIGLOST 29
76*/
77#define SIGPWR 30
78#define SIGSYS 31
79#define SIGUNUSED 31
80
81/* These should not be considered constants from userland. */
82#define SIGRTMIN 32
83#define SIGRTMAX _NSIG
84
85/*
86 * SA_FLAGS values:
87 *
88 * SA_ONSTACK indicates that a registered stack_t will be used.
89 * SA_RESTART flag to get restarting signals (which were the default long ago)
90 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
91 * SA_RESETHAND clears the handler when the signal is delivered.
92 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
93 * SA_NODEFER prevents the current signal from being masked in the handler.
94 *
95 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
96 * Unix names RESETHAND and NODEFER respectively.
97 */
98#define SA_NOCLDSTOP 0x00000001u
99#define SA_NOCLDWAIT 0x00000002u
100#define SA_SIGINFO 0x00000004u
101#define SA_ONSTACK 0x08000000u
102#define SA_RESTART 0x10000000u
103#define SA_NODEFER 0x40000000u
104#define SA_RESETHAND 0x80000000u
105
106#define SA_NOMASK SA_NODEFER
107#define SA_ONESHOT SA_RESETHAND
108
109#define SA_RESTORER 0x04000000
110
111/*
112 * sigaltstack controls
113 */
114#define SS_ONSTACK 1
115#define SS_DISABLE 2
116
117#define MINSIGSTKSZ 2048
118#define SIGSTKSZ 8192
119
120#include <asm-generic/signal.h>
121
122#ifndef __ASSEMBLY__
123
124#ifdef __i386__
125# ifdef __KERNEL__
126struct old_sigaction {
127 __sighandler_t sa_handler;
128 old_sigset_t sa_mask;
129 unsigned long sa_flags;
130 __sigrestore_t sa_restorer;
131};
132
133struct sigaction {
134 __sighandler_t sa_handler;
135 unsigned long sa_flags;
136 __sigrestore_t sa_restorer;
137 sigset_t sa_mask; /* mask last for extensibility */
138};
139
140struct k_sigaction {
141 struct sigaction sa;
142};
143
144extern void do_notify_resume(struct pt_regs *, void *, __u32);
145
146# else /* __KERNEL__ */
147/* Here we must cater to libcs that poke about in kernel headers. */
148
149struct sigaction {
150 union {
151 __sighandler_t _sa_handler;
152 void (*_sa_sigaction)(int, struct siginfo *, void *);
153 } _u;
154 sigset_t sa_mask;
155 unsigned long sa_flags;
156 void (*sa_restorer)(void);
157};
158
159#define sa_handler _u._sa_handler
160#define sa_sigaction _u._sa_sigaction
161
162# endif /* ! __KERNEL__ */
163#else /* __i386__ */
164
165struct sigaction {
166 __sighandler_t sa_handler;
167 unsigned long sa_flags;
168 __sigrestore_t sa_restorer;
169 sigset_t sa_mask; /* mask last for extensibility */
170};
171
172struct k_sigaction {
173 struct sigaction sa;
174};
175
176#endif /* !__i386__ */
177
178typedef struct sigaltstack {
179 void __user *ss_sp;
180 int ss_flags;
181 size_t ss_size;
182} stack_t;
183
184#ifdef __KERNEL__
185#include <asm/sigcontext.h>
186
187#ifdef __i386__
188
189#define __HAVE_ARCH_SIG_BITOPS
190
191#define sigaddset(set,sig) \
192 (__builtin_constant_p(sig) \
193 ? __const_sigaddset((set), (sig)) \
194 : __gen_sigaddset((set), (sig)))
195
196static inline void __gen_sigaddset(sigset_t *set, int _sig)
197{
198 asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
199}
200
201static inline void __const_sigaddset(sigset_t *set, int _sig)
202{
203 unsigned long sig = _sig - 1;
204 set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
205}
206
207#define sigdelset(set, sig) \
208 (__builtin_constant_p(sig) \
209 ? __const_sigdelset((set), (sig)) \
210 : __gen_sigdelset((set), (sig)))
211
212
213static inline void __gen_sigdelset(sigset_t *set, int _sig)
214{
215 asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
216}
217
218static inline void __const_sigdelset(sigset_t *set, int _sig)
219{
220 unsigned long sig = _sig - 1;
221 set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
222}
223
224static inline int __const_sigismember(sigset_t *set, int _sig)
225{
226 unsigned long sig = _sig - 1;
227 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
228}
229
230static inline int __gen_sigismember(sigset_t *set, int _sig)
231{
232 int ret;
233 asm("btl %2,%1\n\tsbbl %0,%0"
234 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
235 return ret;
236}
237
238#define sigismember(set, sig) \
239 (__builtin_constant_p(sig) \
240 ? __const_sigismember((set), (sig)) \
241 : __gen_sigismember((set), (sig)))
242
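/*
 * Find the index of the lowest set bit in word (bsf); as with the
 * underlying instruction, the result is undefined when word == 0.
 */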
243static inline int sigfindinword(unsigned long word)
244{
245 asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
246 return word;
247}
248
249struct pt_regs;
250
251#else /* __i386__ */
252
253#undef __HAVE_ARCH_SIG_BITOPS
254
255#endif /* !__i386__ */
256
257#define ptrace_signal_deliver(regs, cookie) do { } while (0)
258
259#endif /* __KERNEL__ */
260#endif /* __ASSEMBLY__ */
261
262#endif /* ASM_X86__SIGNAL_H */
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
deleted file mode 100644
index a6afc29f2dd9..000000000000
--- a/include/asm-x86/smp.h
+++ /dev/null
@@ -1,229 +0,0 @@
1#ifndef ASM_X86__SMP_H
2#define ASM_X86__SMP_H
3#ifndef __ASSEMBLY__
4#include <linux/cpumask.h>
5#include <linux/init.h>
6#include <asm/percpu.h>
7
8/*
9 * We need the APIC definitions automatically as part of 'smp.h'
10 */
11#ifdef CONFIG_X86_LOCAL_APIC
12# include <asm/mpspec.h>
13# include <asm/apic.h>
14# ifdef CONFIG_X86_IO_APIC
15# include <asm/io_apic.h>
16# endif
17#endif
18#include <asm/pda.h>
19#include <asm/thread_info.h>
20
21extern cpumask_t cpu_callout_map;
22extern cpumask_t cpu_initialized;
23extern cpumask_t cpu_callin_map;
24
25extern void (*mtrr_hook)(void);
26extern void zap_low_mappings(void);
27
28extern int __cpuinit get_local_pda(int cpu);
29
30extern int smp_num_siblings;
31extern unsigned int num_processors;
32extern cpumask_t cpu_initialized;
33
34DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
35DECLARE_PER_CPU(cpumask_t, cpu_core_map);
36DECLARE_PER_CPU(u16, cpu_llc_id);
37#ifdef CONFIG_X86_32
38DECLARE_PER_CPU(int, cpu_number);
39#endif
40
41DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
42DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
43
44/* Static state in head.S used to set up a CPU */
45extern struct {
46 void *sp;
47 unsigned short ss;
48} stack_start;
49
50struct smp_ops {
51 void (*smp_prepare_boot_cpu)(void);
52 void (*smp_prepare_cpus)(unsigned max_cpus);
53 void (*smp_cpus_done)(unsigned max_cpus);
54
55 void (*smp_send_stop)(void);
56 void (*smp_send_reschedule)(int cpu);
57
58 int (*cpu_up)(unsigned cpu);
59 int (*cpu_disable)(void);
60 void (*cpu_die)(unsigned int cpu);
61 void (*play_dead)(void);
62
63 void (*send_call_func_ipi)(cpumask_t mask);
64 void (*send_call_func_single_ipi)(int cpu);
65};
66
67/* Globals due to paravirt */
68extern void set_cpu_sibling_map(int cpu);
69
70#ifdef CONFIG_SMP
71#ifndef CONFIG_PARAVIRT
72#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
73#endif
74extern struct smp_ops smp_ops;
75
76static inline void smp_send_stop(void)
77{
78 smp_ops.smp_send_stop();
79}
80
81static inline void smp_prepare_boot_cpu(void)
82{
83 smp_ops.smp_prepare_boot_cpu();
84}
85
86static inline void smp_prepare_cpus(unsigned int max_cpus)
87{
88 smp_ops.smp_prepare_cpus(max_cpus);
89}
90
91static inline void smp_cpus_done(unsigned int max_cpus)
92{
93 smp_ops.smp_cpus_done(max_cpus);
94}
95
96static inline int __cpu_up(unsigned int cpu)
97{
98 return smp_ops.cpu_up(cpu);
99}
100
101static inline int __cpu_disable(void)
102{
103 return smp_ops.cpu_disable();
104}
105
106static inline void __cpu_die(unsigned int cpu)
107{
108 smp_ops.cpu_die(cpu);
109}
110
111static inline void play_dead(void)
112{
113 smp_ops.play_dead();
114}
115
116static inline void smp_send_reschedule(int cpu)
117{
118 smp_ops.smp_send_reschedule(cpu);
119}
120
121static inline void arch_send_call_function_single_ipi(int cpu)
122{
123 smp_ops.send_call_func_single_ipi(cpu);
124}
125
126static inline void arch_send_call_function_ipi(cpumask_t mask)
127{
128 smp_ops.send_call_func_ipi(mask);
129}
130
131void cpu_disable_common(void);
132void native_smp_prepare_boot_cpu(void);
133void native_smp_prepare_cpus(unsigned int max_cpus);
134void native_smp_cpus_done(unsigned int max_cpus);
135int native_cpu_up(unsigned int cpunum);
136int native_cpu_disable(void);
137void native_cpu_die(unsigned int cpu);
138void native_play_dead(void);
139void play_dead_common(void);
140
141void native_send_call_func_ipi(cpumask_t mask);
142void native_send_call_func_single_ipi(int cpu);
143
144extern void prefill_possible_map(void);
145
146void smp_store_cpu_info(int id);
147#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
148
149/* We don't mark CPUs online until __cpu_up(), so we need another measure */
150static inline int num_booting_cpus(void)
151{
152 return cpus_weight(cpu_callout_map);
153}
154#else
155static inline void prefill_possible_map(void)
156{
157}
158#endif /* CONFIG_SMP */
159
160extern unsigned disabled_cpus __cpuinitdata;
161
162#ifdef CONFIG_X86_32_SMP
163/*
164 * This function is needed by all SMP systems. It must _always_ be valid
165 * from the initial startup. We map APIC_BASE very early in page_setup(),
166 * so this is correct in the x86 case.
167 */
168#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
169extern int safe_smp_processor_id(void);
170
171#elif defined(CONFIG_X86_64_SMP)
172#define raw_smp_processor_id() read_pda(cpunumber)
173
174#define stack_smp_processor_id() \
175({ \
176 struct thread_info *ti; \
177 __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
178 ti->cpu; \
179})
180#define safe_smp_processor_id() smp_processor_id()
181
182#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
183#define cpu_physical_id(cpu) boot_cpu_physical_apicid
184#define safe_smp_processor_id() 0
185#define stack_smp_processor_id() 0
186#endif
187
188#ifdef CONFIG_X86_LOCAL_APIC
189
190#ifndef CONFIG_X86_64
191static inline int logical_smp_processor_id(void)
192{
193 /* we don't want to mark this access volatile - bad code generation */
194 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
195}
196
197#include <mach_apicdef.h>
198static inline unsigned int read_apic_id(void)
199{
200 unsigned int reg;
201
202 reg = *(u32 *)(APIC_BASE + APIC_ID);
203
204 return GET_APIC_ID(reg);
205}
206#endif
207
208
209# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
210extern int hard_smp_processor_id(void);
211# else
212#include <mach_apicdef.h>
213static inline int hard_smp_processor_id(void)
214{
215 /* we don't want to mark this access volatile - bad code generation */
216 return read_apic_id();
217}
218# endif /* APIC_DEFINITION */
219
220#else /* CONFIG_X86_LOCAL_APIC */
221
222# ifndef CONFIG_SMP
223# define hard_smp_processor_id() 0
224# endif
225
226#endif /* CONFIG_X86_LOCAL_APIC */
227
228#endif /* __ASSEMBLY__ */
229#endif /* ASM_X86__SMP_H */
diff --git a/include/asm-x86/socket.h b/include/asm-x86/socket.h
deleted file mode 100644
index db73274c83c3..000000000000
--- a/include/asm-x86/socket.h
+++ /dev/null
@@ -1,57 +0,0 @@
1#ifndef ASM_X86__SOCKET_H
2#define ASM_X86__SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
19#define SO_KEEPALIVE 9
20#define SO_OOBINLINE 10
21#define SO_NO_CHECK 11
22#define SO_PRIORITY 12
23#define SO_LINGER 13
24#define SO_BSDCOMPAT 14
25/* To add :#define SO_REUSEPORT 15 */
26#define SO_PASSCRED 16
27#define SO_PEERCRED 17
28#define SO_RCVLOWAT 18
29#define SO_SNDLOWAT 19
30#define SO_RCVTIMEO 20
31#define SO_SNDTIMEO 21
32
33/* Security levels - as per NRL IPv6 - don't actually do anything */
34#define SO_SECURITY_AUTHENTICATION 22
35#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
36#define SO_SECURITY_ENCRYPTION_NETWORK 24
37
38#define SO_BINDTODEVICE 25
39
40/* Socket filtering */
41#define SO_ATTACH_FILTER 26
42#define SO_DETACH_FILTER 27
43
44#define SO_PEERNAME 28
45#define SO_TIMESTAMP 29
46#define SCM_TIMESTAMP SO_TIMESTAMP
47
48#define SO_ACCEPTCONN 30
49
50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
52#define SO_TIMESTAMPNS 35
53#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
54
55#define SO_MARK 36
56
57#endif /* ASM_X86__SOCKET_H */
diff --git a/include/asm-x86/sockios.h b/include/asm-x86/sockios.h
deleted file mode 100644
index a006704fdc84..000000000000
--- a/include/asm-x86/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef ASM_X86__SOCKIOS_H
2#define ASM_X86__SOCKIOS_H
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif /* ASM_X86__SOCKIOS_H */
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h
deleted file mode 100644
index 38f8e6bc3186..000000000000
--- a/include/asm-x86/sparsemem.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef ASM_X86__SPARSEMEM_H
2#define ASM_X86__SPARSEMEM_H
3
4#ifdef CONFIG_SPARSEMEM
5/*
6 * generic non-linear memory support:
7 *
8 * 1) we will not split memory into more chunks than will fit into the flags
9 * field of the struct page
10 *
11 * SECTION_SIZE_BITS 2^n: size of each section
12 * MAX_PHYSADDR_BITS 2^n: max size of physical address space
13 * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space
14 *
15 */
16
17#ifdef CONFIG_X86_32
18# ifdef CONFIG_X86_PAE
19# define SECTION_SIZE_BITS 29
20# define MAX_PHYSADDR_BITS 36
21# define MAX_PHYSMEM_BITS 36
22# else
23# define SECTION_SIZE_BITS 26
24# define MAX_PHYSADDR_BITS 32
25# define MAX_PHYSMEM_BITS 32
26# endif
27#else /* CONFIG_X86_32 */
28# define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */
29# define MAX_PHYSADDR_BITS 44
30# define MAX_PHYSMEM_BITS 44
31#endif
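/*
 * For scale: SECTION_SIZE_BITS 27 gives 2^27-byte (128 MiB) sections,
 * and MAX_PHYSMEM_BITS 44 allows up to 2^44 bytes (16 TiB) of
 * physical memory on 64-bit.
 */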
32
33#endif /* CONFIG_SPARSEMEM */
34#endif /* ASM_X86__SPARSEMEM_H */
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
deleted file mode 100644
index 157ff7fab97a..000000000000
--- a/include/asm-x86/spinlock.h
+++ /dev/null
@@ -1,364 +0,0 @@
1#ifndef ASM_X86__SPINLOCK_H
2#define ASM_X86__SPINLOCK_H
3
4#include <asm/atomic.h>
5#include <asm/rwlock.h>
6#include <asm/page.h>
7#include <asm/processor.h>
8#include <linux/compiler.h>
9#include <asm/paravirt.h>
10/*
11 * Your basic SMP spinlocks, allowing only a single CPU anywhere
12 *
13 * Simple spin lock operations. There are two variants, one clears IRQs
14 * on the local processor, one does not.
15 *
16 * These are fair FIFO ticket locks, which are currently limited to 256
17 * CPUs.
18 *
19 * (the type definitions are in asm/spinlock_types.h)
20 */
21
22#ifdef CONFIG_X86_32
23# define LOCK_PTR_REG "a"
24# define REG_PTR_MODE "k"
25#else
26# define LOCK_PTR_REG "D"
27# define REG_PTR_MODE "q"
28#endif
29
30#if defined(CONFIG_X86_32) && \
31 (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
32/*
33 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
34 * (PPro errata 66, 92)
35 */
36# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
37#else
38# define UNLOCK_LOCK_PREFIX
39#endif
40
41/*
42 * Ticket locks are conceptually two parts, one indicating the current head of
43 * the queue, and the other indicating the current tail. The lock is acquired
44 * by atomically noting the tail and incrementing it by one (thus adding
45 * ourselves to the queue and noting our position), then waiting until the head
46 * becomes equal to the initial value of the tail.
47 *
48 * We use an xadd covering *both* parts of the lock, to increment the tail and
49 * also load the position of the head, which takes care of memory ordering
50 * issues and should be optimal for the uncontended case. Note the tail must be
51 * in the high part, because a wide xadd increment of the low part would carry
52 * up and contaminate the high part.
53 *
54 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
55 * save some instructions and make the code more elegant. There really isn't
56 * much between them in performance though, especially as locks are out of line.
57 */
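
As a reading aid, the protocol just described can be sketched in plain C. This is an illustration of the head/tail handshake only, not the kernel's types, and it ignores the atomicity that the xadd in the real code provides:

/* Illustrative only: the real lock packs both fields into one word. */
struct ticket_sketch {
	unsigned char head;	/* ticket currently being served */
	unsigned char tail;	/* next ticket to hand out */
};

static void ticket_sketch_lock(struct ticket_sketch *t)
{
	unsigned char ticket = t->tail++;	/* an atomic xadd in reality */

	while (t->head != ticket)
		;	/* the asm spins with rep;nop (pause) here */
}

static void ticket_sketch_unlock(struct ticket_sketch *t)
{
	t->head++;	/* serve the next waiter */
}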
58#if (NR_CPUS < 256)
59#define TICKET_SHIFT 8
60
61static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
62{
63 short inc = 0x0100;
64
65 asm volatile (
66 LOCK_PREFIX "xaddw %w0, %1\n"
67 "1:\t"
68 "cmpb %h0, %b0\n\t"
69 "je 2f\n\t"
70 "rep ; nop\n\t"
71 "movb %1, %b0\n\t"
72 /* don't need lfence here, because loads are in-order */
73 "jmp 1b\n"
74 "2:"
75 : "+Q" (inc), "+m" (lock->slock)
76 :
77 : "memory", "cc");
78}
79
80static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
81{
82 int tmp, new;
83
84 asm volatile("movzwl %2, %0\n\t"
85 "cmpb %h0,%b0\n\t"
86 "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
87 "jne 1f\n\t"
88 LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
89 "1:"
90 "sete %b1\n\t"
91 "movzbl %b1,%0\n\t"
92 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
93 :
94 : "memory", "cc");
95
96 return tmp;
97}
98
99static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
100{
101 asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
102 : "+m" (lock->slock)
103 :
104 : "memory", "cc");
105}
106#else
107#define TICKET_SHIFT 16
108
109static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
110{
111 int inc = 0x00010000;
112 int tmp;
113
114 asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
115 "movzwl %w0, %2\n\t"
116 "shrl $16, %0\n\t"
117 "1:\t"
118 "cmpl %0, %2\n\t"
119 "je 2f\n\t"
120 "rep ; nop\n\t"
121 "movzwl %1, %2\n\t"
122 /* don't need lfence here, because loads are in-order */
123 "jmp 1b\n"
124 "2:"
125 : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
126 :
127 : "memory", "cc");
128}
129
130static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
131{
132 int tmp;
133 int new;
134
135 asm volatile("movl %2,%0\n\t"
136 "movl %0,%1\n\t"
137 "roll $16, %0\n\t"
138 "cmpl %0,%1\n\t"
139 "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
140 "jne 1f\n\t"
141 LOCK_PREFIX "cmpxchgl %1,%2\n\t"
142 "1:"
143 "sete %b1\n\t"
144 "movzbl %b1,%0\n\t"
145 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
146 :
147 : "memory", "cc");
148
149 return tmp;
150}
151
152static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
153{
154 asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
155 : "+m" (lock->slock)
156 :
157 : "memory", "cc");
158}
159#endif
160
161static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
162{
163 int tmp = ACCESS_ONCE(lock->slock);
164
165 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
166}
167
168static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
169{
170 int tmp = ACCESS_ONCE(lock->slock);
171
172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
173}
174
175#ifdef CONFIG_PARAVIRT
176/*
177 * Define virtualization-friendly old-style lock byte lock, for use in
178 * pv_lock_ops if desired.
179 *
180 * This differs from the pre-2.6.24 spinlock by always using xchgb
181 * rather than decb to take the lock; this allows it to use a
182 * zero-initialized lock structure. It also maintains a 1-byte
183 * contention counter, so that we can implement
184 * __byte_spin_is_contended.
185 */
186struct __byte_spinlock {
187 s8 lock;
188 s8 spinners;
189};
190
191static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
192{
193 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
194 return bl->lock != 0;
195}
196
197static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
198{
199 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
200 return bl->spinners != 0;
201}
202
203static inline void __byte_spin_lock(raw_spinlock_t *lock)
204{
205 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
206 s8 val = 1;
207
208 asm("1: xchgb %1, %0\n"
209 " test %1,%1\n"
210 " jz 3f\n"
211 " " LOCK_PREFIX "incb %2\n"
212 "2: rep;nop\n"
213 " cmpb $1, %0\n"
214 " je 2b\n"
215 " " LOCK_PREFIX "decb %2\n"
216 " jmp 1b\n"
217 "3:"
218 : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
219}
220
221static inline int __byte_spin_trylock(raw_spinlock_t *lock)
222{
223 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
224 u8 old = 1;
225
226 asm("xchgb %1,%0"
227 : "+m" (bl->lock), "+q" (old) : : "memory");
228
229 return old == 0;
230}
231
232static inline void __byte_spin_unlock(raw_spinlock_t *lock)
233{
234 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
235 smp_wmb();
236 bl->lock = 0;
237}
238#else /* !CONFIG_PARAVIRT */
239static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
240{
241 return __ticket_spin_is_locked(lock);
242}
243
244static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
245{
246 return __ticket_spin_is_contended(lock);
247}
248
249static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
250{
251 __ticket_spin_lock(lock);
252}
253
254static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
255{
256 return __ticket_spin_trylock(lock);
257}
258
259static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
260{
261 __ticket_spin_unlock(lock);
262}
263
264static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
265 unsigned long flags)
266{
267 __raw_spin_lock(lock);
268}
269
270#endif /* CONFIG_PARAVIRT */
271
272static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
273{
274 while (__raw_spin_is_locked(lock))
275 cpu_relax();
276}
277
278/*
279 * Read-write spinlocks, allowing multiple readers
280 * but only one writer.
281 *
282 * NOTE! it is quite common to have readers in interrupts
283 * but no interrupt writers. For those circumstances we
284 * can "mix" irq-safe locks - any writer needs to get a
285 * irq-safe write-lock, but readers can get non-irqsafe
286 * read-locks.
287 *
288 * On x86, we implement read-write locks as a 32-bit counter
289 * with the high bit (sign) being the "contended" bit.
290 */
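/*
 * Counter convention, for reference: the lock starts at RW_LOCK_BIAS.
 * Each reader subtracts 1, so the count stays positive while only
 * readers hold the lock; a writer subtracts the whole RW_LOCK_BIAS,
 * so the count reaches 0 only when a single writer (and no reader)
 * holds it.  The trylock routines below undo their subtraction when
 * they lose the race.
 */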
291
292/**
293 * read_can_lock - would read_trylock() succeed?
294 * @lock: the rwlock in question.
295 */
296static inline int __raw_read_can_lock(raw_rwlock_t *lock)
297{
298 return (int)(lock)->lock > 0;
299}
300
301/**
302 * write_can_lock - would write_trylock() succeed?
303 * @lock: the rwlock in question.
304 */
305static inline int __raw_write_can_lock(raw_rwlock_t *lock)
306{
307 return (lock)->lock == RW_LOCK_BIAS;
308}
309
310static inline void __raw_read_lock(raw_rwlock_t *rw)
311{
312 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
313 "jns 1f\n"
314 "call __read_lock_failed\n\t"
315 "1:\n"
316 ::LOCK_PTR_REG (rw) : "memory");
317}
318
319static inline void __raw_write_lock(raw_rwlock_t *rw)
320{
321 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
322 "jz 1f\n"
323 "call __write_lock_failed\n\t"
324 "1:\n"
325 ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
326}
327
328static inline int __raw_read_trylock(raw_rwlock_t *lock)
329{
330 atomic_t *count = (atomic_t *)lock;
331
332 atomic_dec(count);
333 if (atomic_read(count) >= 0)
334 return 1;
335 atomic_inc(count);
336 return 0;
337}
338
339static inline int __raw_write_trylock(raw_rwlock_t *lock)
340{
341 atomic_t *count = (atomic_t *)lock;
342
343 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
344 return 1;
345 atomic_add(RW_LOCK_BIAS, count);
346 return 0;
347}
348
349static inline void __raw_read_unlock(raw_rwlock_t *rw)
350{
351 asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
352}
353
354static inline void __raw_write_unlock(raw_rwlock_t *rw)
355{
356 asm volatile(LOCK_PREFIX "addl %1, %0"
357 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
358}
359
360#define _raw_spin_relax(lock) cpu_relax()
361#define _raw_read_relax(lock) cpu_relax()
362#define _raw_write_relax(lock) cpu_relax()
363
364#endif /* ASM_X86__SPINLOCK_H */
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
deleted file mode 100644
index 6aa9b562c508..000000000000
--- a/include/asm-x86/spinlock_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef ASM_X86__SPINLOCK_TYPES_H
2#define ASM_X86__SPINLOCK_TYPES_H
3
4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly"
6#endif
7
8typedef struct raw_spinlock {
9 unsigned int slock;
10} raw_spinlock_t;
11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
13
14typedef struct {
15 unsigned int lock;
16} raw_rwlock_t;
17
18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19
20#endif /* ASM_X86__SPINLOCK_TYPES_H */
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h
deleted file mode 100644
index 5363e4f7e1cd..000000000000
--- a/include/asm-x86/srat.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Some of the code in this file has been gleaned from the 64 bit
3 * discontigmem support code base.
4 *
5 * Copyright (C) 2002, IBM Corp.
6 *
7 * All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
17 * NON INFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Send feedback to Pat Gaughen <gone@us.ibm.com>
25 */
26
27#ifndef ASM_X86__SRAT_H
28#define ASM_X86__SRAT_H
29
30#ifdef CONFIG_ACPI_NUMA
31extern int get_memcfg_from_srat(void);
32#else
33static inline int get_memcfg_from_srat(void)
34{
35 return 0;
36}
37#endif
38
39#endif /* ASM_X86__SRAT_H */
diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h
deleted file mode 100644
index f43517e28532..000000000000
--- a/include/asm-x86/stacktrace.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef ASM_X86__STACKTRACE_H
2#define ASM_X86__STACKTRACE_H
3
4extern int kstack_depth_to_print;
5
6/* Generic stack tracer with callbacks */
7
8struct stacktrace_ops {
9 void (*warning)(void *data, char *msg);
10 /* msg must contain %s for the symbol */
11 void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
12 void (*address)(void *data, unsigned long address, int reliable);
13 /* On negative return stop dumping */
14 int (*stack)(void *data, char *name);
15};
16
17void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18 unsigned long *stack, unsigned long bp,
19 const struct stacktrace_ops *ops, void *data);
20
21#endif /* ASM_X86__STACKTRACE_H */
diff --git a/include/asm-x86/stat.h b/include/asm-x86/stat.h
deleted file mode 100644
index 1e120f628905..000000000000
--- a/include/asm-x86/stat.h
+++ /dev/null
@@ -1,114 +0,0 @@
1#ifndef ASM_X86__STAT_H
2#define ASM_X86__STAT_H
3
4#define STAT_HAVE_NSEC 1
5
6#ifdef __i386__
7struct stat {
8 unsigned long st_dev;
9 unsigned long st_ino;
10 unsigned short st_mode;
11 unsigned short st_nlink;
12 unsigned short st_uid;
13 unsigned short st_gid;
14 unsigned long st_rdev;
15 unsigned long st_size;
16 unsigned long st_blksize;
17 unsigned long st_blocks;
18 unsigned long st_atime;
19 unsigned long st_atime_nsec;
20 unsigned long st_mtime;
21 unsigned long st_mtime_nsec;
22 unsigned long st_ctime;
23 unsigned long st_ctime_nsec;
24 unsigned long __unused4;
25 unsigned long __unused5;
26};
27
28#define STAT64_HAS_BROKEN_ST_INO 1
29
30/* This matches struct stat64 in glibc2.1, hence the absolutely
31 * insane amounts of padding around dev_t's.
32 */
33struct stat64 {
34 unsigned long long st_dev;
35 unsigned char __pad0[4];
36
37 unsigned long __st_ino;
38
39 unsigned int st_mode;
40 unsigned int st_nlink;
41
42 unsigned long st_uid;
43 unsigned long st_gid;
44
45 unsigned long long st_rdev;
46 unsigned char __pad3[4];
47
48 long long st_size;
49 unsigned long st_blksize;
50
51 /* Number of 512-byte blocks allocated. */
52 unsigned long long st_blocks;
53
54 unsigned long st_atime;
55 unsigned long st_atime_nsec;
56
57 unsigned long st_mtime;
58 unsigned int st_mtime_nsec;
59
60 unsigned long st_ctime;
61 unsigned long st_ctime_nsec;
62
63 unsigned long long st_ino;
64};
65
66#else /* __i386__ */
67
68struct stat {
69 unsigned long st_dev;
70 unsigned long st_ino;
71 unsigned long st_nlink;
72
73 unsigned int st_mode;
74 unsigned int st_uid;
75 unsigned int st_gid;
76 unsigned int __pad0;
77 unsigned long st_rdev;
78 long st_size;
79 long st_blksize;
80 long st_blocks; /* Number of 512-byte blocks allocated. */
81
82 unsigned long st_atime;
83 unsigned long st_atime_nsec;
84 unsigned long st_mtime;
85 unsigned long st_mtime_nsec;
86 unsigned long st_ctime;
87 unsigned long st_ctime_nsec;
88 long __unused[3];
89};
90#endif
91
92/* for 32-bit emulation and 32-bit kernels */
93struct __old_kernel_stat {
94 unsigned short st_dev;
95 unsigned short st_ino;
96 unsigned short st_mode;
97 unsigned short st_nlink;
98 unsigned short st_uid;
99 unsigned short st_gid;
100 unsigned short st_rdev;
101#ifdef __i386__
102 unsigned long st_size;
103 unsigned long st_atime;
104 unsigned long st_mtime;
105 unsigned long st_ctime;
106#else
107 unsigned int st_size;
108 unsigned int st_atime;
109 unsigned int st_mtime;
110 unsigned int st_ctime;
111#endif
112};
113
114#endif /* ASM_X86__STAT_H */
diff --git a/include/asm-x86/statfs.h b/include/asm-x86/statfs.h
deleted file mode 100644
index ca5dc19dd461..000000000000
--- a/include/asm-x86/statfs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef ASM_X86__STATFS_H
2#define ASM_X86__STATFS_H
3
4/*
5 * We need compat_statfs64 to be packed, because the i386 ABI won't
6 * add padding at the end to bring it to a multiple of 8 bytes, but
7 * the x86_64 ABI will.
8 */
9#define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4)))
10
11#include <asm-generic/statfs.h>
12#endif /* ASM_X86__STATFS_H */
diff --git a/include/asm-x86/string.h b/include/asm-x86/string.h
deleted file mode 100644
index 6dfd6d9373a0..000000000000
--- a/include/asm-x86/string.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "string_32.h"
3#else
4# include "string_64.h"
5#endif
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
deleted file mode 100644
index 487843ed245a..000000000000
--- a/include/asm-x86/string_32.h
+++ /dev/null
@@ -1,326 +0,0 @@
1#ifndef ASM_X86__STRING_32_H
2#define ASM_X86__STRING_32_H
3
4#ifdef __KERNEL__
5
6/* Let gcc decide whether to inline or use the out-of-line functions */
7
8#define __HAVE_ARCH_STRCPY
9extern char *strcpy(char *dest, const char *src);
10
11#define __HAVE_ARCH_STRNCPY
12extern char *strncpy(char *dest, const char *src, size_t count);
13
14#define __HAVE_ARCH_STRCAT
15extern char *strcat(char *dest, const char *src);
16
17#define __HAVE_ARCH_STRNCAT
18extern char *strncat(char *dest, const char *src, size_t count);
19
20#define __HAVE_ARCH_STRCMP
21extern int strcmp(const char *cs, const char *ct);
22
23#define __HAVE_ARCH_STRNCMP
24extern int strncmp(const char *cs, const char *ct, size_t count);
25
26#define __HAVE_ARCH_STRCHR
27extern char *strchr(const char *s, int c);
28
29#define __HAVE_ARCH_STRLEN
30extern size_t strlen(const char *s);
31
32static __always_inline void *__memcpy(void *to, const void *from, size_t n)
33{
34 int d0, d1, d2;
35 asm volatile("rep ; movsl\n\t"
36 "movl %4,%%ecx\n\t"
37 "andl $3,%%ecx\n\t"
38 "jz 1f\n\t"
39 "rep ; movsb\n\t"
40 "1:"
41 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
42 : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
43 : "memory");
44 return to;
45}
46
47/*
48 * This looks ugly, but the compiler can optimize it totally,
49 * as the count is constant.
50 */
51static __always_inline void *__constant_memcpy(void *to, const void *from,
52 size_t n)
53{
54 long esi, edi;
55 if (!n)
56 return to;
57
58 switch (n) {
59 case 1:
60 *(char *)to = *(char *)from;
61 return to;
62 case 2:
63 *(short *)to = *(short *)from;
64 return to;
65 case 4:
66 *(int *)to = *(int *)from;
67 return to;
68
69 case 3:
70 *(short *)to = *(short *)from;
71 *((char *)to + 2) = *((char *)from + 2);
72 return to;
73 case 5:
74 *(int *)to = *(int *)from;
75 *((char *)to + 4) = *((char *)from + 4);
76 return to;
77 case 6:
78 *(int *)to = *(int *)from;
79 *((short *)to + 2) = *((short *)from + 2);
80 return to;
81 case 8:
82 *(int *)to = *(int *)from;
83 *((int *)to + 1) = *((int *)from + 1);
84 return to;
85 }
86
87 esi = (long)from;
88 edi = (long)to;
89 if (n >= 5 * 4) {
90 /* large block: use rep prefix */
91 int ecx;
92 asm volatile("rep ; movsl"
93 : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
94 : "0" (n / 4), "1" (edi), "2" (esi)
95 : "memory"
96 );
97 } else {
98 /* small block: don't clobber ecx + smaller code */
99 if (n >= 4 * 4)
100 asm volatile("movsl"
101 : "=&D"(edi), "=&S"(esi)
102 : "0"(edi), "1"(esi)
103 : "memory");
104 if (n >= 3 * 4)
105 asm volatile("movsl"
106 : "=&D"(edi), "=&S"(esi)
107 : "0"(edi), "1"(esi)
108 : "memory");
109 if (n >= 2 * 4)
110 asm volatile("movsl"
111 : "=&D"(edi), "=&S"(esi)
112 : "0"(edi), "1"(esi)
113 : "memory");
114 if (n >= 1 * 4)
115 asm volatile("movsl"
116 : "=&D"(edi), "=&S"(esi)
117 : "0"(edi), "1"(esi)
118 : "memory");
119 }
120 switch (n % 4) {
121 /* tail */
122 case 0:
123 return to;
124 case 1:
125 asm volatile("movsb"
126 : "=&D"(edi), "=&S"(esi)
127 : "0"(edi), "1"(esi)
128 : "memory");
129 return to;
130 case 2:
131 asm volatile("movsw"
132 : "=&D"(edi), "=&S"(esi)
133 : "0"(edi), "1"(esi)
134 : "memory");
135 return to;
136 default:
137 asm volatile("movsw\n\tmovsb"
138 : "=&D"(edi), "=&S"(esi)
139 : "0"(edi), "1"(esi)
140 : "memory");
141 return to;
142 }
143}
144
145#define __HAVE_ARCH_MEMCPY
146
147#ifdef CONFIG_X86_USE_3DNOW
148
149#include <asm/mmx.h>
150
151/*
152 * This CPU favours 3DNow strongly (e.g. AMD Athlon)
153 */
154
155static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
156{
157 if (len < 512)
158 return __constant_memcpy(to, from, len);
159 return _mmx_memcpy(to, from, len);
160}
161
162static inline void *__memcpy3d(void *to, const void *from, size_t len)
163{
164 if (len < 512)
165 return __memcpy(to, from, len);
166 return _mmx_memcpy(to, from, len);
167}
168
169#define memcpy(t, f, n) \
170 (__builtin_constant_p((n)) \
171 ? __constant_memcpy3d((t), (f), (n)) \
172 : __memcpy3d((t), (f), (n)))
173
174#else
175
176/*
177 * No 3DNow!
178 */
179
180#define memcpy(t, f, n) \
181 (__builtin_constant_p((n)) \
182 ? __constant_memcpy((t), (f), (n)) \
183 : __memcpy((t), (f), (n)))
184
185#endif
186
187#define __HAVE_ARCH_MEMMOVE
188void *memmove(void *dest, const void *src, size_t n);
189
190#define memcmp __builtin_memcmp
191
192#define __HAVE_ARCH_MEMCHR
193extern void *memchr(const void *cs, int c, size_t count);
194
195static inline void *__memset_generic(void *s, char c, size_t count)
196{
197 int d0, d1;
198 asm volatile("rep\n\t"
199 "stosb"
200 : "=&c" (d0), "=&D" (d1)
201 : "a" (c), "1" (s), "0" (count)
202 : "memory");
203 return s;
204}
205
206/* we might want to write optimized versions of these later */
207#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
208
209/*
210 * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
211 * things 32 bits at a time even when we don't know the size of the
212 * area at compile-time.
213 */
214static __always_inline
215void *__constant_c_memset(void *s, unsigned long c, size_t count)
216{
217 int d0, d1;
218 asm volatile("rep ; stosl\n\t"
219 "testb $2,%b3\n\t"
220 "je 1f\n\t"
221 "stosw\n"
222 "1:\ttestb $1,%b3\n\t"
223 "je 2f\n\t"
224 "stosb\n"
225 "2:"
226 : "=&c" (d0), "=&D" (d1)
227 : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
228 : "memory");
229 return s;
230}
231
232/* Added by Gertjan van Wingerde to make the minix and sysv modules work */
233#define __HAVE_ARCH_STRNLEN
234extern size_t strnlen(const char *s, size_t count);
235/* end of additional stuff */
236
237#define __HAVE_ARCH_STRSTR
238extern char *strstr(const char *cs, const char *ct);
239
240/*
241 * This looks horribly ugly, but the compiler can optimize it totally,
242 * as by now we know that both pattern and count are constant.
243 */
244static __always_inline
245void *__constant_c_and_count_memset(void *s, unsigned long pattern,
246 size_t count)
247{
248 switch (count) {
249 case 0:
250 return s;
251 case 1:
252 *(unsigned char *)s = pattern & 0xff;
253 return s;
254 case 2:
255 *(unsigned short *)s = pattern & 0xffff;
256 return s;
257 case 3:
258 *(unsigned short *)s = pattern & 0xffff;
259 *((unsigned char *)s + 2) = pattern & 0xff;
260 return s;
261 case 4:
262 *(unsigned long *)s = pattern;
263 return s;
264 }
265
266#define COMMON(x) \
267 asm volatile("rep ; stosl" \
268 x \
269 : "=&c" (d0), "=&D" (d1) \
270 : "a" (eax), "0" (count/4), "1" ((long)s) \
271 : "memory")
272
273 {
274 int d0, d1;
275#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
276 /* Workaround for broken gcc 4.0 */
277 register unsigned long eax asm("%eax") = pattern;
278#else
279 unsigned long eax = pattern;
280#endif
281
282 switch (count % 4) {
283 case 0:
284 COMMON("");
285 return s;
286 case 1:
287 COMMON("\n\tstosb");
288 return s;
289 case 2:
290 COMMON("\n\tstosw");
291 return s;
292 default:
293 COMMON("\n\tstosw\n\tstosb");
294 return s;
295 }
296 }
297
298#undef COMMON
299}
300
301#define __constant_c_x_memset(s, c, count) \
302 (__builtin_constant_p(count) \
303 ? __constant_c_and_count_memset((s), (c), (count)) \
304 : __constant_c_memset((s), (c), (count)))
305
306#define __memset(s, c, count) \
307 (__builtin_constant_p(count) \
308 ? __constant_count_memset((s), (c), (count)) \
309 : __memset_generic((s), (c), (count)))
310
311#define __HAVE_ARCH_MEMSET
312#define memset(s, c, count) \
313 (__builtin_constant_p(c) \
314 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
315 (count)) \
316 : __memset((s), (c), (count)))
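/*
 * The 0x01010101UL multiply above replicates the fill byte into every
 * byte of a 32-bit word: e.g. memset(p, 0xab, n) feeds the pattern
 * 0xabababab to the word-at-a-time stosl loops.
 */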
317
318/*
319 * find the first occurrence of byte 'c', or 1 past the area if none
320 */
321#define __HAVE_ARCH_MEMSCAN
322extern void *memscan(void *addr, int c, size_t size);
323
324#endif /* __KERNEL__ */
325
326#endif /* ASM_X86__STRING_32_H */
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h
deleted file mode 100644
index a2add11d3b66..000000000000
--- a/include/asm-x86/string_64.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef ASM_X86__STRING_64_H
2#define ASM_X86__STRING_64_H
3
4#ifdef __KERNEL__
5
6/* Written 2002 by Andi Kleen */
7
8/* Only used for special circumstances. Stolen from i386/string.h */
9static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
10{
11 unsigned long d0, d1, d2;
12 asm volatile("rep ; movsl\n\t"
13 "testb $2,%b4\n\t"
14 "je 1f\n\t"
15 "movsw\n"
16 "1:\ttestb $1,%b4\n\t"
17 "je 2f\n\t"
18 "movsb\n"
19 "2:"
20 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
21 : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
22 : "memory");
23 return to;
24}
25
26/* Even with __builtin_ the compiler may decide to use the out-of-line
27 function. */
28
29#define __HAVE_ARCH_MEMCPY 1
30#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
31extern void *memcpy(void *to, const void *from, size_t len);
32#else
33extern void *__memcpy(void *to, const void *from, size_t len);
34#define memcpy(dst, src, len) \
35({ \
36 size_t __len = (len); \
37 void *__ret; \
38 if (__builtin_constant_p(len) && __len >= 64) \
39 __ret = __memcpy((dst), (src), __len); \
40 else \
41 __ret = __builtin_memcpy((dst), (src), __len); \
42 __ret; \
43})
44#endif
45
46#define __HAVE_ARCH_MEMSET
47void *memset(void *s, int c, size_t n);
48
49#define __HAVE_ARCH_MEMMOVE
50void *memmove(void *dest, const void *src, size_t count);
51
52int memcmp(const void *cs, const void *ct, size_t count);
53size_t strlen(const char *s);
54char *strcpy(char *dest, const char *src);
55char *strcat(char *dest, const char *src);
56int strcmp(const char *cs, const char *ct);
57
58#endif /* __KERNEL__ */
59
60#endif /* ASM_X86__STRING_64_H */
diff --git a/include/asm-x86/summit/apic.h b/include/asm-x86/summit/apic.h
deleted file mode 100644
index 9b3070f1c2ac..000000000000
--- a/include/asm-x86/summit/apic.h
+++ /dev/null
@@ -1,184 +0,0 @@
1#ifndef __ASM_SUMMIT_APIC_H
2#define __ASM_SUMMIT_APIC_H
3
4#include <asm/smp.h>
5
6#define esr_disable (1)
7#define NO_BALANCE_IRQ (0)
8
9/* In clustered mode, the high nibble of APIC ID is a cluster number.
10 * The low nibble is a 4-bit bitmap. */
11#define XAPIC_DEST_CPUS_SHIFT 4
12#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
13#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
14
15#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
16
17static inline cpumask_t target_cpus(void)
18{
19 /* CPU_MASK_ALL (0xff) has undefined behaviour with
20 * dest_LowestPrio mode logical clustered apic interrupt routing.
21 * Just start on cpu 0. IRQ balancing will spread load
22 */
23 return cpumask_of_cpu(0);
24}
25
26#define INT_DELIVERY_MODE (dest_LowestPrio)
27#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
28
29static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
30{
31 return 0;
32}
33
34/* we don't use the phys_cpu_present_map to indicate apicid presence */
35static inline unsigned long check_apicid_present(int bit)
36{
37 return 1;
38}
39
40#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
41
42extern u8 cpu_2_logical_apicid[];
43
44static inline void init_apic_ldr(void)
45{
46 unsigned long val, id;
47 int count = 0;
48 u8 my_id = (u8)hard_smp_processor_id();
49 u8 my_cluster = (u8)apicid_cluster(my_id);
50#ifdef CONFIG_SMP
51 u8 lid;
52 int i;
53
54 /* Create logical APIC IDs by counting CPUs already in cluster. */
55 for (count = 0, i = NR_CPUS; --i >= 0; ) {
56 lid = cpu_2_logical_apicid[i];
57 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
58 ++count;
59 }
60#endif
61 /* We only have a 4-wide bitmap in cluster mode. If a deranged
62 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
63 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
64 id = my_cluster | (1UL << count);
65 apic_write(APIC_DFR, APIC_DFR_VALUE);
66 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
67 val |= SET_APIC_LOGICAL_ID(id);
68 apic_write(APIC_LDR, val);
69}
70
71static inline int multi_timer_check(int apic, int irq)
72{
73 return 0;
74}
75
76static inline int apic_id_registered(void)
77{
78 return 1;
79}
80
81static inline void setup_apic_routing(void)
82{
83 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
84 nr_ioapics);
85}
86
87static inline int apicid_to_node(int logical_apicid)
88{
89#ifdef CONFIG_SMP
90 return apicid_2_node[hard_smp_processor_id()];
91#else
92 return 0;
93#endif
94}
95
96/* Mapping from cpu number to logical apicid */
97static inline int cpu_to_logical_apicid(int cpu)
98{
99#ifdef CONFIG_SMP
100 if (cpu >= NR_CPUS)
101 return BAD_APICID;
102 return (int)cpu_2_logical_apicid[cpu];
103#else
104 return logical_smp_processor_id();
105#endif
106}
107
108static inline int cpu_present_to_apicid(int mps_cpu)
109{
110 if (mps_cpu < NR_CPUS)
111 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
112 else
113 return BAD_APICID;
114}
115
116static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
117{
118	/* For clustered systems we don't have a good way to do this yet - hack */
119 return physids_promote(0x0F);
120}
121
122static inline physid_mask_t apicid_to_cpu_present(int apicid)
123{
124 return physid_mask_of_physid(0);
125}
126
127static inline void setup_portio_remap(void)
128{
129}
130
131static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
132{
133 return 1;
134}
135
136static inline void enable_apic_mode(void)
137{
138}
139
140static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
141{
142 int num_bits_set;
143 int cpus_found = 0;
144 int cpu;
145 int apicid;
146
147 num_bits_set = cpus_weight(cpumask);
148 /* Return id to all */
149 if (num_bits_set == NR_CPUS)
150 return (int) 0xFF;
151 /*
152	 * The CPUs in the mask must all be in the same APIC cluster.  If they
153	 * are not, return the default value of TARGET_CPUS.
154 */
155 cpu = first_cpu(cpumask);
156 apicid = cpu_to_logical_apicid(cpu);
157 while (cpus_found < num_bits_set) {
158 if (cpu_isset(cpu, cpumask)) {
159 int new_apicid = cpu_to_logical_apicid(cpu);
160 if (apicid_cluster(apicid) !=
161 apicid_cluster(new_apicid)){
162 printk ("%s: Not a valid mask!\n", __func__);
163 return 0xFF;
164 }
165 apicid = apicid | new_apicid;
166 cpus_found++;
167 }
168 cpu++;
169 }
170 return apicid;
171}
172
173/* cpuid returns the value latched in the HW at reset, not the APIC ID
174 * register's value. For any box whose BIOS changes APIC IDs, like
175 * clustered APIC systems, we must use hard_smp_processor_id.
176 *
177 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
178 */
179static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
180{
181 return hard_smp_processor_id() >> index_msb;
182}
183
184#endif /* __ASM_SUMMIT_APIC_H */
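The ID-assignment logic in init_apic_ldr() above reduces to: count how many CPUs already occupy my cluster, then claim the next bit of the 4-bit per-cluster bitmap. A user-space sketch under that reading (all names hypothetical; BAD_ID stands in for BAD_APICID):

#include <assert.h>
#include <stdio.h>

#define CPUS_SHIFT	4			/* low nibble: 4-bit bitmap */
#define CLUSTER_MASK	(((1u << CPUS_SHIFT) - 1) << CPUS_SHIFT)
#define BAD_ID		0xffu			/* stands in for BAD_APICID */

static unsigned cluster_of(unsigned id)
{
	return id & CLUSTER_MASK;
}

/* Count CPUs already assigned in my cluster, then claim the next bit. */
static unsigned alloc_logical_id(const unsigned *assigned, int ncpus,
				 unsigned my_phys_id)
{
	unsigned my_cluster = cluster_of(my_phys_id);
	int i, count = 0;

	for (i = 0; i < ncpus; i++)
		if (assigned[i] != BAD_ID &&
		    cluster_of(assigned[i]) == my_cluster)
			count++;

	assert(count < CPUS_SHIFT);	/* mirrors the BUG_ON() above */
	return my_cluster | (1u << count);
}

int main(void)
{
	unsigned ids[4] = { 0x11, 0x12, BAD_ID, BAD_ID };

	/* Two CPUs already hold bits in cluster 0x10, so we get bit 2. */
	printf("%#04x\n", alloc_logical_id(ids, 4, 0x13));	/* 0x14 */
	return 0;
}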
diff --git a/include/asm-x86/summit/apicdef.h b/include/asm-x86/summit/apicdef.h
deleted file mode 100644
index f3fbca1f61c1..000000000000
--- a/include/asm-x86/summit/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_SUMMIT_APICDEF_H
2#define __ASM_SUMMIT_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (x>>24)&0xFF;
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-x86/summit/ipi.h b/include/asm-x86/summit/ipi.h
deleted file mode 100644
index 53bd1e7bd7b4..000000000000
--- a/include/asm-x86/summit/ipi.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __ASM_SUMMIT_IPI_H
2#define __ASM_SUMMIT_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18}
19
20static inline void send_IPI_all(int vector)
21{
22 send_IPI_mask(cpu_online_map, vector);
23}
24
25#endif /* __ASM_SUMMIT_IPI_H */
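send_IPI_allbutself() above is the classic "everyone but me" pattern. A sketch with a plain word-sized bitmask standing in for cpumask_t (hypothetical helpers; a real cpumask_t can be wider than one word):

#include <stdio.h>

static void send_ipi_mask(unsigned long mask, int vector)
{
	printf("IPI vector %d -> mask %#lx\n", vector, mask);
}

/* Copy the online mask, drop our own bit, and only send if anyone is
 * left -- the same three steps as send_IPI_allbutself() above. */
static void send_ipi_allbutself(unsigned long online, int self, int vector)
{
	unsigned long mask = online & ~(1UL << self);

	if (mask)
		send_ipi_mask(mask, vector);
}

int main(void)
{
	send_ipi_allbutself(0x0f, 2, 0xfd);	/* -> mask 0xb */
	return 0;
}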
diff --git a/include/asm-x86/summit/mpparse.h b/include/asm-x86/summit/mpparse.h
deleted file mode 100644
index 013ce6fab2d5..000000000000
--- a/include/asm-x86/summit/mpparse.h
+++ /dev/null
@@ -1,109 +0,0 @@
1#ifndef __ASM_SUMMIT_MPPARSE_H
2#define __ASM_SUMMIT_MPPARSE_H
3
4#include <asm/tsc.h>
5
6extern int use_cyclone;
7
8#ifdef CONFIG_X86_SUMMIT_NUMA
9extern void setup_summit(void);
10#else
11#define setup_summit() {}
12#endif
13
14static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
15 char *productid)
16{
17 if (!strncmp(oem, "IBM ENSW", 8) &&
18 (!strncmp(productid, "VIGIL SMP", 9)
19 || !strncmp(productid, "EXA", 3)
20 || !strncmp(productid, "RUTHLESS SMP", 12))){
21 mark_tsc_unstable("Summit based system");
22		use_cyclone = 1; /* enable cyclone-timer */
23 setup_summit();
24 return 1;
25 }
26 return 0;
27}
28
29/* Hook from generic ACPI tables.c */
30static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
31{
32 if (!strncmp(oem_id, "IBM", 3) &&
33 (!strncmp(oem_table_id, "SERVIGIL", 8)
34 || !strncmp(oem_table_id, "EXA", 3))){
35 mark_tsc_unstable("Summit based system");
36		use_cyclone = 1; /* enable cyclone-timer */
37 setup_summit();
38 return 1;
39 }
40 return 0;
41}
42
43struct rio_table_hdr {
44 unsigned char version; /* Version number of this data structure */
45 /* Version 3 adds chassis_num & WP_index */
46 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
47 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
48} __attribute__((packed));
49
50struct scal_detail {
51 unsigned char node_id; /* Scalability Node ID */
52 unsigned long CBAR; /* Address of 1MB register space */
53 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
54 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
55 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
56 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
57 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
58 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
59 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
60} __attribute__((packed));
61
62struct rio_detail {
63 unsigned char node_id; /* RIO Node ID */
64 unsigned long BBAR; /* Address of 1MB register space */
65 unsigned char type; /* Type of device */
66 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
67 /* For CYC: Node ID of Twister that owns this CYC */
68 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
69 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
70 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
71 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
72 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
73 /* For CYC: 0 */
74 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
75	/* = 0 : the XAPIC is not used, i.e.: */
76 /* ints fwded to another XAPIC */
77 /* Bits1:7 Reserved */
78 /* For CYC: Bits0:7 Reserved */
79 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
80 /* lower slot numbers/PCI bus numbers */
81 /* For CYC: No meaning */
82 unsigned char chassis_num; /* 1 based Chassis number */
83 /* For LookOut WPEGs this field indicates the */
84 /* Expansion Chassis #, enumerated from Boot */
85 /* Node WPEG external port, then Boot Node CYC */
86 /* external port, then Next Vigil chassis WPEG */
87 /* external port, etc. */
88 /* Shared Lookouts have only 1 chassis number (the */
89 /* first one assigned) */
90} __attribute__((packed));
91
92
93typedef enum {
94 CompatTwister = 0, /* Compatibility Twister */
95 AltTwister = 1, /* Alternate Twister of internal 8-way */
96 CompatCyclone = 2, /* Compatibility Cyclone */
97 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
98 CompatWPEG = 4, /* Compatibility WPEG */
99 AltWPEG = 5, /* Second Planar WPEG */
100 LookOutAWPEG = 6, /* LookOut WPEG */
101 LookOutBWPEG = 7, /* LookOut WPEG */
102} node_type;
103
104static inline int is_WPEG(struct rio_detail *rio){
105 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
106 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
107}
108
109#endif /* __ASM_SUMMIT_MPPARSE_H */
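The rio/scal table structs above are declared __attribute__((packed)) because the firmware emits their fields back-to-back; without it the compiler would insert padding, e.g. before an unsigned long that follows a lone byte, and the parser would read garbage. A sketch of the size difference (hypothetical struct names; the 9-vs-16 figures assume x86-64):

#include <stdio.h>

/* Field order echoes struct scal_detail above: one ID byte followed by
 * a register-space address. */
struct scal_like_packed {
	unsigned char node_id;
	unsigned long cbar;
} __attribute__((packed));

struct scal_like_padded {
	unsigned char node_id;
	unsigned long cbar;
};

int main(void)
{
	/* e.g. 9 vs 16 bytes on x86-64: padding would misparse the table. */
	printf("packed %zu, padded %zu\n",
	       sizeof(struct scal_like_packed),
	       sizeof(struct scal_like_padded));
	return 0;
}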
diff --git a/include/asm-x86/suspend.h b/include/asm-x86/suspend.h
deleted file mode 100644
index 9bd521fe4570..000000000000
--- a/include/asm-x86/suspend.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "suspend_32.h"
3#else
4# include "suspend_64.h"
5#endif
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
deleted file mode 100644
index acb6d4d491f4..000000000000
--- a/include/asm-x86/suspend_32.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright 2001-2002 Pavel Machek <pavel@suse.cz>
3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */
6#ifndef ASM_X86__SUSPEND_32_H
7#define ASM_X86__SUSPEND_32_H
8
9#include <asm/desc.h>
10#include <asm/i387.h>
11
12static inline int arch_prepare_suspend(void) { return 0; }
13
14/* image of the saved processor state */
15struct saved_context {
16 u16 es, fs, gs, ss;
17 unsigned long cr0, cr2, cr3, cr4;
18 struct desc_ptr gdt;
19 struct desc_ptr idt;
20 u16 ldt;
21 u16 tss;
22 unsigned long tr;
23 unsigned long safety;
24 unsigned long return_address;
25} __attribute__((packed));
26
27#ifdef CONFIG_ACPI
28extern unsigned long saved_eip;
29extern unsigned long saved_esp;
30extern unsigned long saved_ebp;
31extern unsigned long saved_ebx;
32extern unsigned long saved_esi;
33extern unsigned long saved_edi;
34
35static inline void acpi_save_register_state(unsigned long return_point)
36{
37 saved_eip = return_point;
38 asm volatile("movl %%esp,%0" : "=m" (saved_esp));
39 asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
40 asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
41 asm volatile("movl %%edi,%0" : "=m" (saved_edi));
42 asm volatile("movl %%esi,%0" : "=m" (saved_esi));
43}
44
45#define acpi_restore_register_state() do {} while (0)
46
47/* routines for saving/restoring kernel state */
48extern int acpi_save_state_mem(void);
49#endif
50
51#endif /* ASM_X86__SUSPEND_32_H */
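acpi_save_register_state() above snapshots a resume address plus the caller's register state with 32-bit inline asm. A rough, portable sketch of the same idea using compiler builtins instead (hypothetical names; not how the kernel does it, just the shape of it):

#include <stdio.h>

static unsigned long saved_ip, saved_bp;

/* Record where to resume and the current frame, roughly what
 * acpi_save_register_state() does for eip/esp/ebp and friends. */
static void save_register_state(unsigned long return_point)
{
	saved_ip = return_point;
	saved_bp = (unsigned long)__builtin_frame_address(0);
}

int main(void)
{
	save_register_state((unsigned long)&main);	/* any resume address */
	printf("resume ip %#lx, frame %#lx\n", saved_ip, saved_bp);
	return 0;
}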
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
deleted file mode 100644
index cf821dd310e8..000000000000
--- a/include/asm-x86/suspend_64.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright 2001-2003 Pavel Machek <pavel@suse.cz>
3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */
6#ifndef ASM_X86__SUSPEND_64_H
7#define ASM_X86__SUSPEND_64_H
8
9#include <asm/desc.h>
10#include <asm/i387.h>
11
12static inline int arch_prepare_suspend(void)
13{
14 return 0;
15}
16
17/*
18 * Image of the saved processor state, used by the low level ACPI suspend to
19 * RAM code and by the low level hibernation code.
20 *
21 * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that
22 * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c,
23 * still work as required.
24 */
25struct saved_context {
26 struct pt_regs regs;
27 u16 ds, es, fs, gs, ss;
28 unsigned long gs_base, gs_kernel_base, fs_base;
29 unsigned long cr0, cr2, cr3, cr4, cr8;
30 unsigned long efer;
31 u16 gdt_pad;
32 u16 gdt_limit;
33 unsigned long gdt_base;
34 u16 idt_pad;
35 u16 idt_limit;
36 unsigned long idt_base;
37 u16 ldt;
38 u16 tss;
39 unsigned long tr;
40 unsigned long safety;
41 unsigned long return_address;
42} __attribute__((packed));
43
44#define loaddebug(thread,register) \
45 set_debugreg((thread)->debugreg##register, register)
46
47/* routines for saving/restoring kernel state */
48extern int acpi_save_state_mem(void);
49extern char core_restore_code;
50extern char restore_registers;
51
52#endif /* ASM_X86__SUSPEND_64_H */
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
deleted file mode 100644
index 1e20adbcad4b..000000000000
--- a/include/asm-x86/swiotlb.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef ASM_X86__SWIOTLB_H
2#define ASM_X86__SWIOTLB_H
3
4#include <asm/dma-mapping.h>
5
6/* SWIOTLB interface */
7
8extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
9 size_t size, int dir);
10extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
11 dma_addr_t *dma_handle, gfp_t flags);
12extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
13 size_t size, int dir);
14extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
15 dma_addr_t dev_addr,
16 size_t size, int dir);
17extern void swiotlb_sync_single_for_device(struct device *hwdev,
18 dma_addr_t dev_addr,
19 size_t size, int dir);
20extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
21 dma_addr_t dev_addr,
22 unsigned long offset,
23 size_t size, int dir);
24extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
25 dma_addr_t dev_addr,
26 unsigned long offset,
27 size_t size, int dir);
28extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
29 struct scatterlist *sg, int nelems,
30 int dir);
31extern void swiotlb_sync_sg_for_device(struct device *hwdev,
32 struct scatterlist *sg, int nelems,
33 int dir);
34extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
35 int nents, int direction);
36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
37 int nents, int direction);
38extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
39extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
40 void *vaddr, dma_addr_t dma_handle);
41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
42extern void swiotlb_init(void);
43
44extern int swiotlb_force;
45
46#ifdef CONFIG_SWIOTLB
47extern int swiotlb;
48extern void pci_swiotlb_init(void);
49#else
50#define swiotlb 0
51static inline void pci_swiotlb_init(void)
52{
53}
54#endif
55
56static inline void dma_mark_clean(void *addr, size_t size) {}
57
58#endif /* ASM_X86__SWIOTLB_H */
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
deleted file mode 100644
index b689bee71104..000000000000
--- a/include/asm-x86/sync_bitops.h
+++ /dev/null
@@ -1,130 +0,0 @@
1#ifndef ASM_X86__SYNC_BITOPS_H
2#define ASM_X86__SYNC_BITOPS_H
3
4/*
5 * Copyright 1992, Linus Torvalds.
6 */
7
8/*
9 * These have to be done with inline assembly: that way the bit-setting
10 * is guaranteed to be atomic. All bit operations return 0 if the bit
11 * was cleared before the operation and != 0 if it was not.
12 *
13 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
14 */
15
16#define ADDR (*(volatile long *)addr)
17
18/**
19 * sync_set_bit - Atomically set a bit in memory
20 * @nr: the bit to set
21 * @addr: the address to start counting from
22 *
23 * This function is atomic and may not be reordered. See __set_bit()
24 * if you do not require the atomic guarantees.
25 *
26 * Note that @nr may be almost arbitrarily large; this function is not
27 * restricted to acting on a single-word quantity.
28 */
29static inline void sync_set_bit(int nr, volatile unsigned long *addr)
30{
31 asm volatile("lock; btsl %1,%0"
32 : "+m" (ADDR)
33 : "Ir" (nr)
34 : "memory");
35}
36
37/**
38 * sync_clear_bit - Clears a bit in memory
39 * @nr: Bit to clear
40 * @addr: Address to start counting from
41 *
42 * sync_clear_bit() is atomic and may not be reordered. However, it does
43 * not contain a memory barrier, so if it is used for locking purposes,
44 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
45 * in order to ensure changes are visible on other processors.
46 */
47static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
48{
49 asm volatile("lock; btrl %1,%0"
50 : "+m" (ADDR)
51 : "Ir" (nr)
52 : "memory");
53}
54
55/**
56 * sync_change_bit - Toggle a bit in memory
57 * @nr: Bit to change
58 * @addr: Address to start counting from
59 *
60 * sync_change_bit() is atomic and may not be reordered.
61 * Note that @nr may be almost arbitrarily large; this function is not
62 * restricted to acting on a single-word quantity.
63 */
64static inline void sync_change_bit(int nr, volatile unsigned long *addr)
65{
66 asm volatile("lock; btcl %1,%0"
67 : "+m" (ADDR)
68 : "Ir" (nr)
69 : "memory");
70}
71
72/**
73 * sync_test_and_set_bit - Set a bit and return its old value
74 * @nr: Bit to set
75 * @addr: Address to count from
76 *
77 * This operation is atomic and cannot be reordered.
78 * It also implies a memory barrier.
79 */
80static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
81{
82 int oldbit;
83
84 asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
85 : "=r" (oldbit), "+m" (ADDR)
86 : "Ir" (nr) : "memory");
87 return oldbit;
88}
89
90/**
91 * sync_test_and_clear_bit - Clear a bit and return its old value
92 * @nr: Bit to clear
93 * @addr: Address to count from
94 *
95 * This operation is atomic and cannot be reordered.
96 * It also implies a memory barrier.
97 */
98static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
99{
100 int oldbit;
101
102 asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
103 : "=r" (oldbit), "+m" (ADDR)
104 : "Ir" (nr) : "memory");
105 return oldbit;
106}
107
108/**
109 * sync_test_and_change_bit - Change a bit and return its old value
110 * @nr: Bit to change
111 * @addr: Address to count from
112 *
113 * This operation is atomic and cannot be reordered.
114 * It also implies a memory barrier.
115 */
116static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
117{
118 int oldbit;
119
120 asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
121 : "=r" (oldbit), "+m" (ADDR)
122 : "Ir" (nr) : "memory");
123 return oldbit;
124}
125
126#define sync_test_bit(nr, addr) test_bit(nr, addr)
127
128#undef ADDR
129
130#endif /* ASM_X86__SYNC_BITOPS_H */
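The contract of sync_test_and_set_bit() above -- atomically set bit nr and report whether it was already set -- can be mirrored in portable user space with GCC's __atomic builtins in place of the lock btsl/sbbl pair. A sketch, not the kernel implementation:

#include <stdio.h>

/* Atomically set bit nr in the bitmap at addr; return nonzero iff the
 * bit was already set. */
static int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int bits = 8 * (int)sizeof(long);
	unsigned long mask = 1UL << (nr % bits);

	return (__atomic_fetch_or(addr + nr / bits, mask,
				  __ATOMIC_SEQ_CST) & mask) != 0;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };
	int in_word_1 = 8 * (int)sizeof(long) + 2;	/* bit 2 of word 1 */

	printf("%d\n", test_and_set_bit(5, bitmap));		/* 0: was clear   */
	printf("%d\n", test_and_set_bit(5, bitmap));		/* 1: already set */
	printf("%d\n", test_and_set_bit(in_word_1, bitmap));	/* 0: other word  */
	return 0;
}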
diff --git a/include/asm-x86/syscall.h b/include/asm-x86/syscall.h
deleted file mode 100644
index 04c47dc5597c..000000000000
--- a/include/asm-x86/syscall.h
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_SYSCALL_H
14#define _ASM_SYSCALL_H 1
15
16#include <linux/sched.h>
17#include <linux/err.h>
18
19static inline long syscall_get_nr(struct task_struct *task,
20 struct pt_regs *regs)
21{
22 /*
23 * We always sign-extend a -1 value being set here,
24 * so this is always either -1L or a syscall number.
25 */
26 return regs->orig_ax;
27}
28
29static inline void syscall_rollback(struct task_struct *task,
30 struct pt_regs *regs)
31{
32 regs->ax = regs->orig_ax;
33}
34
35static inline long syscall_get_error(struct task_struct *task,
36 struct pt_regs *regs)
37{
38 unsigned long error = regs->ax;
39#ifdef CONFIG_IA32_EMULATION
40 /*
41 * TS_COMPAT is set for 32-bit syscall entries and then
42 * remains set until we return to user mode.
43 */
44 if (task_thread_info(task)->status & TS_COMPAT)
45 /*
46 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
47 * and will match correctly in comparisons.
48 */
49 error = (long) (int) error;
50#endif
51 return IS_ERR_VALUE(error) ? error : 0;
52}
53
54static inline long syscall_get_return_value(struct task_struct *task,
55 struct pt_regs *regs)
56{
57 return regs->ax;
58}
59
60static inline void syscall_set_return_value(struct task_struct *task,
61 struct pt_regs *regs,
62 int error, long val)
63{
64 regs->ax = (long) error ?: val;
65}
66
67#ifdef CONFIG_X86_32
68
69static inline void syscall_get_arguments(struct task_struct *task,
70 struct pt_regs *regs,
71 unsigned int i, unsigned int n,
72 unsigned long *args)
73{
74 BUG_ON(i + n > 6);
75 memcpy(args, &regs->bx + i, n * sizeof(args[0]));
76}
77
78static inline void syscall_set_arguments(struct task_struct *task,
79 struct pt_regs *regs,
80 unsigned int i, unsigned int n,
81 const unsigned long *args)
82{
83 BUG_ON(i + n > 6);
84 memcpy(&regs->bx + i, args, n * sizeof(args[0]));
85}
86
87#else /* CONFIG_X86_64 */
88
89static inline void syscall_get_arguments(struct task_struct *task,
90 struct pt_regs *regs,
91 unsigned int i, unsigned int n,
92 unsigned long *args)
93{
94# ifdef CONFIG_IA32_EMULATION
95 if (task_thread_info(task)->status & TS_COMPAT)
96 switch (i + n) {
97 case 6:
98 if (!n--) break;
99 *args++ = regs->bp;
100 case 5:
101 if (!n--) break;
102 *args++ = regs->di;
103 case 4:
104 if (!n--) break;
105 *args++ = regs->si;
106 case 3:
107 if (!n--) break;
108 *args++ = regs->dx;
109 case 2:
110 if (!n--) break;
111 *args++ = regs->cx;
112 case 1:
113 if (!n--) break;
114 *args++ = regs->bx;
115 case 0:
116 if (!n--) break;
117 default:
118 BUG();
119 break;
120 }
121 else
122# endif
123 switch (i + n) {
124 case 6:
125 if (!n--) break;
126 *args++ = regs->r9;
127 case 5:
128 if (!n--) break;
129 *args++ = regs->r8;
130 case 4:
131 if (!n--) break;
132 *args++ = regs->r10;
133 case 3:
134 if (!n--) break;
135 *args++ = regs->dx;
136 case 2:
137 if (!n--) break;
138 *args++ = regs->si;
139 case 1:
140 if (!n--) break;
141 *args++ = regs->di;
142 case 0:
143 if (!n--) break;
144 default:
145 BUG();
146 break;
147 }
148}
149
150static inline void syscall_set_arguments(struct task_struct *task,
151 struct pt_regs *regs,
152 unsigned int i, unsigned int n,
153 const unsigned long *args)
154{
155# ifdef CONFIG_IA32_EMULATION
156 if (task_thread_info(task)->status & TS_COMPAT)
157 switch (i + n) {
158 case 6:
159 if (!n--) break;
160 regs->bp = *args++;
161 case 5:
162 if (!n--) break;
163 regs->di = *args++;
164 case 4:
165 if (!n--) break;
166 regs->si = *args++;
167 case 3:
168 if (!n--) break;
169 regs->dx = *args++;
170 case 2:
171 if (!n--) break;
172 regs->cx = *args++;
173 case 1:
174 if (!n--) break;
175 regs->bx = *args++;
176 case 0:
177 if (!n--) break;
178 default:
179 BUG();
180 }
181 else
182# endif
183 switch (i + n) {
184 case 6:
185 if (!n--) break;
186 regs->r9 = *args++;
187 case 5:
188 if (!n--) break;
189 regs->r8 = *args++;
190 case 4:
191 if (!n--) break;
192 regs->r10 = *args++;
193 case 3:
194 if (!n--) break;
195 regs->dx = *args++;
196 case 2:
197 if (!n--) break;
198 regs->si = *args++;
199 case 1:
200 if (!n--) break;
201 regs->di = *args++;
202 case 0:
203 if (!n--) break;
204 default:
205 BUG();
206 }
207}
208
209#endif /* CONFIG_X86_32 */
210
211#endif /* _ASM_SYSCALL_H */
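syscall_get_arguments()/syscall_set_arguments() above use a fall-through switch entered at case (i + n), decrementing n at each case, so the walk visits registers i+n-1 down to i. A stand-alone sketch of that idiom with a plain array in place of pt_regs (hypothetical layout; the BUG() path for impossible counts is omitted):

#include <stdio.h>

/* Copy n "registers" starting at index i into args[].  Entering the
 * switch at (i + n) and decrementing n at each case visits regs[i+n-1]
 * down to regs[i], exactly as in the compat/native switches above. */
static void get_args(const unsigned long *regs, unsigned int i,
		     unsigned int n, unsigned long *args)
{
	switch (i + n) {
	case 6: if (!n--) break; *args++ = regs[5];	/* fall through */
	case 5: if (!n--) break; *args++ = regs[4];	/* fall through */
	case 4: if (!n--) break; *args++ = regs[3];	/* fall through */
	case 3: if (!n--) break; *args++ = regs[2];	/* fall through */
	case 2: if (!n--) break; *args++ = regs[1];	/* fall through */
	case 1: if (!n--) break; *args++ = regs[0];	/* fall through */
	case 0: break;
	}
}

int main(void)
{
	unsigned long regs[6] = { 10, 11, 12, 13, 14, 15 };
	unsigned long args[3];

	get_args(regs, 1, 3, args);	/* enters at case 4 */
	printf("%lu %lu %lu\n", args[0], args[1], args[2]);	/* 13 12 11 */
	return 0;
}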
diff --git a/include/asm-x86/syscalls.h b/include/asm-x86/syscalls.h
deleted file mode 100644
index 87803da44010..000000000000
--- a/include/asm-x86/syscalls.h
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * syscalls.h - Linux syscall interfaces (arch-specific)
3 *
4 * Copyright (c) 2008 Jaswinder Singh
5 *
6 * This file is released under the GPLv2.
7 * See the file COPYING for more details.
8 */
9
10#ifndef _ASM_X86_SYSCALLS_H
11#define _ASM_X86_SYSCALLS_H
12
13#include <linux/compiler.h>
14#include <linux/linkage.h>
15#include <linux/types.h>
16#include <linux/signal.h>
17
18/* Common in X86_32 and X86_64 */
19/* kernel/ioport.c */
20asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
21
22/* X86_32 only */
23#ifdef CONFIG_X86_32
24/* kernel/process_32.c */
25asmlinkage int sys_fork(struct pt_regs);
26asmlinkage int sys_clone(struct pt_regs);
27asmlinkage int sys_vfork(struct pt_regs);
28asmlinkage int sys_execve(struct pt_regs);
29
30/* kernel/signal_32.c */
31asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
32asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
33 struct old_sigaction __user *);
34asmlinkage int sys_sigaltstack(unsigned long);
35asmlinkage unsigned long sys_sigreturn(unsigned long);
36asmlinkage int sys_rt_sigreturn(unsigned long);
37
38/* kernel/ioport.c */
39asmlinkage long sys_iopl(unsigned long);
40
41/* kernel/ldt.c */
42asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
43
44/* kernel/sys_i386_32.c */
45asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
46 unsigned long, unsigned long, unsigned long);
47struct mmap_arg_struct;
48asmlinkage int old_mmap(struct mmap_arg_struct __user *);
49struct sel_arg_struct;
50asmlinkage int old_select(struct sel_arg_struct __user *);
51asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
52struct old_utsname;
53asmlinkage int sys_uname(struct old_utsname __user *);
54struct oldold_utsname;
55asmlinkage int sys_olduname(struct oldold_utsname __user *);
56
57/* kernel/tls.c */
58asmlinkage int sys_set_thread_area(struct user_desc __user *);
59asmlinkage int sys_get_thread_area(struct user_desc __user *);
60
61/* kernel/vm86_32.c */
62asmlinkage int sys_vm86old(struct pt_regs);
63asmlinkage int sys_vm86(struct pt_regs);
64
65#else /* CONFIG_X86_32 */
66
67/* X86_64 only */
68/* kernel/process_64.c */
69asmlinkage long sys_fork(struct pt_regs *);
70asmlinkage long sys_clone(unsigned long, unsigned long,
71 void __user *, void __user *,
72 struct pt_regs *);
73asmlinkage long sys_vfork(struct pt_regs *);
74asmlinkage long sys_execve(char __user *, char __user * __user *,
75 char __user * __user *,
76 struct pt_regs *);
77
78/* kernel/ioport.c */
79asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
80
81/* kernel/signal_64.c */
82asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
83 struct pt_regs *);
84asmlinkage long sys_rt_sigreturn(struct pt_regs *);
85
86/* kernel/sys_x86_64.c */
87asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
88 unsigned long, unsigned long, unsigned long);
89struct new_utsname;
90asmlinkage long sys_uname(struct new_utsname __user *);
91
92#endif /* CONFIG_X86_32 */
93#endif /* _ASM_X86_SYSCALLS_H */
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
deleted file mode 100644
index b20c894660f9..000000000000
--- a/include/asm-x86/system.h
+++ /dev/null
@@ -1,425 +0,0 @@
1#ifndef ASM_X86__SYSTEM_H
2#define ASM_X86__SYSTEM_H
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9
10#include <linux/kernel.h>
11#include <linux/irqflags.h>
12
13/* entries in ARCH_DLINFO: */
14#ifdef CONFIG_IA32_EMULATION
15# define AT_VECTOR_SIZE_ARCH 2
16#else
17# define AT_VECTOR_SIZE_ARCH 1
18#endif
19
20#ifdef CONFIG_X86_32
21
22struct task_struct; /* one of the stranger aspects of C forward declarations */
23struct task_struct *__switch_to(struct task_struct *prev,
24 struct task_struct *next);
25
26/*
27 * Saving eflags is important. It switches not only IOPL between tasks,
28 * it also protects other tasks from NT leaking through sysenter etc.
29 */
30#define switch_to(prev, next, last) \
31do { \
32 /* \
33 * Context-switching clobbers all registers, so we clobber \
34 * them explicitly, via unused output variables. \
35	 * (EAX and EBP are not listed because EBP is saved/restored	\
36 * explicitly for wchan access and EAX is the return value of \
37 * __switch_to()) \
38 */ \
39 unsigned long ebx, ecx, edx, esi, edi; \
40 \
41 asm volatile("pushfl\n\t" /* save flags */ \
42 "pushl %%ebp\n\t" /* save EBP */ \
43 "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
44 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
45 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
46 "pushl %[next_ip]\n\t" /* restore EIP */ \
47 "jmp __switch_to\n" /* regparm call */ \
48 "1:\t" \
49 "popl %%ebp\n\t" /* restore EBP */ \
50 "popfl\n" /* restore flags */ \
51 \
52 /* output parameters */ \
53 : [prev_sp] "=m" (prev->thread.sp), \
54 [prev_ip] "=m" (prev->thread.ip), \
55 "=a" (last), \
56 \
57 /* clobbered output registers: */ \
58 "=b" (ebx), "=c" (ecx), "=d" (edx), \
59 "=S" (esi), "=D" (edi) \
60 \
61 /* input parameters: */ \
62 : [next_sp] "m" (next->thread.sp), \
63 [next_ip] "m" (next->thread.ip), \
64 \
65 /* regparm parameters for __switch_to(): */ \
66 [prev] "a" (prev), \
67 [next] "d" (next) \
68 \
69 : /* reloaded segment registers */ \
70 "memory"); \
71} while (0)
72
73/*
74 * disable hlt during certain critical i/o operations
75 */
76#define HAVE_DISABLE_HLT
77#else
78#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
79#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
80
81/* frame pointer must be last for get_wchan */
82#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
83#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
84
85#define __EXTRA_CLOBBER \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15"
88
89/* Save and restore flags to keep the NT flag from leaking between tasks */
90#define switch_to(prev, next, last) \
91 asm volatile(SAVE_CONTEXT \
92 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
93 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
94 "call __switch_to\n\t" \
95 ".globl thread_return\n" \
96 "thread_return:\n\t" \
97 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
98 "movq %P[thread_info](%%rsi),%%r8\n\t" \
99 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
100 "movq %%rax,%%rdi\n\t" \
101 "jc ret_from_fork\n\t" \
102 RESTORE_CONTEXT \
103 : "=a" (last) \
104 : [next] "S" (next), [prev] "D" (prev), \
105 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
106 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
107 [tif_fork] "i" (TIF_FORK), \
108 [thread_info] "i" (offsetof(struct task_struct, stack)), \
109 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
110 : "memory", "cc" __EXTRA_CLOBBER)
111#endif
112
113#ifdef __KERNEL__
114#define _set_base(addr, base) do { unsigned long __pr; \
115__asm__ __volatile__ ("movw %%dx,%1\n\t" \
116 "rorl $16,%%edx\n\t" \
117 "movb %%dl,%2\n\t" \
118 "movb %%dh,%3" \
119 :"=&d" (__pr) \
120 :"m" (*((addr)+2)), \
121 "m" (*((addr)+4)), \
122 "m" (*((addr)+7)), \
123 "0" (base) \
124 ); } while (0)
125
126#define _set_limit(addr, limit) do { unsigned long __lr; \
127__asm__ __volatile__ ("movw %%dx,%1\n\t" \
128 "rorl $16,%%edx\n\t" \
129 "movb %2,%%dh\n\t" \
130 "andb $0xf0,%%dh\n\t" \
131 "orb %%dh,%%dl\n\t" \
132 "movb %%dl,%2" \
133 :"=&d" (__lr) \
134 :"m" (*(addr)), \
135 "m" (*((addr)+6)), \
136 "0" (limit) \
137 ); } while (0)
138
139#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
140#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
141
142extern void native_load_gs_index(unsigned);
143
144/*
145 * Load a segment. Fall back on loading the zero
146 * segment if something goes wrong.
147 */
148#define loadsegment(seg, value) \
149 asm volatile("\n" \
150 "1:\t" \
151 "movl %k0,%%" #seg "\n" \
152 "2:\n" \
153 ".section .fixup,\"ax\"\n" \
154 "3:\t" \
155 "movl %k1, %%" #seg "\n\t" \
156 "jmp 2b\n" \
157 ".previous\n" \
158 _ASM_EXTABLE(1b,3b) \
159 : :"r" (value), "r" (0) : "memory")
160
161
162/*
163 * Save a segment register away
164 */
165#define savesegment(seg, value) \
166 asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
167
168static inline unsigned long get_limit(unsigned long segment)
169{
170 unsigned long __limit;
171 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
172 return __limit + 1;
173}
174
175static inline void native_clts(void)
176{
177 asm volatile("clts");
178}
179
180/*
181 * Volatile isn't enough to prevent the compiler from reordering the
182 * read/write functions for the control registers and messing everything up.
183 * A memory clobber would solve the problem, but would prevent reordering of
184 * all loads and stores around it, which can hurt performance.  The solution is
185 * to use a variable and mimic reads and writes to it to enforce serialization.
186 */
187static unsigned long __force_order;
188
189static inline unsigned long native_read_cr0(void)
190{
191 unsigned long val;
192 asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
193 return val;
194}
195
196static inline void native_write_cr0(unsigned long val)
197{
198 asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
199}
200
201static inline unsigned long native_read_cr2(void)
202{
203 unsigned long val;
204 asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
205 return val;
206}
207
208static inline void native_write_cr2(unsigned long val)
209{
210 asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
211}
212
213static inline unsigned long native_read_cr3(void)
214{
215 unsigned long val;
216 asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
217 return val;
218}
219
220static inline void native_write_cr3(unsigned long val)
221{
222 asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
223}
224
225static inline unsigned long native_read_cr4(void)
226{
227 unsigned long val;
228 asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
229 return val;
230}
231
232static inline unsigned long native_read_cr4_safe(void)
233{
234 unsigned long val;
235	/* This could fault if %cr4 does not exist.  On x86_64, CR4 always
236	 * exists, so it never fails. */
237#ifdef CONFIG_X86_32
238 asm volatile("1: mov %%cr4, %0\n"
239 "2:\n"
240 _ASM_EXTABLE(1b, 2b)
241 : "=r" (val), "=m" (__force_order) : "0" (0));
242#else
243 val = native_read_cr4();
244#endif
245 return val;
246}
247
248static inline void native_write_cr4(unsigned long val)
249{
250 asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
251}
252
253#ifdef CONFIG_X86_64
254static inline unsigned long native_read_cr8(void)
255{
256 unsigned long cr8;
257 asm volatile("movq %%cr8,%0" : "=r" (cr8));
258 return cr8;
259}
260
261static inline void native_write_cr8(unsigned long val)
262{
263 asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
264}
265#endif
266
267static inline void native_wbinvd(void)
268{
269 asm volatile("wbinvd": : :"memory");
270}
271
272#ifdef CONFIG_PARAVIRT
273#include <asm/paravirt.h>
274#else
275#define read_cr0() (native_read_cr0())
276#define write_cr0(x) (native_write_cr0(x))
277#define read_cr2() (native_read_cr2())
278#define write_cr2(x) (native_write_cr2(x))
279#define read_cr3() (native_read_cr3())
280#define write_cr3(x) (native_write_cr3(x))
281#define read_cr4() (native_read_cr4())
282#define read_cr4_safe() (native_read_cr4_safe())
283#define write_cr4(x) (native_write_cr4(x))
284#define wbinvd() (native_wbinvd())
285#ifdef CONFIG_X86_64
286#define read_cr8() (native_read_cr8())
287#define write_cr8(x) (native_write_cr8(x))
288#define load_gs_index native_load_gs_index
289#endif
290
291/* Clear the 'TS' bit */
292#define clts() (native_clts())
293
294#endif/* CONFIG_PARAVIRT */
295
296#define stts() write_cr0(read_cr0() | X86_CR0_TS)
297
298#endif /* __KERNEL__ */
299
300static inline void clflush(volatile void *__p)
301{
302 asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
303}
304
305#define nop() asm volatile ("nop")
306
307void disable_hlt(void);
308void enable_hlt(void);
309
310void cpu_idle_wait(void);
311
312extern unsigned long arch_align_stack(unsigned long sp);
313extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
314
315void default_idle(void);
316
317/*
318 * Force strict CPU ordering.
319 * And yes, this is required on UP too when we're talking
320 * to devices.
321 */
322#ifdef CONFIG_X86_32
323/*
324 * Some non-Intel clones support out-of-order stores.  wmb() ceases to be a
325 * nop for these.
326 */
327#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
328#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
329#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
330#else
331#define mb() asm volatile("mfence":::"memory")
332#define rmb() asm volatile("lfence":::"memory")
333#define wmb() asm volatile("sfence" ::: "memory")
334#endif
335
336/**
337 * read_barrier_depends - Flush all pending reads that subsequent reads
338 * depend on.
339 *
340 * No data-dependent reads from memory-like regions are ever reordered
341 * over this barrier. All reads preceding this primitive are guaranteed
342 * to access memory (but not necessarily other CPUs' caches) before any
343 * reads following this primitive that depend on the data returned by
344 * any of the preceding reads. This primitive is much lighter weight than
345 * rmb() on most CPUs, and is never heavier weight than is
346 * rmb().
347 *
348 * These ordering constraints are respected by both the local CPU
349 * and the compiler.
350 *
351 * Ordering is not guaranteed by anything other than these primitives,
352 * not even by data dependencies. See the documentation for
353 * memory_barrier() for examples and URLs to more information.
354 *
355 * For example, the following code would force ordering (the initial
356 * value of "a" is zero, "b" is one, and "p" is "&a"):
357 *
358 * <programlisting>
359 * CPU 0 CPU 1
360 *
361 * b = 2;
362 * memory_barrier();
363 * p = &b; q = p;
364 * read_barrier_depends();
365 * d = *q;
366 * </programlisting>
367 *
368 * because the read of "*q" depends on the read of "p" and these
369 * two reads are separated by a read_barrier_depends(). However,
370 * the following code, with the same initial values for "a" and "b":
371 *
372 * <programlisting>
373 * CPU 0 CPU 1
374 *
375 * a = 2;
376 * memory_barrier();
377 * b = 3; y = b;
378 * read_barrier_depends();
379 * x = a;
380 * </programlisting>
381 *
382 * does not enforce ordering, since there is no data dependency between
383 * the read of "a" and the read of "b". Therefore, on some CPUs, such
384 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
385 * in cases like this where there are no data dependencies.
386 **/
387
388#define read_barrier_depends() do { } while (0)
389
390#ifdef CONFIG_SMP
391#define smp_mb() mb()
392#ifdef CONFIG_X86_PPRO_FENCE
393# define smp_rmb() rmb()
394#else
395# define smp_rmb() barrier()
396#endif
397#ifdef CONFIG_X86_OOSTORE
398# define smp_wmb() wmb()
399#else
400# define smp_wmb() barrier()
401#endif
402#define smp_read_barrier_depends() read_barrier_depends()
403#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
404#else
405#define smp_mb() barrier()
406#define smp_rmb() barrier()
407#define smp_wmb() barrier()
408#define smp_read_barrier_depends() do { } while (0)
409#define set_mb(var, value) do { var = value; barrier(); } while (0)
410#endif
411
412/*
413 * Stop RDTSC speculation.  This is needed when you need to use RDTSC
414 * (or get_cycles() or vread(), which may access the TSC) in a defined
415 * code region.
416 *
417 * (Could use an alternative three-way for this if there were one.)
418 */
419static inline void rdtsc_barrier(void)
420{
421 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
422 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
423}
424
425#endif /* ASM_X86__SYSTEM_H */
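set_mb() above pairs a store with a full memory barrier by riding on xchg()'s implicit lock semantics. A user-space sketch of the same idea, with a GCC __atomic builtin standing in for the kernel's xchg():

#include <stdio.h>

static unsigned long flag;

/* Store 'value' into 'var' and get a full barrier in one shot, the way
 * set_mb() does with xchg(). */
#define set_mb(var, value)						\
	do { (void)__atomic_exchange_n(&(var), (value),		\
				       __ATOMIC_SEQ_CST); } while (0)

int main(void)
{
	set_mb(flag, 1);	/* ordered store: no later access moves above it */
	printf("%lu\n", flag);
	return 0;
}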
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
deleted file mode 100644
index 5aedb8bffc5a..000000000000
--- a/include/asm-x86/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef ASM_X86__SYSTEM_64_H
2#define ASM_X86__SYSTEM_64_H
3
4#include <asm/segment.h>
5#include <asm/cmpxchg.h>
6
7
8static inline unsigned long read_cr8(void)
9{
10 unsigned long cr8;
11 asm volatile("movq %%cr8,%0" : "=r" (cr8));
12 return cr8;
13}
14
15static inline void write_cr8(unsigned long val)
16{
17 asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
18}
19
20#include <linux/irqflags.h>
21
22#endif /* ASM_X86__SYSTEM_64_H */
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h
deleted file mode 100644
index e7932d7fbbab..000000000000
--- a/include/asm-x86/tce.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * This file is derived from asm-powerpc/tce.h.
3 *
4 * Copyright (C) IBM Corporation, 2006
5 *
6 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
7 * Author: Jon Mason <jdmason@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef ASM_X86__TCE_H
25#define ASM_X86__TCE_H
26
27extern unsigned int specified_table_size;
28struct iommu_table;
29
30#define TCE_ENTRY_SIZE 8 /* in bytes */
31
32#define TCE_READ_SHIFT 0
33#define TCE_WRITE_SHIFT 1
34#define TCE_HUBID_SHIFT 2 /* unused */
35#define TCE_RSVD_SHIFT 8 /* unused */
36#define TCE_RPN_SHIFT 12
37#define TCE_UNUSED_SHIFT 48 /* unused */
38
39#define TCE_RPN_MASK 0x0000fffffffff000ULL
40
41extern void tce_build(struct iommu_table *tbl, unsigned long index,
42 unsigned int npages, unsigned long uaddr, int direction);
43extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
44extern void * __init alloc_tce_table(void);
45extern void __init free_tce_table(void *tbl);
46extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
47
48#endif /* ASM_X86__TCE_H */
diff --git a/include/asm-x86/termbits.h b/include/asm-x86/termbits.h
deleted file mode 100644
index 3d00dc5e0c71..000000000000
--- a/include/asm-x86/termbits.h
+++ /dev/null
@@ -1,198 +0,0 @@
1#ifndef ASM_X86__TERMBITS_H
2#define ASM_X86__TERMBITS_H
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000 /* non standard rate */
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000 /* input baud rate */
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif /* ASM_X86__TERMBITS_H */
diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h
deleted file mode 100644
index e235db248071..000000000000
--- a/include/asm-x86/termios.h
+++ /dev/null
@@ -1,113 +0,0 @@
1#ifndef ASM_X86__TERMIOS_H
2#define ASM_X86__TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42#ifdef __KERNEL__
43
44#include <asm/uaccess.h>
45
46/* intr=^C quit=^\ erase=del kill=^U
47 eof=^D vtime=\0 vmin=\1 sxtc=\0
48 start=^Q stop=^S susp=^Z eol=\0
49 reprint=^R discard=^U werase=^W lnext=^V
50 eol2=\0
51*/
52#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
53
54/*
55 * Translate a "termio" structure into a "termios". Ugh.
56 */
57#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
58 unsigned short __tmp; \
59 get_user(__tmp,&(termio)->x); \
60 *(unsigned short *) &(termios)->x = __tmp; \
61}
62
63static inline int user_termio_to_kernel_termios(struct ktermios *termios,
64 struct termio __user *termio)
65{
66 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);
67 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
68 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
69 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
70 return copy_from_user(termios->c_cc, termio->c_cc, NCC);
71}
72
73/*
74 * Translate a "termios" structure into a "termio". Ugh.
75 */
76static inline int kernel_termios_to_user_termio(struct termio __user *termio,
77 struct ktermios *termios)
78{
79 put_user((termios)->c_iflag, &(termio)->c_iflag);
80 put_user((termios)->c_oflag, &(termio)->c_oflag);
81 put_user((termios)->c_cflag, &(termio)->c_cflag);
82 put_user((termios)->c_lflag, &(termio)->c_lflag);
83 put_user((termios)->c_line, &(termio)->c_line);
84 return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);
85}
86
87static inline int user_termios_to_kernel_termios(struct ktermios *k,
88 struct termios2 __user *u)
89{
90 return copy_from_user(k, u, sizeof(struct termios2));
91}
92
93static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
94 struct ktermios *k)
95{
96 return copy_to_user(u, k, sizeof(struct termios2));
97}
98
99static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
100 struct termios __user *u)
101{
102 return copy_from_user(k, u, sizeof(struct termios));
103}
104
105static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
106 struct ktermios *k)
107{
108 return copy_to_user(u, k, sizeof(struct termios));
109}
110
111#endif /* __KERNEL__ */
112
113#endif /* ASM_X86__TERMIOS_H */
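SET_LOW_TERMIOS_BITS() above works because the legacy termio fields are 16-bit, so storing through an unsigned short pointer overwrites only the low half of the 32-bit termios flag and preserves the high bits. A sketch (user space; assumes little-endian, as on x86, and the kernel builds with -fno-strict-aliasing, which this kind of store leans on):

#include <stdio.h>

int main(void)
{
	unsigned int c_iflag = 0xdead0000u;	/* 32-bit termios flags, high bits set */
	unsigned short legacy = 0xbeef;		/* 16-bit termio value */

	/* Same store as SET_LOW_TERMIOS_BITS(): only the low 16 bits change. */
	*(unsigned short *)&c_iflag = legacy;

	printf("%#x\n", c_iflag);		/* 0xdeadbeef */
	return 0;
}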
diff --git a/include/asm-x86/therm_throt.h b/include/asm-x86/therm_throt.h
deleted file mode 100644
index 1c7f57b6b66e..000000000000
--- a/include/asm-x86/therm_throt.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef ASM_X86__THERM_THROT_H
2#define ASM_X86__THERM_THROT_H
3
4#include <asm/atomic.h>
5
6extern atomic_t therm_throt_en;
7int therm_throt_process(int curr);
8
9#endif /* ASM_X86__THERM_THROT_H */
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
deleted file mode 100644
index 3f4e52bb77f5..000000000000
--- a/include/asm-x86/thread_info.h
+++ /dev/null
@@ -1,264 +0,0 @@
1/* thread_info.h: low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef ASM_X86__THREAD_INFO_H
8#define ASM_X86__THREAD_INFO_H
9
10#include <linux/compiler.h>
11#include <asm/page.h>
12#include <asm/types.h>
13
14/*
15 * low level task data that entry.S needs immediate access to
16 * - this struct should fit entirely inside of one cache line
17 * - this struct shares the supervisor stack pages
18 */
19#ifndef __ASSEMBLY__
20struct task_struct;
21struct exec_domain;
22#include <asm/processor.h>
23
24struct thread_info {
25 struct task_struct *task; /* main task structure */
26 struct exec_domain *exec_domain; /* execution domain */
27 unsigned long flags; /* low level flags */
28 __u32 status; /* thread synchronous flags */
29 __u32 cpu; /* current CPU */
30 int preempt_count; /* 0 => preemptable,
31 <0 => BUG */
32 mm_segment_t addr_limit;
33 struct restart_block restart_block;
34 void __user *sysenter_return;
35#ifdef CONFIG_X86_32
36 unsigned long previous_esp; /* ESP of the previous stack in
37 case of nested (IRQ) stacks
38 */
39 __u8 supervisor_stack[0];
40#endif
41};
42
43#define INIT_THREAD_INFO(tsk) \
44{ \
45 .task = &tsk, \
46 .exec_domain = &default_exec_domain, \
47 .flags = 0, \
48 .cpu = 0, \
49 .preempt_count = 1, \
50 .addr_limit = KERNEL_DS, \
51 .restart_block = { \
52 .fn = do_no_restart_syscall, \
53 }, \
54}
55
56#define init_thread_info (init_thread_union.thread_info)
57#define init_stack (init_thread_union.stack)
58
59#else /* !__ASSEMBLY__ */
60
61#include <asm/asm-offsets.h>
62
63#endif
64
65/*
66 * thread information flags
67 * - these are process state flags that various assembly files
68 * may need to access
69 * - pending work-to-be-done flags are in LSW
70 * - other flags in MSW
71 * Warning: layout of LSW is hardcoded in entry.S
72 */
73#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
74#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
75#define TIF_SIGPENDING 2 /* signal pending */
76#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
77#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
78#define TIF_IRET 5 /* force IRET */
79#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
80#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
81#define TIF_SECCOMP 8 /* secure computing */
82#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
83#define TIF_NOTSC 16 /* TSC is not accessible in userland */
84#define TIF_IA32 17 /* 32bit process */
85#define TIF_FORK 18 /* ret_from_fork */
86#define TIF_ABI_PENDING 19
87#define TIF_MEMDIE 20
88#define TIF_DEBUG 21 /* uses debug registers */
89#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
90#define TIF_FREEZE 23 /* is freezing for suspend */
91#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
92#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
93#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
94#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
95
96#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
97#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
98#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
99#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
100#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
101#define _TIF_IRET (1 << TIF_IRET)
102#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
103#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
104#define _TIF_SECCOMP (1 << TIF_SECCOMP)
105#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
106#define _TIF_NOTSC (1 << TIF_NOTSC)
107#define _TIF_IA32 (1 << TIF_IA32)
108#define _TIF_FORK (1 << TIF_FORK)
109#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
110#define _TIF_DEBUG (1 << TIF_DEBUG)
111#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
112#define _TIF_FREEZE (1 << TIF_FREEZE)
113#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
114#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
115#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
116#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
117
118/* work to do in syscall_trace_enter() */
119#define _TIF_WORK_SYSCALL_ENTRY \
120 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
121 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)
122
123/* work to do in syscall_trace_leave() */
124#define _TIF_WORK_SYSCALL_EXIT \
125 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)
126
127/* work to do on interrupt/exception return */
128#define _TIF_WORK_MASK \
129 (0x0000FFFF & \
130 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \
131 _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
132
133/* work to do on any return to user space */
134#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
135
136/* Only used for 64 bit */
137#define _TIF_DO_NOTIFY_MASK \
138 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
139
140/* flags to check in __switch_to() */
141#define _TIF_WORK_CTXSW \
142 (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \
143 _TIF_NOTSC)
144
145#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
146#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
147
148#define PREEMPT_ACTIVE 0x10000000
149
150/* thread information allocation */
151#ifdef CONFIG_DEBUG_STACK_USAGE
152#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
153#else
154#define THREAD_FLAGS GFP_KERNEL
155#endif
156
157#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
158
159#define alloc_thread_info(tsk) \
160 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
161
162#ifdef CONFIG_X86_32
163
164#define STACK_WARN (THREAD_SIZE/8)
165/*
166 * macros/functions for gaining access to the thread information structure
167 *
168 * preempt_count needs to be 1 initially, until the scheduler is functional.
169 */
170#ifndef __ASSEMBLY__
171
172
173/* how to get the current stack pointer from C */
174register unsigned long current_stack_pointer asm("esp") __used;
175
176/* how to get the thread information struct from C */
177static inline struct thread_info *current_thread_info(void)
178{
179 return (struct thread_info *)
180 (current_stack_pointer & ~(THREAD_SIZE - 1));
181}
182
183#else /* !__ASSEMBLY__ */
184
185/* how to get the thread information struct from ASM */
186#define GET_THREAD_INFO(reg) \
187 movl $-THREAD_SIZE, reg; \
188 andl %esp, reg
189
190/* use this one if reg already contains %esp */
191#define GET_THREAD_INFO_WITH_ESP(reg) \
192 andl $-THREAD_SIZE, reg
193
194#endif
195
196#else /* X86_32 */
197
198#include <asm/pda.h>
199
200/*
201 * macros/functions for gaining access to the thread information structure
202 * preempt_count needs to be 1 initially, until the scheduler is functional.
203 */
204#ifndef __ASSEMBLY__
205static inline struct thread_info *current_thread_info(void)
206{
207 struct thread_info *ti;
208 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
209 return ti;
210}
211
212/* do not use in interrupt context */
213static inline struct thread_info *stack_thread_info(void)
214{
215 struct thread_info *ti;
216 asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
217 return ti;
218}
219
220#else /* !__ASSEMBLY__ */
221
222/* how to get the thread information struct from ASM */
223#define GET_THREAD_INFO(reg) \
224 movq %gs:pda_kernelstack,reg ; \
225 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
226
227#endif
228
229#endif /* !X86_32 */
230
231/*
232 * Thread-synchronous status.
233 *
234 * This is different from the flags in that nobody else
235 * ever touches our thread-synchronous status, so we don't
236 * have to worry about atomic accesses.
237 */
238#define TS_USEDFPU 0x0001 /* FPU was used by this task
239 this quantum (SMP) */
240#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
241#define TS_POLLING 0x0004 /* true if in idle loop
242 and not sleeping */
243#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
244#define TS_XSAVE 0x0010 /* Use xsave/xrstor */
245
246#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
247
248#ifndef __ASSEMBLY__
249#define HAVE_SET_RESTORE_SIGMASK 1
250static inline void set_restore_sigmask(void)
251{
252 struct thread_info *ti = current_thread_info();
253 ti->status |= TS_RESTORE_SIGMASK;
254 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
255}
256#endif /* !__ASSEMBLY__ */
257
258#ifndef __ASSEMBLY__
259extern void arch_task_cache_init(void);
260extern void free_thread_info(struct thread_info *ti);
261extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
262#define arch_task_cache_init arch_task_cache_init
263#endif
264#endif /* ASM_X86__THREAD_INFO_H */
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h
deleted file mode 100644
index 3e724eef7ac4..000000000000
--- a/include/asm-x86/time.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef ASM_X86__TIME_H
2#define ASM_X86__TIME_H
3
4extern void hpet_time_init(void);
5
6#include <asm/mc146818rtc.h>
7#ifdef CONFIG_X86_32
8#include <linux/efi.h>
9
10static inline unsigned long native_get_wallclock(void)
11{
12 unsigned long retval;
13
14 if (efi_enabled)
15 retval = efi_get_time();
16 else
17 retval = mach_get_cmos_time();
18
19 return retval;
20}
21
22static inline int native_set_wallclock(unsigned long nowtime)
23{
24 int retval;
25
26 if (efi_enabled)
27 retval = efi_set_rtc_mmss(nowtime);
28 else
29 retval = mach_set_rtc_mmss(nowtime);
30
31 return retval;
32}
33
34#else
35extern void native_time_init_hook(void);
36
37static inline unsigned long native_get_wallclock(void)
38{
39 return mach_get_cmos_time();
40}
41
42static inline int native_set_wallclock(unsigned long nowtime)
43{
44 return mach_set_rtc_mmss(nowtime);
45}
46
47#endif
48
49extern void time_init(void);
50
51#ifdef CONFIG_PARAVIRT
52#include <asm/paravirt.h>
53#else /* !CONFIG_PARAVIRT */
54
55#define get_wallclock() native_get_wallclock()
56#define set_wallclock(x) native_set_wallclock(x)
57#define choose_time_init() hpet_time_init
58
59#endif /* CONFIG_PARAVIRT */
60
61extern unsigned long __init calibrate_cpu(void);
62
63#endif /* ASM_X86__TIME_H */
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h
deleted file mode 100644
index d0babce4b47a..000000000000
--- a/include/asm-x86/timer.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef ASM_X86__TIMER_H
2#define ASM_X86__TIMER_H
3#include <linux/init.h>
4#include <linux/pm.h>
5#include <linux/percpu.h>
6
7#define TICK_SIZE (tick_nsec / 1000)
8
9unsigned long long native_sched_clock(void);
10unsigned long native_calibrate_tsc(void);
11
12#ifdef CONFIG_X86_32
13extern int timer_ack;
14extern int recalibrate_cpu_khz(void);
15#endif /* CONFIG_X86_32 */
16
17extern int no_timer_check;
18
19#ifndef CONFIG_PARAVIRT
20#define calibrate_tsc() native_calibrate_tsc()
21#endif
22
23/* Accelerators for sched_clock()
24 * convert from cycles(64bits) => nanoseconds (64bits)
25 * basic equation:
26 * ns = cycles / (freq / ns_per_sec)
27 * ns = cycles * (ns_per_sec / freq)
28 * ns = cycles * (10^9 / (cpu_khz * 10^3))
29 * ns = cycles * (10^6 / cpu_khz)
30 *
31 * Then we use scaling math (suggested by george@mvista.com) to get:
32 * ns = cycles * (10^6 * SC / cpu_khz) / SC
33 * ns = cycles * cyc2ns_scale / SC
34 *
35 * And since SC is a constant power of two, we can convert the div
36 * into a shift.
37 *
38 * We can use a khz divisor instead of a mhz one to keep better precision, since
39 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
40 * (mathieu.desnoyers@polymtl.ca)
41 *
42 * -johnstul@us.ibm.com "math is hard, let's go shopping!"
43 */
44
45DECLARE_PER_CPU(unsigned long, cyc2ns);
46
47#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
48
49static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
50{
51 return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR;
52}
53
54static inline unsigned long long cycles_2_ns(unsigned long long cyc)
55{
56 unsigned long long ns;
57 unsigned long flags;
58
59 local_irq_save(flags);
60 ns = __cycles_2_ns(cyc);
61 local_irq_restore(flags);
62
63 return ns;
64}
65
66#endif /* ASM_X86__TIMER_H */
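
To make the cyc2ns scaling derivation in the deleted timer.h concrete, here is a hedged standalone sketch; the CPU frequency and cycle count are invented inputs, and only the fixed-point math mirrors the header:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* SC = 2^10, as in the header */

int main(void)
{
	unsigned long long cpu_khz = 2000000ULL;	/* hypothetical 2 GHz CPU */
	unsigned long long cyc2ns_scale =
		(1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;	/* 10^6 * SC / cpu_khz */
	unsigned long long cycles = 4000000ULL;		/* 4M cycles ~= 2 ms at 2 GHz */

	/* ns = cycles * cyc2ns_scale / SC, with the divide folded into a shift */
	unsigned long long ns = (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;

	printf("%llu cycles -> %llu ns\n", cycles, ns);	/* ~2000000 ns */
	return 0;
}

Here cyc2ns_scale comes out to 512, and 4000000 * 512 >> 10 = 2000000 ns, matching the expected 2 ms.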
diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h
deleted file mode 100644
index d1ce2416a5da..000000000000
--- a/include/asm-x86/timex.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/* x86 architecture timex specifications */
2#ifndef ASM_X86__TIMEX_H
3#define ASM_X86__TIMEX_H
4
5#include <asm/processor.h>
6#include <asm/tsc.h>
7
8#ifdef CONFIG_X86_ELAN
9# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
10#elif defined(CONFIG_X86_RDC321X)
11# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */
12#else
13# define PIT_TICK_RATE 1193182 /* Underlying HZ */
14#endif
15#define CLOCK_TICK_RATE PIT_TICK_RATE
16
17#define ARCH_HAS_READ_CURRENT_TIMER
18
19#endif /* ASM_X86__TIMEX_H */
diff --git a/include/asm-x86/tlb.h b/include/asm-x86/tlb.h
deleted file mode 100644
index db36e9e89e87..000000000000
--- a/include/asm-x86/tlb.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef ASM_X86__TLB_H
2#define ASM_X86__TLB_H
3
4#define tlb_start_vma(tlb, vma) do { } while (0)
5#define tlb_end_vma(tlb, vma) do { } while (0)
6#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
7#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
8
9#include <asm-generic/tlb.h>
10
11#endif /* ASM_X86__TLB_H */
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
deleted file mode 100644
index 3cdd08b5bdb7..000000000000
--- a/include/asm-x86/tlbflush.h
+++ /dev/null
@@ -1,178 +0,0 @@
1#ifndef ASM_X86__TLBFLUSH_H
2#define ASM_X86__TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6
7#include <asm/processor.h>
8#include <asm/system.h>
9
10#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h>
12#else
13#define __flush_tlb() __native_flush_tlb()
14#define __flush_tlb_global() __native_flush_tlb_global()
15#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
16#endif
17
18static inline void __native_flush_tlb(void)
19{
20 write_cr3(read_cr3());
21}
22
23static inline void __native_flush_tlb_global(void)
24{
25 unsigned long flags;
26 unsigned long cr4;
27
28 /*
29 * Read-modify-write to CR4 - protect it from preemption and
30 * from interrupts. (Use the raw variant because this code can
31 * be called from deep inside debugging code.)
32 */
33 raw_local_irq_save(flags);
34
35 cr4 = read_cr4();
36 /* clear PGE */
37 write_cr4(cr4 & ~X86_CR4_PGE);
38 /* write old PGE again and flush TLBs */
39 write_cr4(cr4);
40
41 raw_local_irq_restore(flags);
42}
43
44static inline void __native_flush_tlb_single(unsigned long addr)
45{
46 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
47}
48
49static inline void __flush_tlb_all(void)
50{
51 if (cpu_has_pge)
52 __flush_tlb_global();
53 else
54 __flush_tlb();
55}
56
57static inline void __flush_tlb_one(unsigned long addr)
58{
59 if (cpu_has_invlpg)
60 __flush_tlb_single(addr);
61 else
62 __flush_tlb();
63}
64
65#ifdef CONFIG_X86_32
66# define TLB_FLUSH_ALL 0xffffffff
67#else
68# define TLB_FLUSH_ALL -1ULL
69#endif
70
71/*
72 * TLB flushing:
73 *
74 * - flush_tlb() flushes the current mm struct TLBs
75 * - flush_tlb_all() flushes all processes TLBs
76 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
77 * - flush_tlb_page(vma, vmaddr) flushes one page
78 * - flush_tlb_range(vma, start, end) flushes a range of pages
79 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
80 * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
81 *
82 * ...but the i386 has somewhat limited TLB flushing capabilities,
83 * and page-granular flushes are available only on i486 and up.
84 *
85 * x86-64 can only flush individual pages or full VMs. For a range flush
86 * we always do the full VM. It might be worth checking whether a few
87 * INVLPGs in a row win for a small range.
88 */
89
90#ifndef CONFIG_SMP
91
92#define flush_tlb() __flush_tlb()
93#define flush_tlb_all() __flush_tlb_all()
94#define local_flush_tlb() __flush_tlb()
95
96static inline void flush_tlb_mm(struct mm_struct *mm)
97{
98 if (mm == current->active_mm)
99 __flush_tlb();
100}
101
102static inline void flush_tlb_page(struct vm_area_struct *vma,
103 unsigned long addr)
104{
105 if (vma->vm_mm == current->active_mm)
106 __flush_tlb_one(addr);
107}
108
109static inline void flush_tlb_range(struct vm_area_struct *vma,
110 unsigned long start, unsigned long end)
111{
112 if (vma->vm_mm == current->active_mm)
113 __flush_tlb();
114}
115
116static inline void native_flush_tlb_others(const cpumask_t *cpumask,
117 struct mm_struct *mm,
118 unsigned long va)
119{
120}
121
122static inline void reset_lazy_tlbstate(void)
123{
124}
125
126#else /* SMP */
127
128#include <asm/smp.h>
129
130#define local_flush_tlb() __flush_tlb()
131
132extern void flush_tlb_all(void);
133extern void flush_tlb_current_task(void);
134extern void flush_tlb_mm(struct mm_struct *);
135extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
136
137#define flush_tlb() flush_tlb_current_task()
138
139static inline void flush_tlb_range(struct vm_area_struct *vma,
140 unsigned long start, unsigned long end)
141{
142 flush_tlb_mm(vma->vm_mm);
143}
144
145void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
146 unsigned long va);
147
148#define TLBSTATE_OK 1
149#define TLBSTATE_LAZY 2
150
151#ifdef CONFIG_X86_32
152struct tlb_state {
153 struct mm_struct *active_mm;
154 int state;
155 char __cacheline_padding[L1_CACHE_BYTES-8];
156};
157DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
158
159void reset_lazy_tlbstate(void);
160#else
161static inline void reset_lazy_tlbstate(void)
162{
163}
164#endif
165
166#endif /* SMP */
167
168#ifndef CONFIG_PARAVIRT
169#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
170#endif
171
172static inline void flush_tlb_kernel_range(unsigned long start,
173 unsigned long end)
174{
175 flush_tlb_all();
176}
177
178#endif /* ASM_X86__TLBFLUSH_H */
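
As a hedged illustration of the interface list in the tlbflush.h comment above, this kernel-context sketch (not standalone-buildable; the scenario and function name are invented, the flush calls are the ones declared in the header) shows which primitive matches which scope:

/* Invented scenario: several kinds of mapping change in one place. */
static void example_invalidate(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long addr,
			       unsigned long start, unsigned long end)
{
	flush_tlb_page(vma, addr);		/* one user page changed */
	flush_tlb_range(vma, start, end);	/* a span of user pages */
	flush_tlb_mm(mm);			/* the whole address space */
	flush_tlb_kernel_range(start, end);	/* kernel mappings (a full flush here) */
}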
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
deleted file mode 100644
index 7eca9bc022b2..000000000000
--- a/include/asm-x86/topology.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/*
2 * Written by: Matthew Dobson, IBM Corporation
3 *
4 * Copyright (C) 2002, IBM Corp.
5 *
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16 * NON INFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * Send feedback to <colpatch@us.ibm.com>
24 */
25#ifndef ASM_X86__TOPOLOGY_H
26#define ASM_X86__TOPOLOGY_H
27
28#ifdef CONFIG_X86_32
29# ifdef CONFIG_X86_HT
30# define ENABLE_TOPO_DEFINES
31# endif
32#else
33# ifdef CONFIG_SMP
34# define ENABLE_TOPO_DEFINES
35# endif
36#endif
37
38/* Node not present */
39#define NUMA_NO_NODE (-1)
40
41#ifdef CONFIG_NUMA
42#include <linux/cpumask.h>
43#include <asm/mpspec.h>
44
45#ifdef CONFIG_X86_32
46
47/* Mappings between node number and cpus on that node. */
48extern cpumask_t node_to_cpumask_map[];
49
50/* Mappings between logical cpu number and node number */
51extern int cpu_to_node_map[];
52
53/* Returns the number of the node containing CPU 'cpu' */
54static inline int cpu_to_node(int cpu)
55{
56 return cpu_to_node_map[cpu];
57}
58#define early_cpu_to_node(cpu) cpu_to_node(cpu)
59
60/* Returns a bitmask of CPUs on Node 'node'.
61 *
62 * Side note: this function creates the returned cpumask on the stack
63 * so with a high NR_CPUS count it uses excessive stack space. The
64 * node_to_cpumask_ptr function should be used whenever possible.
65 */
66static inline cpumask_t node_to_cpumask(int node)
67{
68 return node_to_cpumask_map[node];
69}
70
71#else /* CONFIG_X86_64 */
72
73/* Mappings between node number and cpus on that node. */
74extern cpumask_t *node_to_cpumask_map;
75
76/* Mappings between logical cpu number and node number */
77DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
78
79/* Returns the number of the current Node. */
80#define numa_node_id() read_pda(nodenumber)
81
82#ifdef CONFIG_DEBUG_PER_CPU_MAPS
83extern int cpu_to_node(int cpu);
84extern int early_cpu_to_node(int cpu);
85extern const cpumask_t *_node_to_cpumask_ptr(int node);
86extern cpumask_t node_to_cpumask(int node);
87
88#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
89
90/* Returns the number of the node containing CPU 'cpu' */
91static inline int cpu_to_node(int cpu)
92{
93 return per_cpu(x86_cpu_to_node_map, cpu);
94}
95
96/* Same function but used if called before per_cpu areas are setup */
97static inline int early_cpu_to_node(int cpu)
98{
99 if (early_per_cpu_ptr(x86_cpu_to_node_map))
100 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
101
102 return per_cpu(x86_cpu_to_node_map, cpu);
103}
104
105/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
106static inline const cpumask_t *_node_to_cpumask_ptr(int node)
107{
108 return &node_to_cpumask_map[node];
109}
110
111/* Returns a bitmask of CPUs on Node 'node'. */
112static inline cpumask_t node_to_cpumask(int node)
113{
114 return node_to_cpumask_map[node];
115}
116
117#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
118
119/* Replace default node_to_cpumask_ptr with optimized version */
120#define node_to_cpumask_ptr(v, node) \
121 const cpumask_t *v = _node_to_cpumask_ptr(node)
122
123#define node_to_cpumask_ptr_next(v, node) \
124 v = _node_to_cpumask_ptr(node)
125
126#endif /* CONFIG_X86_64 */
127
128/*
129 * Returns the number of the node containing Node 'node'. This
130 * architecture is flat, so it is a pretty simple function!
131 */
132#define parent_node(node) (node)
133
134#define pcibus_to_node(bus) __pcibus_to_node(bus)
135#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
136
137#ifdef CONFIG_X86_32
138extern unsigned long node_start_pfn[];
139extern unsigned long node_end_pfn[];
140extern unsigned long node_remap_size[];
141#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])
142
143# define SD_CACHE_NICE_TRIES 1
144# define SD_IDLE_IDX 1
145# define SD_NEWIDLE_IDX 2
146# define SD_FORKEXEC_IDX 0
147
148#else
149
150# define SD_CACHE_NICE_TRIES 2
151# define SD_IDLE_IDX 2
152# define SD_NEWIDLE_IDX 2
153# define SD_FORKEXEC_IDX 1
154
155#endif
156
157/* sched_domains SD_NODE_INIT for NUMAQ machines */
158#define SD_NODE_INIT (struct sched_domain) { \
159 .min_interval = 8, \
160 .max_interval = 32, \
161 .busy_factor = 32, \
162 .imbalance_pct = 125, \
163 .cache_nice_tries = SD_CACHE_NICE_TRIES, \
164 .busy_idx = 3, \
165 .idle_idx = SD_IDLE_IDX, \
166 .newidle_idx = SD_NEWIDLE_IDX, \
167 .wake_idx = 1, \
168 .forkexec_idx = SD_FORKEXEC_IDX, \
169 .flags = SD_LOAD_BALANCE \
170 | SD_BALANCE_EXEC \
171 | SD_BALANCE_FORK \
172 | SD_SERIALIZE \
173 | SD_WAKE_BALANCE, \
174 .last_balance = jiffies, \
175 .balance_interval = 1, \
176}
177
178#ifdef CONFIG_X86_64_ACPI_NUMA
179extern int __node_distance(int, int);
180#define node_distance(a, b) __node_distance(a, b)
181#endif
182
183#else /* !CONFIG_NUMA */
184
185#define numa_node_id() 0
186#define cpu_to_node(cpu) 0
187#define early_cpu_to_node(cpu) 0
188
189static inline const cpumask_t *_node_to_cpumask_ptr(int node)
190{
191 return &cpu_online_map;
192}
193static inline cpumask_t node_to_cpumask(int node)
194{
195 return cpu_online_map;
196}
197static inline int node_to_first_cpu(int node)
198{
199 return first_cpu(cpu_online_map);
200}
201
202/* Replace default node_to_cpumask_ptr with optimized version */
203#define node_to_cpumask_ptr(v, node) \
204 const cpumask_t *v = _node_to_cpumask_ptr(node)
205
206#define node_to_cpumask_ptr_next(v, node) \
207 v = _node_to_cpumask_ptr(node)
208#endif
209
210#include <asm-generic/topology.h>
211
212#ifdef CONFIG_NUMA
213/* Returns the number of the first CPU on Node 'node'. */
214static inline int node_to_first_cpu(int node)
215{
216 node_to_cpumask_ptr(mask, node);
217 return first_cpu(*mask);
218}
219#endif
220
221extern cpumask_t cpu_coregroup_map(int cpu);
222
223#ifdef ENABLE_TOPO_DEFINES
224#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
225#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
226#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
227#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
228
229/* indicates that pointers to the topology cpumask_t maps are valid */
230#define arch_provides_topology_pointers yes
231#endif
232
233static inline void arch_fix_phys_package_id(int num, u32 slot)
234{
235}
236
237struct pci_bus;
238void set_pci_bus_resources_arch_default(struct pci_bus *b);
239
240#ifdef CONFIG_SMP
241#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
242#define smt_capable() (smp_num_siblings > 1)
243#endif
244
245#ifdef CONFIG_NUMA
246extern int get_mp_bus_to_node(int busnum);
247extern void set_mp_bus_to_node(int busnum, int node);
248#else
249static inline int get_mp_bus_to_node(int busnum)
250{
251 return 0;
252}
253static inline void set_mp_bus_to_node(int busnum, int node)
254{
255}
256#endif
257
258#endif /* ASM_X86__TOPOLOGY_H */
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h
deleted file mode 100644
index 0406bbd898a9..000000000000
--- a/include/asm-x86/trampoline.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef ASM_X86__TRAMPOLINE_H
2#define ASM_X86__TRAMPOLINE_H
3
4#ifndef __ASSEMBLY__
5
6/*
7 * Trampoline 80x86 program as an array.
8 */
9extern const unsigned char trampoline_data [];
10extern const unsigned char trampoline_end [];
11extern unsigned char *trampoline_base;
12
13extern unsigned long init_rsp;
14extern unsigned long initial_code;
15
16#define TRAMPOLINE_BASE 0x6000
17extern unsigned long setup_trampoline(void);
18
19#endif /* __ASSEMBLY__ */
20
21#endif /* ASM_X86__TRAMPOLINE_H */
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h
deleted file mode 100644
index 6c3dc2c65751..000000000000
--- a/include/asm-x86/traps.h
+++ /dev/null
@@ -1,81 +0,0 @@
1#ifndef ASM_X86__TRAPS_H
2#define ASM_X86__TRAPS_H
3
4#include <asm/debugreg.h>
5
6#ifdef CONFIG_X86_32
7#define dotraplinkage
8#else
9#define dotraplinkage asmlinkage
10#endif
11
12asmlinkage void divide_error(void);
13asmlinkage void debug(void);
14asmlinkage void nmi(void);
15asmlinkage void int3(void);
16asmlinkage void overflow(void);
17asmlinkage void bounds(void);
18asmlinkage void invalid_op(void);
19asmlinkage void device_not_available(void);
20#ifdef CONFIG_X86_64
21asmlinkage void double_fault(void);
22#endif
23asmlinkage void coprocessor_segment_overrun(void);
24asmlinkage void invalid_TSS(void);
25asmlinkage void segment_not_present(void);
26asmlinkage void stack_segment(void);
27asmlinkage void general_protection(void);
28asmlinkage void page_fault(void);
29asmlinkage void spurious_interrupt_bug(void);
30asmlinkage void coprocessor_error(void);
31asmlinkage void alignment_check(void);
32#ifdef CONFIG_X86_MCE
33asmlinkage void machine_check(void);
34#endif /* CONFIG_X86_MCE */
35asmlinkage void simd_coprocessor_error(void);
36
37dotraplinkage void do_divide_error(struct pt_regs *, long);
38dotraplinkage void do_debug(struct pt_regs *, long);
39dotraplinkage void do_nmi(struct pt_regs *, long);
40dotraplinkage void do_int3(struct pt_regs *, long);
41dotraplinkage void do_overflow(struct pt_regs *, long);
42dotraplinkage void do_bounds(struct pt_regs *, long);
43dotraplinkage void do_invalid_op(struct pt_regs *, long);
44dotraplinkage void do_device_not_available(struct pt_regs *, long);
45dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
46dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
47dotraplinkage void do_segment_not_present(struct pt_regs *, long);
48dotraplinkage void do_stack_segment(struct pt_regs *, long);
49dotraplinkage void do_general_protection(struct pt_regs *, long);
50dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
51dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
52dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
53dotraplinkage void do_alignment_check(struct pt_regs *, long);
54#ifdef CONFIG_X86_MCE
55dotraplinkage void do_machine_check(struct pt_regs *, long);
56#endif
57dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
58#ifdef CONFIG_X86_32
59dotraplinkage void do_iret_error(struct pt_regs *, long);
60#endif
61
62static inline int get_si_code(unsigned long condition)
63{
64 if (condition & DR_STEP)
65 return TRAP_TRACE;
66 else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3))
67 return TRAP_HWBKPT;
68 else
69 return TRAP_BRKPT;
70}
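
The DR6-to-si_code mapping in get_si_code() can be exercised in user space; the DR_* and TRAP_* values below are restated from their usual definitions (asm/debugreg.h and the generic siginfo codes) so the sketch is self-contained:

#include <stdio.h>

#define DR_TRAP0 0x1
#define DR_TRAP1 0x2
#define DR_TRAP2 0x4
#define DR_TRAP3 0x8
#define DR_STEP  0x4000

#define TRAP_BRKPT  1
#define TRAP_TRACE  2
#define TRAP_HWBKPT 4

static int get_si_code(unsigned long condition)
{
	if (condition & DR_STEP)
		return TRAP_TRACE;
	else if (condition & (DR_TRAP0 | DR_TRAP1 | DR_TRAP2 | DR_TRAP3))
		return TRAP_HWBKPT;
	else
		return TRAP_BRKPT;
}

int main(void)
{
	printf("%d %d %d\n",
	       get_si_code(DR_STEP),	/* 2: single step */
	       get_si_code(DR_TRAP1),	/* 4: hardware breakpoint */
	       get_si_code(0));		/* 1: plain int3 */
	return 0;
}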
71
72extern int panic_on_unrecovered_nmi;
73extern int kstack_depth_to_print;
74
75#ifdef CONFIG_X86_32
76void math_error(void __user *);
77unsigned long patch_espfix_desc(unsigned long, unsigned long);
78asmlinkage void math_emulate(long);
79#endif
80
81#endif /* ASM_X86__TRAPS_H */
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
deleted file mode 100644
index ad0f5c41e78c..000000000000
--- a/include/asm-x86/tsc.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * x86 TSC related functions
3 */
4#ifndef ASM_X86__TSC_H
5#define ASM_X86__TSC_H
6
7#include <asm/processor.h>
8
9#define NS_SCALE 10 /* 2^10, carefully chosen */
10#define US_SCALE 32 /* 2^32, arbitrarily chosen */
11
12/*
13 * Standard way to access the cycle counter.
14 */
15typedef unsigned long long cycles_t;
16
17extern unsigned int cpu_khz;
18extern unsigned int tsc_khz;
19
20extern void disable_TSC(void);
21
22static inline cycles_t get_cycles(void)
23{
24 unsigned long long ret = 0;
25
26#ifndef CONFIG_X86_TSC
27 if (!cpu_has_tsc)
28 return 0;
29#endif
30 rdtscll(ret);
31
32 return ret;
33}
34
35static __always_inline cycles_t vget_cycles(void)
36{
37 /*
38 * We only do VDSOs on TSC-capable CPUs, so this shouldn't
39 * access boot_cpu_data (which is not VDSO-safe):
40 */
41#ifndef CONFIG_X86_TSC
42 if (!cpu_has_tsc)
43 return 0;
44#endif
45 return (cycles_t)__native_read_tsc();
46}
47
48extern void tsc_init(void);
49extern void mark_tsc_unstable(char *reason);
50extern int unsynchronized_tsc(void);
51int check_tsc_unstable(void);
52
53/*
54 * Boot-time check whether the TSCs are synchronized across
55 * all CPUs/cores:
56 */
57extern void check_tsc_sync_source(int cpu);
58extern void check_tsc_sync_target(void);
59
60extern int notsc_setup(char *);
61
62#endif /* ASM_X86__TSC_H */
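
For reference, a user-space sketch of reading the TSC the way get_cycles() does; it assumes an x86 target and GCC-style inline asm (rdtscll itself is kernel-only):

#include <stdio.h>

static unsigned long long read_tsc(void)
{
	unsigned int lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));	/* EDX:EAX = TSC */
	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	unsigned long long a = read_tsc();
	unsigned long long b = read_tsc();

	printf("delta = %llu cycles\n", b - a);
	return 0;
}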
diff --git a/include/asm-x86/types.h b/include/asm-x86/types.h
deleted file mode 100644
index e78b52e17444..000000000000
--- a/include/asm-x86/types.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef ASM_X86__TYPES_H
2#define ASM_X86__TYPES_H
3
4#include <asm-generic/int-ll64.h>
5
6#ifndef __ASSEMBLY__
7
8typedef unsigned short umode_t;
9
10#endif /* __ASSEMBLY__ */
11
12/*
13 * These aren't exported outside the kernel to avoid name space clashes
14 */
15#ifdef __KERNEL__
16
17#ifdef CONFIG_X86_32
18# define BITS_PER_LONG 32
19#else
20# define BITS_PER_LONG 64
21#endif
22
23#ifndef __ASSEMBLY__
24
25typedef u64 dma64_addr_t;
26#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
27/* DMA addresses come in 32-bit and 64-bit flavours. */
28typedef u64 dma_addr_t;
29#else
30typedef u32 dma_addr_t;
31#endif
32
33#endif /* __ASSEMBLY__ */
34#endif /* __KERNEL__ */
35
36#endif /* ASM_X86__TYPES_H */
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
deleted file mode 100644
index 48ebc0ad40ec..000000000000
--- a/include/asm-x86/uaccess.h
+++ /dev/null
@@ -1,454 +0,0 @@
1#ifndef ASM_X86__UACCESS_H
2#define ASM_X86__UACCESS_H
3/*
4 * User space memory access functions
5 */
6#include <linux/errno.h>
7#include <linux/compiler.h>
8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h>
11#include <asm/asm.h>
12#include <asm/page.h>
13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27#define KERNEL_DS MAKE_MM_SEG(-1UL)
28#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
29
30#define get_ds() (KERNEL_DS)
31#define get_fs() (current_thread_info()->addr_limit)
32#define set_fs(x) (current_thread_info()->addr_limit = (x))
33
34#define segment_eq(a, b) ((a).seg == (b).seg)
35
36#define __addr_ok(addr) \
37 ((unsigned long __force)(addr) < \
38 (current_thread_info()->addr_limit.seg))
39
40/*
41 * Test whether a block of memory is a valid user space address.
42 * Returns 0 if the range is valid, nonzero otherwise.
43 *
44 * This is equivalent to the following test:
45 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
46 *
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */
49
50#define __range_not_ok(addr, size) \
51({ \
52 unsigned long flag, roksum; \
53 __chk_user_ptr(addr); \
54 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
55 : "=&r" (flag), "=r" (roksum) \
56 : "1" (addr), "g" ((long)(size)), \
57 "rm" (current_thread_info()->addr_limit.seg)); \
58 flag; \
59})
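
A portable sketch of the overflow-safe check the asm above implements: addr + size must neither wrap (the carry is the "33rd bit") nor exceed the limit. uint32_t stands in for the 32-bit case, and the limit is a made-up stand-in for addr_limit.seg:

#include <stdio.h>
#include <stdint.h>

static int range_not_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
	uint32_t sum = addr + size;

	if (sum < addr)		/* carry out of the add: the 33rd bit was set */
		return 1;
	return sum > limit;	/* the asm folds this in with cmp/sbb */
}

int main(void)
{
	uint32_t limit = 0xc0000000;	/* e.g. PAGE_OFFSET on a 32-bit split */

	printf("%d\n", range_not_ok(0xbfffff00, 0x100, limit));	/* 0: fits */
	printf("%d\n", range_not_ok(0xbfffff00, 0x200, limit));	/* 1: past limit */
	printf("%d\n", range_not_ok(0xffffff00, 0x200, limit));	/* 1: wraps */
	return 0;
}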
60
61/**
62 * access_ok: - Checks if a user space pointer is valid
63 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
64 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
65 * to write to a block, it is always safe to read from it.
66 * @addr: User space pointer to start of block to check
67 * @size: Size of block to check
68 *
69 * Context: User context only. This function may sleep.
70 *
71 * Checks if a pointer to a block of memory in user space is valid.
72 *
73 * Returns true (nonzero) if the memory block may be valid, false (zero)
74 * if it is definitely invalid.
75 *
76 * Note that, depending on architecture, this function probably just
77 * checks that the pointer is in the user space range - after calling
78 * this function, memory access functions may still return -EFAULT.
79 */
80#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
81
82/*
83 * The exception table consists of pairs of addresses: the first is the
84 * address of an instruction that is allowed to fault, and the second is
85 * the address at which the program should continue. No registers are
86 * modified, so it is entirely up to the continuation code to figure out
87 * what to do.
88 *
89 * All the routines below use bits of fixup code that are out of line
90 * with the main instruction path. This means when everything is well,
91 * we don't even have to jump over them. Further, they do not intrude
92 * on our cache or tlb entries.
93 */
94
95struct exception_table_entry {
96 unsigned long insn, fixup;
97};
98
99extern int fixup_exception(struct pt_regs *regs);
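
A minimal model of the lookup that fixup_exception() performs over the table described above; the table contents are invented, and the real kernel does a binary search over a sorted table where this sketch uses a linear scan for brevity:

#include <stdio.h>

struct exception_table_entry {
	unsigned long insn, fixup;
};

static unsigned long search_fixup(const struct exception_table_entry *tbl,
				  int n, unsigned long fault_ip)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].insn == fault_ip)
			return tbl[i].fixup;	/* resume here instead of oopsing */
	return 0;				/* no entry: a genuine kernel fault */
}

int main(void)
{
	struct exception_table_entry tbl[] = {
		{ 0x1000, 0x2000 },
		{ 0x1008, 0x2010 },
	};

	printf("%#lx\n", search_fixup(tbl, 2, 0x1008));	/* 0x2010 */
	printf("%#lx\n", search_fixup(tbl, 2, 0x1004));	/* 0: not in table */
	return 0;
}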
100
101/*
102 * These are the main single-value transfer routines. They automatically
103 * use the right size if we just have the right pointer type.
104 *
105 * This gets kind of ugly. We want to return _two_ values in "get_user()"
106 * and yet we don't want to pass pointers, because that costs too much
107 * performance. Thus we have a few rather ugly macros here,
108 * and hide all the ugliness from the user.
109 *
110 * The "__xxx" versions of the user access functions do not verify
111 * the address space; that check must have been done previously
112 * with a separate "access_ok()" call (this is used when we do multiple
113 * accesses to the same area of user memory).
114 */
115
116extern int __get_user_1(void);
117extern int __get_user_2(void);
118extern int __get_user_4(void);
119extern int __get_user_8(void);
120extern int __get_user_bad(void);
121
122#define __get_user_x(size, ret, x, ptr) \
123 asm volatile("call __get_user_" #size \
124 : "=a" (ret),"=d" (x) \
125 : "0" (ptr)) \
126
127/* Careful: we have to cast the result to the type of the pointer
128 * for sign reasons */
129
130/**
131 * get_user: - Get a simple variable from user space.
132 * @x: Variable to store result.
133 * @ptr: Source address, in user space.
134 *
135 * Context: User context only. This function may sleep.
136 *
137 * This macro copies a single simple variable from user space to kernel
138 * space. It supports simple types like char and int, but not larger
139 * data types like structures or arrays.
140 *
141 * @ptr must have pointer-to-simple-variable type, and the result of
142 * dereferencing @ptr must be assignable to @x without a cast.
143 *
144 * Returns zero on success, or -EFAULT on error.
145 * On error, the variable @x is set to zero.
146 */
147#ifdef CONFIG_X86_32
148#define __get_user_8(__ret_gu, __val_gu, ptr) \
149 __get_user_x(X, __ret_gu, __val_gu, ptr)
150#else
151#define __get_user_8(__ret_gu, __val_gu, ptr) \
152 __get_user_x(8, __ret_gu, __val_gu, ptr)
153#endif
154
155#define get_user(x, ptr) \
156({ \
157 int __ret_gu; \
158 unsigned long __val_gu; \
159 __chk_user_ptr(ptr); \
160 switch (sizeof(*(ptr))) { \
161 case 1: \
162 __get_user_x(1, __ret_gu, __val_gu, ptr); \
163 break; \
164 case 2: \
165 __get_user_x(2, __ret_gu, __val_gu, ptr); \
166 break; \
167 case 4: \
168 __get_user_x(4, __ret_gu, __val_gu, ptr); \
169 break; \
170 case 8: \
171 __get_user_8(__ret_gu, __val_gu, ptr); \
172 break; \
173 default: \
174 __get_user_x(X, __ret_gu, __val_gu, ptr); \
175 break; \
176 } \
177 (x) = (__typeof__(*(ptr)))__val_gu; \
178 __ret_gu; \
179})
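
The "cast the result to the type of the pointer for sign reasons" comment above is easy to see in plain user-space C: get_user() transports the value through an unsigned long (__val_gu), and without the final cast a negative value would come back zero-extended:

#include <stdio.h>

int main(void)
{
	signed char c = -5;
	unsigned long raw = (unsigned char)c;	/* what __val_gu ends up holding */

	/* without the cast the caller would see 251, not -5 */
	printf("raw=%lu cast=%d\n", raw, (signed char)raw);
	return 0;
}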
180
181#define __put_user_x(size, x, ptr, __ret_pu) \
182 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
183 :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
184
185
186
187#ifdef CONFIG_X86_32
188#define __put_user_u64(x, addr, err) \
189 asm volatile("1: movl %%eax,0(%2)\n" \
190 "2: movl %%edx,4(%2)\n" \
191 "3:\n" \
192 ".section .fixup,\"ax\"\n" \
193 "4: movl %3,%0\n" \
194 " jmp 3b\n" \
195 ".previous\n" \
196 _ASM_EXTABLE(1b, 4b) \
197 _ASM_EXTABLE(2b, 4b) \
198 : "=r" (err) \
199 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
200
201#define __put_user_x8(x, ptr, __ret_pu) \
202 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
203 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
204#else
205#define __put_user_u64(x, ptr, retval) \
206 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
207#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
208#endif
209
210extern void __put_user_bad(void);
211
212/*
213 * Strange magic calling convention: pointer in %ecx,
214 * value in %eax(:%edx), return value in %eax. clobbers %rbx
215 */
216extern void __put_user_1(void);
217extern void __put_user_2(void);
218extern void __put_user_4(void);
219extern void __put_user_8(void);
220
221#ifdef CONFIG_X86_WP_WORKS_OK
222
223/**
224 * put_user: - Write a simple value into user space.
225 * @x: Value to copy to user space.
226 * @ptr: Destination address, in user space.
227 *
228 * Context: User context only. This function may sleep.
229 *
230 * This macro copies a single simple value from kernel space to user
231 * space. It supports simple types like char and int, but not larger
232 * data types like structures or arrays.
233 *
234 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
235 * to the result of dereferencing @ptr.
236 *
237 * Returns zero on success, or -EFAULT on error.
238 */
239#define put_user(x, ptr) \
240({ \
241 int __ret_pu; \
242 __typeof__(*(ptr)) __pu_val; \
243 __chk_user_ptr(ptr); \
244 __pu_val = x; \
245 switch (sizeof(*(ptr))) { \
246 case 1: \
247 __put_user_x(1, __pu_val, ptr, __ret_pu); \
248 break; \
249 case 2: \
250 __put_user_x(2, __pu_val, ptr, __ret_pu); \
251 break; \
252 case 4: \
253 __put_user_x(4, __pu_val, ptr, __ret_pu); \
254 break; \
255 case 8: \
256 __put_user_x8(__pu_val, ptr, __ret_pu); \
257 break; \
258 default: \
259 __put_user_x(X, __pu_val, ptr, __ret_pu); \
260 break; \
261 } \
262 __ret_pu; \
263})
264
265#define __put_user_size(x, ptr, size, retval, errret) \
266do { \
267 retval = 0; \
268 __chk_user_ptr(ptr); \
269 switch (size) { \
270 case 1: \
271 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
272 break; \
273 case 2: \
274 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
275 break; \
276 case 4: \
277 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
278 break; \
279 case 8: \
280 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
281 break; \
282 default: \
283 __put_user_bad(); \
284 } \
285} while (0)
286
287#else
288
289#define __put_user_size(x, ptr, size, retval, errret) \
290do { \
291 __typeof__(*(ptr))__pus_tmp = x; \
292 retval = 0; \
293 \
294 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
295 retval = errret; \
296} while (0)
297
298#define put_user(x, ptr) \
299({ \
300 int __ret_pu; \
301 __typeof__(*(ptr))__pus_tmp = x; \
302 __ret_pu = 0; \
303 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
304 sizeof(*(ptr))) != 0)) \
305 __ret_pu = -EFAULT; \
306 __ret_pu; \
307})
308#endif
309
310#ifdef CONFIG_X86_32
311#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
312#else
313#define __get_user_asm_u64(x, ptr, retval, errret) \
314 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
315#endif
316
317#define __get_user_size(x, ptr, size, retval, errret) \
318do { \
319 retval = 0; \
320 __chk_user_ptr(ptr); \
321 switch (size) { \
322 case 1: \
323 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
324 break; \
325 case 2: \
326 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
327 break; \
328 case 4: \
329 __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
330 break; \
331 case 8: \
332 __get_user_asm_u64(x, ptr, retval, errret); \
333 break; \
334 default: \
335 (x) = __get_user_bad(); \
336 } \
337} while (0)
338
339#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
340 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
341 "2:\n" \
342 ".section .fixup,\"ax\"\n" \
343 "3: mov %3,%0\n" \
344 " xor"itype" %"rtype"1,%"rtype"1\n" \
345 " jmp 2b\n" \
346 ".previous\n" \
347 _ASM_EXTABLE(1b, 3b) \
348 : "=r" (err), ltype(x) \
349 : "m" (__m(addr)), "i" (errret), "0" (err))
350
351#define __put_user_nocheck(x, ptr, size) \
352({ \
353 long __pu_err; \
354 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
355 __pu_err; \
356})
357
358#define __get_user_nocheck(x, ptr, size) \
359({ \
360 long __gu_err; \
361 unsigned long __gu_val; \
362 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
363 (x) = (__force __typeof__(*(ptr)))__gu_val; \
364 __gu_err; \
365})
366
367/* FIXME: this hack is definitely wrong -AK */
368struct __large_struct { unsigned long buf[100]; };
369#define __m(x) (*(struct __large_struct __user *)(x))
370
371/*
372 * Tell gcc we read from memory instead of writing: this is because
373 * we do not write to any memory gcc knows about, so there are no
374 * aliasing issues.
375 */
376#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
377 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
378 "2:\n" \
379 ".section .fixup,\"ax\"\n" \
380 "3: mov %3,%0\n" \
381 " jmp 2b\n" \
382 ".previous\n" \
383 _ASM_EXTABLE(1b, 3b) \
384 : "=r"(err) \
385 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
386/**
387 * __get_user: - Get a simple variable from user space, with less checking.
388 * @x: Variable to store result.
389 * @ptr: Source address, in user space.
390 *
391 * Context: User context only. This function may sleep.
392 *
393 * This macro copies a single simple variable from user space to kernel
394 * space. It supports simple types like char and int, but not larger
395 * data types like structures or arrays.
396 *
397 * @ptr must have pointer-to-simple-variable type, and the result of
398 * dereferencing @ptr must be assignable to @x without a cast.
399 *
400 * Caller must check the pointer with access_ok() before calling this
401 * function.
402 *
403 * Returns zero on success, or -EFAULT on error.
404 * On error, the variable @x is set to zero.
405 */
406
407#define __get_user(x, ptr) \
408 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
409/**
410 * __put_user: - Write a simple value into user space, with less checking.
411 * @x: Value to copy to user space.
412 * @ptr: Destination address, in user space.
413 *
414 * Context: User context only. This function may sleep.
415 *
416 * This macro copies a single simple value from kernel space to user
417 * space. It supports simple types like char and int, but not larger
418 * data types like structures or arrays.
419 *
420 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
421 * to the result of dereferencing @ptr.
422 *
423 * Caller must check the pointer with access_ok() before calling this
424 * function.
425 *
426 * Returns zero on success, or -EFAULT on error.
427 */
428
429#define __put_user(x, ptr) \
430 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
431
432#define __get_user_unaligned __get_user
433#define __put_user_unaligned __put_user
434
435/*
436 * movsl can be slow when source and dest are not both 8-byte aligned
437 */
438#ifdef CONFIG_X86_INTEL_USERCOPY
439extern struct movsl_mask {
440 int mask;
441} ____cacheline_aligned_in_smp movsl_mask;
442#endif
443
444#define ARCH_HAS_NOCACHE_UACCESS 1
445
446#ifdef CONFIG_X86_32
447# include "uaccess_32.h"
448#else
449# define ARCH_HAS_SEARCH_EXTABLE
450# include "uaccess_64.h"
451#endif
452
453#endif /* ASM_X86__UACCESS_H */
454
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
deleted file mode 100644
index 6b5b57d9c6d1..000000000000
--- a/include/asm-x86/uaccess_32.h
+++ /dev/null
@@ -1,218 +0,0 @@
1#ifndef ASM_X86__UACCESS_32_H
2#define ASM_X86__UACCESS_32_H
3
4/*
5 * User space memory access functions
6 */
7#include <linux/errno.h>
8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h>
11#include <asm/asm.h>
12#include <asm/page.h>
13
14unsigned long __must_check __copy_to_user_ll
15 (void __user *to, const void *from, unsigned long n);
16unsigned long __must_check __copy_from_user_ll
17 (void *to, const void __user *from, unsigned long n);
18unsigned long __must_check __copy_from_user_ll_nozero
19 (void *to, const void __user *from, unsigned long n);
20unsigned long __must_check __copy_from_user_ll_nocache
21 (void *to, const void __user *from, unsigned long n);
22unsigned long __must_check __copy_from_user_ll_nocache_nozero
23 (void *to, const void __user *from, unsigned long n);
24
25/**
26 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
27 * @to: Destination address, in user space.
28 * @from: Source address, in kernel space.
29 * @n: Number of bytes to copy.
30 *
31 * Context: User context only.
32 *
33 * Copy data from kernel space to user space. Caller must check
34 * the specified block with access_ok() before calling this function.
35 * The caller should also make sure the user space address is pinned
36 * so that we don't take a page fault and sleep.
37 *
38 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
39 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
40 * If a store crosses a page boundary and gets a fault, the x86 will not write
41 * anything, so this is accurate.
42 */
43
44static __always_inline unsigned long __must_check
45__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
46{
47 if (__builtin_constant_p(n)) {
48 unsigned long ret;
49
50 switch (n) {
51 case 1:
52 __put_user_size(*(u8 *)from, (u8 __user *)to,
53 1, ret, 1);
54 return ret;
55 case 2:
56 __put_user_size(*(u16 *)from, (u16 __user *)to,
57 2, ret, 2);
58 return ret;
59 case 4:
60 __put_user_size(*(u32 *)from, (u32 __user *)to,
61 4, ret, 4);
62 return ret;
63 }
64 }
65 return __copy_to_user_ll(to, from, n);
66}
67
68/**
69 * __copy_to_user: - Copy a block of data into user space, with less checking.
70 * @to: Destination address, in user space.
71 * @from: Source address, in kernel space.
72 * @n: Number of bytes to copy.
73 *
74 * Context: User context only. This function may sleep.
75 *
76 * Copy data from kernel space to user space. Caller must check
77 * the specified block with access_ok() before calling this function.
78 *
79 * Returns number of bytes that could not be copied.
80 * On success, this will be zero.
81 */
82static __always_inline unsigned long __must_check
83__copy_to_user(void __user *to, const void *from, unsigned long n)
84{
85 might_sleep();
86 return __copy_to_user_inatomic(to, from, n);
87}
88
89static __always_inline unsigned long
90__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
91{
92 /* Avoid zeroing the tail if the copy fails.
93 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
94 * but as the zeroing behaviour is only significant when n is not
95 * constant, that shouldn't be a problem.
96 */
97 if (__builtin_constant_p(n)) {
98 unsigned long ret;
99
100 switch (n) {
101 case 1:
102 __get_user_size(*(u8 *)to, from, 1, ret, 1);
103 return ret;
104 case 2:
105 __get_user_size(*(u16 *)to, from, 2, ret, 2);
106 return ret;
107 case 4:
108 __get_user_size(*(u32 *)to, from, 4, ret, 4);
109 return ret;
110 }
111 }
112 return __copy_from_user_ll_nozero(to, from, n);
113}
114
115/**
116 * __copy_from_user: - Copy a block of data from user space, with less checking.
117 * @to: Destination address, in kernel space.
118 * @from: Source address, in user space.
119 * @n: Number of bytes to copy.
120 *
121 * Context: User context only. This function may sleep.
122 *
123 * Copy data from user space to kernel space. Caller must check
124 * the specified block with access_ok() before calling this function.
125 *
126 * Returns number of bytes that could not be copied.
127 * On success, this will be zero.
128 *
129 * If some data could not be copied, this function will pad the copied
130 * data to the requested size using zero bytes.
131 *
132 * An alternate version - __copy_from_user_inatomic() - may be called from
133 * atomic context and will fail rather than sleep. In this case the
134 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
135 * for explanation of why this is needed.
136 */
137static __always_inline unsigned long
138__copy_from_user(void *to, const void __user *from, unsigned long n)
139{
140 might_sleep();
141 if (__builtin_constant_p(n)) {
142 unsigned long ret;
143
144 switch (n) {
145 case 1:
146 __get_user_size(*(u8 *)to, from, 1, ret, 1);
147 return ret;
148 case 2:
149 __get_user_size(*(u16 *)to, from, 2, ret, 2);
150 return ret;
151 case 4:
152 __get_user_size(*(u32 *)to, from, 4, ret, 4);
153 return ret;
154 }
155 }
156 return __copy_from_user_ll(to, from, n);
157}
158
159static __always_inline unsigned long __copy_from_user_nocache(void *to,
160 const void __user *from, unsigned long n)
161{
162 might_sleep();
163 if (__builtin_constant_p(n)) {
164 unsigned long ret;
165
166 switch (n) {
167 case 1:
168 __get_user_size(*(u8 *)to, from, 1, ret, 1);
169 return ret;
170 case 2:
171 __get_user_size(*(u16 *)to, from, 2, ret, 2);
172 return ret;
173 case 4:
174 __get_user_size(*(u32 *)to, from, 4, ret, 4);
175 return ret;
176 }
177 }
178 return __copy_from_user_ll_nocache(to, from, n);
179}
180
181static __always_inline unsigned long
182__copy_from_user_inatomic_nocache(void *to, const void __user *from,
183 unsigned long n)
184{
185 return __copy_from_user_ll_nocache_nozero(to, from, n);
186}
187
188unsigned long __must_check copy_to_user(void __user *to,
189 const void *from, unsigned long n);
190unsigned long __must_check copy_from_user(void *to,
191 const void __user *from,
192 unsigned long n);
193long __must_check strncpy_from_user(char *dst, const char __user *src,
194 long count);
195long __must_check __strncpy_from_user(char *dst,
196 const char __user *src, long count);
197
198/**
199 * strlen_user: - Get the size of a string in user space.
200 * @str: The string to measure.
201 *
202 * Context: User context only. This function may sleep.
203 *
204 * Get the size of a NUL-terminated string in user space.
205 *
206 * Returns the size of the string INCLUDING the terminating NUL.
207 * On exception, returns 0.
208 *
209 * If there is a limit on the length of a valid string, you may wish to
210 * consider using strnlen_user() instead.
211 */
212#define strlen_user(str) strnlen_user(str, LONG_MAX)
213
214long strnlen_user(const char __user *str, long n);
215unsigned long __must_check clear_user(void __user *mem, unsigned long len);
216unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
217
218#endif /* ASM_X86__UACCESS_32_H */
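
A hedged user-space model of the strnlen_user() semantics documented above (count includes the terminating NUL, 0 on exception); the fault case is only simulated here, since a normal process cannot reproduce a bad user pointer this way:

#include <stdio.h>
#include <string.h>

static long model_strnlen_user(const char *str, long n)
{
	if (!str)
		return 0;				/* stand-in for the fault case */
	return (long)strnlen(str, (size_t)n) + 1;	/* count includes the NUL */
}

int main(void)
{
	printf("%ld\n", model_strnlen_user("abc", 4096));	/* 4 */
	printf("%ld\n", model_strnlen_user(NULL, 4096));	/* 0 */
	return 0;
}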
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
deleted file mode 100644
index c96c1f5d07a2..000000000000
--- a/include/asm-x86/uaccess_64.h
+++ /dev/null
@@ -1,202 +0,0 @@
1#ifndef ASM_X86__UACCESS_64_H
2#define ASM_X86__UACCESS_64_H
3
4/*
5 * User space memory access functions
6 */
7#include <linux/compiler.h>
8#include <linux/errno.h>
9#include <linux/prefetch.h>
10#include <linux/lockdep.h>
11#include <asm/page.h>
12
13/*
14 * Copy To/From Userspace
15 */
16
17/* Handles exceptions in both to and from, but doesn't do access_ok */
18__must_check unsigned long
19copy_user_generic(void *to, const void *from, unsigned len);
20
21__must_check unsigned long
22copy_to_user(void __user *to, const void *from, unsigned len);
23__must_check unsigned long
24copy_from_user(void *to, const void __user *from, unsigned len);
25__must_check unsigned long
26copy_in_user(void __user *to, const void __user *from, unsigned len);
27
28static __always_inline __must_check
29int __copy_from_user(void *dst, const void __user *src, unsigned size)
30{
31 int ret = 0;
32 if (!__builtin_constant_p(size))
33 return copy_user_generic(dst, (__force void *)src, size);
34 switch (size) {
35 case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
36 ret, "b", "b", "=q", 1);
37 return ret;
38 case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
39 ret, "w", "w", "=r", 2);
40 return ret;
41 case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
42 ret, "l", "k", "=r", 4);
43 return ret;
44 case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
45 ret, "q", "", "=r", 8);
46 return ret;
47 case 10:
48 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
49 ret, "q", "", "=r", 16);
50 if (unlikely(ret))
51 return ret;
52 __get_user_asm(*(u16 *)(8 + (char *)dst),
53 (u16 __user *)(8 + (char __user *)src),
54 ret, "w", "w", "=r", 2);
55 return ret;
56 case 16:
57 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
58 ret, "q", "", "=r", 16);
59 if (unlikely(ret))
60 return ret;
61 __get_user_asm(*(u64 *)(8 + (char *)dst),
62 (u64 __user *)(8 + (char __user *)src),
63 ret, "q", "", "=r", 8);
64 return ret;
65 default:
66 return copy_user_generic(dst, (__force void *)src, size);
67 }
68}
69
70static __always_inline __must_check
71int __copy_to_user(void __user *dst, const void *src, unsigned size)
72{
73 int ret = 0;
74 if (!__builtin_constant_p(size))
75 return copy_user_generic((__force void *)dst, src, size);
76 switch (size) {
77 case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
78 ret, "b", "b", "iq", 1);
79 return ret;
80 case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
81 ret, "w", "w", "ir", 2);
82 return ret;
83 case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
84 ret, "l", "k", "ir", 4);
85 return ret;
86 case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
87 ret, "q", "", "ir", 8);
88 return ret;
89 case 10:
90 __put_user_asm(*(u64 *)src, (u64 __user *)dst,
91 ret, "q", "", "ir", 10);
92 if (unlikely(ret))
93 return ret;
94 asm("":::"memory");
95 __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
96 ret, "w", "w", "ir", 2);
97 return ret;
98 case 16:
99 __put_user_asm(*(u64 *)src, (u64 __user *)dst,
100 ret, "q", "", "ir", 16);
101 if (unlikely(ret))
102 return ret;
103 asm("":::"memory");
104 __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
105 ret, "q", "", "ir", 8);
106 return ret;
107 default:
108 return copy_user_generic((__force void *)dst, src, size);
109 }
110}
111
112static __always_inline __must_check
113int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
114{
115 int ret = 0;
116 if (!__builtin_constant_p(size))
117 return copy_user_generic((__force void *)dst,
118 (__force void *)src, size);
119 switch (size) {
120 case 1: {
121 u8 tmp;
122 __get_user_asm(tmp, (u8 __user *)src,
123 ret, "b", "b", "=q", 1);
124 if (likely(!ret))
125 __put_user_asm(tmp, (u8 __user *)dst,
126 ret, "b", "b", "iq", 1);
127 return ret;
128 }
129 case 2: {
130 u16 tmp;
131 __get_user_asm(tmp, (u16 __user *)src,
132 ret, "w", "w", "=r", 2);
133 if (likely(!ret))
134 __put_user_asm(tmp, (u16 __user *)dst,
135 ret, "w", "w", "ir", 2);
136 return ret;
137 }
138
139 case 4: {
140 u32 tmp;
141 __get_user_asm(tmp, (u32 __user *)src,
142 ret, "l", "k", "=r", 4);
143 if (likely(!ret))
144 __put_user_asm(tmp, (u32 __user *)dst,
145 ret, "l", "k", "ir", 4);
146 return ret;
147 }
148 case 8: {
149 u64 tmp;
150 __get_user_asm(tmp, (u64 __user *)src,
151 ret, "q", "", "=r", 8);
152 if (likely(!ret))
153 __put_user_asm(tmp, (u64 __user *)dst,
154 ret, "q", "", "ir", 8);
155 return ret;
156 }
157 default:
158 return copy_user_generic((__force void *)dst,
159 (__force void *)src, size);
160 }
161}
162
163__must_check long
164strncpy_from_user(char *dst, const char __user *src, long count);
165__must_check long
166__strncpy_from_user(char *dst, const char __user *src, long count);
167__must_check long strnlen_user(const char __user *str, long n);
168__must_check long __strnlen_user(const char __user *str, long n);
169__must_check long strlen_user(const char __user *str);
170__must_check unsigned long clear_user(void __user *mem, unsigned long len);
171__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
172
173__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
174 unsigned size);
175
176static __must_check __always_inline int
177__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
178{
179 return copy_user_generic((__force void *)dst, src, size);
180}
181
182extern long __copy_user_nocache(void *dst, const void __user *src,
183 unsigned size, int zerorest);
184
185static inline int __copy_from_user_nocache(void *dst, const void __user *src,
186 unsigned size)
187{
188 might_sleep();
189 return __copy_user_nocache(dst, src, size, 1);
190}
191
192static inline int __copy_from_user_inatomic_nocache(void *dst,
193 const void __user *src,
194 unsigned size)
195{
196 return __copy_user_nocache(dst, src, size, 0);
197}
198
199unsigned long
200copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
201
202#endif /* ASM_X86__UACCESS_64_H */
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
deleted file mode 100644
index 89eaa5456a7e..000000000000
--- a/include/asm-x86/ucontext.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_X86__UCONTEXT_H
2#define ASM_X86__UCONTEXT_H
3
4#define UC_FP_XSTATE 0x1 /* indicates the presence of extended state
5 * information in the memory layout pointed
6 * by the fpstate pointer in the ucontext's
7 * sigcontext struct (uc_mcontext).
8 */
9
10struct ucontext {
11 unsigned long uc_flags;
12 struct ucontext *uc_link;
13 stack_t uc_stack;
14 struct sigcontext uc_mcontext;
15 sigset_t uc_sigmask; /* mask last for extensibility */
16};
17
18#endif /* ASM_X86__UCONTEXT_H */
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
deleted file mode 100644
index 59dcdec37160..000000000000
--- a/include/asm-x86/unaligned.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef ASM_X86__UNALIGNED_H
2#define ASM_X86__UNALIGNED_H
3
4/*
5 * The x86 can do unaligned accesses itself.
6 */
7
8#include <linux/unaligned/access_ok.h>
9#include <linux/unaligned/generic.h>
10
11#define get_unaligned __get_unaligned_le
12#define put_unaligned __put_unaligned_le
13
14#endif /* ASM_X86__UNALIGNED_H */
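
The "x86 can do unaligned accesses itself" property that this header relies on can be demonstrated from user space; the sketch below uses memcpy(), the well-defined C spelling of an unaligned load (a plain misaligned dereference also works in practice on x86, but is undefined behavior in portable C):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[8] = { 0x00, 0x78, 0x56, 0x34, 0x12 };
	unsigned int v;

	memcpy(&v, buf + 1, sizeof(v));	/* unaligned 32-bit load at offset 1 */
	printf("%#x\n", v);		/* 0x12345678 on little-endian */
	return 0;
}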
diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h
deleted file mode 100644
index 2a58ed3e51d8..000000000000
--- a/include/asm-x86/unistd.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "unistd_32.h"
4# else
5# include "unistd_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "unistd_32.h"
10# else
11# include "unistd_64.h"
12# endif
13#endif
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
deleted file mode 100644
index 017f4a87c913..000000000000
--- a/include/asm-x86/unistd_32.h
+++ /dev/null
@@ -1,379 +0,0 @@
1#ifndef ASM_X86__UNISTD_32_H
2#define ASM_X86__UNISTD_32_H
3
4/*
5 * This file contains the system call numbers.
6 */
7
8#define __NR_restart_syscall 0
9#define __NR_exit 1
10#define __NR_fork 2
11#define __NR_read 3
12#define __NR_write 4
13#define __NR_open 5
14#define __NR_close 6
15#define __NR_waitpid 7
16#define __NR_creat 8
17#define __NR_link 9
18#define __NR_unlink 10
19#define __NR_execve 11
20#define __NR_chdir 12
21#define __NR_time 13
22#define __NR_mknod 14
23#define __NR_chmod 15
24#define __NR_lchown 16
25#define __NR_break 17
26#define __NR_oldstat 18
27#define __NR_lseek 19
28#define __NR_getpid 20
29#define __NR_mount 21
30#define __NR_umount 22
31#define __NR_setuid 23
32#define __NR_getuid 24
33#define __NR_stime 25
34#define __NR_ptrace 26
35#define __NR_alarm 27
36#define __NR_oldfstat 28
37#define __NR_pause 29
38#define __NR_utime 30
39#define __NR_stty 31
40#define __NR_gtty 32
41#define __NR_access 33
42#define __NR_nice 34
43#define __NR_ftime 35
44#define __NR_sync 36
45#define __NR_kill 37
46#define __NR_rename 38
47#define __NR_mkdir 39
48#define __NR_rmdir 40
49#define __NR_dup 41
50#define __NR_pipe 42
51#define __NR_times 43
52#define __NR_prof 44
53#define __NR_brk 45
54#define __NR_setgid 46
55#define __NR_getgid 47
56#define __NR_signal 48
57#define __NR_geteuid 49
58#define __NR_getegid 50
59#define __NR_acct 51
60#define __NR_umount2 52
61#define __NR_lock 53
62#define __NR_ioctl 54
63#define __NR_fcntl 55
64#define __NR_mpx 56
65#define __NR_setpgid 57
66#define __NR_ulimit 58
67#define __NR_oldolduname 59
68#define __NR_umask 60
69#define __NR_chroot 61
70#define __NR_ustat 62
71#define __NR_dup2 63
72#define __NR_getppid 64
73#define __NR_getpgrp 65
74#define __NR_setsid 66
75#define __NR_sigaction 67
76#define __NR_sgetmask 68
77#define __NR_ssetmask 69
78#define __NR_setreuid 70
79#define __NR_setregid 71
80#define __NR_sigsuspend 72
81#define __NR_sigpending 73
82#define __NR_sethostname 74
83#define __NR_setrlimit 75
84#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
85#define __NR_getrusage 77
86#define __NR_gettimeofday 78
87#define __NR_settimeofday 79
88#define __NR_getgroups 80
89#define __NR_setgroups 81
90#define __NR_select 82
91#define __NR_symlink 83
92#define __NR_oldlstat 84
93#define __NR_readlink 85
94#define __NR_uselib 86
95#define __NR_swapon 87
96#define __NR_reboot 88
97#define __NR_readdir 89
98#define __NR_mmap 90
99#define __NR_munmap 91
100#define __NR_truncate 92
101#define __NR_ftruncate 93
102#define __NR_fchmod 94
103#define __NR_fchown 95
104#define __NR_getpriority 96
105#define __NR_setpriority 97
106#define __NR_profil 98
107#define __NR_statfs 99
108#define __NR_fstatfs 100
109#define __NR_ioperm 101
110#define __NR_socketcall 102
111#define __NR_syslog 103
112#define __NR_setitimer 104
113#define __NR_getitimer 105
114#define __NR_stat 106
115#define __NR_lstat 107
116#define __NR_fstat 108
117#define __NR_olduname 109
118#define __NR_iopl 110
119#define __NR_vhangup 111
120#define __NR_idle 112
121#define __NR_vm86old 113
122#define __NR_wait4 114
123#define __NR_swapoff 115
124#define __NR_sysinfo 116
125#define __NR_ipc 117
126#define __NR_fsync 118
127#define __NR_sigreturn 119
128#define __NR_clone 120
129#define __NR_setdomainname 121
130#define __NR_uname 122
131#define __NR_modify_ldt 123
132#define __NR_adjtimex 124
133#define __NR_mprotect 125
134#define __NR_sigprocmask 126
135#define __NR_create_module 127
136#define __NR_init_module 128
137#define __NR_delete_module 129
138#define __NR_get_kernel_syms 130
139#define __NR_quotactl 131
140#define __NR_getpgid 132
141#define __NR_fchdir 133
142#define __NR_bdflush 134
143#define __NR_sysfs 135
144#define __NR_personality 136
145#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
146#define __NR_setfsuid 138
147#define __NR_setfsgid 139
148#define __NR__llseek 140
149#define __NR_getdents 141
150#define __NR__newselect 142
151#define __NR_flock 143
152#define __NR_msync 144
153#define __NR_readv 145
154#define __NR_writev 146
155#define __NR_getsid 147
156#define __NR_fdatasync 148
157#define __NR__sysctl 149
158#define __NR_mlock 150
159#define __NR_munlock 151
160#define __NR_mlockall 152
161#define __NR_munlockall 153
162#define __NR_sched_setparam 154
163#define __NR_sched_getparam 155
164#define __NR_sched_setscheduler 156
165#define __NR_sched_getscheduler 157
166#define __NR_sched_yield 158
167#define __NR_sched_get_priority_max 159
168#define __NR_sched_get_priority_min 160
169#define __NR_sched_rr_get_interval 161
170#define __NR_nanosleep 162
171#define __NR_mremap 163
172#define __NR_setresuid 164
173#define __NR_getresuid 165
174#define __NR_vm86 166
175#define __NR_query_module 167
176#define __NR_poll 168
177#define __NR_nfsservctl 169
178#define __NR_setresgid 170
179#define __NR_getresgid 171
180#define __NR_prctl 172
181#define __NR_rt_sigreturn 173
182#define __NR_rt_sigaction 174
183#define __NR_rt_sigprocmask 175
184#define __NR_rt_sigpending 176
185#define __NR_rt_sigtimedwait 177
186#define __NR_rt_sigqueueinfo 178
187#define __NR_rt_sigsuspend 179
188#define __NR_pread64 180
189#define __NR_pwrite64 181
190#define __NR_chown 182
191#define __NR_getcwd 183
192#define __NR_capget 184
193#define __NR_capset 185
194#define __NR_sigaltstack 186
195#define __NR_sendfile 187
196#define __NR_getpmsg 188 /* some people actually want streams */
197#define __NR_putpmsg 189 /* some people actually want streams */
198#define __NR_vfork 190
199#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
200#define __NR_mmap2 192
201#define __NR_truncate64 193
202#define __NR_ftruncate64 194
203#define __NR_stat64 195
204#define __NR_lstat64 196
205#define __NR_fstat64 197
206#define __NR_lchown32 198
207#define __NR_getuid32 199
208#define __NR_getgid32 200
209#define __NR_geteuid32 201
210#define __NR_getegid32 202
211#define __NR_setreuid32 203
212#define __NR_setregid32 204
213#define __NR_getgroups32 205
214#define __NR_setgroups32 206
215#define __NR_fchown32 207
216#define __NR_setresuid32 208
217#define __NR_getresuid32 209
218#define __NR_setresgid32 210
219#define __NR_getresgid32 211
220#define __NR_chown32 212
221#define __NR_setuid32 213
222#define __NR_setgid32 214
223#define __NR_setfsuid32 215
224#define __NR_setfsgid32 216
225#define __NR_pivot_root 217
226#define __NR_mincore 218
227#define __NR_madvise 219
228#define __NR_madvise1 219 /* delete when C lib stub is removed */
229#define __NR_getdents64 220
230#define __NR_fcntl64 221
231/* 223 is unused */
232#define __NR_gettid 224
233#define __NR_readahead 225
234#define __NR_setxattr 226
235#define __NR_lsetxattr 227
236#define __NR_fsetxattr 228
237#define __NR_getxattr 229
238#define __NR_lgetxattr 230
239#define __NR_fgetxattr 231
240#define __NR_listxattr 232
241#define __NR_llistxattr 233
242#define __NR_flistxattr 234
243#define __NR_removexattr 235
244#define __NR_lremovexattr 236
245#define __NR_fremovexattr 237
246#define __NR_tkill 238
247#define __NR_sendfile64 239
248#define __NR_futex 240
249#define __NR_sched_setaffinity 241
250#define __NR_sched_getaffinity 242
251#define __NR_set_thread_area 243
252#define __NR_get_thread_area 244
253#define __NR_io_setup 245
254#define __NR_io_destroy 246
255#define __NR_io_getevents 247
256#define __NR_io_submit 248
257#define __NR_io_cancel 249
258#define __NR_fadvise64 250
259/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
260#define __NR_exit_group 252
261#define __NR_lookup_dcookie 253
262#define __NR_epoll_create 254
263#define __NR_epoll_ctl 255
264#define __NR_epoll_wait 256
265#define __NR_remap_file_pages 257
266#define __NR_set_tid_address 258
267#define __NR_timer_create 259
268#define __NR_timer_settime (__NR_timer_create+1)
269#define __NR_timer_gettime (__NR_timer_create+2)
270#define __NR_timer_getoverrun (__NR_timer_create+3)
271#define __NR_timer_delete (__NR_timer_create+4)
272#define __NR_clock_settime (__NR_timer_create+5)
273#define __NR_clock_gettime (__NR_timer_create+6)
274#define __NR_clock_getres (__NR_timer_create+7)
275#define __NR_clock_nanosleep (__NR_timer_create+8)
276#define __NR_statfs64 268
277#define __NR_fstatfs64 269
278#define __NR_tgkill 270
279#define __NR_utimes 271
280#define __NR_fadvise64_64 272
281#define __NR_vserver 273
282#define __NR_mbind 274
283#define __NR_get_mempolicy 275
284#define __NR_set_mempolicy 276
285#define __NR_mq_open 277
286#define __NR_mq_unlink (__NR_mq_open+1)
287#define __NR_mq_timedsend (__NR_mq_open+2)
288#define __NR_mq_timedreceive (__NR_mq_open+3)
289#define __NR_mq_notify (__NR_mq_open+4)
290#define __NR_mq_getsetattr (__NR_mq_open+5)
291#define __NR_kexec_load 283
292#define __NR_waitid 284
293/* #define __NR_sys_setaltroot 285 */
294#define __NR_add_key 286
295#define __NR_request_key 287
296#define __NR_keyctl 288
297#define __NR_ioprio_set 289
298#define __NR_ioprio_get 290
299#define __NR_inotify_init 291
300#define __NR_inotify_add_watch 292
301#define __NR_inotify_rm_watch 293
302#define __NR_migrate_pages 294
303#define __NR_openat 295
304#define __NR_mkdirat 296
305#define __NR_mknodat 297
306#define __NR_fchownat 298
307#define __NR_futimesat 299
308#define __NR_fstatat64 300
309#define __NR_unlinkat 301
310#define __NR_renameat 302
311#define __NR_linkat 303
312#define __NR_symlinkat 304
313#define __NR_readlinkat 305
314#define __NR_fchmodat 306
315#define __NR_faccessat 307
316#define __NR_pselect6 308
317#define __NR_ppoll 309
318#define __NR_unshare 310
319#define __NR_set_robust_list 311
320#define __NR_get_robust_list 312
321#define __NR_splice 313
322#define __NR_sync_file_range 314
323#define __NR_tee 315
324#define __NR_vmsplice 316
325#define __NR_move_pages 317
326#define __NR_getcpu 318
327#define __NR_epoll_pwait 319
328#define __NR_utimensat 320
329#define __NR_signalfd 321
330#define __NR_timerfd_create 322
331#define __NR_eventfd 323
332#define __NR_fallocate 324
333#define __NR_timerfd_settime 325
334#define __NR_timerfd_gettime 326
335#define __NR_signalfd4 327
336#define __NR_eventfd2 328
337#define __NR_epoll_create1 329
338#define __NR_dup3 330
339#define __NR_pipe2 331
340#define __NR_inotify_init1 332
341
342#ifdef __KERNEL__
343
344#define __ARCH_WANT_IPC_PARSE_VERSION
345#define __ARCH_WANT_OLD_READDIR
346#define __ARCH_WANT_OLD_STAT
347#define __ARCH_WANT_STAT64
348#define __ARCH_WANT_SYS_ALARM
349#define __ARCH_WANT_SYS_GETHOSTNAME
350#define __ARCH_WANT_SYS_PAUSE
351#define __ARCH_WANT_SYS_SGETMASK
352#define __ARCH_WANT_SYS_SIGNAL
353#define __ARCH_WANT_SYS_TIME
354#define __ARCH_WANT_SYS_UTIME
355#define __ARCH_WANT_SYS_WAITPID
356#define __ARCH_WANT_SYS_SOCKETCALL
357#define __ARCH_WANT_SYS_FADVISE64
358#define __ARCH_WANT_SYS_GETPGRP
359#define __ARCH_WANT_SYS_LLSEEK
360#define __ARCH_WANT_SYS_NICE
361#define __ARCH_WANT_SYS_OLD_GETRLIMIT
362#define __ARCH_WANT_SYS_OLDUMOUNT
363#define __ARCH_WANT_SYS_SIGPENDING
364#define __ARCH_WANT_SYS_SIGPROCMASK
365#define __ARCH_WANT_SYS_RT_SIGACTION
366#define __ARCH_WANT_SYS_RT_SIGSUSPEND
367
368/*
369 * "Conditional" syscalls
370 *
371 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
372 * but it doesn't work on all toolchains, so we just do it by hand
373 */
374#ifndef cond_syscall
375#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
376#endif
377
378#endif /* __KERNEL__ */
379#endif /* ASM_X86__UNISTD_32_H */
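
The cond_syscall() comment above explains the trick: mark the symbol weak and bind it to sys_ni_syscall, so a syscall whose implementation is configured out still resolves at link time and returns -ENOSYS. What an invocation expands to, using an illustrative symbol name:

	/* cond_syscall(sys_quotactl) expands, per the macro above, to: */
	asm(".weak\tsys_quotactl\n\t.set\tsys_quotactl,sys_ni_syscall");

	/* the attribute form the comment wishes for would read: */
	long sys_quotactl(void) __attribute__((weak, alias("sys_ni_syscall")));

The attribute spelling requires sys_ni_syscall to be defined in the same translation unit, and support varies by toolchain, which is why the raw asm won out. A strong definition of the syscall elsewhere in the kernel overrides the weak binding.
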
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
deleted file mode 100644
index ace83f1f6787..000000000000
--- a/include/asm-x86/unistd_64.h
+++ /dev/null
@@ -1,693 +0,0 @@
1#ifndef ASM_X86__UNISTD_64_H
2#define ASM_X86__UNISTD_64_H
3
4#ifndef __SYSCALL
5#define __SYSCALL(a, b)
6#endif
7
8/*
9 * This file contains the system call numbers.
10 *
11 * Note: holes are not allowed.
12 */
13
 14/* at least 8 syscalls per cacheline */
15#define __NR_read 0
16__SYSCALL(__NR_read, sys_read)
17#define __NR_write 1
18__SYSCALL(__NR_write, sys_write)
19#define __NR_open 2
20__SYSCALL(__NR_open, sys_open)
21#define __NR_close 3
22__SYSCALL(__NR_close, sys_close)
23#define __NR_stat 4
24__SYSCALL(__NR_stat, sys_newstat)
25#define __NR_fstat 5
26__SYSCALL(__NR_fstat, sys_newfstat)
27#define __NR_lstat 6
28__SYSCALL(__NR_lstat, sys_newlstat)
29#define __NR_poll 7
30__SYSCALL(__NR_poll, sys_poll)
31
32#define __NR_lseek 8
33__SYSCALL(__NR_lseek, sys_lseek)
34#define __NR_mmap 9
35__SYSCALL(__NR_mmap, sys_mmap)
36#define __NR_mprotect 10
37__SYSCALL(__NR_mprotect, sys_mprotect)
38#define __NR_munmap 11
39__SYSCALL(__NR_munmap, sys_munmap)
40#define __NR_brk 12
41__SYSCALL(__NR_brk, sys_brk)
42#define __NR_rt_sigaction 13
43__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction)
44#define __NR_rt_sigprocmask 14
45__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
46#define __NR_rt_sigreturn 15
47__SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn)
48
49#define __NR_ioctl 16
50__SYSCALL(__NR_ioctl, sys_ioctl)
51#define __NR_pread64 17
52__SYSCALL(__NR_pread64, sys_pread64)
53#define __NR_pwrite64 18
54__SYSCALL(__NR_pwrite64, sys_pwrite64)
55#define __NR_readv 19
56__SYSCALL(__NR_readv, sys_readv)
57#define __NR_writev 20
58__SYSCALL(__NR_writev, sys_writev)
59#define __NR_access 21
60__SYSCALL(__NR_access, sys_access)
61#define __NR_pipe 22
62__SYSCALL(__NR_pipe, sys_pipe)
63#define __NR_select 23
64__SYSCALL(__NR_select, sys_select)
65
66#define __NR_sched_yield 24
67__SYSCALL(__NR_sched_yield, sys_sched_yield)
68#define __NR_mremap 25
69__SYSCALL(__NR_mremap, sys_mremap)
70#define __NR_msync 26
71__SYSCALL(__NR_msync, sys_msync)
72#define __NR_mincore 27
73__SYSCALL(__NR_mincore, sys_mincore)
74#define __NR_madvise 28
75__SYSCALL(__NR_madvise, sys_madvise)
76#define __NR_shmget 29
77__SYSCALL(__NR_shmget, sys_shmget)
78#define __NR_shmat 30
79__SYSCALL(__NR_shmat, sys_shmat)
80#define __NR_shmctl 31
81__SYSCALL(__NR_shmctl, sys_shmctl)
82
83#define __NR_dup 32
84__SYSCALL(__NR_dup, sys_dup)
85#define __NR_dup2 33
86__SYSCALL(__NR_dup2, sys_dup2)
87#define __NR_pause 34
88__SYSCALL(__NR_pause, sys_pause)
89#define __NR_nanosleep 35
90__SYSCALL(__NR_nanosleep, sys_nanosleep)
91#define __NR_getitimer 36
92__SYSCALL(__NR_getitimer, sys_getitimer)
93#define __NR_alarm 37
94__SYSCALL(__NR_alarm, sys_alarm)
95#define __NR_setitimer 38
96__SYSCALL(__NR_setitimer, sys_setitimer)
97#define __NR_getpid 39
98__SYSCALL(__NR_getpid, sys_getpid)
99
100#define __NR_sendfile 40
101__SYSCALL(__NR_sendfile, sys_sendfile64)
102#define __NR_socket 41
103__SYSCALL(__NR_socket, sys_socket)
104#define __NR_connect 42
105__SYSCALL(__NR_connect, sys_connect)
106#define __NR_accept 43
107__SYSCALL(__NR_accept, sys_accept)
108#define __NR_sendto 44
109__SYSCALL(__NR_sendto, sys_sendto)
110#define __NR_recvfrom 45
111__SYSCALL(__NR_recvfrom, sys_recvfrom)
112#define __NR_sendmsg 46
113__SYSCALL(__NR_sendmsg, sys_sendmsg)
114#define __NR_recvmsg 47
115__SYSCALL(__NR_recvmsg, sys_recvmsg)
116
117#define __NR_shutdown 48
118__SYSCALL(__NR_shutdown, sys_shutdown)
119#define __NR_bind 49
120__SYSCALL(__NR_bind, sys_bind)
121#define __NR_listen 50
122__SYSCALL(__NR_listen, sys_listen)
123#define __NR_getsockname 51
124__SYSCALL(__NR_getsockname, sys_getsockname)
125#define __NR_getpeername 52
126__SYSCALL(__NR_getpeername, sys_getpeername)
127#define __NR_socketpair 53
128__SYSCALL(__NR_socketpair, sys_socketpair)
129#define __NR_setsockopt 54
130__SYSCALL(__NR_setsockopt, sys_setsockopt)
131#define __NR_getsockopt 55
132__SYSCALL(__NR_getsockopt, sys_getsockopt)
133
134#define __NR_clone 56
135__SYSCALL(__NR_clone, stub_clone)
136#define __NR_fork 57
137__SYSCALL(__NR_fork, stub_fork)
138#define __NR_vfork 58
139__SYSCALL(__NR_vfork, stub_vfork)
140#define __NR_execve 59
141__SYSCALL(__NR_execve, stub_execve)
142#define __NR_exit 60
143__SYSCALL(__NR_exit, sys_exit)
144#define __NR_wait4 61
145__SYSCALL(__NR_wait4, sys_wait4)
146#define __NR_kill 62
147__SYSCALL(__NR_kill, sys_kill)
148#define __NR_uname 63
149__SYSCALL(__NR_uname, sys_uname)
150
151#define __NR_semget 64
152__SYSCALL(__NR_semget, sys_semget)
153#define __NR_semop 65
154__SYSCALL(__NR_semop, sys_semop)
155#define __NR_semctl 66
156__SYSCALL(__NR_semctl, sys_semctl)
157#define __NR_shmdt 67
158__SYSCALL(__NR_shmdt, sys_shmdt)
159#define __NR_msgget 68
160__SYSCALL(__NR_msgget, sys_msgget)
161#define __NR_msgsnd 69
162__SYSCALL(__NR_msgsnd, sys_msgsnd)
163#define __NR_msgrcv 70
164__SYSCALL(__NR_msgrcv, sys_msgrcv)
165#define __NR_msgctl 71
166__SYSCALL(__NR_msgctl, sys_msgctl)
167
168#define __NR_fcntl 72
169__SYSCALL(__NR_fcntl, sys_fcntl)
170#define __NR_flock 73
171__SYSCALL(__NR_flock, sys_flock)
172#define __NR_fsync 74
173__SYSCALL(__NR_fsync, sys_fsync)
174#define __NR_fdatasync 75
175__SYSCALL(__NR_fdatasync, sys_fdatasync)
176#define __NR_truncate 76
177__SYSCALL(__NR_truncate, sys_truncate)
178#define __NR_ftruncate 77
179__SYSCALL(__NR_ftruncate, sys_ftruncate)
180#define __NR_getdents 78
181__SYSCALL(__NR_getdents, sys_getdents)
182#define __NR_getcwd 79
183__SYSCALL(__NR_getcwd, sys_getcwd)
184
185#define __NR_chdir 80
186__SYSCALL(__NR_chdir, sys_chdir)
187#define __NR_fchdir 81
188__SYSCALL(__NR_fchdir, sys_fchdir)
189#define __NR_rename 82
190__SYSCALL(__NR_rename, sys_rename)
191#define __NR_mkdir 83
192__SYSCALL(__NR_mkdir, sys_mkdir)
193#define __NR_rmdir 84
194__SYSCALL(__NR_rmdir, sys_rmdir)
195#define __NR_creat 85
196__SYSCALL(__NR_creat, sys_creat)
197#define __NR_link 86
198__SYSCALL(__NR_link, sys_link)
199#define __NR_unlink 87
200__SYSCALL(__NR_unlink, sys_unlink)
201
202#define __NR_symlink 88
203__SYSCALL(__NR_symlink, sys_symlink)
204#define __NR_readlink 89
205__SYSCALL(__NR_readlink, sys_readlink)
206#define __NR_chmod 90
207__SYSCALL(__NR_chmod, sys_chmod)
208#define __NR_fchmod 91
209__SYSCALL(__NR_fchmod, sys_fchmod)
210#define __NR_chown 92
211__SYSCALL(__NR_chown, sys_chown)
212#define __NR_fchown 93
213__SYSCALL(__NR_fchown, sys_fchown)
214#define __NR_lchown 94
215__SYSCALL(__NR_lchown, sys_lchown)
216#define __NR_umask 95
217__SYSCALL(__NR_umask, sys_umask)
218
219#define __NR_gettimeofday 96
220__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
221#define __NR_getrlimit 97
222__SYSCALL(__NR_getrlimit, sys_getrlimit)
223#define __NR_getrusage 98
224__SYSCALL(__NR_getrusage, sys_getrusage)
225#define __NR_sysinfo 99
226__SYSCALL(__NR_sysinfo, sys_sysinfo)
227#define __NR_times 100
228__SYSCALL(__NR_times, sys_times)
229#define __NR_ptrace 101
230__SYSCALL(__NR_ptrace, sys_ptrace)
231#define __NR_getuid 102
232__SYSCALL(__NR_getuid, sys_getuid)
233#define __NR_syslog 103
234__SYSCALL(__NR_syslog, sys_syslog)
235
236/* at the very end the stuff that never runs during the benchmarks */
237#define __NR_getgid 104
238__SYSCALL(__NR_getgid, sys_getgid)
239#define __NR_setuid 105
240__SYSCALL(__NR_setuid, sys_setuid)
241#define __NR_setgid 106
242__SYSCALL(__NR_setgid, sys_setgid)
243#define __NR_geteuid 107
244__SYSCALL(__NR_geteuid, sys_geteuid)
245#define __NR_getegid 108
246__SYSCALL(__NR_getegid, sys_getegid)
247#define __NR_setpgid 109
248__SYSCALL(__NR_setpgid, sys_setpgid)
249#define __NR_getppid 110
250__SYSCALL(__NR_getppid, sys_getppid)
251#define __NR_getpgrp 111
252__SYSCALL(__NR_getpgrp, sys_getpgrp)
253
254#define __NR_setsid 112
255__SYSCALL(__NR_setsid, sys_setsid)
256#define __NR_setreuid 113
257__SYSCALL(__NR_setreuid, sys_setreuid)
258#define __NR_setregid 114
259__SYSCALL(__NR_setregid, sys_setregid)
260#define __NR_getgroups 115
261__SYSCALL(__NR_getgroups, sys_getgroups)
262#define __NR_setgroups 116
263__SYSCALL(__NR_setgroups, sys_setgroups)
264#define __NR_setresuid 117
265__SYSCALL(__NR_setresuid, sys_setresuid)
266#define __NR_getresuid 118
267__SYSCALL(__NR_getresuid, sys_getresuid)
268#define __NR_setresgid 119
269__SYSCALL(__NR_setresgid, sys_setresgid)
270
271#define __NR_getresgid 120
272__SYSCALL(__NR_getresgid, sys_getresgid)
273#define __NR_getpgid 121
274__SYSCALL(__NR_getpgid, sys_getpgid)
275#define __NR_setfsuid 122
276__SYSCALL(__NR_setfsuid, sys_setfsuid)
277#define __NR_setfsgid 123
278__SYSCALL(__NR_setfsgid, sys_setfsgid)
279#define __NR_getsid 124
280__SYSCALL(__NR_getsid, sys_getsid)
281#define __NR_capget 125
282__SYSCALL(__NR_capget, sys_capget)
283#define __NR_capset 126
284__SYSCALL(__NR_capset, sys_capset)
285
286#define __NR_rt_sigpending 127
287__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
288#define __NR_rt_sigtimedwait 128
289__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
290#define __NR_rt_sigqueueinfo 129
291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
292#define __NR_rt_sigsuspend 130
293__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend)
294#define __NR_sigaltstack 131
295__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
296#define __NR_utime 132
297__SYSCALL(__NR_utime, sys_utime)
298#define __NR_mknod 133
299__SYSCALL(__NR_mknod, sys_mknod)
300
301/* Only needed for a.out */
302#define __NR_uselib 134
303__SYSCALL(__NR_uselib, sys_ni_syscall)
304#define __NR_personality 135
305__SYSCALL(__NR_personality, sys_personality)
306
307#define __NR_ustat 136
308__SYSCALL(__NR_ustat, sys_ustat)
309#define __NR_statfs 137
310__SYSCALL(__NR_statfs, sys_statfs)
311#define __NR_fstatfs 138
312__SYSCALL(__NR_fstatfs, sys_fstatfs)
313#define __NR_sysfs 139
314__SYSCALL(__NR_sysfs, sys_sysfs)
315
316#define __NR_getpriority 140
317__SYSCALL(__NR_getpriority, sys_getpriority)
318#define __NR_setpriority 141
319__SYSCALL(__NR_setpriority, sys_setpriority)
320#define __NR_sched_setparam 142
321__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
322#define __NR_sched_getparam 143
323__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
324#define __NR_sched_setscheduler 144
325__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
326#define __NR_sched_getscheduler 145
327__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
328#define __NR_sched_get_priority_max 146
329__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
330#define __NR_sched_get_priority_min 147
331__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
332#define __NR_sched_rr_get_interval 148
333__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
334
335#define __NR_mlock 149
336__SYSCALL(__NR_mlock, sys_mlock)
337#define __NR_munlock 150
338__SYSCALL(__NR_munlock, sys_munlock)
339#define __NR_mlockall 151
340__SYSCALL(__NR_mlockall, sys_mlockall)
341#define __NR_munlockall 152
342__SYSCALL(__NR_munlockall, sys_munlockall)
343
344#define __NR_vhangup 153
345__SYSCALL(__NR_vhangup, sys_vhangup)
346
347#define __NR_modify_ldt 154
348__SYSCALL(__NR_modify_ldt, sys_modify_ldt)
349
350#define __NR_pivot_root 155
351__SYSCALL(__NR_pivot_root, sys_pivot_root)
352
353#define __NR__sysctl 156
354__SYSCALL(__NR__sysctl, sys_sysctl)
355
356#define __NR_prctl 157
357__SYSCALL(__NR_prctl, sys_prctl)
358#define __NR_arch_prctl 158
359__SYSCALL(__NR_arch_prctl, sys_arch_prctl)
360
361#define __NR_adjtimex 159
362__SYSCALL(__NR_adjtimex, sys_adjtimex)
363
364#define __NR_setrlimit 160
365__SYSCALL(__NR_setrlimit, sys_setrlimit)
366
367#define __NR_chroot 161
368__SYSCALL(__NR_chroot, sys_chroot)
369
370#define __NR_sync 162
371__SYSCALL(__NR_sync, sys_sync)
372
373#define __NR_acct 163
374__SYSCALL(__NR_acct, sys_acct)
375
376#define __NR_settimeofday 164
377__SYSCALL(__NR_settimeofday, sys_settimeofday)
378
379#define __NR_mount 165
380__SYSCALL(__NR_mount, sys_mount)
381#define __NR_umount2 166
382__SYSCALL(__NR_umount2, sys_umount)
383
384#define __NR_swapon 167
385__SYSCALL(__NR_swapon, sys_swapon)
386#define __NR_swapoff 168
387__SYSCALL(__NR_swapoff, sys_swapoff)
388
389#define __NR_reboot 169
390__SYSCALL(__NR_reboot, sys_reboot)
391
392#define __NR_sethostname 170
393__SYSCALL(__NR_sethostname, sys_sethostname)
394#define __NR_setdomainname 171
395__SYSCALL(__NR_setdomainname, sys_setdomainname)
396
397#define __NR_iopl 172
398__SYSCALL(__NR_iopl, stub_iopl)
399#define __NR_ioperm 173
400__SYSCALL(__NR_ioperm, sys_ioperm)
401
402#define __NR_create_module 174
403__SYSCALL(__NR_create_module, sys_ni_syscall)
404#define __NR_init_module 175
405__SYSCALL(__NR_init_module, sys_init_module)
406#define __NR_delete_module 176
407__SYSCALL(__NR_delete_module, sys_delete_module)
408#define __NR_get_kernel_syms 177
409__SYSCALL(__NR_get_kernel_syms, sys_ni_syscall)
410#define __NR_query_module 178
411__SYSCALL(__NR_query_module, sys_ni_syscall)
412
413#define __NR_quotactl 179
414__SYSCALL(__NR_quotactl, sys_quotactl)
415
416#define __NR_nfsservctl 180
417__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
418
419/* reserved for LiS/STREAMS */
420#define __NR_getpmsg 181
421__SYSCALL(__NR_getpmsg, sys_ni_syscall)
422#define __NR_putpmsg 182
423__SYSCALL(__NR_putpmsg, sys_ni_syscall)
424
425/* reserved for AFS */
426#define __NR_afs_syscall 183
427__SYSCALL(__NR_afs_syscall, sys_ni_syscall)
428
429/* reserved for tux */
430#define __NR_tuxcall 184
431__SYSCALL(__NR_tuxcall, sys_ni_syscall)
432
433#define __NR_security 185
434__SYSCALL(__NR_security, sys_ni_syscall)
435
436#define __NR_gettid 186
437__SYSCALL(__NR_gettid, sys_gettid)
438
439#define __NR_readahead 187
440__SYSCALL(__NR_readahead, sys_readahead)
441#define __NR_setxattr 188
442__SYSCALL(__NR_setxattr, sys_setxattr)
443#define __NR_lsetxattr 189
444__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
445#define __NR_fsetxattr 190
446__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
447#define __NR_getxattr 191
448__SYSCALL(__NR_getxattr, sys_getxattr)
449#define __NR_lgetxattr 192
450__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
451#define __NR_fgetxattr 193
452__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
453#define __NR_listxattr 194
454__SYSCALL(__NR_listxattr, sys_listxattr)
455#define __NR_llistxattr 195
456__SYSCALL(__NR_llistxattr, sys_llistxattr)
457#define __NR_flistxattr 196
458__SYSCALL(__NR_flistxattr, sys_flistxattr)
459#define __NR_removexattr 197
460__SYSCALL(__NR_removexattr, sys_removexattr)
461#define __NR_lremovexattr 198
462__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
463#define __NR_fremovexattr 199
464__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
465#define __NR_tkill 200
466__SYSCALL(__NR_tkill, sys_tkill)
467#define __NR_time 201
468__SYSCALL(__NR_time, sys_time)
469#define __NR_futex 202
470__SYSCALL(__NR_futex, sys_futex)
471#define __NR_sched_setaffinity 203
472__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
473#define __NR_sched_getaffinity 204
474__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
475#define __NR_set_thread_area 205
476__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */
477#define __NR_io_setup 206
478__SYSCALL(__NR_io_setup, sys_io_setup)
479#define __NR_io_destroy 207
480__SYSCALL(__NR_io_destroy, sys_io_destroy)
481#define __NR_io_getevents 208
482__SYSCALL(__NR_io_getevents, sys_io_getevents)
483#define __NR_io_submit 209
484__SYSCALL(__NR_io_submit, sys_io_submit)
485#define __NR_io_cancel 210
486__SYSCALL(__NR_io_cancel, sys_io_cancel)
487#define __NR_get_thread_area 211
488__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */
489#define __NR_lookup_dcookie 212
490__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
491#define __NR_epoll_create 213
492__SYSCALL(__NR_epoll_create, sys_epoll_create)
493#define __NR_epoll_ctl_old 214
494__SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall)
495#define __NR_epoll_wait_old 215
496__SYSCALL(__NR_epoll_wait_old, sys_ni_syscall)
497#define __NR_remap_file_pages 216
498__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
499#define __NR_getdents64 217
500__SYSCALL(__NR_getdents64, sys_getdents64)
501#define __NR_set_tid_address 218
502__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
503#define __NR_restart_syscall 219
504__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
505#define __NR_semtimedop 220
506__SYSCALL(__NR_semtimedop, sys_semtimedop)
507#define __NR_fadvise64 221
508__SYSCALL(__NR_fadvise64, sys_fadvise64)
509#define __NR_timer_create 222
510__SYSCALL(__NR_timer_create, sys_timer_create)
511#define __NR_timer_settime 223
512__SYSCALL(__NR_timer_settime, sys_timer_settime)
513#define __NR_timer_gettime 224
514__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
515#define __NR_timer_getoverrun 225
516__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
517#define __NR_timer_delete 226
518__SYSCALL(__NR_timer_delete, sys_timer_delete)
519#define __NR_clock_settime 227
520__SYSCALL(__NR_clock_settime, sys_clock_settime)
521#define __NR_clock_gettime 228
522__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
523#define __NR_clock_getres 229
524__SYSCALL(__NR_clock_getres, sys_clock_getres)
525#define __NR_clock_nanosleep 230
526__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
527#define __NR_exit_group 231
528__SYSCALL(__NR_exit_group, sys_exit_group)
529#define __NR_epoll_wait 232
530__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
531#define __NR_epoll_ctl 233
532__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
533#define __NR_tgkill 234
534__SYSCALL(__NR_tgkill, sys_tgkill)
535#define __NR_utimes 235
536__SYSCALL(__NR_utimes, sys_utimes)
537#define __NR_vserver 236
538__SYSCALL(__NR_vserver, sys_ni_syscall)
539#define __NR_mbind 237
540__SYSCALL(__NR_mbind, sys_mbind)
541#define __NR_set_mempolicy 238
542__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
543#define __NR_get_mempolicy 239
544__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
545#define __NR_mq_open 240
546__SYSCALL(__NR_mq_open, sys_mq_open)
547#define __NR_mq_unlink 241
548__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
549#define __NR_mq_timedsend 242
550__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
551#define __NR_mq_timedreceive 243
552__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
553#define __NR_mq_notify 244
554__SYSCALL(__NR_mq_notify, sys_mq_notify)
555#define __NR_mq_getsetattr 245
556__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
557#define __NR_kexec_load 246
558__SYSCALL(__NR_kexec_load, sys_kexec_load)
559#define __NR_waitid 247
560__SYSCALL(__NR_waitid, sys_waitid)
561#define __NR_add_key 248
562__SYSCALL(__NR_add_key, sys_add_key)
563#define __NR_request_key 249
564__SYSCALL(__NR_request_key, sys_request_key)
565#define __NR_keyctl 250
566__SYSCALL(__NR_keyctl, sys_keyctl)
567#define __NR_ioprio_set 251
568__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
569#define __NR_ioprio_get 252
570__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
571#define __NR_inotify_init 253
572__SYSCALL(__NR_inotify_init, sys_inotify_init)
573#define __NR_inotify_add_watch 254
574__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
575#define __NR_inotify_rm_watch 255
576__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
577#define __NR_migrate_pages 256
578__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
579#define __NR_openat 257
580__SYSCALL(__NR_openat, sys_openat)
581#define __NR_mkdirat 258
582__SYSCALL(__NR_mkdirat, sys_mkdirat)
583#define __NR_mknodat 259
584__SYSCALL(__NR_mknodat, sys_mknodat)
585#define __NR_fchownat 260
586__SYSCALL(__NR_fchownat, sys_fchownat)
587#define __NR_futimesat 261
588__SYSCALL(__NR_futimesat, sys_futimesat)
589#define __NR_newfstatat 262
590__SYSCALL(__NR_newfstatat, sys_newfstatat)
591#define __NR_unlinkat 263
592__SYSCALL(__NR_unlinkat, sys_unlinkat)
593#define __NR_renameat 264
594__SYSCALL(__NR_renameat, sys_renameat)
595#define __NR_linkat 265
596__SYSCALL(__NR_linkat, sys_linkat)
597#define __NR_symlinkat 266
598__SYSCALL(__NR_symlinkat, sys_symlinkat)
599#define __NR_readlinkat 267
600__SYSCALL(__NR_readlinkat, sys_readlinkat)
601#define __NR_fchmodat 268
602__SYSCALL(__NR_fchmodat, sys_fchmodat)
603#define __NR_faccessat 269
604__SYSCALL(__NR_faccessat, sys_faccessat)
605#define __NR_pselect6 270
606__SYSCALL(__NR_pselect6, sys_pselect6)
607#define __NR_ppoll 271
608__SYSCALL(__NR_ppoll, sys_ppoll)
609#define __NR_unshare 272
610__SYSCALL(__NR_unshare, sys_unshare)
611#define __NR_set_robust_list 273
612__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
613#define __NR_get_robust_list 274
614__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
615#define __NR_splice 275
616__SYSCALL(__NR_splice, sys_splice)
617#define __NR_tee 276
618__SYSCALL(__NR_tee, sys_tee)
619#define __NR_sync_file_range 277
620__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
621#define __NR_vmsplice 278
622__SYSCALL(__NR_vmsplice, sys_vmsplice)
623#define __NR_move_pages 279
624__SYSCALL(__NR_move_pages, sys_move_pages)
625#define __NR_utimensat 280
626__SYSCALL(__NR_utimensat, sys_utimensat)
627#define __IGNORE_getcpu /* implemented as a vsyscall */
628#define __NR_epoll_pwait 281
629__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
630#define __NR_signalfd 282
631__SYSCALL(__NR_signalfd, sys_signalfd)
632#define __NR_timerfd_create 283
633__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
634#define __NR_eventfd 284
635__SYSCALL(__NR_eventfd, sys_eventfd)
636#define __NR_fallocate 285
637__SYSCALL(__NR_fallocate, sys_fallocate)
638#define __NR_timerfd_settime 286
639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
640#define __NR_timerfd_gettime 287
641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
642#define __NR_paccept 288
643__SYSCALL(__NR_paccept, sys_paccept)
644#define __NR_signalfd4 289
645__SYSCALL(__NR_signalfd4, sys_signalfd4)
646#define __NR_eventfd2 290
647__SYSCALL(__NR_eventfd2, sys_eventfd2)
648#define __NR_epoll_create1 291
649__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
650#define __NR_dup3 292
651__SYSCALL(__NR_dup3, sys_dup3)
652#define __NR_pipe2 293
653__SYSCALL(__NR_pipe2, sys_pipe2)
654#define __NR_inotify_init1 294
655__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
656
657
658#ifndef __NO_STUBS
659#define __ARCH_WANT_OLD_READDIR
660#define __ARCH_WANT_OLD_STAT
661#define __ARCH_WANT_SYS_ALARM
662#define __ARCH_WANT_SYS_GETHOSTNAME
663#define __ARCH_WANT_SYS_PAUSE
664#define __ARCH_WANT_SYS_SGETMASK
665#define __ARCH_WANT_SYS_SIGNAL
666#define __ARCH_WANT_SYS_UTIME
667#define __ARCH_WANT_SYS_WAITPID
668#define __ARCH_WANT_SYS_SOCKETCALL
669#define __ARCH_WANT_SYS_FADVISE64
670#define __ARCH_WANT_SYS_GETPGRP
671#define __ARCH_WANT_SYS_LLSEEK
672#define __ARCH_WANT_SYS_NICE
673#define __ARCH_WANT_SYS_OLD_GETRLIMIT
674#define __ARCH_WANT_SYS_OLDUMOUNT
675#define __ARCH_WANT_SYS_SIGPENDING
676#define __ARCH_WANT_SYS_SIGPROCMASK
677#define __ARCH_WANT_SYS_RT_SIGACTION
678#define __ARCH_WANT_SYS_RT_SIGSUSPEND
679#define __ARCH_WANT_SYS_TIME
680#define __ARCH_WANT_COMPAT_SYS_TIME
681#endif /* __NO_STUBS */
682
683#ifdef __KERNEL__
684/*
685 * "Conditional" syscalls
686 *
687 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
688 * but it doesn't work on all toolchains, so we just do it by hand
689 */
690#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
691#endif /* __KERNEL__ */
692
693#endif /* ASM_X86__UNISTD_64_H */
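
The no-op fallback for __SYSCALL() at the top of this header is what allows it to be expanded more than once under different macro definitions, and the "holes are not allowed" rule keeps the resulting array dense. A condensed sketch of the table-building pattern, modeled on this era's arch/x86/kernel/syscall_64.c (treat it as illustrative; __NR_syscall_max is generated separately via asm-offsets, and asmlinkage comes from <linux/linkage.h>):

	#include <linux/linkage.h>

	#define __SYSCALL(nr, sym) extern asmlinkage void sym(void);
	#undef ASM_X86__UNISTD_64_H		/* defeat the include guard */
	#include <asm/unistd_64.h>		/* pass 1: declare every entry point */

	#undef __SYSCALL
	#define __SYSCALL(nr, sym) [nr] = sym,
	#undef ASM_X86__UNISTD_64_H

	typedef void (*sys_call_ptr_t)(void);
	extern void sys_ni_syscall(void);

	const sys_call_ptr_t sys_call_table[__NR_syscall_max + 1] = {
		[0 ... __NR_syscall_max] = sys_ni_syscall,	/* default: -ENOSYS */
	#include <asm/unistd_64.h>		/* pass 2: fill in the real entries */
	};
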
diff --git a/include/asm-x86/unwind.h b/include/asm-x86/unwind.h
deleted file mode 100644
index a2151567db44..000000000000
--- a/include/asm-x86/unwind.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef ASM_X86__UNWIND_H
2#define ASM_X86__UNWIND_H
3
4#define UNW_PC(frame) ((void)(frame), 0UL)
5#define UNW_SP(frame) ((void)(frame), 0UL)
6#define UNW_FP(frame) ((void)(frame), 0UL)
7
8static inline int arch_unw_user_mode(const void *info)
9{
10 return 0;
11}
12
13#endif /* ASM_X86__UNWIND_H */
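
With the in-kernel dwarf2 unwinder gone, this header is pure stubs: the comma expressions still evaluate (and thus type-check) the frame argument but always yield 0, and arch_unw_user_mode() never reports user mode. A one-function illustration of what a caller sees:

	static void stub_unwind_demo(void *frame)
	{
		unsigned long pc = UNW_PC(frame);	/* ((void)(frame), 0UL): always 0 */
		(void)pc;
		(void)arch_unw_user_mode(frame);	/* always returns 0 */
	}
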
diff --git a/include/asm-x86/user.h b/include/asm-x86/user.h
deleted file mode 100644
index 999873b22e7f..000000000000
--- a/include/asm-x86/user.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "user_32.h"
3#else
4# include "user_64.h"
5#endif
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h
deleted file mode 100644
index aa66c1857f06..000000000000
--- a/include/asm-x86/user32.h
+++ /dev/null
@@ -1,70 +0,0 @@
1#ifndef ASM_X86__USER32_H
2#define ASM_X86__USER32_H
3
4/* IA32 compatible user structures for ptrace.
5 * These should be used for 32bit coredumps too. */
6
7struct user_i387_ia32_struct {
8 u32 cwd;
9 u32 swd;
10 u32 twd;
11 u32 fip;
12 u32 fcs;
13 u32 foo;
14 u32 fos;
15 u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
16};
17
18/* FSAVE frame with extensions */
19struct user32_fxsr_struct {
20 unsigned short cwd;
21 unsigned short swd;
 22 unsigned short twd; /* not compatible with 64bit twd */
23 unsigned short fop;
24 int fip;
25 int fcs;
26 int foo;
27 int fos;
28 int mxcsr;
29 int reserved;
30 int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
31 int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
32 int padding[56];
33};
34
35struct user_regs_struct32 {
36 __u32 ebx, ecx, edx, esi, edi, ebp, eax;
37 unsigned short ds, __ds, es, __es;
38 unsigned short fs, __fs, gs, __gs;
39 __u32 orig_eax, eip;
40 unsigned short cs, __cs;
41 __u32 eflags, esp;
42 unsigned short ss, __ss;
43};
44
45struct user32 {
46 struct user_regs_struct32 regs; /* Where the registers are actually stored */
47 int u_fpvalid; /* True if math co-processor being used. */
48 /* for this mess. Not yet used. */
49 struct user_i387_ia32_struct i387; /* Math Co-processor registers. */
50/* The rest of this junk is to help gdb figure out what goes where */
51 __u32 u_tsize; /* Text segment size (pages). */
52 __u32 u_dsize; /* Data segment size (pages). */
53 __u32 u_ssize; /* Stack segment size (pages). */
54 __u32 start_code; /* Starting virtual address of text. */
55 __u32 start_stack; /* Starting virtual address of stack area.
56 This is actually the bottom of the stack,
57 the top of the stack is always found in the
58 esp register. */
59 __u32 signal; /* Signal that caused the core dump. */
 60 int reserved; /* No longer used */
61 __u32 u_ar0; /* Used by gdb to help find the values for */
62 /* the registers. */
63 __u32 u_fpstate; /* Math Co-processor pointer. */
64 __u32 magic; /* To uniquely identify a core file */
65 char u_comm[32]; /* User command that was responsible */
66 int u_debugreg[8];
67};
68
69
70#endif /* ASM_X86__USER32_H */
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h
deleted file mode 100644
index e0fe2f55f1a6..000000000000
--- a/include/asm-x86/user_32.h
+++ /dev/null
@@ -1,131 +0,0 @@
1#ifndef ASM_X86__USER_32_H
2#define ASM_X86__USER_32_H
3
4#include <asm/page.h>
5/* Core file format: The core file is written in such a way that gdb
6 can understand it and provide useful information to the user (under
7 linux we use the 'trad-core' bfd). There are quite a number of
8 obstacles to being able to view the contents of the floating point
9 registers, and until these are solved you will not be able to view the
10 contents of them. Actually, you can read in the core file and look at
11 the contents of the user struct to find out what the floating point
12 registers contain.
13 The actual file contents are as follows:
14 UPAGE: 1 page consisting of a user struct that tells gdb what is present
15 in the file. Directly after this is a copy of the task_struct, which
16 is currently not used by gdb, but it may come in useful at some point.
17 All of the registers are stored as part of the upage. The upage should
18 always be only one page.
19 DATA: The data area is stored. We use current->end_text to
20 current->brk to pick up all of the user variables, plus any memory
21 that may have been malloced. No attempt is made to determine if a page
22 is demand-zero or if a page is totally unused, we just cover the entire
23 range. All of the addresses are rounded in such a way that an integral
24 number of pages is written.
25 STACK: We need the stack information in order to get a meaningful
26 backtrace. We need to write the data from (esp) to
27 current->start_stack, so we round each of these off in order to be able
28 to write an integer number of pages.
29 The minimum core file size is 3 pages, or 12288 bytes.
30*/
31
32/*
33 * Pentium III FXSR, SSE support
34 * Gareth Hughes <gareth@valinux.com>, May 2000
35 *
36 * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
37 * interacting with the FXSR-format floating point environment. Floating
38 * point data can be accessed in the regular format in the usual manner,
39 * and both the standard and SIMD floating point data can be accessed via
40 * the new ptrace requests. In either case, changes to the FPU environment
41 * will be reflected in the task's state as expected.
42 */
43
44struct user_i387_struct {
45 long cwd;
46 long swd;
47 long twd;
48 long fip;
49 long fcs;
50 long foo;
51 long fos;
52 long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
53};
54
55struct user_fxsr_struct {
56 unsigned short cwd;
57 unsigned short swd;
58 unsigned short twd;
59 unsigned short fop;
60 long fip;
61 long fcs;
62 long foo;
63 long fos;
64 long mxcsr;
65 long reserved;
66 long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
67 long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
68 long padding[56];
69};
70
71/*
72 * This is the old layout of "struct pt_regs", and
73 * is still the layout used by user mode (the new
74 * pt_regs doesn't have all registers as the kernel
75 * doesn't use the extra segment registers)
76 */
77struct user_regs_struct {
78 unsigned long bx;
79 unsigned long cx;
80 unsigned long dx;
81 unsigned long si;
82 unsigned long di;
83 unsigned long bp;
84 unsigned long ax;
85 unsigned long ds;
86 unsigned long es;
87 unsigned long fs;
88 unsigned long gs;
89 unsigned long orig_ax;
90 unsigned long ip;
91 unsigned long cs;
92 unsigned long flags;
93 unsigned long sp;
94 unsigned long ss;
95};
96
97/* When the kernel dumps core, it starts by dumping the user struct -
98 this will be used by gdb to figure out where the data and stack segments
99 are within the file, and what virtual addresses to use. */
100struct user{
101/* We start with the registers, to mimic the way that "memory" is returned
102 from the ptrace(3,...) function. */
103 struct user_regs_struct regs; /* Where the registers are actually stored */
104/* ptrace does not yet supply these. Someday.... */
105 int u_fpvalid; /* True if math co-processor being used. */
106 /* for this mess. Not yet used. */
107 struct user_i387_struct i387; /* Math Co-processor registers. */
108/* The rest of this junk is to help gdb figure out what goes where */
109 unsigned long int u_tsize; /* Text segment size (pages). */
110 unsigned long int u_dsize; /* Data segment size (pages). */
111 unsigned long int u_ssize; /* Stack segment size (pages). */
112 unsigned long start_code; /* Starting virtual address of text. */
113 unsigned long start_stack; /* Starting virtual address of stack area.
114 This is actually the bottom of the stack,
115 the top of the stack is always found in the
116 esp register. */
117 long int signal; /* Signal that caused the core dump. */
118 int reserved; /* No longer used */
119 unsigned long u_ar0; /* Used by gdb to help find the values for */
120 /* the registers. */
121 struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */
122 unsigned long magic; /* To uniquely identify a core file */
123 char u_comm[32]; /* User command that was responsible */
124 int u_debugreg[8];
125};
126#define NBPG PAGE_SIZE
127#define UPAGES 1
128#define HOST_TEXT_START_ADDR (u.start_code)
129#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
130
131#endif /* ASM_X86__USER_32_H */
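
The macros at the bottom parameterize the a.out core-dump layout the opening comment describes: NBPG is the page unit, UPAGES the length of the upage, and the HOST_* macros hard-code a local struct user named u. A hedged sketch of how a dumper in the binfmt_aout mold would compute its extents under that naming assumption (in real code u is filled in from the dying task first):

	static void dump_extents(void)
	{
		struct user u;		/* the variable name the HOST_* macros assume */

		unsigned long text_start = HOST_TEXT_START_ADDR;	/* u.start_code */
		unsigned long stack_end  = HOST_STACK_END_ADDR;		/* u.start_stack + u.u_ssize * NBPG */
		unsigned long pages = UPAGES + u.u_dsize + u.u_ssize;	/* upage + data + stack */

		(void)text_start; (void)stack_end; (void)pages;
	}
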
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h
deleted file mode 100644
index 38b5799863b4..000000000000
--- a/include/asm-x86/user_64.h
+++ /dev/null
@@ -1,137 +0,0 @@
1#ifndef ASM_X86__USER_64_H
2#define ASM_X86__USER_64_H
3
4#include <asm/types.h>
5#include <asm/page.h>
6/* Core file format: The core file is written in such a way that gdb
7 can understand it and provide useful information to the user.
8 There are quite a number of obstacles to being able to view the
9 contents of the floating point registers, and until these are
10 solved you will not be able to view the contents of them.
11 Actually, you can read in the core file and look at the contents of
12 the user struct to find out what the floating point registers
13 contain.
14
15 The actual file contents are as follows:
16 UPAGE: 1 page consisting of a user struct that tells gdb what is present
17 in the file. Directly after this is a copy of the task_struct, which
18 is currently not used by gdb, but it may come in useful at some point.
19 All of the registers are stored as part of the upage. The upage should
20 always be only one page.
21 DATA: The data area is stored. We use current->end_text to
22 current->brk to pick up all of the user variables, plus any memory
23 that may have been malloced. No attempt is made to determine if a page
24 is demand-zero or if a page is totally unused, we just cover the entire
25 range. All of the addresses are rounded in such a way that an integral
26 number of pages is written.
27 STACK: We need the stack information in order to get a meaningful
28 backtrace. We need to write the data from (esp) to
29 current->start_stack, so we round each of these off in order to be able
30 to write an integer number of pages.
31 The minimum core file size is 3 pages, or 12288 bytes. */
32
33/*
34 * Pentium III FXSR, SSE support
35 * Gareth Hughes <gareth@valinux.com>, May 2000
36 *
37 * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
38 * interacting with the FXSR-format floating point environment. Floating
39 * point data can be accessed in the regular format in the usual manner,
40 * and both the standard and SIMD floating point data can be accessed via
41 * the new ptrace requests. In either case, changes to the FPU environment
42 * will be reflected in the task's state as expected.
43 *
44 * x86-64 support by Andi Kleen.
45 */
46
47/* This matches the 64bit FXSAVE format as defined by AMD. It is the same
48 as the 32bit format defined by Intel, except that the selector:offset pairs
49 for data and eip are replaced with flat 64bit pointers. */
50struct user_i387_struct {
51 unsigned short cwd;
52 unsigned short swd;
53 unsigned short twd; /* Note this is not the same as
54 the 32bit/x87/FSAVE twd */
55 unsigned short fop;
56 __u64 rip;
57 __u64 rdp;
58 __u32 mxcsr;
59 __u32 mxcsr_mask;
60 __u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
61 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
62 __u32 padding[24];
63};
64
65/*
66 * Segment register layout in coredumps.
67 */
68struct user_regs_struct {
69 unsigned long r15;
70 unsigned long r14;
71 unsigned long r13;
72 unsigned long r12;
73 unsigned long bp;
74 unsigned long bx;
75 unsigned long r11;
76 unsigned long r10;
77 unsigned long r9;
78 unsigned long r8;
79 unsigned long ax;
80 unsigned long cx;
81 unsigned long dx;
82 unsigned long si;
83 unsigned long di;
84 unsigned long orig_ax;
85 unsigned long ip;
86 unsigned long cs;
87 unsigned long flags;
88 unsigned long sp;
89 unsigned long ss;
90 unsigned long fs_base;
91 unsigned long gs_base;
92 unsigned long ds;
93 unsigned long es;
94 unsigned long fs;
95 unsigned long gs;
96};
97
98/* When the kernel dumps core, it starts by dumping the user struct -
99 this will be used by gdb to figure out where the data and stack segments
100 are within the file, and what virtual addresses to use. */
101
102struct user {
103/* We start with the registers, to mimic the way that "memory" is returned
104 from the ptrace(3,...) function. */
105 struct user_regs_struct regs; /* Where the registers are actually stored */
106/* ptrace does not yet supply these. Someday.... */
107 int u_fpvalid; /* True if math co-processor being used. */
108 /* for this mess. Not yet used. */
109 int pad0;
110 struct user_i387_struct i387; /* Math Co-processor registers. */
111/* The rest of this junk is to help gdb figure out what goes where */
112 unsigned long int u_tsize; /* Text segment size (pages). */
113 unsigned long int u_dsize; /* Data segment size (pages). */
114 unsigned long int u_ssize; /* Stack segment size (pages). */
115 unsigned long start_code; /* Starting virtual address of text. */
116 unsigned long start_stack; /* Starting virtual address of stack area.
117 This is actually the bottom of the stack,
118 the top of the stack is always found in the
119 esp register. */
120 long int signal; /* Signal that caused the core dump. */
121 int reserved; /* No longer used */
122 int pad1;
123 unsigned long u_ar0; /* Used by gdb to help find the values for */
124 /* the registers. */
125 struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */
126 unsigned long magic; /* To uniquely identify a core file */
127 char u_comm[32]; /* User command that was responsible */
128 unsigned long u_debugreg[8];
129 unsigned long error_code; /* CPU error code or 0 */
130 unsigned long fault_address; /* CR3 or 0 */
131};
132#define NBPG PAGE_SIZE
133#define UPAGES 1
134#define HOST_TEXT_START_ADDR (u.start_code)
135#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
136
137#endif /* ASM_X86__USER_64_H */
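
Because debuggers address the user area by byte offset, the field layout above is ABI: a ptrace() tracer reaches a register by offsetting into struct user. A hedged userspace sketch; it uses glibc's <sys/user.h> copy of these structures, which spells the 64-bit instruction pointer rip where the kernel header above says ip:

	#include <stddef.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/user.h>		/* glibc's rendering of struct user */

	static long read_ip(pid_t pid)
	{
		/* PTRACE_PEEKUSER reads one word from the tracee's user area */
		return ptrace(PTRACE_PEEKUSER, pid,
			      (void *)offsetof(struct user, regs.rip), NULL);
	}
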
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h
deleted file mode 100644
index 215f1969c266..000000000000
--- a/include/asm-x86/uv/bios.h
+++ /dev/null
@@ -1,94 +0,0 @@
1#ifndef ASM_X86__UV__BIOS_H
2#define ASM_X86__UV__BIOS_H
3
4/*
5 * UV BIOS layer definitions.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
22 * Copyright (c) Russ Anderson
23 */
24
25#include <linux/rtc.h>
26
27/*
 28 * Values for the BIOS calls. It is passed as the first argument in the
29 * BIOS call. Passing any other value in the first argument will result
30 * in a BIOS_STATUS_UNIMPLEMENTED return status.
31 */
32enum uv_bios_cmd {
33 UV_BIOS_COMMON,
34 UV_BIOS_GET_SN_INFO,
35 UV_BIOS_FREQ_BASE
36};
37
38/*
39 * Status values returned from a BIOS call.
40 */
41enum {
42 BIOS_STATUS_SUCCESS = 0,
43 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
44 BIOS_STATUS_EINVAL = -EINVAL,
45 BIOS_STATUS_UNAVAIL = -EBUSY
46};
47
48/*
49 * The UV system table describes specific firmware
50 * capabilities available to the Linux kernel at runtime.
51 */
52struct uv_systab {
53 char signature[4]; /* must be "UVST" */
54 u32 revision; /* distinguish different firmware revs */
55 u64 function; /* BIOS runtime callback function ptr */
56};
57
58enum {
59 BIOS_FREQ_BASE_PLATFORM = 0,
60 BIOS_FREQ_BASE_INTERVAL_TIMER = 1,
61 BIOS_FREQ_BASE_REALTIME_CLOCK = 2
62};
63
64union partition_info_u {
65 u64 val;
66 struct {
67 u64 hub_version : 8,
68 partition_id : 16,
69 coherence_id : 16,
70 region_size : 24;
71 };
72};
73
74/*
75 * bios calls have 6 parameters
76 */
77extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64);
78extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64);
79extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
80
81extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
82extern s64 uv_bios_freq_base(u64, u64 *);
83
84extern void uv_bios_init(void);
85
86extern int uv_type;
87extern long sn_partition_id;
88extern long uv_coherency_id;
89extern long uv_region_size;
90#define partition_coherence_id() (uv_coherency_id)
91
92extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
93
94#endif /* ASM_X86__UV__BIOS_H */
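
Everything in this header funnels into uv_bios_call() (or its irqsave/reentrant variants), with the enum uv_bios_cmd as the mandatory first argument; any other value earns BIOS_STATUS_UNIMPLEMENTED. A short sketch of a caller using only the declarations above (the function name and error handling are illustrative):

	static void uv_rtc_probe(void)
	{
		u64 rtc_hz;
		s64 ret = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &rtc_hz);

		if (ret != BIOS_STATUS_SUCCESS)
			printk(KERN_WARNING "uv_bios_freq_base: %lld\n",
			       (long long)ret);
		else
			printk(KERN_INFO "UV RTC base: %llu Hz\n",
			       (unsigned long long)rtc_hz);
	}
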
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h
deleted file mode 100644
index 77153fb18f5e..000000000000
--- a/include/asm-x86/uv/uv_bau.h
+++ /dev/null
@@ -1,332 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI UV Broadcast Assist Unit definitions
7 *
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#ifndef ASM_X86__UV__UV_BAU_H
12#define ASM_X86__UV__UV_BAU_H
13
14#include <linux/bitmap.h>
15#define BITSPERBYTE 8
16
17/*
18 * Broadcast Assist Unit messaging structures
19 *
20 * Selective Broadcast activations are induced by software action
21 * specifying a particular 8-descriptor "set" via a 6-bit index written
22 * to an MMR.
23 * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
24 * each 6-bit index value. These descriptor sets are mapped in sequence
25 * starting with set 0 located at the address specified in the
26 * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
27 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
28 *
29 * We will use 31 sets, one for sending BAU messages from each of the 32
30 * cpu's on the node.
31 *
32 * TLB shootdown will use the first of the 8 descriptors of each set.
33 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
34 */
35
36#define UV_ITEMS_PER_DESCRIPTOR 8
37#define UV_CPUS_PER_ACT_STATUS 32
38#define UV_ACT_STATUS_MASK 0x3
39#define UV_ACT_STATUS_SIZE 2
40#define UV_ACTIVATION_DESCRIPTOR_SIZE 32
41#define UV_DISTRIBUTION_SIZE 256
42#define UV_SW_ACK_NPENDING 8
43#define UV_NET_ENDPOINT_INTD 0x38
44#define UV_DESC_BASE_PNODE_SHIFT 49
45#define UV_PAYLOADQ_PNODE_SHIFT 49
46#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
47#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
48
49/*
50 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
51 */
52#define DESC_STATUS_IDLE 0
53#define DESC_STATUS_ACTIVE 1
54#define DESC_STATUS_DESTINATION_TIMEOUT 2
55#define DESC_STATUS_SOURCE_TIMEOUT 3
56
57/*
 58 * source side thresholds at which message retries print a warning
59 */
60#define SOURCE_TIMEOUT_LIMIT 20
61#define DESTINATION_TIMEOUT_LIMIT 20
62
63/*
64 * number of entries in the destination side payload queue
65 */
66#define DEST_Q_SIZE 17
67/*
68 * number of destination side software ack resources
69 */
70#define DEST_NUM_RESOURCES 8
71#define MAX_CPUS_PER_NODE 32
72/*
73 * completion statuses for sending a TLB flush message
74 */
75#define FLUSH_RETRY 1
76#define FLUSH_GIVEUP 2
77#define FLUSH_COMPLETE 3
78
79/*
80 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
81 * If the 'multilevel' flag in the header portion of the descriptor
82 * has been set to 0, then endpoint multi-unicast mode is selected.
83 * The distribution specification (32 bytes) is interpreted as a 256-bit
84 * distribution vector. Adjacent bits correspond to consecutive even numbered
85 * nodeIDs. The result of adding the index of a given bit to the 15-bit
86 * 'base_dest_nodeid' field of the header corresponds to the
87 * destination nodeID associated with that specified bit.
88 */
89struct bau_target_nodemask {
90 unsigned long bits[BITS_TO_LONGS(256)];
91};
92
93/*
94 * mask of cpu's on a node
95 * (during initialization we need to check that unsigned long has
96 * enough bits for max. cpu's per node)
97 */
98struct bau_local_cpumask {
99 unsigned long bits;
100};
101
102/*
103 * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
104 * only 12 bytes (96 bits) of the payload area are usable.
105 * An additional 3 bytes (bits 27:4) of the header address are carried
106 * to the next bytes of the destination payload queue.
107 * And an additional 2 bytes of the header Suppl_A field are also
108 * carried to the destination payload queue.
109 * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
110 * of the destination payload queue, which is written by the hardware
111 * with the s/w ack resource bit vector.
112 * [ effective message contents (16 bytes (128 bits) maximum), not counting
113 * the s/w ack bit vector ]
114 */
115
116/*
117 * The payload is software-defined for INTD transactions
118 */
119struct bau_msg_payload {
120 unsigned long address; /* signifies a page or all TLB's
121 of the cpu */
122 /* 64 bits */
123 unsigned short sending_cpu; /* filled in by sender */
124 /* 16 bits */
125 unsigned short acknowledge_count;/* filled in by destination */
126 /* 16 bits */
127 unsigned int reserved1:32; /* not usable */
128};
129
130
131/*
132 * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
133 * see table 4.2.3.0.1 in broadcast_assist spec.
134 */
135struct bau_msg_header {
136 int dest_subnodeid:6; /* must be zero */
137 /* bits 5:0 */
138 int base_dest_nodeid:15; /* nasid>>1 (pnode) of first bit in node_map */
139 /* bits 20:6 */
140 int command:8; /* message type */
141 /* bits 28:21 */
142 /* 0x38: SN3net EndPoint Message */
143 int rsvd_1:3; /* must be zero */
144 /* bits 31:29 */
145 /* int will align on 32 bits */
146 int rsvd_2:9; /* must be zero */
147 /* bits 40:32 */
148 /* Suppl_A is 56-41 */
149 int payload_2a:8; /* becomes byte 16 of msg */
150 /* bits 48:41 */ /* not currently using */
151 int payload_2b:8; /* becomes byte 17 of msg */
152 /* bits 56:49 */ /* not currently using */
153 /* Address field (96:57) is never used as an
154 address (these are address bits 42:3) */
155 int rsvd_3:1; /* must be zero */
156 /* bit 57 */
157 /* address bits 27:4 are payload */
158 /* these 24 bits become bytes 12-14 of msg */
159 int replied_to:1; /* sent as 0 by the source to byte 12 */
160 /* bit 58 */
161
162 int payload_1a:5; /* not currently used */
163 /* bits 63:59 */
164 int payload_1b:8; /* not currently used */
165 /* bits 71:64 */
166 int payload_1c:8; /* not currently used */
167 /* bits 79:72 */
168 int payload_1d:2; /* not currently used */
169 /* bits 81:80 */
170
171 int rsvd_4:7; /* must be zero */
172 /* bits 88:82 */
173 int sw_ack_flag:1; /* software acknowledge flag */
174 /* bit 89 */
175 /* INTD transactions at destination are to
176 wait for software acknowledge */
177 int rsvd_5:6; /* must be zero */
178 /* bits 95:90 */
179 int rsvd_6:5; /* must be zero */
180 /* bits 100:96 */
181 int int_both:1; /* if 1, interrupt both sockets on the blade */
182 /* bit 101*/
183 int fairness:3; /* usually zero */
184 /* bits 104:102 */
185 int multilevel:1; /* multi-level multicast format */
186 /* bit 105 */
187 /* 0 for TLB: endpoint multi-unicast messages */
188 int chaining:1; /* next descriptor is part of this activation*/
189 /* bit 106 */
190 int rsvd_7:21; /* must be zero */
191 /* bits 127:107 */
192};
193
194/*
195 * The activation descriptor:
196 * The format of the message to send, plus all accompanying control
197 * Should be 64 bytes
198 */
199struct bau_desc {
200 struct bau_target_nodemask distribution;
201 /*
202 * message template, consisting of header and payload:
203 */
204 struct bau_msg_header header;
205 struct bau_msg_payload payload;
206};
207/*
208 *   -payload--    ---------header------
209 *   bytes 0-11    bits 41-56   bits 58-81
210 *       A            B (2)        C (3)
211 *
212 *            A/B/C are moved to:
213 *       A            C            B
214 *   bytes 0-11   bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
215 *   ------------payload queue-----------
216 */
217
218/*
219 * The payload queue on the destination side is an array of these.
220 * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
221 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
222 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
223 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
224 * sw_ack_vector and payload_2)
225 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
226 * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
227 * operation."
228 */
229struct bau_payload_queue_entry {
230 unsigned long address; /* signifies a page or all TLB's
231 of the cpu */
232 /* 64 bits, bytes 0-7 */
233
234 unsigned short sending_cpu; /* cpu that sent the message */
235 /* 16 bits, bytes 8-9 */
236
237 unsigned short acknowledge_count; /* filled in by destination */
238 /* 16 bits, bytes 10-11 */
239
240 unsigned short replied_to:1; /* sent as 0 by the source */
241 /* 1 bit */
242 unsigned short unused1:7; /* not currently using */
243 /* 7 bits: byte 12 */
244
245 unsigned char unused2[2]; /* not currently using */
246 /* bytes 13-14 */
247
248 unsigned char sw_ack_vector; /* filled in by the hardware */
249 /* byte 15 (bits 127:120) */
250
251 unsigned char unused4[3]; /* not currently using bytes 17-19 */
252 /* bytes 17-19 */
253
254 int number_of_cpus; /* filled in at destination */
255 /* 32 bits, bytes 20-23 (aligned) */
256
257 unsigned char unused5[8]; /* not using */
258 /* bytes 24-31 */
259};
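
A sketch of scanning the destination payload queue for a message that still holds software-acknowledge resources, using the sw_ack_vector byte the hardware fills in; the bounds would come from the va_queue_first/va_queue_last pointers of struct bau_control below, and the function name is an assumption:

static struct bau_payload_queue_entry *
next_unacked_sketch(struct bau_payload_queue_entry *first,
		    struct bau_payload_queue_entry *last)
{
	struct bau_payload_queue_entry *msg;

	for (msg = first; msg <= last; msg++)
		if (msg->sw_ack_vector && !msg->replied_to)
			return msg;	/* still holds s/w ack resources */
	return NULL;
}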
260
261/*
262 * one for every slot in the destination payload queue
263 */
264struct bau_msg_status {
265 struct bau_local_cpumask seen_by; /* map of cpu's */
266};
267
268/*
269 * one for every slot in the destination software ack resources
270 */
271struct bau_sw_ack_status {
272 struct bau_payload_queue_entry *msg; /* associated message */
273 int watcher; /* cpu monitoring, or -1 */
274};
275
276/*
277 * one on every node and per-cpu; to locate the software tables
278 */
279struct bau_control {
280 struct bau_desc *descriptor_base;
281 struct bau_payload_queue_entry *bau_msg_head;
282 struct bau_payload_queue_entry *va_queue_first;
283 struct bau_payload_queue_entry *va_queue_last;
284 struct bau_msg_status *msg_statuses;
285 int *watching; /* pointer to array */
286};
287
288/*
289 * This structure is allocated per_cpu for UV TLB shootdown statistics.
290 */
291struct ptc_stats {
292 unsigned long ptc_i; /* number of IPI-style flushes */
293 unsigned long requestor; /* number of nodes this cpu sent to */
294 unsigned long requestee; /* times cpu was remotely requested */
295 unsigned long alltlb; /* times all tlb's on this cpu were flushed */
296 unsigned long onetlb; /* times just one tlb on this cpu was flushed */
297 unsigned long s_retry; /* retries on source side timeouts */
298 unsigned long d_retry; /* retries on destination side timeouts */
299 unsigned long sflush; /* cycles spent in uv_flush_tlb_others */
300 unsigned long dflush; /* cycles spent on destination side */
301 unsigned long retriesok; /* successes on retries */
302 unsigned long nomsg; /* interrupts with no message */
303 unsigned long multmsg; /* interrupts with multiple messages */
304 unsigned long ntargeted;/* nodes targeted */
305};
306
307static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp)
308{
309 return constant_test_bit(node, &dstp->bits[0]);
310}
311static inline void bau_node_set(int node, struct bau_target_nodemask *dstp)
312{
313 __set_bit(node, &dstp->bits[0]);
314}
315static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits)
316{
317 bitmap_zero(&dstp->bits[0], nbits);
318}
319
320static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
321{
322 bitmap_zero(&dstp->bits, nbits);
323}
324
325#define cpubit_isset(cpu, bau_local_cpumask) \
326 test_bit((cpu), (bau_local_cpumask).bits)
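
A short sketch of aiming an activation at a single node with the helpers above; UV_DISTRIBUTION_SIZE is assumed to be the distribution-map width defined earlier in this header:

static void target_one_node_sketch(struct bau_desc *desc, int node)
{
	bau_nodes_clear(&desc->distribution, UV_DISTRIBUTION_SIZE);
	bau_node_set(node, &desc->distribution);
}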
327
328extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
329extern void uv_bau_message_intr1(void);
330extern void uv_bau_timeout_intr1(void);
331
332#endif /* ASM_X86__UV__UV_BAU_H */
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h
deleted file mode 100644
index bdb5b01afbf5..000000000000
--- a/include/asm-x86/uv/uv_hub.h
+++ /dev/null
@@ -1,354 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI UV architectural definitions
7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#ifndef ASM_X86__UV__UV_HUB_H
12#define ASM_X86__UV__UV_HUB_H
13
14#include <linux/numa.h>
15#include <linux/percpu.h>
16#include <asm/types.h>
17#include <asm/percpu.h>
18
19
20/*
21 * Addressing Terminology
22 *
23 * M - The low M bits of a physical address represent the offset
24 * into the blade local memory. RAM memory on a blade is physically
25 * contiguous (although various IO spaces may punch holes in
26 * it).
27 *
28 * N - Number of bits in the node portion of a socket physical
29 * address.
30 *
31 * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
32 * routers always have low bit of 1, C/MBricks have low bit
33 * equal to 0. Most addressing macros that target UV hub chips
34 * right shift the NASID by 1 to exclude the always-zero bit.
35 * NASIDs contain up to 15 bits.
36 *
37 * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
38 * of nasids.
39 *
40 * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
41 * of the nasid for socket usage.
42 *
43 *
44 * NumaLink Global Physical Address Format:
45 * +--------------------------------+---------------------+
46 * |00..000| GNODE | NodeOffset |
47 * +--------------------------------+---------------------+
48 * |<-------53 - M bits --->|<--------M bits ----->|
49 *
50 * M - number of node offset bits (35 .. 40)
51 *
52 *
53 * Memory/UV-HUB Processor Socket Address Format:
54 * +----------------+---------------+---------------------+
55 * |00..000000000000| PNODE | NodeOffset |
56 * +----------------+---------------+---------------------+
57 * |<--- N bits --->|<--------M bits ----->|
58 *
59 * M - number of node offset bits (35 .. 40)
60 * N - number of PNODE bits (0 .. 10)
61 *
62 * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
63 * The actual values are configuration dependent and are set at
64 * boot time. M & N values are set by the hardware/BIOS at boot.
65 *
66 *
67 * APICID format
68 * NOTE!!!!!! This is the current format of the APICID. However, code
69 * should assume that this will change in the future. Use functions
70 * in this file for all APICID bit manipulations and conversion.
71 *
72 * 1111110000000000
73 * 5432109876543210
74 * pppppppppplc0cch
75 * sssssssssss
76 *
77 * p = pnode bits
78 * l = socket number on board
79 * c = core
80 * h = hyperthread
81 * s = bits that are in the SOCKET_ID CSR
82 *
83 * Note: Processor only supports 12 bits in the APICID register. The ACPI
84 * tables hold all 16 bits. Software needs to be aware of this.
85 *
86 * Unless otherwise specified, all references to APICID refer to
87 * the FULL value contained in ACPI tables, not the subset in the
88 * processor APICID register.
89 */
90
91
92/*
93 * Maximum number of bricks in all partitions and in all coherency domains.
94 * This is the total number of bricks accessible in the numalink fabric. It
95 * includes all C & M bricks. Routers are NOT included.
96 *
97 * This value is also the value of the maximum number of non-router NASIDs
98 * in the numalink fabric.
99 *
100 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
101 */
102#define UV_MAX_NUMALINK_BLADES 16384
103
104/*
105 * Maximum number of C/Mbricks within a software SSI (hardware may support
106 * more).
107 */
108#define UV_MAX_SSI_BLADES 256
109
110/*
111 * The largest possible NASID of a C or M brick (+ 2)
112 */
113#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_NODES * 2)
114
115/*
116 * The following defines attributes of the HUB chip. These attributes are
117 * frequently referenced and are kept in the per-cpu data areas of each cpu.
118 * They are kept together in a struct to minimize cache misses.
119 */
120struct uv_hub_info_s {
121 unsigned long global_mmr_base;
122 unsigned long gpa_mask;
123 unsigned long gnode_upper;
124 unsigned long lowmem_remap_top;
125 unsigned long lowmem_remap_base;
126 unsigned short pnode;
127 unsigned short pnode_mask;
128 unsigned short coherency_domain_number;
129 unsigned short numa_blade_id;
130 unsigned char blade_processor_id;
131 unsigned char m_val;
132 unsigned char n_val;
133};
134DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
135#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
136#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
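
For example, reading the cached pnode of the local hub versus that of another cpu through these accessors (a sketch, assuming kernel context):

static int same_pnode_sketch(int cpu)
{
	int my_pnode = uv_hub_info->pnode;		/* this cpu's hub */
	int its_pnode = uv_cpu_hub_info(cpu)->pnode;	/* another cpu's hub */

	return my_pnode == its_pnode;
}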
137
138/*
139 * Local & Global MMR space macros.
140 * Note: macros are intended to be used ONLY by inline functions
141 * in this file - not by other kernel code.
142 * n - NASID (full 15-bit global nasid)
143 * g - GNODE (full 15-bit global nasid, right shifted 1)
144 * p - PNODE (local part of nasids, right shifted 1)
145 */
146#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
147#define UV_PNODE_TO_NASID(p) (((p) << 1) | uv_hub_info->gnode_upper)
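
A worked example of the two conversion macros, under the illustrative assumption pnode_mask = 0x3f and gnode_upper = 0:

static void nasid_pnode_sketch(void)
{
	int pnode = UV_NASID_TO_PNODE(6);	/* (6 >> 1) & 0x3f == 3 */
	int nasid = UV_PNODE_TO_NASID(pnode);	/* (3 << 1) | 0    == 6 */
	(void)nasid;				/* illustration only */
}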
148
149#define UV_LOCAL_MMR_BASE 0xf4000000UL
150#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
151#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
152#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
153#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
154
155#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
156#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
157
158#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
159
160#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
161 ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
162
163#define UV_APIC_PNODE_SHIFT 6
164
165/*
166 * Macros for converting between kernel virtual addresses, socket local physical
167 * addresses, and UV global physical addresses.
168 * Note: use the standard __pa() & __va() macros for converting
169 * between socket virtual and socket physical addresses.
170 */
171
172/* socket phys RAM --> UV global physical address */
173static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
174{
175 if (paddr < uv_hub_info->lowmem_remap_top)
176 paddr += uv_hub_info->lowmem_remap_base;
177 return paddr | uv_hub_info->gnode_upper;
178}
179
180
181/* socket virtual --> UV global physical address */
182static inline unsigned long uv_gpa(void *v)
183{
184 return __pa(v) | uv_hub_info->gnode_upper;
185}
186
187/* socket virtual --> UV global physical address */
188static inline void *uv_vgpa(void *v)
189{
190 return (void *)uv_gpa(v);
191}
192
193/* UV global physical address --> socket virtual */
194static inline void *uv_va(unsigned long gpa)
195{
196 return __va(gpa & uv_hub_info->gpa_mask);
197}
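
A round-trip sketch tying the two together; note that __pa()/__va() remain the right tools when no global (cross-socket) address is involved:

static void *gpa_roundtrip_sketch(void *p)
{
	unsigned long gpa = uv_gpa(p);	/* socket virtual -> UV global */
	return uv_va(gpa);		/* UV global -> socket virtual */
}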
198
199/* pnode, offset --> socket virtual */
200static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
201{
202 return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
203}
204
205
206/*
207 * Extract a PNODE from an APICID (full apicid, not processor subset)
208 */
209static inline int uv_apicid_to_pnode(int apicid)
210{
211 return (apicid >> UV_APIC_PNODE_SHIFT);
212}
213
214/*
215 * Access global MMRs using the low memory MMR32 space. This region supports
216 * faster MMR access but not all MMRs are accessible in this space.
217 */
218static inline unsigned long *uv_global_mmr32_address(int pnode,
219 unsigned long offset)
220{
221 return __va(UV_GLOBAL_MMR32_BASE |
222 UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
223}
224
225static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
226 unsigned long val)
227{
228 *uv_global_mmr32_address(pnode, offset) = val;
229}
230
231static inline unsigned long uv_read_global_mmr32(int pnode,
232 unsigned long offset)
233{
234 return *uv_global_mmr32_address(pnode, offset);
235}
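
A read-modify-write sketch against an arbitrary (hypothetical) MMR offset, remembering that only a subset of MMRs is reachable through the 32-bit space:

static void mmr32_rmw_sketch(int pnode, unsigned long offset,
			     unsigned long set_bits)
{
	unsigned long v = uv_read_global_mmr32(pnode, offset);

	uv_write_global_mmr32(pnode, offset, v | set_bits);
}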
236
237/*
238 * Access Global MMR space using the MMR space located at the top of physical
239 * memory.
240 */
241static inline unsigned long *uv_global_mmr64_address(int pnode,
242 unsigned long offset)
243{
244 return __va(UV_GLOBAL_MMR64_BASE |
245 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
246}
247
248static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
249 unsigned long val)
250{
251 *uv_global_mmr64_address(pnode, offset) = val;
252}
253
254static inline unsigned long uv_read_global_mmr64(int pnode,
255 unsigned long offset)
256{
257 return *uv_global_mmr64_address(pnode, offset);
258}
259
260/*
261 * Access hub local MMRs. Faster than using global space but only local MMRs
262 * are accessible.
263 */
264static inline unsigned long *uv_local_mmr_address(unsigned long offset)
265{
266 return __va(UV_LOCAL_MMR_BASE | offset);
267}
268
269static inline unsigned long uv_read_local_mmr(unsigned long offset)
270{
271 return *uv_local_mmr_address(offset);
272}
273
274static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
275{
276 *uv_local_mmr_address(offset) = val;
277}
278
279/*
280 * Structures and definitions for converting between cpu, node, pnode, and blade
281 * numbers.
282 */
283struct uv_blade_info {
284 unsigned short nr_possible_cpus;
285 unsigned short nr_online_cpus;
286 unsigned short pnode;
287};
288extern struct uv_blade_info *uv_blade_info;
289extern short *uv_node_to_blade;
290extern short *uv_cpu_to_blade;
291extern short uv_possible_blades;
292
293/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> - 1 */
294static inline int uv_blade_processor_id(void)
295{
296 return uv_hub_info->blade_processor_id;
297}
298
299/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
300static inline int uv_numa_blade_id(void)
301{
302 return uv_hub_info->numa_blade_id;
303}
304
305/* Convert a cpu number to the UV blade number */
306static inline int uv_cpu_to_blade_id(int cpu)
307{
308 return uv_cpu_to_blade[cpu];
309}
310
311/* Convert a linux node number to the UV blade number */
312static inline int uv_node_to_blade_id(int nid)
313{
314 return uv_node_to_blade[nid];
315}
316
317/* Convert a blade id to the PNODE of the blade */
318static inline int uv_blade_to_pnode(int bid)
319{
320 return uv_blade_info[bid].pnode;
321}
322
323/* Determine the number of possible cpus on a blade */
324static inline int uv_blade_nr_possible_cpus(int bid)
325{
326 return uv_blade_info[bid].nr_possible_cpus;
327}
328
329/* Determine the number of online cpus on a blade */
330static inline int uv_blade_nr_online_cpus(int bid)
331{
332 return uv_blade_info[bid].nr_online_cpus;
333}
334
335/* Convert a cpu id to the PNODE of the blade containing the cpu */
336static inline int uv_cpu_to_pnode(int cpu)
337{
338 return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
339}
340
341/* Convert a linux node number to the PNODE of the blade */
342static inline int uv_node_to_pnode(int nid)
343{
344 return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
345}
346
347/* Maximum possible number of blades */
348static inline int uv_num_possible_blades(void)
349{
350 return uv_possible_blades;
351}
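
A sketch chaining the conversions above, cpu to blade to pnode; pr_debug() from linux/kernel.h is assumed for the illustrative output:

static void cpu_topology_sketch(int cpu)
{
	int bid = uv_cpu_to_blade_id(cpu);	    /* cpu -> blade */
	int pnode = uv_blade_to_pnode(bid);	    /* blade -> pnode */
	int ncpus = uv_blade_nr_possible_cpus(bid); /* cpus on that blade */

	pr_debug("cpu %d: blade %d pnode %d (%d cpus)\n",
		 cpu, bid, pnode, ncpus);
}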
352
353#endif /* ASM_X86__UV__UV_HUB_H */
354
diff --git a/include/asm-x86/uv/uv_irq.h b/include/asm-x86/uv/uv_irq.h
deleted file mode 100644
index 8bf5f32da9c6..000000000000
--- a/include/asm-x86/uv/uv_irq.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI UV IRQ definitions
7 *
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#ifndef ASM_X86__UV__UV_IRQ_H
12#define ASM_X86__UV__UV_IRQ_H
13
14/* If a generic version of this structure gets defined, eliminate this one. */
15struct uv_IO_APIC_route_entry {
16 __u64 vector : 8,
17 delivery_mode : 3,
18 dest_mode : 1,
19 delivery_status : 1,
20 polarity : 1,
21 __reserved_1 : 1,
22 trigger : 1,
23 mask : 1,
24 __reserved_2 : 15,
25 dest : 32;
26};
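
A sketch of composing one of these route entries; the field semantics follow the usual IO-APIC conventions (0 = physical destination mode, 0 = edge trigger), which is an assumption here, and memset() needs linux/string.h:

static void route_entry_sketch(struct uv_IO_APIC_route_entry *e,
			       int vector, unsigned int apicid)
{
	memset(e, 0, sizeof(*e));
	e->vector = vector;	/* interrupt vector to deliver */
	e->dest_mode = 0;	/* physical destination mode (assumed) */
	e->trigger = 0;		/* edge triggered (assumed) */
	e->mask = 0;		/* unmasked */
	e->dest = apicid;	/* destination APICID */
}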
27
28extern struct irq_chip uv_irq_chip;
29
30extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long);
31extern void arch_disable_uv_irq(int, unsigned long);
32
33extern int uv_setup_irq(char *, int, int, unsigned long);
34extern void uv_teardown_irq(unsigned int, int, unsigned long);
35
36#endif /* ASM_X86__UV__UV_IRQ_H */
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h
deleted file mode 100644
index 8b03d89d2459..000000000000
--- a/include/asm-x86/uv/uv_mmrs.h
+++ /dev/null
@@ -1,1295 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI UV MMR definitions
7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#ifndef ASM_X86__UV__UV_MMRS_H
12#define ASM_X86__UV__UV_MMRS_H
13
14#define UV_MMR_ENABLE (1UL << 63)
15
16/* ========================================================================= */
17/* UVH_BAU_DATA_CONFIG */
18/* ========================================================================= */
19#define UVH_BAU_DATA_CONFIG 0x61680UL
20#define UVH_BAU_DATA_CONFIG_32 0x0438
21
22#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
23#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
24#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
25#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
26#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
27#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
28#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
29#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
30#define UVH_BAU_DATA_CONFIG_P_SHFT 13
31#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
32#define UVH_BAU_DATA_CONFIG_T_SHFT 15
33#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
34#define UVH_BAU_DATA_CONFIG_M_SHFT 16
35#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
36#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
37#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
38
39union uvh_bau_data_config_u {
40 unsigned long v;
41 struct uvh_bau_data_config_s {
42 unsigned long vector_ : 8; /* RW */
43 unsigned long dm : 3; /* RW */
44 unsigned long destmode : 1; /* RW */
45 unsigned long status : 1; /* RO */
46 unsigned long p : 1; /* RO */
47 unsigned long rsvd_14 : 1; /* */
48 unsigned long t : 1; /* RO */
49 unsigned long m : 1; /* RW */
50 unsigned long rsvd_17_31: 15; /* */
51 unsigned long apic_id : 32; /* RW */
52 } s;
53};
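
These register unions all follow one pattern: compose fields through .s, then move the whole 64-bit value through .v. A sketch (dm = 0 for fixed delivery is an assumption):

static unsigned long bau_data_config_sketch(int vector, int apicid)
{
	union uvh_bau_data_config_u cfg;

	cfg.v = 0;			/* start from a known state */
	cfg.s.vector_ = vector;		/* note the trailing underscore */
	cfg.s.dm = 0;			/* fixed delivery mode (assumed) */
	cfg.s.apic_id = apicid;
	return cfg.v;			/* ready for a single MMR write */
}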
54
55/* ========================================================================= */
56/* UVH_EVENT_OCCURRED0 */
57/* ========================================================================= */
58#define UVH_EVENT_OCCURRED0 0x70000UL
59#define UVH_EVENT_OCCURRED0_32 0x005e8
60
61#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
62#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
63#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
64#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
65#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
66#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
67#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
68#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
69#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
70#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
71#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
72#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
73#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
74#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
75#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
76#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
77#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
78#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
79#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
80#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
81#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
82#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
83#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
84#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
85#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
86#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
87#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
88#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
89#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
90#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
91#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
92#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
93#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
94#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
95#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
96#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
97#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
98#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
99#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
100#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
101#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
102#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
103#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
104#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
105#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
106#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
107#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
108#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
109#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
110#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
111#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
112#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
113#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
114#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
115#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
116#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
117#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
118#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
119#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
120#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
121#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
122#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
123#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
124#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
125#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
126#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
127#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
128#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
129#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
130#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
131#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
132#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
133#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
134#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
135#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
136#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
137#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
138#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
139#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
140#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
141#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
142#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
143#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
144#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
145#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
146#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
147#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
148#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
149#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
150#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
151#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
152#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
153#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
154#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
155#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
156#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
157#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
158#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
159#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
160#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
161#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
162#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
163#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
164#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
165#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
166#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
167#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
168#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
169#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
170#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
171#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
172#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
173#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
174#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
175union uvh_event_occurred0_u {
176 unsigned long v;
177 struct uvh_event_occurred0_s {
178 unsigned long lb_hcerr : 1; /* RW, W1C */
179 unsigned long gr0_hcerr : 1; /* RW, W1C */
180 unsigned long gr1_hcerr : 1; /* RW, W1C */
181 unsigned long lh_hcerr : 1; /* RW, W1C */
182 unsigned long rh_hcerr : 1; /* RW, W1C */
183 unsigned long xn_hcerr : 1; /* RW, W1C */
184 unsigned long si_hcerr : 1; /* RW, W1C */
185 unsigned long lb_aoerr0 : 1; /* RW, W1C */
186 unsigned long gr0_aoerr0 : 1; /* RW, W1C */
187 unsigned long gr1_aoerr0 : 1; /* RW, W1C */
188 unsigned long lh_aoerr0 : 1; /* RW, W1C */
189 unsigned long rh_aoerr0 : 1; /* RW, W1C */
190 unsigned long xn_aoerr0 : 1; /* RW, W1C */
191 unsigned long si_aoerr0 : 1; /* RW, W1C */
192 unsigned long lb_aoerr1 : 1; /* RW, W1C */
193 unsigned long gr0_aoerr1 : 1; /* RW, W1C */
194 unsigned long gr1_aoerr1 : 1; /* RW, W1C */
195 unsigned long lh_aoerr1 : 1; /* RW, W1C */
196 unsigned long rh_aoerr1 : 1; /* RW, W1C */
197 unsigned long xn_aoerr1 : 1; /* RW, W1C */
198 unsigned long si_aoerr1 : 1; /* RW, W1C */
199 unsigned long rh_vpi_int : 1; /* RW, W1C */
200 unsigned long system_shutdown_int : 1; /* RW, W1C */
201 unsigned long lb_irq_int_0 : 1; /* RW, W1C */
202 unsigned long lb_irq_int_1 : 1; /* RW, W1C */
203 unsigned long lb_irq_int_2 : 1; /* RW, W1C */
204 unsigned long lb_irq_int_3 : 1; /* RW, W1C */
205 unsigned long lb_irq_int_4 : 1; /* RW, W1C */
206 unsigned long lb_irq_int_5 : 1; /* RW, W1C */
207 unsigned long lb_irq_int_6 : 1; /* RW, W1C */
208 unsigned long lb_irq_int_7 : 1; /* RW, W1C */
209 unsigned long lb_irq_int_8 : 1; /* RW, W1C */
210 unsigned long lb_irq_int_9 : 1; /* RW, W1C */
211 unsigned long lb_irq_int_10 : 1; /* RW, W1C */
212 unsigned long lb_irq_int_11 : 1; /* RW, W1C */
213 unsigned long lb_irq_int_12 : 1; /* RW, W1C */
214 unsigned long lb_irq_int_13 : 1; /* RW, W1C */
215 unsigned long lb_irq_int_14 : 1; /* RW, W1C */
216 unsigned long lb_irq_int_15 : 1; /* RW, W1C */
217 unsigned long l1_nmi_int : 1; /* RW, W1C */
218 unsigned long stop_clock : 1; /* RW, W1C */
219 unsigned long asic_to_l1 : 1; /* RW, W1C */
220 unsigned long l1_to_asic : 1; /* RW, W1C */
221 unsigned long ltc_int : 1; /* RW, W1C */
222 unsigned long la_seq_trigger : 1; /* RW, W1C */
223 unsigned long ipi_int : 1; /* RW, W1C */
224 unsigned long extio_int0 : 1; /* RW, W1C */
225 unsigned long extio_int1 : 1; /* RW, W1C */
226 unsigned long extio_int2 : 1; /* RW, W1C */
227 unsigned long extio_int3 : 1; /* RW, W1C */
228 unsigned long profile_int : 1; /* RW, W1C */
229 unsigned long rtc0 : 1; /* RW, W1C */
230 unsigned long rtc1 : 1; /* RW, W1C */
231 unsigned long rtc2 : 1; /* RW, W1C */
232 unsigned long rtc3 : 1; /* RW, W1C */
233 unsigned long bau_data : 1; /* RW, W1C */
234 unsigned long power_management_req : 1; /* RW, W1C */
235 unsigned long rsvd_57_63 : 7; /* */
236 } s;
237};
238
239/* ========================================================================= */
240/* UVH_EVENT_OCCURRED0_ALIAS */
241/* ========================================================================= */
242#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
243#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
244
245/* ========================================================================= */
246/* UVH_INT_CMPB */
247/* ========================================================================= */
248#define UVH_INT_CMPB 0x22080UL
249
250#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
251#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
252
253union uvh_int_cmpb_u {
254 unsigned long v;
255 struct uvh_int_cmpb_s {
256 unsigned long real_time_cmpb : 56; /* RW */
257 unsigned long rsvd_56_63 : 8; /* */
258 } s;
259};
260
261/* ========================================================================= */
262/* UVH_INT_CMPC */
263/* ========================================================================= */
264#define UVH_INT_CMPC 0x22100UL
265
266#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
267#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
268
269union uvh_int_cmpc_u {
270 unsigned long v;
271 struct uvh_int_cmpc_s {
272 unsigned long real_time_cmpc : 56; /* RW */
273 unsigned long rsvd_56_63 : 8; /* */
274 } s;
275};
276
277/* ========================================================================= */
278/* UVH_INT_CMPD */
279/* ========================================================================= */
280#define UVH_INT_CMPD 0x22180UL
281
282#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
283#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
284
285union uvh_int_cmpd_u {
286 unsigned long v;
287 struct uvh_int_cmpd_s {
288 unsigned long real_time_cmpd : 56; /* RW */
289 unsigned long rsvd_56_63 : 8; /* */
290 } s;
291};
292
293/* ========================================================================= */
294/* UVH_IPI_INT */
295/* ========================================================================= */
296#define UVH_IPI_INT 0x60500UL
297#define UVH_IPI_INT_32 0x0348
298
299#define UVH_IPI_INT_VECTOR_SHFT 0
300#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
301#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
302#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
303#define UVH_IPI_INT_DESTMODE_SHFT 11
304#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
305#define UVH_IPI_INT_APIC_ID_SHFT 16
306#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
307#define UVH_IPI_INT_SEND_SHFT 63
308#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
309
310union uvh_ipi_int_u {
311 unsigned long v;
312 struct uvh_ipi_int_s {
313 unsigned long vector_ : 8; /* RW */
314 unsigned long delivery_mode : 3; /* RW */
315 unsigned long destmode : 1; /* RW */
316 unsigned long rsvd_12_15 : 4; /* */
317 unsigned long apic_id : 32; /* RW */
318 unsigned long rsvd_48_62 : 15; /* */
319 unsigned long send : 1; /* WP */
320 } s;
321};
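
A sketch of sending a UV IPI by composing the union above and writing it with uv_write_global_mmr64() from uv_hub.h (deleted earlier in this patch); the function name is an assumption:

static void send_ipi_sketch(int pnode, int apicid, int vector)
{
	union uvh_ipi_int_u ipi;

	ipi.v = 0;
	ipi.s.vector_ = vector;
	ipi.s.apic_id = apicid;
	ipi.s.send = 1;		/* write-pulse bit triggers delivery */
	uv_write_global_mmr64(pnode, UVH_IPI_INT, ipi.v);
}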
322
323/* ========================================================================= */
324/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
325/* ========================================================================= */
326#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
327#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009c0
328
329#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
330#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
331#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
332#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
333
334union uvh_lb_bau_intd_payload_queue_first_u {
335 unsigned long v;
336 struct uvh_lb_bau_intd_payload_queue_first_s {
337 unsigned long rsvd_0_3: 4; /* */
338 unsigned long address : 39; /* RW */
339 unsigned long rsvd_43_48: 6; /* */
340 unsigned long node_id : 14; /* RW */
341 unsigned long rsvd_63 : 1; /* */
342 } s;
343};
344
345/* ========================================================================= */
346/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
347/* ========================================================================= */
348#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
349#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009c8
350
351#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
352#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
353
354union uvh_lb_bau_intd_payload_queue_last_u {
355 unsigned long v;
356 struct uvh_lb_bau_intd_payload_queue_last_s {
357 unsigned long rsvd_0_3: 4; /* */
358 unsigned long address : 39; /* RW */
359 unsigned long rsvd_43_63: 21; /* */
360 } s;
361};
362
363/* ========================================================================= */
364/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
365/* ========================================================================= */
366#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
367#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x009d0
368
369#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
370#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
371
372union uvh_lb_bau_intd_payload_queue_tail_u {
373 unsigned long v;
374 struct uvh_lb_bau_intd_payload_queue_tail_s {
375 unsigned long rsvd_0_3: 4; /* */
376 unsigned long address : 39; /* RW */
377 unsigned long rsvd_43_63: 21; /* */
378 } s;
379};
380
381/* ========================================================================= */
382/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
383/* ========================================================================= */
384#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
385#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0a68
386
387#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
388#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
389#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
390#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
391#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
392#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
393#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
394#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
395#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
396#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
397#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
398#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
399#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
400#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
401#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
402#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
403#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
404#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
405#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
406#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
407#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
408#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
409#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
410#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
411#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
412#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
413#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
414#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
415#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
416#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
417#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
418#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
419union uvh_lb_bau_intd_software_acknowledge_u {
420 unsigned long v;
421 struct uvh_lb_bau_intd_software_acknowledge_s {
422 unsigned long pending_0 : 1; /* RW, W1C */
423 unsigned long pending_1 : 1; /* RW, W1C */
424 unsigned long pending_2 : 1; /* RW, W1C */
425 unsigned long pending_3 : 1; /* RW, W1C */
426 unsigned long pending_4 : 1; /* RW, W1C */
427 unsigned long pending_5 : 1; /* RW, W1C */
428 unsigned long pending_6 : 1; /* RW, W1C */
429 unsigned long pending_7 : 1; /* RW, W1C */
430 unsigned long timeout_0 : 1; /* RW, W1C */
431 unsigned long timeout_1 : 1; /* RW, W1C */
432 unsigned long timeout_2 : 1; /* RW, W1C */
433 unsigned long timeout_3 : 1; /* RW, W1C */
434 unsigned long timeout_4 : 1; /* RW, W1C */
435 unsigned long timeout_5 : 1; /* RW, W1C */
436 unsigned long timeout_6 : 1; /* RW, W1C */
437 unsigned long timeout_7 : 1; /* RW, W1C */
438 unsigned long rsvd_16_63: 48; /* */
439 } s;
440};
441
442/* ========================================================================= */
443/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
444/* ========================================================================= */
445#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
446#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
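
Assuming the ALIAS register gives write-1-to-clear access to the W1C bits above (its placement alongside the PENDING/TIMEOUT flags suggests as much), releasing a message's s/w ack resources might look like this, using uv_write_local_mmr() from uv_hub.h:

static void release_sw_ack_sketch(unsigned char sw_ack_vector)
{
	/* assumed W1C semantics: 1 bits clear, 0 bits leave alone */
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
			   (unsigned long)sw_ack_vector);
}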
447
448/* ========================================================================= */
449/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
450/* ========================================================================= */
451#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
452#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009a8
453
454#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
455#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
456#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
457#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
458#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
459#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
460
461union uvh_lb_bau_sb_activation_control_u {
462 unsigned long v;
463 struct uvh_lb_bau_sb_activation_control_s {
464 unsigned long index : 6; /* RW */
465 unsigned long rsvd_6_61: 56; /* */
466 unsigned long push : 1; /* WP */
467 unsigned long init : 1; /* WP */
468 } s;
469};
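
A sketch of kicking off an activation: select a descriptor slot (index is 6 bits, so one of 64) and pulse push; uv_write_global_mmr64() comes from uv_hub.h above:

static void start_activation_sketch(int pnode, int index)
{
	union uvh_lb_bau_sb_activation_control_u ctl;

	ctl.v = 0;
	ctl.s.index = index;	/* which of the 64 descriptor slots */
	ctl.s.push = 1;		/* write-pulse: start the activation */
	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
			      ctl.v);
}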
470
471/* ========================================================================= */
472/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
473/* ========================================================================= */
474#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
475#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009b0
476
477#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
478#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
479
480union uvh_lb_bau_sb_activation_status_0_u {
481 unsigned long v;
482 struct uvh_lb_bau_sb_activation_status_0_s {
483 unsigned long status : 64; /* RW */
484 } s;
485};
486
487/* ========================================================================= */
488/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
489/* ========================================================================= */
490#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
491#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009b8
492
493#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
494#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
495
496union uvh_lb_bau_sb_activation_status_1_u {
497 unsigned long v;
498 struct uvh_lb_bau_sb_activation_status_1_s {
499 unsigned long status : 64; /* RW */
500 } s;
501};
502
503/* ========================================================================= */
504/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
505/* ========================================================================= */
506#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
507#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009a0
508
509#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
510#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
511#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
512#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
513
514union uvh_lb_bau_sb_descriptor_base_u {
515 unsigned long v;
516 struct uvh_lb_bau_sb_descriptor_base_s {
517 unsigned long rsvd_0_11 : 12; /* */
518 unsigned long page_address : 31; /* RW */
519 unsigned long rsvd_43_48 : 6; /* */
520 unsigned long node_id : 14; /* RW */
521 unsigned long rsvd_63 : 1; /* */
522 } s;
523};
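
A sketch of programming the descriptor base; page_address holds a 4 KB page frame number (the field starts at bit 12), and using the pnode as node_id is an assumption:

static void set_descriptor_base_sketch(int pnode, unsigned long paddr)
{
	union uvh_lb_bau_sb_descriptor_base_u base;

	base.v = 0;
	base.s.page_address = paddr >> 12;	/* 4 KB-aligned frame */
	base.s.node_id = pnode;			/* assumed: node of memory */
	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
			      base.v);
}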
524
525/* ========================================================================= */
526/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
527/* ========================================================================= */
528#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
529
530#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
531#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
532#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
533#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
534#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
535#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
536#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
537#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
538#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
539#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
540#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
541#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
542#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
543#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
544#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
545#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
546#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
547#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
548#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
549#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
550#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
551#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
552#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
553#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
554#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
555#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
556#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
557#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
558#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
559#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
560#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
561#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
562#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
563#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
564#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
565#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
566#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
567#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
568#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
569#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
570#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
571#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
572#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
573#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
574#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
575#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
576#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
577#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
578#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
579#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
580#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
581#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
582#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
583#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
584#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
585#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
586#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
587#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
588#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
589#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
590#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
591#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
592#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
593#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
594#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
595#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
596#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
597#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
598#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
599#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
600#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
601#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
602#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
603#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
604#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
605#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
606#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
607#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
608#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
609#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
610#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
611#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
612#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
613#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
614#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
615#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
616
617union uvh_lb_mcast_aoerr0_rpt_enable_u {
618 unsigned long v;
619 struct uvh_lb_mcast_aoerr0_rpt_enable_s {
620 unsigned long mcast_obese_msg : 1; /* RW */
621 unsigned long mcast_data_sb_err : 1; /* RW */
622 unsigned long mcast_nack_buff_parity : 1; /* RW */
623 unsigned long mcast_timeout : 1; /* RW */
624 unsigned long mcast_inactive_reply : 1; /* RW */
625 unsigned long mcast_upgrade_error : 1; /* RW */
626 unsigned long mcast_reg_count_underflow : 1; /* RW */
627 unsigned long mcast_rep_obese_msg : 1; /* RW */
628 unsigned long ucache_req_runt_msg : 1; /* RW */
629 unsigned long ucache_req_obese_msg : 1; /* RW */
630 unsigned long ucache_req_data_sb_err : 1; /* RW */
631 unsigned long ucache_rep_runt_msg : 1; /* RW */
632 unsigned long ucache_rep_obese_msg : 1; /* RW */
633 unsigned long ucache_rep_data_sb_err : 1; /* RW */
634 unsigned long ucache_rep_command_err : 1; /* RW */
635 unsigned long ucache_pend_timeout : 1; /* RW */
636 unsigned long macc_req_runt_msg : 1; /* RW */
637 unsigned long macc_req_obese_msg : 1; /* RW */
638 unsigned long macc_req_data_sb_err : 1; /* RW */
639 unsigned long macc_rep_runt_msg : 1; /* RW */
640 unsigned long macc_rep_obese_msg : 1; /* RW */
641 unsigned long macc_rep_data_sb_err : 1; /* RW */
642 unsigned long macc_amo_timeout : 1; /* RW */
643 unsigned long macc_put_timeout : 1; /* RW */
644 unsigned long macc_spurious_event : 1; /* RW */
645 unsigned long ioh_destination_table_parity : 1; /* RW */
646 unsigned long get_had_error_reply : 1; /* RW */
647 unsigned long get_timeout : 1; /* RW */
648 unsigned long lock_manager_had_error_reply : 1; /* RW */
649 unsigned long put_had_error_reply : 1; /* RW */
650 unsigned long put_timeout : 1; /* RW */
651 unsigned long sb_activation_overrun : 1; /* RW */
652 unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
653 unsigned long completed_gb_activation_timeout : 1; /* RW */
654 unsigned long descriptor_buffer_0_parity : 1; /* RW */
655 unsigned long descriptor_buffer_1_parity : 1; /* RW */
656 unsigned long socket_destination_table_parity : 1; /* RW */
657 unsigned long bau_reply_payload_corruption : 1; /* RW */
658 unsigned long io_port_destination_table_parity : 1; /* RW */
659 unsigned long intd_soft_ack_timeout : 1; /* RW */
660 unsigned long int_rep_obese_msg : 1; /* RW */
661 unsigned long int_rep_command_err : 1; /* RW */
662 unsigned long int_timeout : 1; /* RW */
663 unsigned long rsvd_43_63 : 21; /* */
664 } s;
665};
666
667/* ========================================================================= */
668/* UVH_LOCAL_INT0_CONFIG */
669/* ========================================================================= */
670#define UVH_LOCAL_INT0_CONFIG 0x61000UL
671
672#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
673#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
674#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
675#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
676#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
677#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
678#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
679#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
680#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
681#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
682#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
683#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
684#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
685#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
686#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
687#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
688
689union uvh_local_int0_config_u {
690 unsigned long v;
691 struct uvh_local_int0_config_s {
692 unsigned long vector_ : 8; /* RW */
693 unsigned long dm : 3; /* RW */
694 unsigned long destmode : 1; /* RW */
695 unsigned long status : 1; /* RO */
696 unsigned long p : 1; /* RO */
697 unsigned long rsvd_14 : 1; /* */
698 unsigned long t : 1; /* RO */
699 unsigned long m : 1; /* RW */
700 unsigned long rsvd_17_31: 15; /* */
701 unsigned long apic_id : 32; /* RW */
702 } s;
703};
704
705/* ========================================================================= */
706/* UVH_LOCAL_INT0_ENABLE */
707/* ========================================================================= */
708#define UVH_LOCAL_INT0_ENABLE 0x65000UL
709
710#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
711#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
712#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
713#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
714#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
715#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
716#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
717#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
718#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
719#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
720#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
721#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
722#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
723#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
724#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
725#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
726#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
727#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
728#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
729#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
730#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
731#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
732#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
733#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
734#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
735#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
736#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
737#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
738#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
739#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
740#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
741#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
742#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
743#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
744#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
745#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
746#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
747#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
748#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
749#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
750#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
751#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
752#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
753#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
754#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
755#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
756#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
757#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
758#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
759#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
760#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
761#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
762#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
763#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
764#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
765#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
766#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
767#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
768#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
769#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
770#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
771#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
772#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
773#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
774#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
775#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
776#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
777#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
778#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
779#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
780#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
781#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
782#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
783#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
784#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
785#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
786#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
787#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
788#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
789#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
790#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
791#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
792#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
793#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
794#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
795#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
796#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
797#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
798#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
799#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
800
801union uvh_local_int0_enable_u {
802 unsigned long v;
803 struct uvh_local_int0_enable_s {
804 unsigned long lb_hcerr : 1; /* RW */
805 unsigned long gr0_hcerr : 1; /* RW */
806 unsigned long gr1_hcerr : 1; /* RW */
807 unsigned long lh_hcerr : 1; /* RW */
808 unsigned long rh_hcerr : 1; /* RW */
809 unsigned long xn_hcerr : 1; /* RW */
810 unsigned long si_hcerr : 1; /* RW */
811 unsigned long lb_aoerr0 : 1; /* RW */
812 unsigned long gr0_aoerr0 : 1; /* RW */
813 unsigned long gr1_aoerr0 : 1; /* RW */
814 unsigned long lh_aoerr0 : 1; /* RW */
815 unsigned long rh_aoerr0 : 1; /* RW */
816 unsigned long xn_aoerr0 : 1; /* RW */
817 unsigned long si_aoerr0 : 1; /* RW */
818 unsigned long lb_aoerr1 : 1; /* RW */
819 unsigned long gr0_aoerr1 : 1; /* RW */
820 unsigned long gr1_aoerr1 : 1; /* RW */
821 unsigned long lh_aoerr1 : 1; /* RW */
822 unsigned long rh_aoerr1 : 1; /* RW */
823 unsigned long xn_aoerr1 : 1; /* RW */
824 unsigned long si_aoerr1 : 1; /* RW */
825 unsigned long rh_vpi_int : 1; /* RW */
826 unsigned long system_shutdown_int : 1; /* RW */
827 unsigned long lb_irq_int_0 : 1; /* RW */
828 unsigned long lb_irq_int_1 : 1; /* RW */
829 unsigned long lb_irq_int_2 : 1; /* RW */
830 unsigned long lb_irq_int_3 : 1; /* RW */
831 unsigned long lb_irq_int_4 : 1; /* RW */
832 unsigned long lb_irq_int_5 : 1; /* RW */
833 unsigned long lb_irq_int_6 : 1; /* RW */
834 unsigned long lb_irq_int_7 : 1; /* RW */
835 unsigned long lb_irq_int_8 : 1; /* RW */
836 unsigned long lb_irq_int_9 : 1; /* RW */
837 unsigned long lb_irq_int_10 : 1; /* RW */
838 unsigned long lb_irq_int_11 : 1; /* RW */
839 unsigned long lb_irq_int_12 : 1; /* RW */
840 unsigned long lb_irq_int_13 : 1; /* RW */
841 unsigned long lb_irq_int_14 : 1; /* RW */
842 unsigned long lb_irq_int_15 : 1; /* RW */
843 unsigned long l1_nmi_int : 1; /* RW */
844 unsigned long stop_clock : 1; /* RW */
845 unsigned long asic_to_l1 : 1; /* RW */
846 unsigned long l1_to_asic : 1; /* RW */
847 unsigned long ltc_int : 1; /* RW */
848 unsigned long la_seq_trigger : 1; /* RW */
849 unsigned long rsvd_45_63 : 19; /* */
850 } s;
851};
852
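The *_SHFT/*_MASK pairs and the union above are two views of the same 64-bit MMR image. A minimal sketch of how they compose, assuming only that some accessor returns the raw register value (read_local_mmr64() is hypothetical here, as is the UVH_LOCAL_INT0_ENABLE base define, which sits earlier in this header):

/* Sketch, not kernel code: decode one interrupt-enable bit both ways. */
static inline int local_int0_lb_irq3_enabled(void)
{
	union uvh_local_int0_enable_u en;

	en.v = read_local_mmr64(UVH_LOCAL_INT0_ENABLE);
	/* Bitfield view and mask/shift view agree by construction:
	 *   en.s.lb_irq_int_3 ==
	 *   (en.v & UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK)
	 *	  >> UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT	*/
	return en.s.lb_irq_int_3;
}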
853/* ========================================================================= */
854/* UVH_NODE_ID */
855/* ========================================================================= */
856#define UVH_NODE_ID 0x0UL
857
858#define UVH_NODE_ID_FORCE1_SHFT 0
859#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
860#define UVH_NODE_ID_MANUFACTURER_SHFT 1
861#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
862#define UVH_NODE_ID_PART_NUMBER_SHFT 12
863#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
864#define UVH_NODE_ID_REVISION_SHFT 28
865#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
866#define UVH_NODE_ID_NODE_ID_SHFT 32
867#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
868#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
869#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
870#define UVH_NODE_ID_NI_PORT_SHFT 56
871#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
872
873union uvh_node_id_u {
874 unsigned long v;
875 struct uvh_node_id_s {
876 unsigned long force1 : 1; /* RO */
877 unsigned long manufacturer : 11; /* RO */
878 unsigned long part_number : 16; /* RO */
879 unsigned long revision : 4; /* RO */
880 unsigned long node_id : 15; /* RW */
881 unsigned long rsvd_47 : 1; /* */
882 unsigned long nodes_per_bit : 7; /* RW */
883 unsigned long rsvd_55 : 1; /* */
884 unsigned long ni_port : 4; /* RO */
885 unsigned long rsvd_60_63 : 4; /* */
886 } s;
887};
888
889/* ========================================================================= */
890/* UVH_NODE_PRESENT_TABLE */
891/* ========================================================================= */
892#define UVH_NODE_PRESENT_TABLE 0x1400UL
893#define UVH_NODE_PRESENT_TABLE_DEPTH 16
894
895#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
896#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
897
898union uvh_node_present_table_u {
899 unsigned long v;
900 struct uvh_node_present_table_s {
901 unsigned long nodes : 64; /* RW */
902 } s;
903};
904
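UVH_NODE_PRESENT_TABLE is declared with a DEPTH of 16, and each entry is a full 64-bit bitmap, so the table covers 16 * 64 = 1024 nodes. A lookup sketch, assuming the entries are laid out as consecutive 8-byte MMRs and reusing the hypothetical accessor from the previous sketch:

/* Sketch: test whether node 'n' (0..1023) is present. */
static inline int node_present(int n)
{
	unsigned long bits;

	bits = read_local_mmr64(UVH_NODE_PRESENT_TABLE + (n / 64) * 8);
	return (bits >> (n % 64)) & 1;
}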
905/* ========================================================================= */
906/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
907/* ========================================================================= */
908#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
909
910#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
911#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
912
913union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
914 unsigned long v;
915 struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
916 unsigned long rsvd_0_23 : 24; /* */
917 unsigned long dest_base : 22; /* RW */
918 unsigned long rsvd_46_63: 18; /* */
919 } s;
920};
921
922/* ========================================================================= */
923/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
924/* ========================================================================= */
925#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
926
927#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
928#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
929
930union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
931 unsigned long v;
932 struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
933 unsigned long rsvd_0_23 : 24; /* */
934 unsigned long dest_base : 22; /* RW */
935 unsigned long rsvd_46_63: 18; /* */
936 } s;
937};
938
939/* ========================================================================= */
940/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
941/* ========================================================================= */
942#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
943
944#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
945#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
946
947union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
948 unsigned long v;
949 struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
950 unsigned long rsvd_0_23 : 24; /* */
951 unsigned long dest_base : 22; /* RW */
952 unsigned long rsvd_46_63: 18; /* */
953 } s;
954};
955
956/* ========================================================================= */
957/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
958/* ========================================================================= */
959#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
960
961#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
962#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
963#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
964#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
965
966union uvh_rh_gam_cfg_overlay_config_mmr_u {
967 unsigned long v;
968 struct uvh_rh_gam_cfg_overlay_config_mmr_s {
969 unsigned long rsvd_0_25: 26; /* */
970 unsigned long base : 20; /* RW */
971 unsigned long rsvd_46_62: 17; /* */
972 unsigned long enable : 1; /* RW */
973 } s;
974};
975
976/* ========================================================================= */
977/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
978/* ========================================================================= */
979#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
980
981#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
982#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
983#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
984#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
985#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
986#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
987#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
988#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
989
990union uvh_rh_gam_gru_overlay_config_mmr_u {
991 unsigned long v;
992 struct uvh_rh_gam_gru_overlay_config_mmr_s {
993 unsigned long rsvd_0_27: 28; /* */
994 unsigned long base : 18; /* RW */
995 unsigned long rsvd_46_47: 2; /* */
996 unsigned long gr4 : 1; /* RW */
997 unsigned long rsvd_49_51: 3; /* */
998 unsigned long n_gru : 4; /* RW */
999 unsigned long rsvd_56_62: 7; /* */
1000 unsigned long enable : 1; /* RW */
1001 } s;
1002};
1003
1004/* ========================================================================= */
1005/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
1006/* ========================================================================= */
1007#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
1008
1009#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
1010#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
1011#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
1012#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1013#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1014#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1015#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1016#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1017
1018union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1019 unsigned long v;
1020 struct uvh_rh_gam_mmioh_overlay_config_mmr_s {
1021 unsigned long rsvd_0_29: 30; /* */
1022 unsigned long base : 16; /* RW */
1023 unsigned long m_io : 6; /* RW */
1024 unsigned long n_io : 4; /* RW */
1025 unsigned long rsvd_56_62: 7; /* */
1026 unsigned long enable : 1; /* RW */
1027 } s;
1028};
1029
1030/* ========================================================================= */
1031/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
1032/* ========================================================================= */
1033#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
1034
1035#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1036#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1037#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
1038#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
1039#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1040#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1041
1042union uvh_rh_gam_mmr_overlay_config_mmr_u {
1043 unsigned long v;
1044 struct uvh_rh_gam_mmr_overlay_config_mmr_s {
1045 unsigned long rsvd_0_25: 26; /* */
1046 unsigned long base : 20; /* RW */
1047 unsigned long dual_hub : 1; /* RW */
1048 unsigned long rsvd_47_62: 16; /* */
1049 unsigned long enable : 1; /* RW */
1050 } s;
1051};
1052
1053/* ========================================================================= */
1054/* UVH_RTC */
1055/* ========================================================================= */
1056#define UVH_RTC 0x340000UL
1057
1058#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
1059#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
1060
1061union uvh_rtc_u {
1062 unsigned long v;
1063 struct uvh_rtc_s {
1064 unsigned long real_time_clock : 56; /* RW */
1065 unsigned long rsvd_56_63 : 8; /* */
1066 } s;
1067};
1068
1069/* ========================================================================= */
1070/* UVH_RTC1_INT_CONFIG */
1071/* ========================================================================= */
1072#define UVH_RTC1_INT_CONFIG 0x615c0UL
1073
1074#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
1075#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1076#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
1077#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
1078#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
1079#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1080#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
1081#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1082#define UVH_RTC1_INT_CONFIG_P_SHFT 13
1083#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
1084#define UVH_RTC1_INT_CONFIG_T_SHFT 15
1085#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
1086#define UVH_RTC1_INT_CONFIG_M_SHFT 16
1087#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
1088#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
1089#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1090
1091union uvh_rtc1_int_config_u {
1092 unsigned long v;
1093 struct uvh_rtc1_int_config_s {
1094 unsigned long vector_ : 8; /* RW */
1095 unsigned long dm : 3; /* RW */
1096 unsigned long destmode : 1; /* RW */
1097 unsigned long status : 1; /* RO */
1098 unsigned long p : 1; /* RO */
1099 unsigned long rsvd_14 : 1; /* */
1100 unsigned long t : 1; /* RO */
1101 unsigned long m : 1; /* RW */
1102 unsigned long rsvd_17_31: 15; /* */
1103 unsigned long apic_id : 32; /* RW */
1104 } s;
1105};
1106
1107/* ========================================================================= */
1108/* UVH_RTC2_INT_CONFIG */
1109/* ========================================================================= */
1110#define UVH_RTC2_INT_CONFIG 0x61600UL
1111
1112#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
1113#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1114#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
1115#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
1116#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
1117#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1118#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
1119#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1120#define UVH_RTC2_INT_CONFIG_P_SHFT 13
1121#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
1122#define UVH_RTC2_INT_CONFIG_T_SHFT 15
1123#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
1124#define UVH_RTC2_INT_CONFIG_M_SHFT 16
1125#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
1126#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
1127#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1128
1129union uvh_rtc2_int_config_u {
1130 unsigned long v;
1131 struct uvh_rtc2_int_config_s {
1132 unsigned long vector_ : 8; /* RW */
1133 unsigned long dm : 3; /* RW */
1134 unsigned long destmode : 1; /* RW */
1135 unsigned long status : 1; /* RO */
1136 unsigned long p : 1; /* RO */
1137 unsigned long rsvd_14 : 1; /* */
1138 unsigned long t : 1; /* RO */
1139 unsigned long m : 1; /* RW */
1140 unsigned long rsvd_17_31: 15; /* */
1141 unsigned long apic_id : 32; /* RW */
1142 } s;
1143};
1144
1145/* ========================================================================= */
1146/* UVH_RTC3_INT_CONFIG */
1147/* ========================================================================= */
1148#define UVH_RTC3_INT_CONFIG 0x61640UL
1149
1150#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
1151#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1152#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
1153#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
1154#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
1155#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1156#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
1157#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1158#define UVH_RTC3_INT_CONFIG_P_SHFT 13
1159#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
1160#define UVH_RTC3_INT_CONFIG_T_SHFT 15
1161#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
1162#define UVH_RTC3_INT_CONFIG_M_SHFT 16
1163#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
1164#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
1165#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1166
1167union uvh_rtc3_int_config_u {
1168 unsigned long v;
1169 struct uvh_rtc3_int_config_s {
1170 unsigned long vector_ : 8; /* RW */
1171 unsigned long dm : 3; /* RW */
1172 unsigned long destmode : 1; /* RW */
1173 unsigned long status : 1; /* RO */
1174 unsigned long p : 1; /* RO */
1175 unsigned long rsvd_14 : 1; /* */
1176 unsigned long t : 1; /* RO */
1177 unsigned long m : 1; /* RW */
1178 unsigned long rsvd_17_31: 15; /* */
1179 unsigned long apic_id : 32; /* RW */
1180 } s;
1181};
1182
1183/* ========================================================================= */
1184/* UVH_RTC_INC_RATIO */
1185/* ========================================================================= */
1186#define UVH_RTC_INC_RATIO 0x350000UL
1187
1188#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
1189#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
1190#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
1191#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
1192
1193union uvh_rtc_inc_ratio_u {
1194 unsigned long v;
1195 struct uvh_rtc_inc_ratio_s {
1196 unsigned long fraction : 20; /* RW */
1197 unsigned long ratio : 3; /* RW */
1198 unsigned long rsvd_23_63: 41; /* */
1199 } s;
1200};
1201
1202/* ========================================================================= */
1203/* UVH_SI_ADDR_MAP_CONFIG */
1204/* ========================================================================= */
1205#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
1206
1207#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
1208#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
1209#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
1210#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
1211
1212union uvh_si_addr_map_config_u {
1213 unsigned long v;
1214 struct uvh_si_addr_map_config_s {
1215 unsigned long m_skt : 6; /* RW */
1216 unsigned long rsvd_6_7: 2; /* */
1217 unsigned long n_skt : 4; /* RW */
1218 unsigned long rsvd_12_63: 52; /* */
1219 } s;
1220};
1221
1222/* ========================================================================= */
1223/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
1224/* ========================================================================= */
1225#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
1226
1227#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
1228#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1229#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1230#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1231#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
1232#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1233
1234union uvh_si_alias0_overlay_config_u {
1235 unsigned long v;
1236 struct uvh_si_alias0_overlay_config_s {
1237 unsigned long rsvd_0_23: 24; /* */
1238 unsigned long base : 8; /* RW */
1239 unsigned long rsvd_32_47: 16; /* */
1240 unsigned long m_alias : 5; /* RW */
1241 unsigned long rsvd_53_62: 10; /* */
1242 unsigned long enable : 1; /* RW */
1243 } s;
1244};
1245
1246/* ========================================================================= */
1247/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
1248/* ========================================================================= */
1249#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
1250
1251#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
1252#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1253#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1254#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1255#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
1256#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1257
1258union uvh_si_alias1_overlay_config_u {
1259 unsigned long v;
1260 struct uvh_si_alias1_overlay_config_s {
1261 unsigned long rsvd_0_23: 24; /* */
1262 unsigned long base : 8; /* RW */
1263 unsigned long rsvd_32_47: 16; /* */
1264 unsigned long m_alias : 5; /* RW */
1265 unsigned long rsvd_53_62: 10; /* */
1266 unsigned long enable : 1; /* RW */
1267 } s;
1268};
1269
1270/* ========================================================================= */
1271/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
1272/* ========================================================================= */
1273#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
1274
1275#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
1276#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1277#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1278#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1279#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
1280#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1281
1282union uvh_si_alias2_overlay_config_u {
1283 unsigned long v;
1284 struct uvh_si_alias2_overlay_config_s {
1285 unsigned long rsvd_0_23: 24; /* */
1286 unsigned long base : 8; /* RW */
1287 unsigned long rsvd_32_47: 16; /* */
1288 unsigned long m_alias : 5; /* RW */
1289 unsigned long rsvd_53_62: 10; /* */
1290 unsigned long enable : 1; /* RW */
1291 } s;
1292};
1293
1294
1295#endif /* ASM_X86__UV__UV_MMRS_H */
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h
deleted file mode 100644
index 4ab320913ea3..000000000000
--- a/include/asm-x86/vdso.h
+++ /dev/null
@@ -1,47 +0,0 @@
1#ifndef ASM_X86__VDSO_H
2#define ASM_X86__VDSO_H
3
4#ifdef CONFIG_X86_64
5extern const char VDSO64_PRELINK[];
6
7/*
8 * Given a pointer to the vDSO image, find the pointer to VDSO64_name
9 * as that symbol is defined in the vDSO sources or linker script.
10 */
11#define VDSO64_SYMBOL(base, name) \
12({ \
13 extern const char VDSO64_##name[]; \
14 (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \
15})
16#endif
17
18#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
19extern const char VDSO32_PRELINK[];
20
21/*
22 * Given a pointer to the vDSO image, find the pointer to VDSO32_name
23 * as that symbol is defined in the vDSO sources or linker script.
24 */
25#define VDSO32_SYMBOL(base, name) \
26({ \
27 extern const char VDSO32_##name[]; \
28 (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
29})
30#endif
31
32/*
33 * These symbols are defined with the addresses in the vsyscall page.
34 * See vsyscall-sigreturn.S.
35 */
36extern void __user __kernel_sigreturn;
37extern void __user __kernel_rt_sigreturn;
38
39/*
40 * These symbols are defined by vdso32.S to mark the bounds
41 * of the ELF DSO images included therein.
42 */
43extern const char vdso32_int80_start, vdso32_int80_end;
44extern const char vdso32_syscall_start, vdso32_syscall_end;
45extern const char vdso32_sysenter_start, vdso32_sysenter_end;
46
47#endif /* ASM_X86__VDSO_H */
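For the file above: VDSO64_SYMBOL() turns a symbol's link-time address in the prelinked image into its run-time address — (link-time address - VDSO64_PRELINK + map base). A usage sketch, where vdso_base is wherever the image was mapped and the symbol name is purely illustrative:

/* Sketch: resolve a hypothetical VDSO64_gettimeofday entry at run time. */
void *gtod = VDSO64_SYMBOL(vdso_base, gettimeofday);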
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h
deleted file mode 100644
index b9e493d07d07..000000000000
--- a/include/asm-x86/vga.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6
7#ifndef ASM_X86__VGA_H
8#define ASM_X86__VGA_H
9
10/*
11 * On the PC, we can just recalculate addresses and then
12 * access the videoram directly without any black magic.
13 */
14
15#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
16
17#define vga_readb(x) (*(x))
18#define vga_writeb(x, y) (*(y) = (x))
19
20#endif /* ASM_X86__VGA_H */
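A sketch of what the macros above amount to in practice: mapping the conventional color text buffer at physical 0xb8000 and writing one character cell. The two-byte char/attribute cell layout is standard VGA, not something this header defines:

/* Sketch: put an 'A' in the top-left corner of text-mode VGA. */
static void vga_poke_sketch(void)
{
	volatile char *vram =
		(volatile char *)VGA_MAP_MEM(0xb8000, 0x8000);

	vga_writeb('A', vram);		/* character byte */
	vga_writeb(0x07, vram + 1);	/* attribute: light grey on black */
}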
diff --git a/include/asm-x86/vgtod.h b/include/asm-x86/vgtod.h
deleted file mode 100644
index 38fd13364021..000000000000
--- a/include/asm-x86/vgtod.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef ASM_X86__VGTOD_H
2#define ASM_X86__VGTOD_H
3
4#include <asm/vsyscall.h>
5#include <linux/clocksource.h>
6
7struct vsyscall_gtod_data {
8 seqlock_t lock;
9
10 /* open coded 'struct timespec' */
11 time_t wall_time_sec;
12 u32 wall_time_nsec;
13
14 int sysctl_enabled;
15 struct timezone sys_tz;
16 struct { /* extract of a clocksource struct */
17 cycle_t (*vread)(void);
18 cycle_t cycle_last;
19 cycle_t mask;
20 u32 mult;
21 u32 shift;
22 } clock;
23 struct timespec wall_to_monotonic;
24};
25extern struct vsyscall_gtod_data __vsyscall_gtod_data
26__section_vsyscall_gtod_data;
27extern struct vsyscall_gtod_data vsyscall_gtod_data;
28
29#endif /* ASM_X86__VGTOD_H */
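The seqlock in vsyscall_gtod_data exists so the vsyscall/vDSO readers can snapshot the structure locklessly. A sketch of the standard seqlock read side, using the field names above:

/* Sketch of the lockless reader this layout is designed for. */
static void gtod_read_wall_sketch(struct timespec *ts)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&vsyscall_gtod_data.lock);
		ts->tv_sec  = vsyscall_gtod_data.wall_time_sec;
		ts->tv_nsec = vsyscall_gtod_data.wall_time_nsec;
	} while (read_seqretry(&vsyscall_gtod_data.lock, seq));
}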
diff --git a/include/asm-x86/vic.h b/include/asm-x86/vic.h
deleted file mode 100644
index 53100f353612..000000000000
--- a/include/asm-x86/vic.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager Interrupt Controller */
6
7/* The eight CPI vectors. To activate a CPI, you write a bit mask
8 * corresponding to the processor set to be interrupted into the
9 * relevant register. That set of CPUs will then be interrupted with
10 * the CPI */
11static const int VIC_CPI_Registers[] =
12 {0xFC00, 0xFC01, 0xFC08, 0xFC09,
13 0xFC10, 0xFC11, 0xFC18, 0xFC19 };
14
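Per the comment above, activating a CPI is a single port write of a CPU bitmask. A sketch — the outb() addressing follows from the port-style register values, and range checks are omitted:

/* Sketch: interrupt processors 0 and 2 with CPI number 'cpi'. */
static inline void vic_send_cpi_sketch(int cpi)
{
	outb(0x01 | 0x04 /* CPU mask */, VIC_CPI_Registers[cpi]);
}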
15#define VIC_PROC_WHO_AM_I 0xfc29
16# define QUAD_IDENTIFIER 0xC0
17# define EIGHT_SLOT_IDENTIFIER 0xE0
18#define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72
19#define VIC_CPI_BASE_REGISTER 0xFC41
20#define VIC_PROCESSOR_ID 0xFC21
21# define VIC_CPU_MASQUERADE_ENABLE 0x8
22
23#define VIC_CLAIM_REGISTER_0 0xFC38
24#define VIC_CLAIM_REGISTER_1 0xFC39
25#define VIC_REDIRECT_REGISTER_0 0xFC60
26#define VIC_REDIRECT_REGISTER_1 0xFC61
27#define VIC_PRIORITY_REGISTER 0xFC20
28
29#define VIC_PRIMARY_MC_BASE 0xFC48
30#define VIC_SECONDARY_MC_BASE 0xFC49
31
32#define QIC_PROCESSOR_ID 0xFC71
33# define QIC_CPUID_ENABLE 0x08
34
35#define QIC_VIC_CPI_BASE_REGISTER 0xFC79
36#define QIC_CPI_BASE_REGISTER 0xFC7A
37
38#define QIC_MASK_REGISTER0 0xFC80
39/* NOTE: these are masked high, enabled low */
40# define QIC_PERF_TIMER 0x01
41# define QIC_LPE 0x02
42# define QIC_SYS_INT 0x04
43# define QIC_CMN_INT 0x08
44/* at the moment, just enable CMN_INT, disable SYS_INT */
45# define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */))
46#define QIC_MASK_REGISTER1 0xFC81
47# define QIC_BOOT_CPI_MASK 0xFE
48/* Enable CPI's 1-6 inclusive */
49# define QIC_CPI_ENABLE 0x81
50
51#define QIC_INTERRUPT_CLEAR0 0xFC8A
52#define QIC_INTERRUPT_CLEAR1 0xFC8B
53
54/* this is where we place the CPI vectors */
55#define VIC_DEFAULT_CPI_BASE 0xC0
56/* this is where we place the QIC CPI vectors */
57#define QIC_DEFAULT_CPI_BASE 0xD0
58
59#define VIC_BOOT_INTERRUPT_MASK 0xfe
60
61extern void smp_vic_timer_interrupt(void);
diff --git a/include/asm-x86/visws/cobalt.h b/include/asm-x86/visws/cobalt.h
deleted file mode 100644
index 9627a8fe84e9..000000000000
--- a/include/asm-x86/visws/cobalt.h
+++ /dev/null
@@ -1,125 +0,0 @@
1#ifndef ASM_X86__VISWS__COBALT_H
2#define ASM_X86__VISWS__COBALT_H
3
4#include <asm/fixmap.h>
5
6/*
7 * Cobalt SGI Visual Workstation system ASIC
8 */
9
10#define CO_CPU_NUM_PHYS 0x1e00
11#define CO_CPU_TAB_PHYS (CO_CPU_NUM_PHYS + 2)
12
13#define CO_CPU_MAX 4
14
15#define CO_CPU_PHYS 0xc2000000
16#define CO_APIC_PHYS 0xc4000000
17
18/* see set_fixmap() and asm/fixmap.h */
19#define CO_CPU_VADDR (fix_to_virt(FIX_CO_CPU))
20#define CO_APIC_VADDR (fix_to_virt(FIX_CO_APIC))
21
22/* Cobalt CPU registers -- relative to CO_CPU_VADDR, use co_cpu_*() */
23#define CO_CPU_REV 0x08
24#define CO_CPU_CTRL 0x10
25#define CO_CPU_STAT 0x20
26#define CO_CPU_TIMEVAL 0x30
27
28/* CO_CPU_CTRL bits */
29#define CO_CTRL_TIMERUN 0x04 /* 0 == disabled */
30#define CO_CTRL_TIMEMASK 0x08 /* 0 == unmasked */
31
32/* CO_CPU_STATUS bits */
33#define CO_STAT_TIMEINTR 0x02 /* (r) 1 == int pend, (w) 0 == clear */
34
35/* CO_CPU_TIMEVAL value */
36#define CO_TIME_HZ 100000000 /* Cobalt core rate */
37
38/* Cobalt APIC registers -- relative to CO_APIC_VADDR, use co_apic_*() */
39#define CO_APIC_HI(n) (((n) * 0x10) + 4)
40#define CO_APIC_LO(n) ((n) * 0x10)
41#define CO_APIC_ID 0x0ffc
42
43/* CO_APIC_ID bits */
44#define CO_APIC_ENABLE 0x00000100
45
46/* CO_APIC_LO bits */
47#define CO_APIC_MASK 0x00010000 /* 0 = enabled */
48#define CO_APIC_LEVEL 0x00008000 /* 0 = edge */
49
50/*
51 * Where things are physically wired to Cobalt
52 * #defines with no board _<type>_<rev>_ are common to all (thus far)
53 */
54#define CO_APIC_IDE0 4
55#define CO_APIC_IDE1 2 /* Only on 320 */
56
57#define CO_APIC_8259 12 /* serial, floppy, par-l-l */
58
59/* Lithium PCI Bridge A -- "the one with 82557 Ethernet" */
60#define CO_APIC_PCIA_BASE0 0 /* and 1 */ /* slot 0, line 0 */
61#define CO_APIC_PCIA_BASE123 5 /* and 6 */ /* slot 0, line 1 */
62
63#define CO_APIC_PIIX4_USB 7 /* this one is weird */
64
65/* Lithium PCI Bridge B -- "the one with PIIX4" */
66#define CO_APIC_PCIB_BASE0 8 /* and 9-12 *//* slot 0, line 0 */
67#define CO_APIC_PCIB_BASE123 13 /* and 14, 15 */ /* slot 0, line 1 */
68
69#define CO_APIC_VIDOUT0 16
70#define CO_APIC_VIDOUT1 17
71#define CO_APIC_VIDIN0 18
72#define CO_APIC_VIDIN1 19
73
74#define CO_APIC_LI_AUDIO 22
75
76#define CO_APIC_AS 24
77#define CO_APIC_RE 25
78
79#define CO_APIC_CPU 28 /* Timer and Cache interrupt */
80#define CO_APIC_NMI 29
81#define CO_APIC_LAST CO_APIC_NMI
82
83/*
84 * This is how irqs are assigned on the Visual Workstation.
85 * Legacy devices get irq's 1-15 (system clock is 0 and is CO_APIC_CPU).
86 * All other devices (including PCI) go to Cobalt and are irq's 16 on up.
87 */
88#define CO_IRQ_APIC0 16 /* irq of apic entry 0 */
89#define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0)
90#define CO_IRQ(apic) (CO_IRQ_APIC0 + (apic)) /* apic ent to irq */
91#define CO_APIC(irq) ((irq) - CO_IRQ_APIC0) /* irq to apic ent */
92#define CO_IRQ_IDE0 14 /* knowledge of... */
93#define CO_IRQ_IDE1 15 /* ... ide driver defaults! */
94#define CO_IRQ_8259 CO_IRQ(CO_APIC_8259)
95
96#ifdef CONFIG_X86_VISWS_APIC
97static inline void co_cpu_write(unsigned long reg, unsigned long v)
98{
99 *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v;
100}
101
102static inline unsigned long co_cpu_read(unsigned long reg)
103{
104 return *((volatile unsigned long *)(CO_CPU_VADDR+reg));
105}
106
107static inline void co_apic_write(unsigned long reg, unsigned long v)
108{
109 *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v;
110}
111
112static inline unsigned long co_apic_read(unsigned long reg)
113{
114 return *((volatile unsigned long *)(CO_APIC_VADDR+reg));
115}
116#endif
117
118extern char visws_board_type;
119
120#define VISWS_320 0
121#define VISWS_540 1
122
123extern char visws_board_rev;
124
125#endif /* ASM_X86__VISWS__COBALT_H */
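A sketch tying together pieces of the file above: pointing Cobalt APIC entry CO_APIC_IDE0 at a vector and unmasking it via the co_apic_*() accessors. The vector-in-low-byte layout is an assumption here, not something the header states:

/* Sketch only: program and enable one Cobalt APIC redirection entry. */
static inline void co_apic_route_sketch(unsigned vector)
{
	unsigned long lo = co_apic_read(CO_APIC_LO(CO_APIC_IDE0));

	lo &= ~CO_APIC_MASK;		/* 0 = enabled */
	lo = (lo & ~0xffUL) | vector;	/* assumed vector field */
	co_apic_write(CO_APIC_LO(CO_APIC_IDE0), lo);
}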
diff --git a/include/asm-x86/visws/lithium.h b/include/asm-x86/visws/lithium.h
deleted file mode 100644
index b36d3b378c63..000000000000
--- a/include/asm-x86/visws/lithium.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef ASM_X86__VISWS__LITHIUM_H
2#define ASM_X86__VISWS__LITHIUM_H
3
4#include <asm/fixmap.h>
5
6/*
7 * Lithium is the SGI Visual Workstation I/O ASIC
8 */
9
10#define LI_PCI_A_PHYS 0xfc000000 /* Enet is dev 3 */
11#define LI_PCI_B_PHYS 0xfd000000 /* PIIX4 is here */
12
13/* see set_fixmap() and asm/fixmap.h */
14#define LI_PCIA_VADDR (fix_to_virt(FIX_LI_PCIA))
15#define LI_PCIB_VADDR (fix_to_virt(FIX_LI_PCIB))
16
17/* Not a standard PCI? (not in linux/pci.h) */
18#define LI_PCI_BUSNUM 0x44 /* lo8: primary, hi8: sub */
19#define LI_PCI_INTEN 0x46
20
21/* LI_PCI_INTENT bits */
22#define LI_INTA_0 0x0001
23#define LI_INTA_1 0x0002
24#define LI_INTA_2 0x0004
25#define LI_INTA_3 0x0008
26#define LI_INTA_4 0x0010
27#define LI_INTB 0x0020
28#define LI_INTC 0x0040
29#define LI_INTD 0x0080
30
31/* More special purpose macros... */
32static inline void li_pcia_write16(unsigned long reg, unsigned short v)
33{
34 *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v;
35}
36
37static inline unsigned short li_pcia_read16(unsigned long reg)
38{
39 return *((volatile unsigned short *)(LI_PCIA_VADDR+reg));
40}
41
42static inline void li_pcib_write16(unsigned long reg, unsigned short v)
43{
44 *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v;
45}
46
47static inline unsigned short li_pcib_read16(unsigned long reg)
48{
49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
50}
51
52#endif /* ASM_X86__VISWS__LITHIUM_H */
53
diff --git a/include/asm-x86/visws/piix4.h b/include/asm-x86/visws/piix4.h
deleted file mode 100644
index 61c938045ec9..000000000000
--- a/include/asm-x86/visws/piix4.h
+++ /dev/null
@@ -1,107 +0,0 @@
1#ifndef ASM_X86__VISWS__PIIX4_H
2#define ASM_X86__VISWS__PIIX4_H
3
4/*
5 * PIIX4 as used on SGI Visual Workstations
6 */
7
8#define PIIX_PM_START 0x0F80
9
10#define SIO_GPIO_START 0x0FC0
11
12#define SIO_PM_START 0x0FC8
13
14#define PMBASE PIIX_PM_START
15#define GPIREG0 (PMBASE+0x30)
16#define GPIREG(x) (GPIREG0+((x)/8))
17#define GPIBIT(x) (1 << ((x)%8))
18
19#define PIIX_GPI_BD_ID1 18
20#define PIIX_GPI_BD_ID2 19
21#define PIIX_GPI_BD_ID3 20
22#define PIIX_GPI_BD_ID4 21
23#define PIIX_GPI_BD_REG GPIREG(PIIX_GPI_BD_ID1)
24#define PIIX_GPI_BD_MASK (GPIBIT(PIIX_GPI_BD_ID1) | \
25 GPIBIT(PIIX_GPI_BD_ID2) | \
26 GPIBIT(PIIX_GPI_BD_ID3) | \
27 GPIBIT(PIIX_GPI_BD_ID4) )
28
29#define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8)
30
31#define SIO_INDEX 0x2e
32#define SIO_DATA 0x2f
33
34#define SIO_DEV_SEL 0x7
35#define SIO_DEV_ENB 0x30
36#define SIO_DEV_MSB 0x60
37#define SIO_DEV_LSB 0x61
38
39#define SIO_GP_DEV 0x7
40
41#define SIO_GP_BASE SIO_GPIO_START
42#define SIO_GP_MSB (SIO_GP_BASE>>8)
43#define SIO_GP_LSB (SIO_GP_BASE&0xff)
44
45#define SIO_GP_DATA1 (SIO_GP_BASE+0)
46
47#define SIO_PM_DEV 0x8
48
49#define SIO_PM_BASE SIO_PM_START
50#define SIO_PM_MSB (SIO_PM_BASE>>8)
51#define SIO_PM_LSB (SIO_PM_BASE&0xff)
52#define SIO_PM_INDEX (SIO_PM_BASE+0)
53#define SIO_PM_DATA (SIO_PM_BASE+1)
54
55#define SIO_PM_FER2 0x1
56
57#define SIO_PM_GP_EN 0x80
58
59
60
61/*
62 * This is the dev/reg where generating a config cycle will
63 * result in a PCI special cycle.
64 */
65#define SPECIAL_DEV 0xff
66#define SPECIAL_REG 0x00
67
68/*
69 * PIIX4 needs to see a special cycle with the following data
70 * to be convinced the processor has gone into the stop grant
71 * state. PIIX4 insists on seeing this before it will power
72 * down a system.
73 */
74#define PIIX_SPECIAL_STOP 0x00120002
75
76#define PIIX4_RESET_PORT 0xcf9
77#define PIIX4_RESET_VAL 0x6
78
79#define PMSTS_PORT 0xf80 // 2 bytes PM Status
80#define PMEN_PORT 0xf82 // 2 bytes PM Enable
81#define PMCNTRL_PORT 0xf84 // 2 bytes PM Control
82
83#define PM_SUSPEND_ENABLE 0x2000 // start sequence to suspend state
84
85/*
86 * PMSTS and PMEN I/O bit definitions.
87 * (Bits are the same in both registers)
88 */
89#define PM_STS_RSM (1<<15) // Resume Status
90#define PM_STS_PWRBTNOR (1<<11) // Power Button Override
91#define PM_STS_RTC (1<<10) // RTC status
92#define PM_STS_PWRBTN (1<<8) // Power Button Pressed?
93#define PM_STS_GBL (1<<5) // Global Status
94#define PM_STS_BM (1<<4) // Bus Master Status
95#define PM_STS_TMROF (1<<0) // Timer Overflow Status.
96
97/*
98 * Stop clock GPI register
99 */
100#define PIIX_GPIREG0 (0xf80 + 0x30)
101
102/*
103 * Stop clock GPI bit in GPIREG0
104 */
105#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in
106
107#endif /* ASM_X86__VISWS__PIIX4_H */
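The RESET_PORT/RESET_VAL pair above encodes the conventional PIIX4 reset sequence; similarly, PM_SUSPEND_ENABLE is what gets written to PMCNTRL_PORT to start a suspend. A sketch of the reset half only:

/* Sketch: hard-reset the machine through the PIIX4 reset port. */
static inline void piix4_reset_sketch(void)
{
	outb(PIIX4_RESET_VAL, PIIX4_RESET_PORT);	/* port 0xcf9 */
}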
diff --git a/include/asm-x86/visws/sgivw.h b/include/asm-x86/visws/sgivw.h
deleted file mode 100644
index 5fbf63e1003c..000000000000
--- a/include/asm-x86/visws/sgivw.h
+++ /dev/null
@@ -1,5 +0,0 @@
1/*
2 * Frame buffer position and size:
3 */
4extern unsigned long sgivwfb_mem_phys;
5extern unsigned long sgivwfb_mem_size;
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
deleted file mode 100644
index 998bd18eb737..000000000000
--- a/include/asm-x86/vm86.h
+++ /dev/null
@@ -1,208 +0,0 @@
1#ifndef ASM_X86__VM86_H
2#define ASM_X86__VM86_H
3
4/*
5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how
6 * the Pentium uses them. Linux will return from vm86 mode when both
7 * VIF and VIP are set.
8 *
9 * On a Pentium, we could probably optimize the virtual flags directly
10 * in the eflags register instead of doing it "by hand" in vflags...
11 *
12 * Linus
13 */
14
15#include <asm/processor-flags.h>
16
17#define BIOSSEG 0x0f000
18
19#define CPU_086 0
20#define CPU_186 1
21#define CPU_286 2
22#define CPU_386 3
23#define CPU_486 4
24#define CPU_586 5
25
26/*
27 * Return values for the 'vm86()' system call
28 */
29#define VM86_TYPE(retval) ((retval) & 0xff)
30#define VM86_ARG(retval) ((retval) >> 8)
31
32#define VM86_SIGNAL 0 /* return due to signal */
33#define VM86_UNKNOWN 1 /* unhandled GP fault
34 - IO-instruction or similar */
35#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
36#define VM86_STI 3 /* sti/popf/iret instruction enabled
37 virtual interrupts */
38
39/*
40 * Additional return values when invoking new vm86()
41 */
42#define VM86_PICRETURN 4 /* return due to pending PIC request */
43#define VM86_TRAP 6 /* return due to DOS-debugger request */
44
45/*
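The return value of vm86() packs a type in the low byte and an argument above it, as VM86_TYPE()/VM86_ARG() show. A decoding sketch — handle_int() is a hypothetical handler and the switch is not exhaustive:

/* Sketch: dispatch on why vm86 mode was left. */
static void vm86_decode_sketch(int ret)
{
	switch (VM86_TYPE(ret)) {
	case VM86_SIGNAL:		/* interrupted by a signal */
		break;
	case VM86_INTx:
		handle_int(VM86_ARG(ret));	/* ARG is the int number x */
		break;
	case VM86_STI:			/* guest enabled virtual irqs */
		break;
	default:			/* VM86_UNKNOWN and friends */
		break;
	}
}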
46 * function codes when invoking new vm86()
47 */
48#define VM86_PLUS_INSTALL_CHECK 0
49#define VM86_ENTER 1
50#define VM86_ENTER_NO_BYPASS 2
51#define VM86_REQUEST_IRQ 3
52#define VM86_FREE_IRQ 4
53#define VM86_GET_IRQ_BITS 5
54#define VM86_GET_AND_RESET_IRQ 6
55
56/*
57 * This is the stack-layout seen by the user space program when we have
58 * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
59 * is 'kernel_vm86_regs' (see below).
60 */
61
62struct vm86_regs {
63/*
64 * normal regs, with special meaning for the segment descriptors..
65 */
66 long ebx;
67 long ecx;
68 long edx;
69 long esi;
70 long edi;
71 long ebp;
72 long eax;
73 long __null_ds;
74 long __null_es;
75 long __null_fs;
76 long __null_gs;
77 long orig_eax;
78 long eip;
79 unsigned short cs, __csh;
80 long eflags;
81 long esp;
82 unsigned short ss, __ssh;
83/*
84 * these are specific to v86 mode:
85 */
86 unsigned short es, __esh;
87 unsigned short ds, __dsh;
88 unsigned short fs, __fsh;
89 unsigned short gs, __gsh;
90};
91
92struct revectored_struct {
93 unsigned long __map[8]; /* 256 bits */
94};
95
96struct vm86_struct {
97 struct vm86_regs regs;
98 unsigned long flags;
99 unsigned long screen_bitmap;
100 unsigned long cpu_type;
101 struct revectored_struct int_revectored;
102 struct revectored_struct int21_revectored;
103};
104
105/*
106 * flags masks
107 */
108#define VM86_SCREEN_BITMAP 0x0001
109
110struct vm86plus_info_struct {
111 unsigned long force_return_for_pic:1;
112 unsigned long vm86dbg_active:1; /* for debugger */
113 unsigned long vm86dbg_TFpendig:1; /* for debugger */
114 unsigned long unused:28;
115 unsigned long is_vm86pus:1; /* for vm86 internal use */
116 unsigned char vm86dbg_intxxtab[32]; /* for debugger */
117};
118struct vm86plus_struct {
119 struct vm86_regs regs;
120 unsigned long flags;
121 unsigned long screen_bitmap;
122 unsigned long cpu_type;
123 struct revectored_struct int_revectored;
124 struct revectored_struct int21_revectored;
125 struct vm86plus_info_struct vm86plus;
126};
127
128#ifdef __KERNEL__
129
130#include <asm/ptrace.h>
131
132/*
133 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
134 * mode - the main change is that the old segment descriptors aren't
135 * useful any more and are forced to be zero by the kernel (and the
136 * hardware when a trap occurs), and the real segment descriptors are
137 * at the end of the structure. Look at ptrace.h to see the "normal"
138 * setup. For user space layout see 'struct vm86_regs' above.
139 */
140
141struct kernel_vm86_regs {
142/*
143 * normal regs, with special meaning for the segment descriptors..
144 */
145 struct pt_regs pt;
146/*
147 * these are specific to v86 mode:
148 */
149 unsigned short es, __esh;
150 unsigned short ds, __dsh;
151 unsigned short fs, __fsh;
152 unsigned short gs, __gsh;
153};
154
155struct kernel_vm86_struct {
156 struct kernel_vm86_regs regs;
157/*
158 * the below part remains on the kernel stack while we are in VM86 mode.
159 * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
160 * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
161 * 'struct kernel_vm86_regs' with the then actual values.
162 * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
163 * in kernelspace, hence we need not re-fetch the data from userspace.
164 */
165#define VM86_TSS_ESP0 flags
166 unsigned long flags;
167 unsigned long screen_bitmap;
168 unsigned long cpu_type;
169 struct revectored_struct int_revectored;
170 struct revectored_struct int21_revectored;
171 struct vm86plus_info_struct vm86plus;
172 struct pt_regs *regs32; /* here we save the pointer to the old regs */
173/*
174 * The below is not part of the structure, but the stack layout continues
175 * this way. In front of 'return-eip' there may be some data, depending
176 * on compilation, so we don't rely on it and save the pointer to 'oldregs'
177 * in 'regs32' above.
178 * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
179
180 long return-eip; from call to vm86()
181 struct pt_regs oldregs; user space registers as saved by syscall
182 */
183};
184
185#ifdef CONFIG_VM86
186
187void handle_vm86_fault(struct kernel_vm86_regs *, long);
188int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
189struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
190
191struct task_struct;
192void release_vm86_irqs(struct task_struct *);
193
194#else
195
196#define handle_vm86_fault(a, b)
197#define release_vm86_irqs(a)
198
199static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
200{
201 return 0;
202}
203
204#endif /* CONFIG_VM86 */
205
206#endif /* __KERNEL__ */
207
208#endif /* ASM_X86__VM86_H */
diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h
deleted file mode 100644
index b7c0dea119fe..000000000000
--- a/include/asm-x86/vmi.h
+++ /dev/null
@@ -1,263 +0,0 @@
1/*
2 * VMI interface definition
3 *
4 * Copyright (C) 2005, VMware, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Maintained by: Zachary Amsden zach@vmware.com
22 *
23 */
24#include <linux/types.h>
25
26/*
27 *---------------------------------------------------------------------
28 *
29 * VMI Option ROM API
30 *
31 *---------------------------------------------------------------------
32 */
33#define VMI_SIGNATURE 0x696d5663 /* "cVmi" */
34
35#define PCI_VENDOR_ID_VMWARE 0x15AD
36#define PCI_DEVICE_ID_VMWARE_VMI 0x0801
37
38/*
39 * We use two version numbers for compatibility, with the major
40 * number signifying interface breakages, and the minor number
41 * interface extensions.
42 */
43#define VMI_API_REV_MAJOR 3
44#define VMI_API_REV_MINOR 0
45
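A sketch of the compatibility rule the comment above implies: a major-number mismatch is an interface breakage, while the ROM's minor number merely has to cover the extensions the kernel wants. Field names are per struct vrom_header later in this file; the exact check in the VMI code may differ:

/* Sketch, assuming 'rom' points at a validated vrom_header. */
static int vmi_rom_compatible(const struct vrom_header *rom)
{
	return rom->api_version_maj == VMI_API_REV_MAJOR &&
	       rom->api_version_min >= VMI_API_REV_MINOR;
}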
46#define VMI_CALL_CPUID 0
47#define VMI_CALL_WRMSR 1
48#define VMI_CALL_RDMSR 2
49#define VMI_CALL_SetGDT 3
50#define VMI_CALL_SetLDT 4
51#define VMI_CALL_SetIDT 5
52#define VMI_CALL_SetTR 6
53#define VMI_CALL_GetGDT 7
54#define VMI_CALL_GetLDT 8
55#define VMI_CALL_GetIDT 9
56#define VMI_CALL_GetTR 10
57#define VMI_CALL_WriteGDTEntry 11
58#define VMI_CALL_WriteLDTEntry 12
59#define VMI_CALL_WriteIDTEntry 13
60#define VMI_CALL_UpdateKernelStack 14
61#define VMI_CALL_SetCR0 15
62#define VMI_CALL_SetCR2 16
63#define VMI_CALL_SetCR3 17
64#define VMI_CALL_SetCR4 18
65#define VMI_CALL_GetCR0 19
66#define VMI_CALL_GetCR2 20
67#define VMI_CALL_GetCR3 21
68#define VMI_CALL_GetCR4 22
69#define VMI_CALL_WBINVD 23
70#define VMI_CALL_SetDR 24
71#define VMI_CALL_GetDR 25
72#define VMI_CALL_RDPMC 26
73#define VMI_CALL_RDTSC 27
74#define VMI_CALL_CLTS 28
75#define VMI_CALL_EnableInterrupts 29
76#define VMI_CALL_DisableInterrupts 30
77#define VMI_CALL_GetInterruptMask 31
78#define VMI_CALL_SetInterruptMask 32
79#define VMI_CALL_IRET 33
80#define VMI_CALL_SYSEXIT 34
81#define VMI_CALL_Halt 35
82#define VMI_CALL_Reboot 36
83#define VMI_CALL_Shutdown 37
84#define VMI_CALL_SetPxE 38
85#define VMI_CALL_SetPxELong 39
86#define VMI_CALL_UpdatePxE 40
87#define VMI_CALL_UpdatePxELong 41
88#define VMI_CALL_MachineToPhysical 42
89#define VMI_CALL_PhysicalToMachine 43
90#define VMI_CALL_AllocatePage 44
91#define VMI_CALL_ReleasePage 45
92#define VMI_CALL_InvalPage 46
93#define VMI_CALL_FlushTLB 47
94#define VMI_CALL_SetLinearMapping 48
95
96#define VMI_CALL_SetIOPLMask 61
97#define VMI_CALL_SetInitialAPState 62
98#define VMI_CALL_APICWrite 63
99#define VMI_CALL_APICRead 64
100#define VMI_CALL_IODelay 65
101#define VMI_CALL_SetLazyMode 73
102
103/*
104 *---------------------------------------------------------------------
105 *
106 * MMU operation flags
107 *
108 *---------------------------------------------------------------------
109 */
110
111/* Flags used by VMI_{Allocate|Release}Page call */
112#define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */
113#define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */
114#define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */
115
116
117/* Flags shared by Allocate|Release Page and PTE updates */
118#define VMI_PAGE_PT 0x01
119#define VMI_PAGE_PD 0x02
120#define VMI_PAGE_PDP 0x04
121#define VMI_PAGE_PML4 0x08
122
123#define VMI_PAGE_NORMAL 0x00 /* for debugging */
124
125/* Flags used by PTE updates */
126#define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */
127#define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */
128#define VMI_PAGE_VA_MASK 0xfffff000
129
130#ifdef CONFIG_X86_PAE
131#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
132#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
133#else
134#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED)
135#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED)
136#endif
137
138/* Flags used by VMI_FlushTLB call */
139#define VMI_FLUSH_TLB 0x01
140#define VMI_FLUSH_GLOBAL 0x02
141
142/*
143 *---------------------------------------------------------------------
144 *
145 * VMI relocation definitions for ROM call get_reloc
146 *
147 *---------------------------------------------------------------------
148 */
149
150/* VMI Relocation types */
151#define VMI_RELOCATION_NONE 0
152#define VMI_RELOCATION_CALL_REL 1
153#define VMI_RELOCATION_JUMP_REL 2
154#define VMI_RELOCATION_NOP 3
155
156#ifndef __ASSEMBLY__
157struct vmi_relocation_info {
158 unsigned char *eip;
159 unsigned char type;
160 unsigned char reserved[3];
161};
162#endif
163
164
165/*
166 *---------------------------------------------------------------------
167 *
168 * Generic ROM structures and definitions
169 *
170 *---------------------------------------------------------------------
171 */
172
173#ifndef __ASSEMBLY__
174
175struct vrom_header {
176 u16 rom_signature; /* option ROM signature */
177 u8 rom_length; /* ROM length in 512 byte chunks */
178 u8 rom_entry[4]; /* 16-bit code entry point */
179 u8 rom_pad0; /* 4-byte align pad */
180 u32 vrom_signature; /* VROM identification signature */
181 u8 api_version_min;/* Minor version of API */
182 u8 api_version_maj;/* Major version of API */
183 u8 jump_slots; /* Number of jump slots */
184 u8 reserved1; /* Reserved for expansion */
185 u32 virtual_top; /* Hypervisor virtual address start */
186 u16 reserved2; /* Reserved for expansion */
187 u16 license_offs; /* Offset to License string */
188 u16 pci_header_offs;/* Offset to PCI OPROM header */
189 u16 pnp_header_offs;/* Offset to PnP OPROM header */
190 u32 rom_pad3; /* PnP reserved / VMI reserved */
191 u8 reserved[96]; /* Reserved for headers */
192 char vmi_init[8]; /* VMI_Init jump point */
193 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
194} __attribute__((packed));
195
196struct pnp_header {
197 char sig[4];
198 char rev;
199 char size;
200 short next;
201 short res;
202 long devID;
203 unsigned short manufacturer_offset;
204 unsigned short product_offset;
205} __attribute__((packed));
206
207struct pci_header {
208 char sig[4];
209 short vendorID;
210 short deviceID;
211 short vpdData;
212 short size;
213 char rev;
214 char class;
215 char subclass;
216 char interface;
217 short chunks;
218 char rom_version_min;
219 char rom_version_maj;
220 char codetype;
221 char lastRom;
222 short reserved;
223} __attribute__((packed));
224
225/* Function prototypes for bootstrapping */
226extern void vmi_init(void);
227extern void vmi_bringup(void);
228extern void vmi_apply_boot_page_allocations(void);
229
230/* State needed to start an application processor in an SMP system. */
231struct vmi_ap_state {
232 u32 cr0;
233 u32 cr2;
234 u32 cr3;
235 u32 cr4;
236
237 u64 efer;
238
239 u32 eip;
240 u32 eflags;
241 u32 eax;
242 u32 ebx;
243 u32 ecx;
244 u32 edx;
245 u32 esp;
246 u32 ebp;
247 u32 esi;
248 u32 edi;
249 u16 cs;
250 u16 ss;
251 u16 ds;
252 u16 es;
253 u16 fs;
254 u16 gs;
255 u16 ldtr;
256
257 u16 gdtr_limit;
258 u32 gdtr_base;
259 u32 idtr_base;
260 u16 idtr_limit;
261};
262
263#endif
diff --git a/include/asm-x86/vmi_time.h b/include/asm-x86/vmi_time.h
deleted file mode 100644
index b2d39e6a08b7..000000000000
--- a/include/asm-x86/vmi_time.h
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * VMI Time wrappers
3 *
4 * Copyright (C) 2006, VMware, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Send feedback to dhecht@vmware.com
22 *
23 */
24
25#ifndef ASM_X86__VMI_TIME_H
26#define ASM_X86__VMI_TIME_H
27
28/*
29 * Raw VMI call indices for timer functions
30 */
31#define VMI_CALL_GetCycleFrequency 66
32#define VMI_CALL_GetCycleCounter 67
33#define VMI_CALL_SetAlarm 68
34#define VMI_CALL_CancelAlarm 69
35#define VMI_CALL_GetWallclockTime 70
36#define VMI_CALL_WallclockUpdated 71
37
38/* Cached VMI timer operations */
39extern struct vmi_timer_ops {
40 u64 (*get_cycle_frequency)(void);
41 u64 (*get_cycle_counter)(int);
42 u64 (*get_wallclock)(void);
43 int (*wallclock_updated)(void);
44 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
45 void (*cancel_alarm)(u32 flags);
46} vmi_timer_ops;
47
48/* Prototypes */
49extern void __init vmi_time_init(void);
50extern unsigned long vmi_get_wallclock(void);
51extern int vmi_set_wallclock(unsigned long now);
52extern unsigned long long vmi_sched_clock(void);
53extern unsigned long vmi_tsc_khz(void);
54
55#ifdef CONFIG_X86_LOCAL_APIC
56extern void __devinit vmi_time_bsp_init(void);
57extern void __devinit vmi_time_ap_init(void);
58#endif
59
60/*
61 * When run under a hypervisor, a vcpu is always in one of three states:
62 * running, halted, or ready. The vcpu is in the 'running' state if it
63 * is executing. When the vcpu executes the halt interface, the vcpu
64 * enters the 'halted' state and remains halted until there is some work
65 * pending for the vcpu (e.g. an alarm expires, host I/O completes on
66 * behalf of virtual I/O). At this point, the vcpu enters the 'ready'
67 * state (waiting for the hypervisor to reschedule it). Finally, at any
68 * time when the vcpu is in neither the 'running' state nor the 'halted'
69 * state, it is in the 'ready' state.
70 *
71 * Real time advances while the vcpu is 'running', 'ready', or
72 * 'halted'. Stolen time is the time in which the vcpu is in the
73 * 'ready' state. Available time is the remaining time -- the vcpu is
74 * either 'running' or 'halted'.
75 *
76 * All three views of time are accessible through the VMI cycle
77 * counters.
78 */
79
80/* The cycle counters. */
81#define VMI_CYCLES_REAL 0
82#define VMI_CYCLES_AVAILABLE 1
83#define VMI_CYCLES_STOLEN 2
84
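Per the state model above, all three views of time are read through the same cached operation with a counter selector. A sketch:

/* Sketch: sample the three views of time through the cached ops. */
static void vmi_time_sample_sketch(void)
{
	u64 real   = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
	u64 stolen = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_STOLEN);
	u64 avail  = real - stolen;	/* running + halted, per above */
}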
85/* The alarm interface 'flags' bits */
86#define VMI_ALARM_COUNTERS 2
87
88#define VMI_ALARM_COUNTER_MASK 0x000000ff
89
90#define VMI_ALARM_WIRED_IRQ0 0x00000000
91#define VMI_ALARM_WIRED_LVTT 0x00010000
92
93#define VMI_ALARM_IS_ONESHOT 0x00000000
94#define VMI_ALARM_IS_PERIODIC 0x00000100
95
96#define CONFIG_VMI_ALARM_HZ 100
97
98#endif /* ASM_X86__VMI_TIME_H */
diff --git a/include/asm-x86/voyager.h b/include/asm-x86/voyager.h
deleted file mode 100644
index 9c811d2e6f91..000000000000
--- a/include/asm-x86/voyager.h
+++ /dev/null
@@ -1,528 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager system */
6
7#undef VOYAGER_DEBUG
8#undef VOYAGER_CAT_DEBUG
9
10#ifdef VOYAGER_DEBUG
11#define VDEBUG(x) printk x
12#else
13#define VDEBUG(x)
14#endif
15
16/* There are three levels of Voyager machine: 3, 4 and 5. The rule is
17 * that if the model number is less than 3435 it's a Level 3, except for
18 * the 3360, which is a Level 4. A 3435 or above is a Level 5. */
19#define VOYAGER_LEVEL5_AND_ABOVE 0x3435
20#define VOYAGER_LEVEL4 0x3360
21
22/* The L4 DINO ASIC */
23#define VOYAGER_DINO 0x43
24
25/* voyager ports in standard I/O space */
26#define VOYAGER_MC_SETUP 0x96
27
28
29#define VOYAGER_CAT_CONFIG_PORT 0x97
30# define VOYAGER_CAT_DESELECT 0xff
31#define VOYAGER_SSPB_RELOCATION_PORT 0x98
32
33/* Valid CAT controller commands */
34/* start instruction register cycle */
35#define VOYAGER_CAT_IRCYC 0x01
36/* start data register cycle */
37#define VOYAGER_CAT_DRCYC 0x02
38/* move to execute state */
39#define VOYAGER_CAT_RUN 0x0F
40/* end operation */
41#define VOYAGER_CAT_END 0x80
42/* hold in idle state */
43#define VOYAGER_CAT_HOLD 0x90
44/* single step an "intest" vector */
45#define VOYAGER_CAT_STEP 0xE0
46/* return cat controller to CLEMSON mode */
47#define VOYAGER_CAT_CLEMSON 0xFF
48
49/* the default cat command header */
50#define VOYAGER_CAT_HEADER 0x7F
51
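A sketch of the general shape of a CAT transaction through the config port, using the commands above. The real sequences in the Voyager support code are considerably longer, and 'module' here is a hypothetical module id in the 0x10-0x1f range:

/* Sketch only: the outline of one CAT transaction. */
static void cat_poke_sketch(__u8 module)
{
	outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT); /* idle bus */
	outb(module, VOYAGER_CAT_CONFIG_PORT);		/* select module */
	outb(VOYAGER_CAT_IRCYC, VOYAGER_CAT_CONFIG_PORT); /* insn cycle */
	outb(VOYAGER_CAT_END, VOYAGER_CAT_CONFIG_PORT);	/* end operation */
}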
52/* the range of possible CAT module ids in the system */
53#define VOYAGER_MIN_MODULE 0x10
54#define VOYAGER_MAX_MODULE 0x1f
55
56/* The voyager registers per asic */
57#define VOYAGER_ASIC_ID_REG 0x00
58#define VOYAGER_ASIC_TYPE_REG 0x01
59/* the sub address registers can be made auto incrementing on reads */
60#define VOYAGER_AUTO_INC_REG 0x02
61# define VOYAGER_AUTO_INC 0x04
62# define VOYAGER_NO_AUTO_INC 0xfb
63#define VOYAGER_SUBADDRDATA 0x03
64#define VOYAGER_SCANPATH 0x05
65# define VOYAGER_CONNECT_ASIC 0x01
66# define VOYAGER_DISCONNECT_ASIC 0xfe
67#define VOYAGER_SUBADDRLO 0x06
68#define VOYAGER_SUBADDRHI 0x07
69#define VOYAGER_SUBMODSELECT 0x08
70#define VOYAGER_SUBMODPRESENT 0x09
71
72#define VOYAGER_SUBADDR_LO 0xff
73#define VOYAGER_SUBADDR_HI 0xffff
74
75/* the maximum size of a scan path -- used to form instructions */
76#define VOYAGER_MAX_SCAN_PATH 0x100
77/* the biggest possible register size (in bytes) */
78#define VOYAGER_MAX_REG_SIZE 4
79
80/* Total number of possible modules (including submodules) */
81#define VOYAGER_MAX_MODULES 16
82/* Largest number of asics per module */
83#define VOYAGER_MAX_ASICS_PER_MODULE 7
84
85/* the CAT asic of each module is always the first one */
86#define VOYAGER_CAT_ID 0
87#define VOYAGER_PSI 0x1a
88
89/* voyager instruction operations and registers */
90#define VOYAGER_READ_CONFIG 0x1
91#define VOYAGER_WRITE_CONFIG 0x2
92#define VOYAGER_BYPASS 0xff
93
94typedef struct voyager_asic {
95 __u8 asic_addr; /* ASIC address; Level 4 */
96 __u8 asic_type; /* ASIC type */
97 __u8 asic_id; /* ASIC id */
98 __u8 jtag_id[4]; /* JTAG id */
99 __u8 asic_location; /* Location within scan path; start w/ 0 */
100 __u8 bit_location; /* Location within bit stream; start w/ 0 */
101 __u8 ireg_length; /* Instruction register length */
102 __u16 subaddr; /* Amount of sub address space */
103 struct voyager_asic *next; /* Next asic in linked list */
104} voyager_asic_t;
105
106typedef struct voyager_module {
107 __u8 module_addr; /* Module address */
108 __u8 scan_path_connected; /* Scan path connected */
109 __u16 ee_size; /* Size of the EEPROM */
110 __u16 num_asics; /* Number of Asics */
111 __u16 inst_bits; /* Instruction bits in the scan path */
112 __u16 largest_reg; /* Largest register in the scan path */
113 __u16 smallest_reg; /* Smallest register in the scan path */
114 voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */
115 struct voyager_module *submodule; /* Submodule pointer */
116 struct voyager_module *next; /* Next module in linked list */
117} voyager_module_t;
118
119typedef struct voyager_eeprom_hdr {
120 __u8 module_id[4];
121 __u8 version_id;
122 __u8 config_id;
123 __u16 boundry_id; /* boundary scan id */
124 __u16 ee_size; /* size of EEPROM */
125 __u8 assembly[11]; /* assembly # */
126 __u8 assembly_rev; /* assembly rev */
127 __u8 tracer[4]; /* tracer number */
128 __u16 assembly_cksum; /* asm checksum */
129 __u16 power_consump; /* pwr requirements */
130 __u16 num_asics; /* number of asics */
131 __u16 bist_time; /* min. bist time */
132 __u16 err_log_offset; /* error log offset */
133 __u16 scan_path_offset;/* scan path offset */
134 __u16 cct_offset;
135 __u16 log_length; /* length of err log */
136 __u16 xsum_end; /* offset to end of
137 checksum */
138 __u8 reserved[4];
139 __u8 sflag; /* starting sentinel */
140 __u8 part_number[13]; /* prom part number */
141 __u8 version[10]; /* version number */
142 __u8 signature[8];
143 __u16 eeprom_chksum;
144 __u32 data_stamp_offset;
145 __u8 eflag; /* ending sentinel */
146} __attribute__((packed)) voyager_eprom_hdr_t;
147
148
149
150#define VOYAGER_EPROM_SIZE_OFFSET \
151 ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
152#define VOYAGER_XSUM_END_OFFSET 0x2a
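
The VOYAGER_EPROM_SIZE_OFFSET macro above is the classic cast-a-null-pointer
offsetof idiom. A sketch of the equivalent, clearer spelling (assuming the
standard offsetof from <linux/stddef.h>; not part of the original header):

	#define VOYAGER_EPROM_SIZE_OFFSET \
		((__u16)offsetof(voyager_eprom_hdr_t, ee_size))	/* same value, no null-pointer cast */
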
153
154/* the following three definitions are for internal table layouts
155 * in the module EPROMs. We really only care about the IDs and
156 * offsets */
157typedef struct voyager_sp_table {
158 __u8 asic_id;
159 __u8 bypass_flag;
160 __u16 asic_data_offset;
161 __u16 config_data_offset;
162} __attribute__((packed)) voyager_sp_table_t;
163
164typedef struct voyager_jtag_table {
165 __u8 icode[4];
166 __u8 runbist[4];
167 __u8 intest[4];
168 __u8 samp_preld[4];
169 __u8 ireg_len;
170} __attribute__((packed)) voyager_jtt_t;
171
172typedef struct voyager_asic_data_table {
173 __u8 jtag_id[4];
174 __u16 length_bsr;
175 __u16 length_bist_reg;
176 __u32 bist_clk;
177 __u16 subaddr_bits;
178 __u16 seed_bits;
179 __u16 sig_bits;
180 __u16 jtag_offset;
181} __attribute__((packed)) voyager_at_t;
182
183/* Voyager Interrupt Controller (VIC) registers */
184
185/* Base to add to Cross Processor Interrupts (CPIs) when triggering
186 * the CPU IRQ line */
187/* register defines for the WCBICs (one per processor) */
188#define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */
189#define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */
190#define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */
191#define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */
192#define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */
193#define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */
194#define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */
195#define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */
196
197
198/* top of memory registers */
199#define VOYAGER_WCBIC_TOM_L 0x4
200#define VOYAGER_WCBIC_TOM_H 0x5
201
202/* register defines for Voyager Memory Control (VMC)
203 * these are present on L4 machines only */
204#define VOYAGER_VMC1 0x81
205#define VOYAGER_VMC2 0x91
206#define VOYAGER_VMC3 0xa1
207#define VOYAGER_VMC4 0xb1
208
209/* VMC Ports */
210#define VOYAGER_VMC_MEMORY_SETUP 0x9
211# define VMC_Interleaving 0x01
212# define VMC_4Way 0x02
213# define VMC_EvenCacheLines 0x04
214# define VMC_HighLine 0x08
215# define VMC_Start0_Enable 0x20
216# define VMC_Start1_Enable 0x40
217# define VMC_Vremap 0x80
218#define VOYAGER_VMC_BANK_DENSITY 0xa
219# define VMC_BANK_EMPTY 0
220# define VMC_BANK_4MB 1
221# define VMC_BANK_16MB 2
222# define VMC_BANK_64MB 3
223# define VMC_BANK0_MASK 0x03
224# define VMC_BANK1_MASK 0x0C
225# define VMC_BANK2_MASK 0x30
226# define VMC_BANK3_MASK 0xC0
227
228/* Magellan Memory Controller (MMC) defines - present on L5 */
229#define VOYAGER_MMC_ASIC_ID 1
230/* the two memory modules corresponding to memory cards in the system */
231#define VOYAGER_MMC_MEMORY0_MODULE 0x14
232#define VOYAGER_MMC_MEMORY1_MODULE 0x15
233/* the Magellan Memory Address (MMA) defines */
234#define VOYAGER_MMA_ASIC_ID 2
235
236/* Submodule number for the Quad Baseboard */
237#define VOYAGER_QUAD_BASEBOARD 1
238
239/* ASIC defines for the Quad Baseboard */
240#define VOYAGER_QUAD_QDATA0 1
241#define VOYAGER_QUAD_QDATA1 2
242#define VOYAGER_QUAD_QABC 3
243
244/* Useful areas in extended CMOS */
245#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a
246#define VOYAGER_MEMORY_CLICKMAP 0xa23
247#define VOYAGER_DUMP_LOCATION 0xb1a
248
249/* SUS In Control bit - used to tell SUS that we don't need to be
250 * babysat anymore */
251#define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff
252# define VOYAGER_IN_CONTROL_FLAG 0x80
253
254/* Voyager PSI defines */
255#define VOYAGER_PSI_STATUS_REG 0x08
256# define PSI_DC_FAIL 0x01
257# define PSI_MON 0x02
258# define PSI_FAULT 0x04
259# define PSI_ALARM 0x08
260# define PSI_CURRENT 0x10
261# define PSI_DVM 0x20
262# define PSI_PSCFAULT 0x40
263# define PSI_STAT_CHG 0x80
264
265#define VOYAGER_PSI_SUPPLY_REG 0x8000
266 /* read */
267# define PSI_FAIL_DC 0x01
268# define PSI_FAIL_AC 0x02
269# define PSI_MON_INT 0x04
270# define PSI_SWITCH_OFF 0x08
271# define PSI_HX_OFF 0x10
272# define PSI_SECURITY 0x20
273# define PSI_CMOS_BATT_LOW 0x40
274# define PSI_CMOS_BATT_FAIL 0x80
275 /* write */
276# define PSI_CLR_SWITCH_OFF 0x13
277# define PSI_CLR_HX_OFF 0x14
278# define PSI_CLR_CMOS_BATT_FAIL 0x17
279
280#define VOYAGER_PSI_MASK 0x8001
281# define PSI_MASK_MASK 0x10
282
283#define VOYAGER_PSI_AC_FAIL_REG 0x8004
284#define AC_FAIL_STAT_CHANGE 0x80
285
286#define VOYAGER_PSI_GENERAL_REG 0x8007
287 /* read */
288# define PSI_SWITCH_ON 0x01
289# define PSI_SWITCH_ENABLED 0x02
290# define PSI_ALARM_ENABLED 0x08
291# define PSI_SECURE_ENABLED 0x10
292# define PSI_COLD_RESET 0x20
293# define PSI_COLD_START 0x80
294 /* write */
295# define PSI_POWER_DOWN 0x10
296# define PSI_SWITCH_DISABLE 0x01
297# define PSI_SWITCH_ENABLE 0x11
298# define PSI_CLEAR 0x12
299# define PSI_ALARM_DISABLE 0x03
300# define PSI_ALARM_ENABLE 0x13
301# define PSI_CLEAR_COLD_RESET 0x05
302# define PSI_SET_COLD_RESET 0x15
303# define PSI_CLEAR_COLD_START 0x07
304# define PSI_SET_COLD_START 0x17
305
306
307
308struct voyager_bios_info {
309 __u8 len;
310 __u8 major;
311 __u8 minor;
312 __u8 debug;
313 __u8 num_classes;
314 __u8 class_1;
315 __u8 class_2;
316};
317
318/* The following structures and definitions are for the Kernel/SUS
319 * interface; these are needed to find out how SUS initialised any Quad
320 * boards in the system */
321
322#define NUMBER_OF_MC_BUSSES 2
323#define SLOTS_PER_MC_BUS 8
324#define MAX_CPUS 16 /* 16 way CPU system */
325#define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */
326#define MAX_CACHE_LEVELS 4 /* # of cache levels supported */
327#define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */
328#define NUMBER_OF_POS_REGS 8
329
330typedef struct {
331 __u8 MC_Slot;
332 __u8 POS_Values[NUMBER_OF_POS_REGS];
333} __attribute__((packed)) MC_SlotInformation_t;
334
335struct QuadDescription {
336 __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields
337 * will be zero except for slot */
338 __u8 StructureVersion;
339 __u32 CPI_BaseAddress;
340 __u32 LARC_BankSize;
341 __u32 LocalMemoryStateBits;
342 __u8 Slot; /* Processor slots 1 - 4 */
343} __attribute__((packed));
344
345struct ProcBoardInfo {
346 __u8 Type;
347 __u8 StructureVersion;
348 __u8 NumberOfBoards;
349 struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS];
350} __attribute__((packed));
351
352struct CacheDescription {
353 __u8 Level;
354 __u32 TotalSize;
355 __u16 LineSize;
356 __u8 Associativity;
357 __u8 CacheType;
358 __u8 WriteType;
359 __u8 Number_CPUs_SharedBy;
360 __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS];
361
362} __attribute__((packed));
363
364struct CPU_Description {
365 __u8 CPU_HardwareId;
366 char *FRU_String;
367 __u8 NumberOfCacheLevels;
368 struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS];
369} __attribute__((packed));
370
371struct CPU_Info {
372 __u8 Type;
373 __u8 StructureVersion;
374 __u8 NumberOf_CPUs;
375 struct CPU_Description CPU_Data[MAX_CPUS];
376} __attribute__((packed));
377
378
379/*
380 * This structure will be used by SUS and the OS.
381 * The assumption about this structure is that no blank space is
382 * packed in it by our friend the compiler.
383 */
384typedef struct {
385 __u8 Mailbox_SUS; /* Written to by SUS to give
386 commands/response to the OS */
387 __u8 Mailbox_OS; /* Written to by the OS to give
388 commands/response to SUS */
389 __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the
390 interface SUS supports */
391 __u8 OS_MailboxVersion; /* Tells SUS which iteration of the
392 interface the OS supports */
393 __u32 OS_Flags; /* Flags set by the OS as info for
394 SUS */
395 __u32 SUS_Flags; /* Flags set by SUS as info
396 for the OS */
397 __u32 WatchDogPeriod; /* Watchdog period (in seconds) which
398 the DP uses to see if the OS
399 is dead */
400 __u32 WatchDogCount; /* Updated by the OS on every tick. */
401 __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS
402 where to stuff the SUS error log
403 on a dump */
404 MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];
405 /* Storage for MCA POS data */
406 /* All new SECOND_PASS_INTERFACE fields added from this point */
407 struct ProcBoardInfo *BoardData;
408 struct CPU_Info *CPU_Data;
409 /* All new fields must be added from this point */
410} Voyager_KernelSUS_Mbox_t;
411
412/* structure for finding the right memory address to send a QIC CPI to */
413struct voyager_qic_cpi {
414 /* Each cache line (32 bytes) can trigger a cpi. The cpi
415 * read/write may occur anywhere in the cache line---pick the
416 * middle to be safe */
417 struct {
418 __u32 pad1[3];
419 __u32 cpi;
420 __u32 pad2[4];
421 } qic_cpi[8];
422};
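
The padding above is easy to verify: 3 + 1 + 4 __u32 words make each qic_cpi
entry exactly 32 bytes (one cache line), with the cpi word at byte offset 12,
near the middle of the line as the comment intends. Dropped into any function,
these compile-time checks would confirm it (a sketch; BUILD_BUG_ON is the
kernel's static assert):

	BUILD_BUG_ON(sizeof(struct voyager_qic_cpi) != 8 * 32);	/* 8 one-line entries */
	BUILD_BUG_ON(offsetof(struct voyager_qic_cpi, qic_cpi[0].cpi) != 12);
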
423
424struct voyager_status {
425 __u32 power_fail:1;
426 __u32 switch_off:1;
427 __u32 request_from_kernel:1;
428};
429
430struct voyager_psi_regs {
431 __u8 cat_id;
432 __u8 cat_dev;
433 __u8 cat_control;
434 __u8 subaddr;
435 __u8 dummy4;
436 __u8 checkbit;
437 __u8 subaddr_low;
438 __u8 subaddr_high;
439 __u8 intstatus;
440 __u8 stat1;
441 __u8 stat3;
442 __u8 fault;
443 __u8 tms;
444 __u8 gen;
445 __u8 sysconf;
446 __u8 dummy15;
447};
448
449struct voyager_psi_subregs {
450 __u8 supply;
451 __u8 mask;
452 __u8 present;
453 __u8 DCfail;
454 __u8 ACfail;
455 __u8 fail;
456 __u8 UPSfail;
457 __u8 genstatus;
458};
459
460struct voyager_psi {
461 struct voyager_psi_regs regs;
462 struct voyager_psi_subregs subregs;
463};
464
465struct voyager_SUS {
466#define VOYAGER_DUMP_BUTTON_NMI 0x1
467#define VOYAGER_SUS_VALID 0x2
468#define VOYAGER_SYSINT_COMPLETE 0x3
469 __u8 SUS_mbox;
470#define VOYAGER_NO_COMMAND 0x0
471#define VOYAGER_IGNORE_DUMP 0x1
472#define VOYAGER_DO_DUMP 0x2
473#define VOYAGER_SYSINT_HANDSHAKE 0x3
474#define VOYAGER_DO_MEM_DUMP 0x4
475#define VOYAGER_SYSINT_WAS_RECOVERED 0x5
476 __u8 kernel_mbox;
477#define VOYAGER_MAILBOX_VERSION 0x10
478 __u8 SUS_version;
479 __u8 kernel_version;
480#define VOYAGER_OS_HAS_SYSINT 0x1
481#define VOYAGER_OS_IN_PROGRESS 0x2
482#define VOYAGER_UPDATING_WDPERIOD 0x4
483 __u32 kernel_flags;
484#define VOYAGER_SUS_BOOTING 0x1
485#define VOYAGER_SUS_IN_PROGRESS 0x2
486 __u32 SUS_flags;
487 __u32 watchdog_period;
488 __u32 watchdog_count;
489 __u32 SUS_errorlog;
490 /* lots of system configuration stuff under here */
491};
492
493/* Variables exported by voyager_smp */
494extern __u32 voyager_extended_vic_processors;
495extern __u32 voyager_allowed_boot_processors;
496extern __u32 voyager_quad_processors;
497extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
498extern struct voyager_SUS *voyager_SUS;
499
500/* variables exported always */
501extern struct task_struct *voyager_thread;
502extern int voyager_level;
503extern struct voyager_status voyager_status;
504
505/* functions exported by the voyager and voyager_smp modules */
506extern int voyager_cat_readb(__u8 module, __u8 asic, int reg);
507extern void voyager_cat_init(void);
508extern void voyager_detect(struct voyager_bios_info *);
509extern void voyager_trap_init(void);
510extern void voyager_setup_irqs(void);
511extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length);
512extern void voyager_smp_intr_init(void);
513extern __u8 voyager_extended_cmos_read(__u16 cmos_address);
514extern void voyager_smp_dump(void);
515extern void voyager_timer_interrupt(void);
516extern void smp_local_timer_interrupt(void);
517extern void voyager_power_off(void);
518extern void smp_voyager_power_off(void *dummy);
519extern void voyager_restart(void);
520extern void voyager_cat_power_off(void);
521extern void voyager_cat_do_common_interrupt(void);
522extern void voyager_handle_nmi(void);
523/* Command codes for voyager_cat_psi() below */
524#define VOYAGER_PSI_READ 0
525#define VOYAGER_PSI_WRITE 1
526#define VOYAGER_PSI_SUBREAD 2
527#define VOYAGER_PSI_SUBWRITE 3
528extern void voyager_cat_psi(__u8, __u16, __u8 *);
diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h
deleted file mode 100644
index dcd4682413de..000000000000
--- a/include/asm-x86/vsyscall.h
+++ /dev/null
@@ -1,44 +0,0 @@
1#ifndef ASM_X86__VSYSCALL_H
2#define ASM_X86__VSYSCALL_H
3
4enum vsyscall_num {
5 __NR_vgettimeofday,
6 __NR_vtime,
7 __NR_vgetcpu,
8};
9
10#define VSYSCALL_START (-10UL << 20)
11#define VSYSCALL_SIZE 1024
12#define VSYSCALL_END (-2UL << 20)
13#define VSYSCALL_MAPPED_PAGES 1
14#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
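
For orientation, the arithmetic above works out as follows (a worked example,
not part of the original header): on x86-64, -10UL << 20 is 0xffffffffff600000
and -2UL << 20 is 0xffffffffffe00000, so the three vsyscalls sit 1024 bytes
apart in the fixed vsyscall region:

	VSYSCALL_ADDR(__NR_vgettimeofday) == 0xffffffffff600000
	VSYSCALL_ADDR(__NR_vtime)         == 0xffffffffff600400
	VSYSCALL_ADDR(__NR_vgetcpu)       == 0xffffffffff600800
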
15
16#ifdef __KERNEL__
17#include <linux/seqlock.h>
18
19#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
20#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
21
22/* Definitions for CONFIG_GENERIC_TIME */
23#define __section_vsyscall_gtod_data __attribute__ \
24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
25#define __section_vsyscall_clock __attribute__ \
26 ((unused, __section__ (".vsyscall_clock"),aligned(16)))
27#define __vsyscall_fn \
28 __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
29
30#define VGETCPU_RDTSCP 1
31#define VGETCPU_LSL 2
32
33extern int __vgetcpu_mode;
34extern volatile unsigned long __jiffies;
35
36/* kernel space (writeable) */
37extern int vgetcpu_mode;
38extern struct timezone sys_tz;
39
40extern void map_vsyscall(void);
41
42#endif /* __KERNEL__ */
43
44#endif /* ASM_X86__VSYSCALL_H */
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h
deleted file mode 100644
index f2cba4e79a23..000000000000
--- a/include/asm-x86/xcr.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2008 rPath, Inc. - All Rights Reserved
4 *
5 * This file is part of the Linux kernel, and is made available under
6 * the terms of the GNU General Public License version 2 or (at your
7 * option) any later version; incorporated herein by reference.
8 *
9 * ----------------------------------------------------------------------- */
10
11/*
12 * asm-x86/xcr.h
13 *
14 * Definitions for the eXtended Control Register instructions
15 */
16
17#ifndef _ASM_X86_XCR_H
18#define _ASM_X86_XCR_H
19
20#define XCR_XFEATURE_ENABLED_MASK 0x00000000
21
22#ifdef __KERNEL__
23# ifndef __ASSEMBLY__
24
25#include <linux/types.h>
26
27static inline u64 xgetbv(u32 index)
28{
29 u32 eax, edx;
30
31 asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
32 : "=a" (eax), "=d" (edx)
33 : "c" (index));
34 return eax + ((u64)edx << 32);
35}
36
37static inline void xsetbv(u32 index, u64 value)
38{
39 u32 eax = value;
40 u32 edx = value >> 32;
41
42 asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
43 : : "a" (eax), "d" (edx), "c" (index));
44}
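
A minimal usage sketch (XCR_XFEATURE_ENABLED_MASK above is the index of XCR0;
the bit value is an assumption for illustration, and xsetbv faults unless
CR4.OSXSAVE is set and the CPU supports the requested state):

	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);	/* read XCR0 */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | 0x4);	/* e.g. enable the YMM state bit */
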
45
46# endif /* __ASSEMBLY__ */
47#endif /* __KERNEL__ */
48
49#endif /* _ASM_X86_XCR_H */
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
deleted file mode 100644
index 8151f5b8b6cb..000000000000
--- a/include/asm-x86/xen/events.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef ASM_X86__XEN__EVENTS_H
2#define ASM_X86__XEN__EVENTS_H
3
4enum ipi_vector {
5 XEN_RESCHEDULE_VECTOR,
6 XEN_CALL_FUNCTION_VECTOR,
7 XEN_CALL_FUNCTION_SINGLE_VECTOR,
8 XEN_SPIN_UNLOCK_VECTOR,
9
10 XEN_NR_IPIS,
11};
12
13static inline int xen_irqs_disabled(struct pt_regs *regs)
14{
15 return raw_irqs_disabled_flags(regs->flags);
16}
17
18static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
19{
20 regs->orig_ax = ~irq;
21 do_IRQ(regs);
22}
23
24#endif /* ASM_X86__XEN__EVENTS_H */
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h
deleted file mode 100644
index c4baab4d2b68..000000000000
--- a/include/asm-x86/xen/grant_table.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef ASM_X86__XEN__GRANT_TABLE_H
2#define ASM_X86__XEN__GRANT_TABLE_H
3
4#define xen_alloc_vm_area(size) alloc_vm_area(size)
5#define xen_free_vm_area(area) free_vm_area(area)
6
7#endif /* ASM_X86__XEN__GRANT_TABLE_H */
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
deleted file mode 100644
index 44f4259bee3f..000000000000
--- a/include/asm-x86/xen/hypercall.h
+++ /dev/null
@@ -1,527 +0,0 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef ASM_X86__XEN__HYPERCALL_H
34#define ASM_X86__XEN__HYPERCALL_H
35
36#include <linux/errno.h>
37#include <linux/string.h>
38
39#include <xen/interface/xen.h>
40#include <xen/interface/sched.h>
41#include <xen/interface/physdev.h>
42
43/*
44 * The hypercall asms have to meet several constraints:
45 * - Work on 32- and 64-bit.
46 * The two architectures put their arguments in different sets of
47 * registers.
48 *
49 * - Work around asm syntax quirks
50 * It isn't possible to specify one of the rNN registers in a
51 * constraint, so we use explicit register variables to get the
52 * args into the right place.
53 *
54 * - Mark all registers as potentially clobbered
55 * Even unused parameters can be clobbered by the hypervisor, so we
56 * need to make sure gcc knows it.
57 *
58 * - Avoid compiler bugs.
59 * This is the tricky part. Because x86_32 has such a constrained
60 * register set, gcc versions below 4.3 have trouble generating
61 * code when all the arg registers and memory are trashed by the
62 * asm. There are syntactically simpler ways of achieving the
63 * semantics below, but they cause the compiler to crash.
64 *
65 * The only combination I found which works is:
66 * - assign the __argX variables first
67 * - list all actually used parameters as "+r" (__argX)
68 * - clobber the rest
69 *
70 * The result certainly isn't pretty, and it really shows up cpp's
71 * weakness as a macro language. Sorry. (But let's just give thanks
72 * there aren't more than 5 arguments...)
73 */
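
As a rough illustration of what the machinery below produces (a hand-expanded
sketch for 32-bit, not compiler output), _hypercall2(int, sched_op, cmd, arg)
boils down to approximately:

	register unsigned long __res  asm("eax");
	register unsigned long __arg1 asm("ebx") = (unsigned long)cmd;
	register unsigned long __arg2 asm("ecx") = (unsigned long)arg;
	asm volatile("call hypercall_page+%c[offset]"
		     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
		     : [offset] "i" (__HYPERVISOR_sched_op * 32)	/* 32-byte entry stride */
		     : "memory", "edx", "esi", "edi");		/* unused arg regs clobbered */
	(int)__res;
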
74
75extern struct { char _entry[32]; } hypercall_page[];
76
77#define __HYPERCALL "call hypercall_page+%c[offset]"
78#define __HYPERCALL_ENTRY(x) \
79 [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
80
81#ifdef CONFIG_X86_32
82#define __HYPERCALL_RETREG "eax"
83#define __HYPERCALL_ARG1REG "ebx"
84#define __HYPERCALL_ARG2REG "ecx"
85#define __HYPERCALL_ARG3REG "edx"
86#define __HYPERCALL_ARG4REG "esi"
87#define __HYPERCALL_ARG5REG "edi"
88#else
89#define __HYPERCALL_RETREG "rax"
90#define __HYPERCALL_ARG1REG "rdi"
91#define __HYPERCALL_ARG2REG "rsi"
92#define __HYPERCALL_ARG3REG "rdx"
93#define __HYPERCALL_ARG4REG "r10"
94#define __HYPERCALL_ARG5REG "r8"
95#endif
96
97#define __HYPERCALL_DECLS \
98 register unsigned long __res asm(__HYPERCALL_RETREG); \
99 register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
100 register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
101 register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
102 register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
103 register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
104
105#define __HYPERCALL_0PARAM "=r" (__res)
106#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
107#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
108#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
109#define __HYPERCALL_4PARAM __HYPERCALL_3PARAM, "+r" (__arg4)
110#define __HYPERCALL_5PARAM __HYPERCALL_4PARAM, "+r" (__arg5)
111
112#define __HYPERCALL_0ARG()
113#define __HYPERCALL_1ARG(a1) \
114 __HYPERCALL_0ARG() __arg1 = (unsigned long)(a1);
115#define __HYPERCALL_2ARG(a1,a2) \
116 __HYPERCALL_1ARG(a1) __arg2 = (unsigned long)(a2);
117#define __HYPERCALL_3ARG(a1,a2,a3) \
118 __HYPERCALL_2ARG(a1,a2) __arg3 = (unsigned long)(a3);
119#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
120 __HYPERCALL_3ARG(a1,a2,a3) __arg4 = (unsigned long)(a4);
121#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
122 __HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5);
123
124#define __HYPERCALL_CLOBBER5 "memory"
125#define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
126#define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
127#define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
128#define __HYPERCALL_CLOBBER1 __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
129#define __HYPERCALL_CLOBBER0 __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
130
131#define _hypercall0(type, name) \
132({ \
133 __HYPERCALL_DECLS; \
134 __HYPERCALL_0ARG(); \
135 asm volatile (__HYPERCALL \
136 : __HYPERCALL_0PARAM \
137 : __HYPERCALL_ENTRY(name) \
138 : __HYPERCALL_CLOBBER0); \
139 (type)__res; \
140})
141
142#define _hypercall1(type, name, a1) \
143({ \
144 __HYPERCALL_DECLS; \
145 __HYPERCALL_1ARG(a1); \
146 asm volatile (__HYPERCALL \
147 : __HYPERCALL_1PARAM \
148 : __HYPERCALL_ENTRY(name) \
149 : __HYPERCALL_CLOBBER1); \
150 (type)__res; \
151})
152
153#define _hypercall2(type, name, a1, a2) \
154({ \
155 __HYPERCALL_DECLS; \
156 __HYPERCALL_2ARG(a1, a2); \
157 asm volatile (__HYPERCALL \
158 : __HYPERCALL_2PARAM \
159 : __HYPERCALL_ENTRY(name) \
160 : __HYPERCALL_CLOBBER2); \
161 (type)__res; \
162})
163
164#define _hypercall3(type, name, a1, a2, a3) \
165({ \
166 __HYPERCALL_DECLS; \
167 __HYPERCALL_3ARG(a1, a2, a3); \
168 asm volatile (__HYPERCALL \
169 : __HYPERCALL_3PARAM \
170 : __HYPERCALL_ENTRY(name) \
171 : __HYPERCALL_CLOBBER3); \
172 (type)__res; \
173})
174
175#define _hypercall4(type, name, a1, a2, a3, a4) \
176({ \
177 __HYPERCALL_DECLS; \
178 __HYPERCALL_4ARG(a1, a2, a3, a4); \
179 asm volatile (__HYPERCALL \
180 : __HYPERCALL_4PARAM \
181 : __HYPERCALL_ENTRY(name) \
182 : __HYPERCALL_CLOBBER4); \
183 (type)__res; \
184})
185
186#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
187({ \
188 __HYPERCALL_DECLS; \
189 __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
190 asm volatile (__HYPERCALL \
191 : __HYPERCALL_5PARAM \
192 : __HYPERCALL_ENTRY(name) \
193 : __HYPERCALL_CLOBBER5); \
194 (type)__res; \
195})
196
197static inline int
198HYPERVISOR_set_trap_table(struct trap_info *table)
199{
200 return _hypercall1(int, set_trap_table, table);
201}
202
203static inline int
204HYPERVISOR_mmu_update(struct mmu_update *req, int count,
205 int *success_count, domid_t domid)
206{
207 return _hypercall4(int, mmu_update, req, count, success_count, domid);
208}
209
210static inline int
211HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
212 int *success_count, domid_t domid)
213{
214 return _hypercall4(int, mmuext_op, op, count, success_count, domid);
215}
216
217static inline int
218HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
219{
220 return _hypercall2(int, set_gdt, frame_list, entries);
221}
222
223static inline int
224HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
225{
226 return _hypercall2(int, stack_switch, ss, esp);
227}
228
229#ifdef CONFIG_X86_32
230static inline int
231HYPERVISOR_set_callbacks(unsigned long event_selector,
232 unsigned long event_address,
233 unsigned long failsafe_selector,
234 unsigned long failsafe_address)
235{
236 return _hypercall4(int, set_callbacks,
237 event_selector, event_address,
238 failsafe_selector, failsafe_address);
239}
240#else /* CONFIG_X86_64 */
241static inline int
242HYPERVISOR_set_callbacks(unsigned long event_address,
243 unsigned long failsafe_address,
244 unsigned long syscall_address)
245{
246 return _hypercall3(int, set_callbacks,
247 event_address, failsafe_address,
248 syscall_address);
249}
250#endif /* CONFIG_X86_{32,64} */
251
252static inline int
253HYPERVISOR_callback_op(int cmd, void *arg)
254{
255 return _hypercall2(int, callback_op, cmd, arg);
256}
257
258static inline int
259HYPERVISOR_fpu_taskswitch(int set)
260{
261 return _hypercall1(int, fpu_taskswitch, set);
262}
263
264static inline int
265HYPERVISOR_sched_op(int cmd, void *arg)
266{
267 return _hypercall2(int, sched_op_new, cmd, arg);
268}
269
270static inline long
271HYPERVISOR_set_timer_op(u64 timeout)
272{
273 unsigned long timeout_hi = (unsigned long)(timeout>>32);
274 unsigned long timeout_lo = (unsigned long)timeout;
275 return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
276}
277
278static inline int
279HYPERVISOR_set_debugreg(int reg, unsigned long value)
280{
281 return _hypercall2(int, set_debugreg, reg, value);
282}
283
284static inline unsigned long
285HYPERVISOR_get_debugreg(int reg)
286{
287 return _hypercall1(unsigned long, get_debugreg, reg);
288}
289
290static inline int
291HYPERVISOR_update_descriptor(u64 ma, u64 desc)
292{
293 return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
294}
295
296static inline int
297HYPERVISOR_memory_op(unsigned int cmd, void *arg)
298{
299 return _hypercall2(int, memory_op, cmd, arg);
300}
301
302static inline int
303HYPERVISOR_multicall(void *call_list, int nr_calls)
304{
305 return _hypercall2(int, multicall, call_list, nr_calls);
306}
307
308static inline int
309HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
310 unsigned long flags)
311{
312 if (sizeof(new_val) == sizeof(long))
313 return _hypercall3(int, update_va_mapping, va,
314 new_val.pte, flags);
315 else
316 return _hypercall4(int, update_va_mapping, va,
317 new_val.pte, new_val.pte >> 32, flags);
318}
319
320static inline int
321HYPERVISOR_event_channel_op(int cmd, void *arg)
322{
323 int rc = _hypercall2(int, event_channel_op, cmd, arg);
324 if (unlikely(rc == -ENOSYS)) {
325 struct evtchn_op op;
326 op.cmd = cmd;
327 memcpy(&op.u, arg, sizeof(op.u));
328 rc = _hypercall1(int, event_channel_op_compat, &op);
329 memcpy(arg, &op.u, sizeof(op.u));
330 }
331 return rc;
332}
333
334static inline int
335HYPERVISOR_xen_version(int cmd, void *arg)
336{
337 return _hypercall2(int, xen_version, cmd, arg);
338}
339
340static inline int
341HYPERVISOR_console_io(int cmd, int count, char *str)
342{
343 return _hypercall3(int, console_io, cmd, count, str);
344}
345
346static inline int
347HYPERVISOR_physdev_op(int cmd, void *arg)
348{
349 int rc = _hypercall2(int, physdev_op, cmd, arg);
350 if (unlikely(rc == -ENOSYS)) {
351 struct physdev_op op;
352 op.cmd = cmd;
353 memcpy(&op.u, arg, sizeof(op.u));
354 rc = _hypercall1(int, physdev_op_compat, &op);
355 memcpy(arg, &op.u, sizeof(op.u));
356 }
357 return rc;
358}
359
360static inline int
361HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
362{
363 return _hypercall3(int, grant_table_op, cmd, uop, count);
364}
365
366static inline int
367HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
368 unsigned long flags, domid_t domid)
369{
370 if (sizeof(new_val) == sizeof(long))
371 return _hypercall4(int, update_va_mapping_otherdomain, va,
372 new_val.pte, flags, domid);
373 else
374 return _hypercall5(int, update_va_mapping_otherdomain, va,
375 new_val.pte, new_val.pte >> 32,
376 flags, domid);
377}
378
379static inline int
380HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
381{
382 return _hypercall2(int, vm_assist, cmd, type);
383}
384
385static inline int
386HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
387{
388 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
389}
390
391#ifdef CONFIG_X86_64
392static inline int
393HYPERVISOR_set_segment_base(int reg, unsigned long value)
394{
395 return _hypercall2(int, set_segment_base, reg, value);
396}
397#endif
398
399static inline int
400HYPERVISOR_suspend(unsigned long srec)
401{
402 return _hypercall3(int, sched_op, SCHEDOP_shutdown,
403 SHUTDOWN_suspend, srec);
404}
405
406static inline int
407HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
408{
409 return _hypercall2(int, nmi_op, op, arg);
410}
411
412static inline void
413MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
414{
415 mcl->op = __HYPERVISOR_fpu_taskswitch;
416 mcl->args[0] = set;
417}
418
419static inline void
420MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
421 pte_t new_val, unsigned long flags)
422{
423 mcl->op = __HYPERVISOR_update_va_mapping;
424 mcl->args[0] = va;
425 if (sizeof(new_val) == sizeof(long)) {
426 mcl->args[1] = new_val.pte;
427 mcl->args[2] = flags;
428 } else {
429 mcl->args[1] = new_val.pte;
430 mcl->args[2] = new_val.pte >> 32;
431 mcl->args[3] = flags;
432 }
433}
434
435static inline void
436MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
437 void *uop, unsigned int count)
438{
439 mcl->op = __HYPERVISOR_grant_table_op;
440 mcl->args[0] = cmd;
441 mcl->args[1] = (unsigned long)uop;
442 mcl->args[2] = count;
443}
444
445static inline void
446MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
447 pte_t new_val, unsigned long flags,
448 domid_t domid)
449{
450 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
451 mcl->args[0] = va;
452 if (sizeof(new_val) == sizeof(long)) {
453 mcl->args[1] = new_val.pte;
454 mcl->args[2] = flags;
455 mcl->args[3] = domid;
456 } else {
457 mcl->args[1] = new_val.pte;
458 mcl->args[2] = new_val.pte >> 32;
459 mcl->args[3] = flags;
460 mcl->args[4] = domid;
461 }
462}
463
464static inline void
465MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
466 struct desc_struct desc)
467{
468 mcl->op = __HYPERVISOR_update_descriptor;
469 if (sizeof(maddr) == sizeof(long)) {
470 mcl->args[0] = maddr;
471 mcl->args[1] = *(unsigned long *)&desc;
472 } else {
473 mcl->args[0] = maddr;
474 mcl->args[1] = maddr >> 32;
475 mcl->args[2] = desc.a;
476 mcl->args[3] = desc.b;
477 }
478}
479
480static inline void
481MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
482{
483 mcl->op = __HYPERVISOR_memory_op;
484 mcl->args[0] = cmd;
485 mcl->args[1] = (unsigned long)arg;
486}
487
488static inline void
489MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
490 int count, int *success_count, domid_t domid)
491{
492 mcl->op = __HYPERVISOR_mmu_update;
493 mcl->args[0] = (unsigned long)req;
494 mcl->args[1] = count;
495 mcl->args[2] = (unsigned long)success_count;
496 mcl->args[3] = domid;
497}
498
499static inline void
500MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
501 int *success_count, domid_t domid)
502{
503 mcl->op = __HYPERVISOR_mmuext_op;
504 mcl->args[0] = (unsigned long)op;
505 mcl->args[1] = count;
506 mcl->args[2] = (unsigned long)success_count;
507 mcl->args[3] = domid;
508}
509
510static inline void
511MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
512{
513 mcl->op = __HYPERVISOR_set_gdt;
514 mcl->args[0] = (unsigned long)frames;
515 mcl->args[1] = entries;
516}
517
518static inline void
519MULTI_stack_switch(struct multicall_entry *mcl,
520 unsigned long ss, unsigned long esp)
521{
522 mcl->op = __HYPERVISOR_stack_switch;
523 mcl->args[0] = ss;
524 mcl->args[1] = esp;
525}
526
527#endif /* ASM_X86__XEN__HYPERCALL_H */
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h
deleted file mode 100644
index 445a24759560..000000000000
--- a/include/asm-x86/xen/hypervisor.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/******************************************************************************
2 * hypervisor.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef ASM_X86__XEN__HYPERVISOR_H
34#define ASM_X86__XEN__HYPERVISOR_H
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38
39#include <xen/interface/xen.h>
40#include <xen/interface/version.h>
41
42#include <asm/ptrace.h>
43#include <asm/page.h>
44#include <asm/desc.h>
45#if defined(__i386__)
46# ifdef CONFIG_X86_PAE
47# include <asm-generic/pgtable-nopud.h>
48# else
49# include <asm-generic/pgtable-nopmd.h>
50# endif
51#endif
52#include <asm/xen/hypercall.h>
53
54/* arch/i386/kernel/setup.c */
55extern struct shared_info *HYPERVISOR_shared_info;
56extern struct start_info *xen_start_info;
57
58/* arch/i386/mach-xen/evtchn.c */
59/* Force a proper event-channel callback from Xen. */
60extern void force_evtchn_callback(void);
61
62/* Turn jiffies into Xen system time. */
63u64 jiffies_to_st(unsigned long jiffies);
64
65
66#define MULTI_UVMFLAGS_INDEX 3
67#define MULTI_UVMDOMID_INDEX 4
68
69enum xen_domain_type {
70 XEN_NATIVE,
71 XEN_PV_DOMAIN,
72 XEN_HVM_DOMAIN,
73};
74
75extern enum xen_domain_type xen_domain_type;
76
77#define xen_domain() (xen_domain_type != XEN_NATIVE)
78#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
79#define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
80#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
81
82#endif /* ASM_X86__XEN__HYPERVISOR_H */
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h
deleted file mode 100644
index d077bba96da9..000000000000
--- a/include/asm-x86/xen/interface.h
+++ /dev/null
@@ -1,175 +0,0 @@
1/******************************************************************************
2 * arch-x86_32.h
3 *
4 * Guest OS interface to x86 Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9#ifndef ASM_X86__XEN__INTERFACE_H
10#define ASM_X86__XEN__INTERFACE_H
11
12#ifdef __XEN__
13#define __DEFINE_GUEST_HANDLE(name, type) \
14 typedef struct { type *p; } __guest_handle_ ## name
15#else
16#define __DEFINE_GUEST_HANDLE(name, type) \
17 typedef type * __guest_handle_ ## name
18#endif
19
20#define DEFINE_GUEST_HANDLE_STRUCT(name) \
21 __DEFINE_GUEST_HANDLE(name, struct name)
22#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
23#define GUEST_HANDLE(name) __guest_handle_ ## name
24
25#ifdef __XEN__
26#if defined(__i386__)
27#define set_xen_guest_handle(hnd, val) \
28 do { \
29 if (sizeof(hnd) == 8) \
30 *(uint64_t *)&(hnd) = 0; \
31 (hnd).p = val; \
32 } while (0)
33#elif defined(__x86_64__)
34#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
35#endif
36#else
37#if defined(__i386__)
38#define set_xen_guest_handle(hnd, val) \
39 do { \
40 if (sizeof(hnd) == 8) \
41 *(uint64_t *)&(hnd) = 0; \
42 (hnd) = val; \
43 } while (0)
44#elif defined(__x86_64__)
45#define set_xen_guest_handle(hnd, val) do { (hnd) = val; } while (0)
46#endif
47#endif
48
49#ifndef __ASSEMBLY__
50/* Guest handles for primitive C types. */
51__DEFINE_GUEST_HANDLE(uchar, unsigned char);
52__DEFINE_GUEST_HANDLE(uint, unsigned int);
53__DEFINE_GUEST_HANDLE(ulong, unsigned long);
54DEFINE_GUEST_HANDLE(char);
55DEFINE_GUEST_HANDLE(int);
56DEFINE_GUEST_HANDLE(long);
57DEFINE_GUEST_HANDLE(void);
58#endif
59
60#ifndef HYPERVISOR_VIRT_START
61#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
62#endif
63
64#ifndef machine_to_phys_mapping
65#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
66#endif
67
68/* Maximum number of virtual CPUs in multi-processor guests. */
69#define MAX_VIRT_CPUS 32
70
71/*
72 * SEGMENT DESCRIPTOR TABLES
73 */
74/*
75 * A number of GDT entries are reserved by Xen. These are not situated at the
76 * start of the GDT because some stupid OSes export hard-coded selector values
77 * in their ABI. These hard-coded values are always near the start of the GDT,
78 * so Xen places itself out of the way, at the far end of the GDT.
79 */
80#define FIRST_RESERVED_GDT_PAGE 14
81#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
82#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
83
84/*
85 * Send an array of these to HYPERVISOR_set_trap_table()
86 * The privilege level specifies which modes may enter a trap via a software
87 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
88 * privilege levels as follows:
89 * Level == 0: No one may enter
90 * Level == 1: Kernel may enter
91 * Level == 2: Kernel may enter
92 * Level == 3: Everyone may enter
93 */
94#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
95#define TI_GET_IF(_ti) ((_ti)->flags & 4)
96#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
97#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
98
99#ifndef __ASSEMBLY__
100struct trap_info {
101 uint8_t vector; /* exception vector */
102 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
103 uint16_t cs; /* code selector */
104 unsigned long address; /* code offset */
105};
106DEFINE_GUEST_HANDLE_STRUCT(trap_info);
107
108struct arch_shared_info {
109 unsigned long max_pfn; /* max pfn that appears in table */
110 /* Frame containing list of mfns containing list of mfns containing p2m. */
111 unsigned long pfn_to_mfn_frame_list_list;
112 unsigned long nmi_reason;
113};
114#endif /* !__ASSEMBLY__ */
115
116#ifdef CONFIG_X86_32
117#include "interface_32.h"
118#else
119#include "interface_64.h"
120#endif
121
122#ifndef __ASSEMBLY__
123/*
124 * The following is all CPU context. Note that the fpu_ctxt block is filled
125 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
126 */
127struct vcpu_guest_context {
128 /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
129 struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
130#define VGCF_I387_VALID (1<<0)
131#define VGCF_HVM_GUEST (1<<1)
132#define VGCF_IN_KERNEL (1<<2)
133 unsigned long flags; /* VGCF_* flags */
134 struct cpu_user_regs user_regs; /* User-level CPU registers */
135 struct trap_info trap_ctxt[256]; /* Virtual IDT */
136 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
137 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
138 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
139 /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
140 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
141 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
142#ifdef __i386__
143 unsigned long event_callback_cs; /* CS:EIP of event callback */
144 unsigned long event_callback_eip;
145 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
146 unsigned long failsafe_callback_eip;
147#else
148 unsigned long event_callback_eip;
149 unsigned long failsafe_callback_eip;
150 unsigned long syscall_callback_eip;
151#endif
152 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
153#ifdef __x86_64__
154 /* Segment base addresses. */
155 uint64_t fs_base;
156 uint64_t gs_base_kernel;
157 uint64_t gs_base_user;
158#endif
159};
160DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
161#endif /* !__ASSEMBLY__ */
162
163/*
164 * Prefix forces emulation of some non-trapping instructions.
165 * Currently only CPUID.
166 */
167#ifdef __ASSEMBLY__
168#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
169#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
170#else
171#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
172#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
173#endif
174
175#endif /* ASM_X86__XEN__INTERFACE_H */
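
A hedged sketch of how the XEN_CPUID macro above is meant to be used from C:
the prefix bytes are ud2a followed by the ASCII characters 'x','e','n', which
Xen recognises and uses to emulate the CPUID that follows (0x40000000 is, by
convention, the hypervisor CPUID leaf):

	unsigned int eax = 0x40000000, ebx, ecx, edx;
	asm volatile(XEN_CPUID
		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		     : "0" (eax));	/* ebx:ecx:edx hold the hypervisor signature */
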
diff --git a/include/asm-x86/xen/interface_32.h b/include/asm-x86/xen/interface_32.h
deleted file mode 100644
index 08167e19fc66..000000000000
--- a/include/asm-x86/xen/interface_32.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/******************************************************************************
2 * arch-x86_32.h
3 *
4 * Guest OS interface to x86 32-bit Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9#ifndef ASM_X86__XEN__INTERFACE_32_H
10#define ASM_X86__XEN__INTERFACE_32_H
11
12
13/*
14 * These flat segments are in the Xen-private section of every GDT. Since these
15 * are also present in the initial GDT, many OSes will be able to avoid
16 * installing their own GDT.
17 */
18#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
19#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
20#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
21#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
22#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
23#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
24
25#define FLAT_KERNEL_CS FLAT_RING1_CS
26#define FLAT_KERNEL_DS FLAT_RING1_DS
27#define FLAT_KERNEL_SS FLAT_RING1_SS
28#define FLAT_USER_CS FLAT_RING3_CS
29#define FLAT_USER_DS FLAT_RING3_DS
30#define FLAT_USER_SS FLAT_RING3_SS
31
32/* And the trap vector is... */
33#define TRAP_INSTR "int $0x82"
34
35/*
36 * Virtual addresses beyond this are not modifiable by guest OSes. The
37 * machine->physical mapping table starts at this address, read-only.
38 */
39#define __HYPERVISOR_VIRT_START 0xF5800000
40
41#ifndef __ASSEMBLY__
42
43struct cpu_user_regs {
44 uint32_t ebx;
45 uint32_t ecx;
46 uint32_t edx;
47 uint32_t esi;
48 uint32_t edi;
49 uint32_t ebp;
50 uint32_t eax;
51 uint16_t error_code; /* private */
52 uint16_t entry_vector; /* private */
53 uint32_t eip;
54 uint16_t cs;
55 uint8_t saved_upcall_mask;
56 uint8_t _pad0;
57 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
58 uint32_t esp;
59 uint16_t ss, _pad1;
60 uint16_t es, _pad2;
61 uint16_t ds, _pad3;
62 uint16_t fs, _pad4;
63 uint16_t gs, _pad5;
64};
65DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
66
67typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
68
69struct arch_vcpu_info {
70 unsigned long cr2;
71 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
72};
73
74struct xen_callback {
75 unsigned long cs;
76 unsigned long eip;
77};
78typedef struct xen_callback xen_callback_t;
79
80#define XEN_CALLBACK(__cs, __eip) \
81 ((struct xen_callback){ .cs = (__cs), .eip = (unsigned long)(__eip) })
82#endif /* !__ASSEMBLY__ */
83
84
85/*
86 * Page-directory addresses above 4GB do not fit into architectural %cr3.
87 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
88 * must use the following accessor macros to pack/unpack valid MFNs.
89 *
90 * Note that Xen is using the fact that the pagetable base is always
91 * page-aligned, and putting the 12 MSB of the address into the 12 LSB
92 * of cr3.
93 */
94#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
95#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
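
A worked example of the rotation (illustrative numbers only): for a page
directory whose machine frame number is 0x123456,

	xen_pfn_to_cr3(0x123456) == (0x123456 << 12) | (0x123456 >> 20)
	                         ==  0x23456000     |  0x1
	                         ==  0x23456001

so the address bits that overflow 32-bit %cr3 travel in its low 12 bits, and
xen_cr3_to_pfn(0x23456001) rotates them back to recover 0x123456.
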
96
97#endif /* ASM_X86__XEN__INTERFACE_32_H */
diff --git a/include/asm-x86/xen/interface_64.h b/include/asm-x86/xen/interface_64.h
deleted file mode 100644
index 046c0f1e01d4..000000000000
--- a/include/asm-x86/xen/interface_64.h
+++ /dev/null
@@ -1,159 +0,0 @@
1#ifndef ASM_X86__XEN__INTERFACE_64_H
2#define ASM_X86__XEN__INTERFACE_64_H
3
4/*
5 * 64-bit segment selectors
6 * These flat segments are in the Xen-private section of every GDT. Since these
7 * are also present in the initial GDT, many OSes will be able to avoid
8 * installing their own GDT.
9 */
10
11#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
12#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
13#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
14#define FLAT_RING3_DS64 0x0000 /* NULL selector */
15#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
16#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
17
18#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
19#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
20#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
21#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
22#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
23#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
24#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
25#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
26#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
27
28#define FLAT_USER_DS64 FLAT_RING3_DS64
29#define FLAT_USER_DS32 FLAT_RING3_DS32
30#define FLAT_USER_DS FLAT_USER_DS64
31#define FLAT_USER_CS64 FLAT_RING3_CS64
32#define FLAT_USER_CS32 FLAT_RING3_CS32
33#define FLAT_USER_CS FLAT_USER_CS64
34#define FLAT_USER_SS64 FLAT_RING3_SS64
35#define FLAT_USER_SS32 FLAT_RING3_SS32
36#define FLAT_USER_SS FLAT_USER_SS64
37
38#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
39#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
40#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
41#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
42
43#ifndef HYPERVISOR_VIRT_START
44#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
45#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
46#endif
47
48#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
49#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
50#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
51#ifndef machine_to_phys_mapping
52#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
53#endif
54
55/*
56 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
57 * @which == SEGBASE_* ; @base == 64-bit base address
58 * Returns 0 on success.
59 */
60#define SEGBASE_FS 0
61#define SEGBASE_GS_USER 1
62#define SEGBASE_GS_KERNEL 2
63#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
64
65/*
66 * int HYPERVISOR_iret(void)
67 * All arguments are on the kernel stack, in the following format.
68 * Never returns if successful. Current kernel context is lost.
69 * The saved CS is mapped as follows:
70 * RING0 -> RING3 kernel mode.
71 * RING1 -> RING3 kernel mode.
72 * RING2 -> RING3 kernel mode.
73 * RING3 -> RING3 user mode.
74 * However, RING0 indicates that the guest kernel should return to itself
75 * directly with
76 * orb $3,1*8(%rsp)
77 * iretq
78 * If flags contains VGCF_in_syscall:
79 * Restore RAX, RIP, RFLAGS, RSP.
80 * Discard R11, RCX, CS, SS.
81 * Otherwise:
82 * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
83 * All other registers are saved on hypercall entry and restored to user.
84 */
85/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
86#define _VGCF_in_syscall 8
87#define VGCF_in_syscall (1<<_VGCF_in_syscall)
88#define VGCF_IN_SYSCALL VGCF_in_syscall
89
90#ifndef __ASSEMBLY__
91
92struct iret_context {
93 /* Top of stack (%rsp at point of hypercall). */
94 uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
95 /* Bottom of iret stack frame. */
96};
97
98#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
99/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
100#define __DECL_REG(name) union { \
101 uint64_t r ## name, e ## name; \
102 uint32_t _e ## name; \
103}
104#else
105/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
106#define __DECL_REG(name) uint64_t r ## name
107#endif
108
109struct cpu_user_regs {
110 uint64_t r15;
111 uint64_t r14;
112 uint64_t r13;
113 uint64_t r12;
114 __DECL_REG(bp);
115 __DECL_REG(bx);
116 uint64_t r11;
117 uint64_t r10;
118 uint64_t r9;
119 uint64_t r8;
120 __DECL_REG(ax);
121 __DECL_REG(cx);
122 __DECL_REG(dx);
123 __DECL_REG(si);
124 __DECL_REG(di);
125 uint32_t error_code; /* private */
126 uint32_t entry_vector; /* private */
127 __DECL_REG(ip);
128 uint16_t cs, _pad0[1];
129 uint8_t saved_upcall_mask;
130 uint8_t _pad1[3];
131 __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
132 __DECL_REG(sp);
133 uint16_t ss, _pad2[3];
134 uint16_t es, _pad3[3];
135 uint16_t ds, _pad4[3];
136 uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
137 uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
138};
139DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
140
141#undef __DECL_REG
142
143#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
144#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
145
146struct arch_vcpu_info {
147 unsigned long cr2;
148 unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
149};
150
151typedef unsigned long xen_callback_t;
152
153#define XEN_CALLBACK(__cs, __rip) \
154 ((unsigned long)(__rip))
155
156#endif /* !__ASSEMBLY__ */
157
158
159#endif /* ASM_X86__XEN__INTERFACE_64_H */
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
deleted file mode 100644
index d5eada0a48d9..000000000000
--- a/include/asm-x86/xen/page.h
+++ /dev/null
@@ -1,165 +0,0 @@
1#ifndef ASM_X86__XEN__PAGE_H
2#define ASM_X86__XEN__PAGE_H
3
4#include <linux/pfn.h>
5
6#include <asm/uaccess.h>
7#include <asm/pgtable.h>
8
9#include <xen/features.h>
10
11/* Xen machine address */
12typedef struct xmaddr {
13 phys_addr_t maddr;
14} xmaddr_t;
15
16/* Xen pseudo-physical address */
17typedef struct xpaddr {
18 phys_addr_t paddr;
19} xpaddr_t;
20
21#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
22#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
23
24/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
25#define INVALID_P2M_ENTRY (~0UL)
26#define FOREIGN_FRAME_BIT (1UL<<31)
27#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
28
29/* Maximum amount of memory we can handle in a domain, in pages */
30#define MAX_DOMAIN_PAGES \
31 ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
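
A quick check on the arithmetic (a worked example; CONFIG_XEN_MAX_DOMAIN_MEMORY
is in GiB, so assuming a value of 8 and 4 KiB pages):

	MAX_DOMAIN_PAGES == 8ULL * 1024 * 1024 * 1024 / 4096 == 2097152	/* pages */
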
32
33
34extern unsigned long get_phys_to_machine(unsigned long pfn);
35extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
36
37static inline unsigned long pfn_to_mfn(unsigned long pfn)
38{
39 if (xen_feature(XENFEAT_auto_translated_physmap))
40 return pfn;
41
42 return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
43}
44
45static inline int phys_to_machine_mapping_valid(unsigned long pfn)
46{
47 if (xen_feature(XENFEAT_auto_translated_physmap))
48 return 1;
49
50 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
51}
52
53static inline unsigned long mfn_to_pfn(unsigned long mfn)
54{
55 unsigned long pfn;
56
57 if (xen_feature(XENFEAT_auto_translated_physmap))
58 return mfn;
59
60#if 0
61 if (unlikely((mfn >> machine_to_phys_order) != 0))
62 return max_mapnr;
63#endif
64
65 pfn = 0;
66 /*
67 * The array access can fail (e.g., device space beyond end of RAM).
68 * In such cases it doesn't matter what we return (we return garbage),
69 * but we must handle the fault without crashing!
70 */
71 __get_user(pfn, &machine_to_phys_mapping[mfn]);
72
73 return pfn;
74}
75
76static inline xmaddr_t phys_to_machine(xpaddr_t phys)
77{
78 unsigned offset = phys.paddr & ~PAGE_MASK;
79 return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
80}
81
82static inline xpaddr_t machine_to_phys(xmaddr_t machine)
83{
84 unsigned offset = machine.maddr & ~PAGE_MASK;
85 return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
86}
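
A small usage sketch of the two converters (hypothetical address; assumes a PV
domain with populated p2m/m2p tables, where the round trip holds for pages the
domain owns):

	xpaddr_t p = XPADDR(0x1234567);		/* pseudo-physical: frame 0x1234, offset 0x567 */
	xmaddr_t m = phys_to_machine(p);	/* same offset, frame remapped via pfn_to_mfn() */
	BUG_ON(machine_to_phys(m).paddr != p.paddr);
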
87
88/*
89 * We detect special mappings in one of two ways:
90 * 1. If the MFN is an I/O page then Xen will set the m2p entry
91 * to be outside our maximum possible pseudophys range.
92 * 2. If the MFN belongs to a different domain then we will certainly
93 * not have MFN in our p2m table. Conversely, if the page is ours,
94 * then we'll have p2m(m2p(MFN))==MFN.
95 * If we detect a special mapping then it doesn't have a 'struct page'.
96 * We force !pfn_valid() by returning an out-of-range pointer.
97 *
98 * NB. These checks require that, for any MFN that is not in our reservation,
99 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
100 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
101 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
102 *
103 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
104 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
105 * require. In all the cases we care about, the FOREIGN_FRAME bit is
106 * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
107 */
108static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
109{
110 extern unsigned long max_mapnr;
111 unsigned long pfn = mfn_to_pfn(mfn);
112 if ((pfn < max_mapnr)
113 && !xen_feature(XENFEAT_auto_translated_physmap)
114 && (get_phys_to_machine(pfn) != mfn))
115 return max_mapnr; /* force !pfn_valid() */
116 /* XXX fixme; not true with sparsemem */
117 return pfn;
118}
119
120/* VIRT <-> MACHINE conversion */
121#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
122#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
123#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
124
125static inline unsigned long pte_mfn(pte_t pte)
126{
127 return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
128}
129
130static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
131{
132 pte_t pte;
133
134 pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
135 (pgprot_val(pgprot) & __supported_pte_mask);
136
137 return pte;
138}
139
140static inline pteval_t pte_val_ma(pte_t pte)
141{
142 return pte.pte;
143}
144
145static inline pte_t __pte_ma(pteval_t x)
146{
147 return (pte_t) { .pte = x };
148}
149
150#define pmd_val_ma(v) ((v).pmd)
151#ifdef __PAGETABLE_PUD_FOLDED
152#define pud_val_ma(v) ((v).pgd.pgd)
153#else
154#define pud_val_ma(v) ((v).pud)
155#endif
156#define __pmd_ma(x) ((pmd_t) { (x) } )
157
158#define pgd_val_ma(x) ((x).pgd)
159
160
161xmaddr_t arbitrary_virt_to_machine(void *address);
162void make_lowmem_page_readonly(void *vaddr);
163void make_lowmem_page_readwrite(void *vaddr);
164
165#endif /* ASM_X86__XEN__PAGE_H */
diff --git a/include/asm-x86/xor.h b/include/asm-x86/xor.h
deleted file mode 100644
index 11b3bb86e17b..000000000000
--- a/include/asm-x86/xor.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "xor_32.h"
3#else
4# include "xor_64.h"
5#endif
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
deleted file mode 100644
index 921b45840449..000000000000
--- a/include/asm-x86/xor_32.h
+++ /dev/null
@@ -1,888 +0,0 @@
1#ifndef ASM_X86__XOR_32_H
2#define ASM_X86__XOR_32_H
3
4/*
5 * Optimized RAID-5 checksumming functions for MMX and SSE.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * You should have received a copy of the GNU General Public License
13 * (for example /usr/src/linux/COPYING); if not, write to the Free
14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */
16
17/*
18 * High-speed RAID5 checksumming functions utilizing MMX instructions.
19 * Copyright (C) 1998 Ingo Molnar.
20 */
21
22#define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
23#define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
24#define XO1(x, y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
25#define XO2(x, y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
26#define XO3(x, y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
27#define XO4(x, y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
28
29#include <asm/i387.h>
30
31static void
32xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
33{
34 unsigned long lines = bytes >> 7;
35
36 kernel_fpu_begin();
37
38 asm volatile(
39#undef BLOCK
40#define BLOCK(i) \
41 LD(i, 0) \
42 LD(i + 1, 1) \
43 LD(i + 2, 2) \
44 LD(i + 3, 3) \
45 XO1(i, 0) \
46 ST(i, 0) \
47 XO1(i + 1, 1) \
48 ST(i + 1, 1) \
49 XO1(i + 2, 2) \
50 ST(i + 2, 2) \
51 XO1(i + 3, 3) \
52 ST(i + 3, 3)
53
54 " .align 32 ;\n"
55 " 1: ;\n"
56
57 BLOCK(0)
58 BLOCK(4)
59 BLOCK(8)
60 BLOCK(12)
61
62 " addl $128, %1 ;\n"
63 " addl $128, %2 ;\n"
64 " decl %0 ;\n"
65 " jnz 1b ;\n"
66 : "+r" (lines),
67 "+r" (p1), "+r" (p2)
68 :
69 : "memory");
70
71 kernel_fpu_end();
72}
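For reference, the unrolled MMX loop above is just an in-place XOR of the
second buffer into the first, 128 bytes per iteration; an equivalent
(unoptimized) C sketch, names hypothetical:

	static void xor_2_ref(unsigned long bytes, unsigned long *p1,
			      const unsigned long *p2)
	{
		unsigned long i;

		/* bytes is a multiple of the 128-byte block size here. */
		for (i = 0; i < bytes / sizeof(*p1); i++)
			p1[i] ^= p2[i];
	}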
73
74static void
75xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
76 unsigned long *p3)
77{
78 unsigned long lines = bytes >> 7;
79
80 kernel_fpu_begin();
81
82 asm volatile(
83#undef BLOCK
84#define BLOCK(i) \
85 LD(i, 0) \
86 LD(i + 1, 1) \
87 LD(i + 2, 2) \
88 LD(i + 3, 3) \
89 XO1(i, 0) \
90 XO1(i + 1, 1) \
91 XO1(i + 2, 2) \
92 XO1(i + 3, 3) \
93 XO2(i, 0) \
94 ST(i, 0) \
95 XO2(i + 1, 1) \
96 ST(i + 1, 1) \
97 XO2(i + 2, 2) \
98 ST(i + 2, 2) \
99 XO2(i + 3, 3) \
100 ST(i + 3, 3)
101
102 " .align 32 ;\n"
103 " 1: ;\n"
104
105 BLOCK(0)
106 BLOCK(4)
107 BLOCK(8)
108 BLOCK(12)
109
110 " addl $128, %1 ;\n"
111 " addl $128, %2 ;\n"
112 " addl $128, %3 ;\n"
113 " decl %0 ;\n"
114 " jnz 1b ;\n"
115 : "+r" (lines),
116 "+r" (p1), "+r" (p2), "+r" (p3)
117 :
118 : "memory");
119
120 kernel_fpu_end();
121}
122
123static void
124xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
125 unsigned long *p3, unsigned long *p4)
126{
127 unsigned long lines = bytes >> 7;
128
129 kernel_fpu_begin();
130
131 asm volatile(
132#undef BLOCK
133#define BLOCK(i) \
134 LD(i, 0) \
135 LD(i + 1, 1) \
136 LD(i + 2, 2) \
137 LD(i + 3, 3) \
138 XO1(i, 0) \
139 XO1(i + 1, 1) \
140 XO1(i + 2, 2) \
141 XO1(i + 3, 3) \
142 XO2(i, 0) \
143 XO2(i + 1, 1) \
144 XO2(i + 2, 2) \
145 XO2(i + 3, 3) \
146 XO3(i, 0) \
147 ST(i, 0) \
148 XO3(i + 1, 1) \
149 ST(i + 1, 1) \
150 XO3(i + 2, 2) \
151 ST(i + 2, 2) \
152 XO3(i + 3, 3) \
153 ST(i + 3, 3)
154
155 " .align 32 ;\n"
156 " 1: ;\n"
157
158 BLOCK(0)
159 BLOCK(4)
160 BLOCK(8)
161 BLOCK(12)
162
163 " addl $128, %1 ;\n"
164 " addl $128, %2 ;\n"
165 " addl $128, %3 ;\n"
166 " addl $128, %4 ;\n"
167 " decl %0 ;\n"
168 " jnz 1b ;\n"
169 : "+r" (lines),
170 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
171 :
172 : "memory");
173
174 kernel_fpu_end();
175}
176
177
178static void
179xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
180 unsigned long *p3, unsigned long *p4, unsigned long *p5)
181{
182 unsigned long lines = bytes >> 7;
183
184 kernel_fpu_begin();
185
186 /* Make sure GCC forgets anything it knows about p4 or p5,
187 such that it won't pass to the asm volatile below a
188 register that is shared with any other variable. That's
189 because we modify p4 and p5 there, but we can't mark them
190 as read/write, otherwise we'd overflow the 10-asm-operands
191 limit of GCC < 3.1. */
192 asm("" : "+r" (p4), "+r" (p5));
193
194 asm volatile(
195#undef BLOCK
196#define BLOCK(i) \
197 LD(i, 0) \
198 LD(i + 1, 1) \
199 LD(i + 2, 2) \
200 LD(i + 3, 3) \
201 XO1(i, 0) \
202 XO1(i + 1, 1) \
203 XO1(i + 2, 2) \
204 XO1(i + 3, 3) \
205 XO2(i, 0) \
206 XO2(i + 1, 1) \
207 XO2(i + 2, 2) \
208 XO2(i + 3, 3) \
209 XO3(i, 0) \
210 XO3(i + 1, 1) \
211 XO3(i + 2, 2) \
212 XO3(i + 3, 3) \
213 XO4(i, 0) \
214 ST(i, 0) \
215 XO4(i + 1, 1) \
216 ST(i + 1, 1) \
217 XO4(i + 2, 2) \
218 ST(i + 2, 2) \
219 XO4(i + 3, 3) \
220 ST(i + 3, 3)
221
222 " .align 32 ;\n"
223 " 1: ;\n"
224
225 BLOCK(0)
226 BLOCK(4)
227 BLOCK(8)
228 BLOCK(12)
229
230 " addl $128, %1 ;\n"
231 " addl $128, %2 ;\n"
232 " addl $128, %3 ;\n"
233 " addl $128, %4 ;\n"
234 " addl $128, %5 ;\n"
235 " decl %0 ;\n"
236 " jnz 1b ;\n"
237 : "+r" (lines),
238 "+r" (p1), "+r" (p2), "+r" (p3)
239 : "r" (p4), "r" (p5)
240 : "memory");
241
242 /* p4 and p5 were modified, and now the variables are dead.
243 Clobber them just to be sure nobody does something stupid
244 like assuming they have some legal value. */
245 asm("" : "=r" (p4), "=r" (p5));
246
247 kernel_fpu_end();
248}
249
250#undef LD
251#undef XO1
252#undef XO2
253#undef XO3
254#undef XO4
255#undef ST
256#undef BLOCK
257
258static void
259xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
260{
261 unsigned long lines = bytes >> 6;
262
263 kernel_fpu_begin();
264
265 asm volatile(
266 " .align 32 ;\n"
267 " 1: ;\n"
268 " movq (%1), %%mm0 ;\n"
269 " movq 8(%1), %%mm1 ;\n"
270 " pxor (%2), %%mm0 ;\n"
271 " movq 16(%1), %%mm2 ;\n"
272 " movq %%mm0, (%1) ;\n"
273 " pxor 8(%2), %%mm1 ;\n"
274 " movq 24(%1), %%mm3 ;\n"
275 " movq %%mm1, 8(%1) ;\n"
276 " pxor 16(%2), %%mm2 ;\n"
277 " movq 32(%1), %%mm4 ;\n"
278 " movq %%mm2, 16(%1) ;\n"
279 " pxor 24(%2), %%mm3 ;\n"
280 " movq 40(%1), %%mm5 ;\n"
281 " movq %%mm3, 24(%1) ;\n"
282 " pxor 32(%2), %%mm4 ;\n"
283 " movq 48(%1), %%mm6 ;\n"
284 " movq %%mm4, 32(%1) ;\n"
285 " pxor 40(%2), %%mm5 ;\n"
286 " movq 56(%1), %%mm7 ;\n"
287 " movq %%mm5, 40(%1) ;\n"
288 " pxor 48(%2), %%mm6 ;\n"
289 " pxor 56(%2), %%mm7 ;\n"
290 " movq %%mm6, 48(%1) ;\n"
291 " movq %%mm7, 56(%1) ;\n"
292
293 " addl $64, %1 ;\n"
294 " addl $64, %2 ;\n"
295 " decl %0 ;\n"
296 " jnz 1b ;\n"
297 : "+r" (lines),
298 "+r" (p1), "+r" (p2)
299 :
300 : "memory");
301
302 kernel_fpu_end();
303}
304
305static void
306xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
307 unsigned long *p3)
308{
309 unsigned long lines = bytes >> 6;
310
311 kernel_fpu_begin();
312
313 asm volatile(
314 " .align 32,0x90 ;\n"
315 " 1: ;\n"
316 " movq (%1), %%mm0 ;\n"
317 " movq 8(%1), %%mm1 ;\n"
318 " pxor (%2), %%mm0 ;\n"
319 " movq 16(%1), %%mm2 ;\n"
320 " pxor 8(%2), %%mm1 ;\n"
321 " pxor (%3), %%mm0 ;\n"
322 " pxor 16(%2), %%mm2 ;\n"
323 " movq %%mm0, (%1) ;\n"
324 " pxor 8(%3), %%mm1 ;\n"
325 " pxor 16(%3), %%mm2 ;\n"
326 " movq 24(%1), %%mm3 ;\n"
327 " movq %%mm1, 8(%1) ;\n"
328 " movq 32(%1), %%mm4 ;\n"
329 " movq 40(%1), %%mm5 ;\n"
330 " pxor 24(%2), %%mm3 ;\n"
331 " movq %%mm2, 16(%1) ;\n"
332 " pxor 32(%2), %%mm4 ;\n"
333 " pxor 24(%3), %%mm3 ;\n"
334 " pxor 40(%2), %%mm5 ;\n"
335 " movq %%mm3, 24(%1) ;\n"
336 " pxor 32(%3), %%mm4 ;\n"
337 " pxor 40(%3), %%mm5 ;\n"
338 " movq 48(%1), %%mm6 ;\n"
339 " movq %%mm4, 32(%1) ;\n"
340 " movq 56(%1), %%mm7 ;\n"
341 " pxor 48(%2), %%mm6 ;\n"
342 " movq %%mm5, 40(%1) ;\n"
343 " pxor 56(%2), %%mm7 ;\n"
344 " pxor 48(%3), %%mm6 ;\n"
345 " pxor 56(%3), %%mm7 ;\n"
346 " movq %%mm6, 48(%1) ;\n"
347 " movq %%mm7, 56(%1) ;\n"
348
349 " addl $64, %1 ;\n"
350 " addl $64, %2 ;\n"
351 " addl $64, %3 ;\n"
352 " decl %0 ;\n"
353 " jnz 1b ;\n"
354 : "+r" (lines),
355 "+r" (p1), "+r" (p2), "+r" (p3)
356 :
357 : "memory" );
358
359 kernel_fpu_end();
360}
361
362static void
363xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
364 unsigned long *p3, unsigned long *p4)
365{
366 unsigned long lines = bytes >> 6;
367
368 kernel_fpu_begin();
369
370 asm volatile(
371 " .align 32,0x90 ;\n"
372 " 1: ;\n"
373 " movq (%1), %%mm0 ;\n"
374 " movq 8(%1), %%mm1 ;\n"
375 " pxor (%2), %%mm0 ;\n"
376 " movq 16(%1), %%mm2 ;\n"
377 " pxor 8(%2), %%mm1 ;\n"
378 " pxor (%3), %%mm0 ;\n"
379 " pxor 16(%2), %%mm2 ;\n"
380 " pxor 8(%3), %%mm1 ;\n"
381 " pxor (%4), %%mm0 ;\n"
382 " movq 24(%1), %%mm3 ;\n"
383 " pxor 16(%3), %%mm2 ;\n"
384 " pxor 8(%4), %%mm1 ;\n"
385 " movq %%mm0, (%1) ;\n"
386 " movq 32(%1), %%mm4 ;\n"
387 " pxor 24(%2), %%mm3 ;\n"
388 " pxor 16(%4), %%mm2 ;\n"
389 " movq %%mm1, 8(%1) ;\n"
390 " movq 40(%1), %%mm5 ;\n"
391 " pxor 32(%2), %%mm4 ;\n"
392 " pxor 24(%3), %%mm3 ;\n"
393 " movq %%mm2, 16(%1) ;\n"
394 " pxor 40(%2), %%mm5 ;\n"
395 " pxor 32(%3), %%mm4 ;\n"
396 " pxor 24(%4), %%mm3 ;\n"
397 " movq %%mm3, 24(%1) ;\n"
398 " movq 56(%1), %%mm7 ;\n"
399 " movq 48(%1), %%mm6 ;\n"
400 " pxor 40(%3), %%mm5 ;\n"
401 " pxor 32(%4), %%mm4 ;\n"
402 " pxor 48(%2), %%mm6 ;\n"
403 " movq %%mm4, 32(%1) ;\n"
404 " pxor 56(%2), %%mm7 ;\n"
405 " pxor 40(%4), %%mm5 ;\n"
406 " pxor 48(%3), %%mm6 ;\n"
407 " pxor 56(%3), %%mm7 ;\n"
408 " movq %%mm5, 40(%1) ;\n"
409 " pxor 48(%4), %%mm6 ;\n"
410 " pxor 56(%4), %%mm7 ;\n"
411 " movq %%mm6, 48(%1) ;\n"
412 " movq %%mm7, 56(%1) ;\n"
413
414 " addl $64, %1 ;\n"
415 " addl $64, %2 ;\n"
416 " addl $64, %3 ;\n"
417 " addl $64, %4 ;\n"
418 " decl %0 ;\n"
419 " jnz 1b ;\n"
420 : "+r" (lines),
421 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
422 :
423 : "memory");
424
425 kernel_fpu_end();
426}
427
428static void
429xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
430 unsigned long *p3, unsigned long *p4, unsigned long *p5)
431{
432 unsigned long lines = bytes >> 6;
433
434 kernel_fpu_begin();
435
436 /* Make sure GCC forgets anything it knows about p4 or p5,
437 such that it won't pass to the asm volatile below a
438 register that is shared with any other variable. That's
439 because we modify p4 and p5 there, but we can't mark them
440 as read/write, otherwise we'd overflow the 10-asm-operands
441 limit of GCC < 3.1. */
442 asm("" : "+r" (p4), "+r" (p5));
443
444 asm volatile(
445 " .align 32,0x90 ;\n"
446 " 1: ;\n"
447 " movq (%1), %%mm0 ;\n"
448 " movq 8(%1), %%mm1 ;\n"
449 " pxor (%2), %%mm0 ;\n"
450 " pxor 8(%2), %%mm1 ;\n"
451 " movq 16(%1), %%mm2 ;\n"
452 " pxor (%3), %%mm0 ;\n"
453 " pxor 8(%3), %%mm1 ;\n"
454 " pxor 16(%2), %%mm2 ;\n"
455 " pxor (%4), %%mm0 ;\n"
456 " pxor 8(%4), %%mm1 ;\n"
457 " pxor 16(%3), %%mm2 ;\n"
458 " movq 24(%1), %%mm3 ;\n"
459 " pxor (%5), %%mm0 ;\n"
460 " pxor 8(%5), %%mm1 ;\n"
461 " movq %%mm0, (%1) ;\n"
462 " pxor 16(%4), %%mm2 ;\n"
463 " pxor 24(%2), %%mm3 ;\n"
464 " movq %%mm1, 8(%1) ;\n"
465 " pxor 16(%5), %%mm2 ;\n"
466 " pxor 24(%3), %%mm3 ;\n"
467 " movq 32(%1), %%mm4 ;\n"
468 " movq %%mm2, 16(%1) ;\n"
469 " pxor 24(%4), %%mm3 ;\n"
470 " pxor 32(%2), %%mm4 ;\n"
471 " movq 40(%1), %%mm5 ;\n"
472 " pxor 24(%5), %%mm3 ;\n"
473 " pxor 32(%3), %%mm4 ;\n"
474 " pxor 40(%2), %%mm5 ;\n"
475 " movq %%mm3, 24(%1) ;\n"
476 " pxor 32(%4), %%mm4 ;\n"
477 " pxor 40(%3), %%mm5 ;\n"
478 " movq 48(%1), %%mm6 ;\n"
479 " movq 56(%1), %%mm7 ;\n"
480 " pxor 32(%5), %%mm4 ;\n"
481 " pxor 40(%4), %%mm5 ;\n"
482 " pxor 48(%2), %%mm6 ;\n"
483 " pxor 56(%2), %%mm7 ;\n"
484 " movq %%mm4, 32(%1) ;\n"
485 " pxor 48(%3), %%mm6 ;\n"
486 " pxor 56(%3), %%mm7 ;\n"
487 " pxor 40(%5), %%mm5 ;\n"
488 " pxor 48(%4), %%mm6 ;\n"
489 " pxor 56(%4), %%mm7 ;\n"
490 " movq %%mm5, 40(%1) ;\n"
491 " pxor 48(%5), %%mm6 ;\n"
492 " pxor 56(%5), %%mm7 ;\n"
493 " movq %%mm6, 48(%1) ;\n"
494 " movq %%mm7, 56(%1) ;\n"
495
496 " addl $64, %1 ;\n"
497 " addl $64, %2 ;\n"
498 " addl $64, %3 ;\n"
499 " addl $64, %4 ;\n"
500 " addl $64, %5 ;\n"
501 " decl %0 ;\n"
502 " jnz 1b ;\n"
503 : "+r" (lines),
504 "+r" (p1), "+r" (p2), "+r" (p3)
505 : "r" (p4), "r" (p5)
506 : "memory");
507
508 /* p4 and p5 were modified, and now the variables are dead.
509 Clobber them just to be sure nobody does something stupid
510 like assuming they have some legal value. */
511 asm("" : "=r" (p4), "=r" (p5));
512
513 kernel_fpu_end();
514}
515
516static struct xor_block_template xor_block_pII_mmx = {
517 .name = "pII_mmx",
518 .do_2 = xor_pII_mmx_2,
519 .do_3 = xor_pII_mmx_3,
520 .do_4 = xor_pII_mmx_4,
521 .do_5 = xor_pII_mmx_5,
522};
523
524static struct xor_block_template xor_block_p5_mmx = {
525 .name = "p5_mmx",
526 .do_2 = xor_p5_mmx_2,
527 .do_3 = xor_p5_mmx_3,
528 .do_4 = xor_p5_mmx_4,
529 .do_5 = xor_p5_mmx_5,
530};
531
532/*
533 * Cache-avoiding checksumming functions utilizing KNI (SSE) instructions
534 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
535 */
536
537#define XMMS_SAVE \
538do { \
539 preempt_disable(); \
540 cr0 = read_cr0(); \
541 clts(); \
542 asm volatile( \
543 "movups %%xmm0,(%0) ;\n\t" \
544 "movups %%xmm1,0x10(%0) ;\n\t" \
545 "movups %%xmm2,0x20(%0) ;\n\t" \
546 "movups %%xmm3,0x30(%0) ;\n\t" \
547 : \
548 : "r" (xmm_save) \
549 : "memory"); \
550} while (0)
551
552#define XMMS_RESTORE \
553do { \
554 asm volatile( \
555 "sfence ;\n\t" \
556 "movups (%0),%%xmm0 ;\n\t" \
557 "movups 0x10(%0),%%xmm1 ;\n\t" \
558 "movups 0x20(%0),%%xmm2 ;\n\t" \
559 "movups 0x30(%0),%%xmm3 ;\n\t" \
560 : \
561 : "r" (xmm_save) \
562 : "memory"); \
563 write_cr0(cr0); \
564 preempt_enable(); \
565} while (0)
566
567#define ALIGN16 __attribute__((aligned(16)))
568
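XMMS_SAVE/XMMS_RESTORE must bracket the SSE work and rely on 'cr0' and an
aligned 'xmm_save' buffer being in scope, since they open/close a
preempt-off region and restore CR0.TS; a minimal skeleton of the pattern
every routine below follows (illustrative only):

	static void xor_sse_skeleton(void)
	{
		char xmm_save[16*4] ALIGN16;
		int cr0;

		XMMS_SAVE;
		/* ... xorps-based work clobbering %xmm0-%xmm3 ... */
		XMMS_RESTORE;
	}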
569#define OFFS(x) "16*("#x")"
570#define PF_OFFS(x) "256+16*("#x")"
571#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
572#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
573#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
574#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
575#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
576#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
577#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
578#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
579#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
580#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
581#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
582#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
583#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
584
585
586static void
587xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
588{
589 unsigned long lines = bytes >> 8;
590 char xmm_save[16*4] ALIGN16;
591 int cr0;
592
593 XMMS_SAVE;
594
595 asm volatile(
596#undef BLOCK
597#define BLOCK(i) \
598 LD(i, 0) \
599 LD(i + 1, 1) \
600 PF1(i) \
601 PF1(i + 2) \
602 LD(i + 2, 2) \
603 LD(i + 3, 3) \
604 PF0(i + 4) \
605 PF0(i + 6) \
606 XO1(i, 0) \
607 XO1(i + 1, 1) \
608 XO1(i + 2, 2) \
609 XO1(i + 3, 3) \
610 ST(i, 0) \
611 ST(i + 1, 1) \
612 ST(i + 2, 2) \
613 ST(i + 3, 3) \
614
615
616 PF0(0)
617 PF0(2)
618
619 " .align 32 ;\n"
620 " 1: ;\n"
621
622 BLOCK(0)
623 BLOCK(4)
624 BLOCK(8)
625 BLOCK(12)
626
627 " addl $256, %1 ;\n"
628 " addl $256, %2 ;\n"
629 " decl %0 ;\n"
630 " jnz 1b ;\n"
631 : "+r" (lines),
632 "+r" (p1), "+r" (p2)
633 :
634 : "memory");
635
636 XMMS_RESTORE;
637}
638
639static void
640xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
641 unsigned long *p3)
642{
643 unsigned long lines = bytes >> 8;
644 char xmm_save[16*4] ALIGN16;
645 int cr0;
646
647 XMMS_SAVE;
648
649 asm volatile(
650#undef BLOCK
651#define BLOCK(i) \
652 PF1(i) \
653 PF1(i + 2) \
654 LD(i, 0) \
655 LD(i + 1, 1) \
656 LD(i + 2, 2) \
657 LD(i + 3, 3) \
658 PF2(i) \
659 PF2(i + 2) \
660 PF0(i + 4) \
661 PF0(i + 6) \
662 XO1(i, 0) \
663 XO1(i + 1, 1) \
664 XO1(i + 2, 2) \
665 XO1(i + 3, 3) \
666 XO2(i, 0) \
667 XO2(i + 1, 1) \
668 XO2(i + 2, 2) \
669 XO2(i + 3, 3) \
670 ST(i, 0) \
671 ST(i + 1, 1) \
672 ST(i + 2, 2) \
673 ST(i + 3, 3) \
674
675
676 PF0(0)
677 PF0(2)
678
679 " .align 32 ;\n"
680 " 1: ;\n"
681
682 BLOCK(0)
683 BLOCK(4)
684 BLOCK(8)
685 BLOCK(12)
686
687 " addl $256, %1 ;\n"
688 " addl $256, %2 ;\n"
689 " addl $256, %3 ;\n"
690 " decl %0 ;\n"
691 " jnz 1b ;\n"
692 : "+r" (lines),
693 "+r" (p1), "+r"(p2), "+r"(p3)
694 :
695 : "memory" );
696
697 XMMS_RESTORE;
698}
699
700static void
701xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
702 unsigned long *p3, unsigned long *p4)
703{
704 unsigned long lines = bytes >> 8;
705 char xmm_save[16*4] ALIGN16;
706 int cr0;
707
708 XMMS_SAVE;
709
710 asm volatile(
711#undef BLOCK
712#define BLOCK(i) \
713 PF1(i) \
714 PF1(i + 2) \
715 LD(i, 0) \
716 LD(i + 1, 1) \
717 LD(i + 2, 2) \
718 LD(i + 3, 3) \
719 PF2(i) \
720 PF2(i + 2) \
721 XO1(i, 0) \
722 XO1(i + 1, 1) \
723 XO1(i + 2, 2) \
724 XO1(i + 3, 3) \
725 PF3(i) \
726 PF3(i + 2) \
727 PF0(i + 4) \
728 PF0(i + 6) \
729 XO2(i, 0) \
730 XO2(i + 1, 1) \
731 XO2(i + 2, 2) \
732 XO2(i + 3, 3) \
733 XO3(i, 0) \
734 XO3(i + 1, 1) \
735 XO3(i + 2, 2) \
736 XO3(i + 3, 3) \
737 ST(i, 0) \
738 ST(i + 1, 1) \
739 ST(i + 2, 2) \
740 ST(i + 3, 3) \
741
742
743 PF0(0)
744 PF0(2)
745
746 " .align 32 ;\n"
747 " 1: ;\n"
748
749 BLOCK(0)
750 BLOCK(4)
751 BLOCK(8)
752 BLOCK(12)
753
754 " addl $256, %1 ;\n"
755 " addl $256, %2 ;\n"
756 " addl $256, %3 ;\n"
757 " addl $256, %4 ;\n"
758 " decl %0 ;\n"
759 " jnz 1b ;\n"
760 : "+r" (lines),
761 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
762 :
763 : "memory" );
764
765 XMMS_RESTORE;
766}
767
768static void
769xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
770 unsigned long *p3, unsigned long *p4, unsigned long *p5)
771{
772 unsigned long lines = bytes >> 8;
773 char xmm_save[16*4] ALIGN16;
774 int cr0;
775
776 XMMS_SAVE;
777
778 /* Make sure GCC forgets anything it knows about p4 or p5,
779 such that it won't pass to the asm volatile below a
780 register that is shared with any other variable. That's
781 because we modify p4 and p5 there, but we can't mark them
782 as read/write, otherwise we'd overflow the 10-asm-operands
783 limit of GCC < 3.1. */
784 asm("" : "+r" (p4), "+r" (p5));
785
786 asm volatile(
787#undef BLOCK
788#define BLOCK(i) \
789 PF1(i) \
790 PF1(i + 2) \
791 LD(i, 0) \
792 LD(i + 1, 1) \
793 LD(i + 2, 2) \
794 LD(i + 3, 3) \
795 PF2(i) \
796 PF2(i + 2) \
797 XO1(i, 0) \
798 XO1(i + 1, 1) \
799 XO1(i + 2, 2) \
800 XO1(i + 3, 3) \
801 PF3(i) \
802 PF3(i + 2) \
803 XO2(i, 0) \
804 XO2(i + 1, 1) \
805 XO2(i + 2, 2) \
806 XO2(i + 3, 3) \
807 PF4(i) \
808 PF4(i + 2) \
809 PF0(i + 4) \
810 PF0(i + 6) \
811 XO3(i, 0) \
812 XO3(i + 1, 1) \
813 XO3(i + 2, 2) \
814 XO3(i + 3, 3) \
815 XO4(i, 0) \
816 XO4(i + 1, 1) \
817 XO4(i + 2, 2) \
818 XO4(i + 3, 3) \
819 ST(i, 0) \
820 ST(i + 1, 1) \
821 ST(i + 2, 2) \
822 ST(i + 3, 3) \
823
824
825 PF0(0)
826 PF0(2)
827
828 " .align 32 ;\n"
829 " 1: ;\n"
830
831 BLOCK(0)
832 BLOCK(4)
833 BLOCK(8)
834 BLOCK(12)
835
836 " addl $256, %1 ;\n"
837 " addl $256, %2 ;\n"
838 " addl $256, %3 ;\n"
839 " addl $256, %4 ;\n"
840 " addl $256, %5 ;\n"
841 " decl %0 ;\n"
842 " jnz 1b ;\n"
843 : "+r" (lines),
844 "+r" (p1), "+r" (p2), "+r" (p3)
845 : "r" (p4), "r" (p5)
846 : "memory");
847
848 /* p4 and p5 were modified, and now the variables are dead.
849 Clobber them just to be sure nobody does something stupid
850 like assuming they have some legal value. */
851 asm("" : "=r" (p4), "=r" (p5));
852
853 XMMS_RESTORE;
854}
855
856static struct xor_block_template xor_block_pIII_sse = {
857 .name = "pIII_sse",
858 .do_2 = xor_sse_2,
859 .do_3 = xor_sse_3,
860 .do_4 = xor_sse_4,
861 .do_5 = xor_sse_5,
862};
863
864/* Also try the generic routines. */
865#include <asm-generic/xor.h>
866
867#undef XOR_TRY_TEMPLATES
868#define XOR_TRY_TEMPLATES \
869do { \
870 xor_speed(&xor_block_8regs); \
871 xor_speed(&xor_block_8regs_p); \
872 xor_speed(&xor_block_32regs); \
873 xor_speed(&xor_block_32regs_p); \
874 if (cpu_has_xmm) \
875 xor_speed(&xor_block_pIII_sse); \
876 if (cpu_has_mmx) { \
877 xor_speed(&xor_block_pII_mmx); \
878 xor_speed(&xor_block_p5_mmx); \
879 } \
880} while (0)
881
882/* We force the use of the SSE xor block because it can write around L2.
883 We may also be able to load into L1 only, depending on how the CPU
884 handles a load to a line that is being prefetched. */
885#define XOR_SELECT_TEMPLATE(FASTEST) \
886 (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
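A hedged sketch of how the RAID core consumes one of these templates once
XOR_TRY_TEMPLATES has benchmarked them (buffer setup elided; the caller
name is hypothetical):

	static void xor_demo(struct xor_block_template *t,
			     unsigned long *dst, unsigned long *src)
	{
		/* dst ^= src over PAGE_SIZE bytes via the chosen routine. */
		t->do_2(PAGE_SIZE, dst, src);
	}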
887
888#endif /* ASM_X86__XOR_32_H */
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
deleted file mode 100644
index 2d3a18de295b..000000000000
--- a/include/asm-x86/xor_64.h
+++ /dev/null
@@ -1,361 +0,0 @@
1#ifndef ASM_X86__XOR_64_H
2#define ASM_X86__XOR_64_H
3
4/*
5 * Optimized RAID-5 checksumming functions for MMX and SSE.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * You should have received a copy of the GNU General Public License
13 * (for example /usr/src/linux/COPYING); if not, write to the Free
14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */
16
17
18/*
19 * Cache-avoiding checksumming functions utilizing KNI (SSE) instructions
20 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
21 */
22
23/*
24 * Based on
25 * High-speed RAID5 checksumming functions utilizing SSE instructions.
26 * Copyright (C) 1998 Ingo Molnar.
27 */
28
29/*
30 * x86-64 changes / gcc fixes from Andi Kleen.
31 * Copyright 2002 Andi Kleen, SuSE Labs.
32 *
33 * This hasn't been optimized for the Hammer (K8) yet, but there are likely
34 * no advantages to be gained from x86-64 here anyway.
35 */
36
37typedef struct {
38 unsigned long a, b;
39} __attribute__((aligned(16))) xmm_store_t;
40
41/* Don't use gcc to save the XMM registers: there is no easy way to make it
42 issue a clts before the register saving. */
43#define XMMS_SAVE \
44do { \
45 preempt_disable(); \
46 asm volatile( \
47 "movq %%cr0,%0 ;\n\t" \
48 "clts ;\n\t" \
49 "movups %%xmm0,(%1) ;\n\t" \
50 "movups %%xmm1,0x10(%1) ;\n\t" \
51 "movups %%xmm2,0x20(%1) ;\n\t" \
52 "movups %%xmm3,0x30(%1) ;\n\t" \
53 : "=&r" (cr0) \
54 : "r" (xmm_save) \
55 : "memory"); \
56} while (0)
57
58#define XMMS_RESTORE \
59do { \
60 asm volatile( \
61 "sfence ;\n\t" \
62 "movups (%1),%%xmm0 ;\n\t" \
63 "movups 0x10(%1),%%xmm1 ;\n\t" \
64 "movups 0x20(%1),%%xmm2 ;\n\t" \
65 "movups 0x30(%1),%%xmm3 ;\n\t" \
66 "movq %0,%%cr0 ;\n\t" \
67 : \
68 : "r" (cr0), "r" (xmm_save) \
69 : "memory"); \
70 preempt_enable(); \
71} while (0)
72
73#define OFFS(x) "16*("#x")"
74#define PF_OFFS(x) "256+16*("#x")"
75#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
76#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
77#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
78#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
79#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
80#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
81#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
82#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
83#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
84#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
85#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
86#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
87#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
88
89
90static void
91xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
92{
93 unsigned int lines = bytes >> 8;
94 unsigned long cr0;
95 xmm_store_t xmm_save[4];
96
97 XMMS_SAVE;
98
99 asm volatile(
100#undef BLOCK
101#define BLOCK(i) \
102 LD(i, 0) \
103 LD(i + 1, 1) \
104 PF1(i) \
105 PF1(i + 2) \
106 LD(i + 2, 2) \
107 LD(i + 3, 3) \
108 PF0(i + 4) \
109 PF0(i + 6) \
110 XO1(i, 0) \
111 XO1(i + 1, 1) \
112 XO1(i + 2, 2) \
113 XO1(i + 3, 3) \
114 ST(i, 0) \
115 ST(i + 1, 1) \
116 ST(i + 2, 2) \
117 ST(i + 3, 3) \
118
119
120 PF0(0)
121 PF0(2)
122
123 " .align 32 ;\n"
124 " 1: ;\n"
125
126 BLOCK(0)
127 BLOCK(4)
128 BLOCK(8)
129 BLOCK(12)
130
131 " addq %[inc], %[p1] ;\n"
132 " addq %[inc], %[p2] ;\n"
133 " decl %[cnt] ; jnz 1b"
134 : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
135 : [inc] "r" (256UL)
136 : "memory");
137
138 XMMS_RESTORE;
139}
140
141static void
142xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
143 unsigned long *p3)
144{
145 unsigned int lines = bytes >> 8;
146 xmm_store_t xmm_save[4];
147 unsigned long cr0;
148
149 XMMS_SAVE;
150
151 asm volatile(
152#undef BLOCK
153#define BLOCK(i) \
154 PF1(i) \
155 PF1(i + 2) \
156 LD(i, 0) \
157 LD(i + 1, 1) \
158 LD(i + 2, 2) \
159 LD(i + 3, 3) \
160 PF2(i) \
161 PF2(i + 2) \
162 PF0(i + 4) \
163 PF0(i + 6) \
164 XO1(i, 0) \
165 XO1(i + 1, 1) \
166 XO1(i + 2, 2) \
167 XO1(i + 3, 3) \
168 XO2(i, 0) \
169 XO2(i + 1, 1) \
170 XO2(i + 2, 2) \
171 XO2(i + 3, 3) \
172 ST(i, 0) \
173 ST(i + 1, 1) \
174 ST(i + 2, 2) \
175 ST(i + 3, 3) \
176
177
178 PF0(0)
179 PF0(2)
180
181 " .align 32 ;\n"
182 " 1: ;\n"
183
184 BLOCK(0)
185 BLOCK(4)
186 BLOCK(8)
187 BLOCK(12)
188
189 " addq %[inc], %[p1] ;\n"
190 " addq %[inc], %[p2] ;\n"
191 " addq %[inc], %[p3] ;\n"
192 " decl %[cnt] ; jnz 1b"
193 : [cnt] "+r" (lines),
194 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
195 : [inc] "r" (256UL)
196 : "memory");
197 XMMS_RESTORE;
198}
199
200static void
201xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
202 unsigned long *p3, unsigned long *p4)
203{
204 unsigned int lines = bytes >> 8;
205 xmm_store_t xmm_save[4];
206 unsigned long cr0;
207
208 XMMS_SAVE;
209
210 asm volatile(
211#undef BLOCK
212#define BLOCK(i) \
213 PF1(i) \
214 PF1(i + 2) \
215 LD(i, 0) \
216 LD(i + 1, 1) \
217 LD(i + 2, 2) \
218 LD(i + 3, 3) \
219 PF2(i) \
220 PF2(i + 2) \
221 XO1(i, 0) \
222 XO1(i + 1, 1) \
223 XO1(i + 2, 2) \
224 XO1(i + 3, 3) \
225 PF3(i) \
226 PF3(i + 2) \
227 PF0(i + 4) \
228 PF0(i + 6) \
229 XO2(i, 0) \
230 XO2(i + 1, 1) \
231 XO2(i + 2, 2) \
232 XO2(i + 3, 3) \
233 XO3(i, 0) \
234 XO3(i + 1, 1) \
235 XO3(i + 2, 2) \
236 XO3(i + 3, 3) \
237 ST(i, 0) \
238 ST(i + 1, 1) \
239 ST(i + 2, 2) \
240 ST(i + 3, 3) \
241
242
243 PF0(0)
244 PF0(2)
245
246 " .align 32 ;\n"
247 " 1: ;\n"
248
249 BLOCK(0)
250 BLOCK(4)
251 BLOCK(8)
252 BLOCK(12)
253
254 " addq %[inc], %[p1] ;\n"
255 " addq %[inc], %[p2] ;\n"
256 " addq %[inc], %[p3] ;\n"
257 " addq %[inc], %[p4] ;\n"
258 " decl %[cnt] ; jnz 1b"
259 : [cnt] "+c" (lines),
260 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
261 : [inc] "r" (256UL)
262 : "memory" );
263
264 XMMS_RESTORE;
265}
266
267static void
268xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
269 unsigned long *p3, unsigned long *p4, unsigned long *p5)
270{
271 unsigned int lines = bytes >> 8;
272 xmm_store_t xmm_save[4];
273 unsigned long cr0;
274
275 XMMS_SAVE;
276
277 asm volatile(
278#undef BLOCK
279#define BLOCK(i) \
280 PF1(i) \
281 PF1(i + 2) \
282 LD(i, 0) \
283 LD(i + 1, 1) \
284 LD(i + 2, 2) \
285 LD(i + 3, 3) \
286 PF2(i) \
287 PF2(i + 2) \
288 XO1(i, 0) \
289 XO1(i + 1, 1) \
290 XO1(i + 2, 2) \
291 XO1(i + 3, 3) \
292 PF3(i) \
293 PF3(i + 2) \
294 XO2(i, 0) \
295 XO2(i + 1, 1) \
296 XO2(i + 2, 2) \
297 XO2(i + 3, 3) \
298 PF4(i) \
299 PF4(i + 2) \
300 PF0(i + 4) \
301 PF0(i + 6) \
302 XO3(i, 0) \
303 XO3(i + 1, 1) \
304 XO3(i + 2, 2) \
305 XO3(i + 3, 3) \
306 XO4(i, 0) \
307 XO4(i + 1, 1) \
308 XO4(i + 2, 2) \
309 XO4(i + 3, 3) \
310 ST(i, 0) \
311 ST(i + 1, 1) \
312 ST(i + 2, 2) \
313 ST(i + 3, 3) \
314
315
316 PF0(0)
317 PF0(2)
318
319 " .align 32 ;\n"
320 " 1: ;\n"
321
322 BLOCK(0)
323 BLOCK(4)
324 BLOCK(8)
325 BLOCK(12)
326
327 " addq %[inc], %[p1] ;\n"
328 " addq %[inc], %[p2] ;\n"
329 " addq %[inc], %[p3] ;\n"
330 " addq %[inc], %[p4] ;\n"
331 " addq %[inc], %[p5] ;\n"
332 " decl %[cnt] ; jnz 1b"
333 : [cnt] "+c" (lines),
334 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
335 [p5] "+r" (p5)
336 : [inc] "r" (256UL)
337 : "memory");
338
339 XMMS_RESTORE;
340}
341
342static struct xor_block_template xor_block_sse = {
343 .name = "generic_sse",
344 .do_2 = xor_sse_2,
345 .do_3 = xor_sse_3,
346 .do_4 = xor_sse_4,
347 .do_5 = xor_sse_5,
348};
349
350#undef XOR_TRY_TEMPLATES
351#define XOR_TRY_TEMPLATES \
352do { \
353 xor_speed(&xor_block_sse); \
354} while (0)
355
356/* We force the use of the SSE xor block because it can write around L2.
357 We may also be able to load into L1 only, depending on how the CPU
358 handles a load to a line that is being prefetched. */
359#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
360
361#endif /* ASM_X86__XOR_64_H */
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h
deleted file mode 100644
index 08e9a1ac07a9..000000000000
--- a/include/asm-x86/xsave.h
+++ /dev/null
@@ -1,118 +0,0 @@
1#ifndef __ASM_X86_XSAVE_H
2#define __ASM_X86_XSAVE_H
3
4#include <linux/types.h>
5#include <asm/processor.h>
6#include <asm/i387.h>
7
8#define XSTATE_FP 0x1
9#define XSTATE_SSE 0x2
10
11#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
12
13#define FXSAVE_SIZE 512
14
15/*
16 * These are the features that the OS can handle currently.
17 */
18#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE)
19
20#ifdef CONFIG_X86_64
21#define REX_PREFIX "0x48, "
22#else
23#define REX_PREFIX
24#endif
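Decoded, the hand-assembled byte sequences used below are (informative,
assuming the "D"/%rdi memory operand used throughout):

	/* 0x0f,0xae,0x27 -> xsave  (%rdi)   (0f ae /4, ModRM 0x27) */
	/* 0x0f,0xae,0x2f -> xrstor (%rdi)   (0f ae /5, ModRM 0x2f) */
	/* REX_PREFIX (0x48) selects the 64-bit forms on x86-64.    */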
25
26extern unsigned int xstate_size;
27extern u64 pcntxt_mask;
28extern struct xsave_struct *init_xstate_buf;
29
30extern void xsave_cntxt_init(void);
31extern void xsave_init(void);
32extern int init_fpu(struct task_struct *child);
33extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
34 void __user *fpstate,
35 struct _fpx_sw_bytes *sw);
36
37static inline int xrstor_checking(struct xsave_struct *fx)
38{
39 int err;
40
41 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
42 "2:\n"
43 ".section .fixup,\"ax\"\n"
44 "3: movl $-1,%[err]\n"
45 " jmp 2b\n"
46 ".previous\n"
47 _ASM_EXTABLE(1b, 3b)
48 : [err] "=r" (err)
49 : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
50 : "memory");
51
52 return err;
53}
54
55static inline int xsave_user(struct xsave_struct __user *buf)
56{
57 int err;
58 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
59 "2:\n"
60 ".section .fixup,\"ax\"\n"
61 "3: movl $-1,%[err]\n"
62 " jmp 2b\n"
63 ".previous\n"
64 ".section __ex_table,\"a\"\n"
65 _ASM_ALIGN "\n"
66 _ASM_PTR "1b,3b\n"
67 ".previous"
68 : [err] "=r" (err)
69 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
70 : "memory");
71 if (unlikely(err) && __clear_user(buf, xstate_size))
72 err = -EFAULT;
73 /* No need to clear here because the caller clears USED_MATH */
74 return err;
75}
76
77static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
78{
79 int err;
80 struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
81 u32 lmask = mask;
82 u32 hmask = mask >> 32;
83
84 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
85 "2:\n"
86 ".section .fixup,\"ax\"\n"
87 "3: movl $-1,%[err]\n"
88 " jmp 2b\n"
89 ".previous\n"
90 ".section __ex_table,\"a\"\n"
91 _ASM_ALIGN "\n"
92 _ASM_PTR "1b,3b\n"
93 ".previous"
94 : [err] "=r" (err)
95 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
96 : "memory"); /* memory required? */
97 return err;
98}
99
100static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
101{
102 u32 lmask = mask;
103 u32 hmask = mask >> 32;
104
105 asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
106 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
107 : "memory");
108}
109
110static inline void xsave(struct task_struct *tsk)
111{
112 /* The fixed .byte encoding cannot set REX.B or REX.X, so force the compiler
113 to pick an addressing mode ("D" => %rdi) that needs no extended registers. */
114 __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
115 : : "D" (&(tsk->thread.xstate->xsave)),
116 "a" (-1), "d"(-1) : "memory");
117}
118#endif /* __ASM_X86_XSAVE_H */