author     Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:20:03 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:20:03 -0400
commit     96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch)
tree       d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-i386
parent     27bd0c955648646abf2a353a8371d28c37bcd982 (diff)
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fix up the header install make rules.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/8253pit.h | 12
-rw-r--r--  include/asm-i386/Kbuild | 12
-rw-r--r--  include/asm-i386/a.out.h | 27
-rw-r--r--  include/asm-i386/acpi.h | 147
-rw-r--r--  include/asm-i386/agp.h | 36
-rw-r--r--  include/asm-i386/alternative-asm.i | 12
-rw-r--r--  include/asm-i386/alternative.h | 154
-rw-r--r--  include/asm-i386/apic.h | 126
-rw-r--r--  include/asm-i386/apicdef.h | 375
-rw-r--r--  include/asm-i386/arch_hooks.h | 30
-rw-r--r--  include/asm-i386/atomic.h | 266
-rw-r--r--  include/asm-i386/auxvec.h | 11
-rw-r--r--  include/asm-i386/bitops.h | 423
-rw-r--r--  include/asm-i386/boot.h | 20
-rw-r--r--  include/asm-i386/bootparam.h | 86
-rw-r--r--  include/asm-i386/bug.h | 37
-rw-r--r--  include/asm-i386/bugs.h | 12
-rw-r--r--  include/asm-i386/byteorder.h | 58
-rw-r--r--  include/asm-i386/cache.h | 14
-rw-r--r--  include/asm-i386/cacheflush.h | 39
-rw-r--r--  include/asm-i386/checksum.h | 191
-rw-r--r--  include/asm-i386/cmpxchg.h | 289
-rw-r--r--  include/asm-i386/cpu.h | 22
-rw-r--r--  include/asm-i386/cpufeature.h | 175
-rw-r--r--  include/asm-i386/cputime.h | 6
-rw-r--r--  include/asm-i386/current.h | 17
-rw-r--r--  include/asm-i386/debugreg.h | 64
-rw-r--r--  include/asm-i386/delay.h | 31
-rw-r--r--  include/asm-i386/desc.h | 244
-rw-r--r--  include/asm-i386/device.h | 15
-rw-r--r--  include/asm-i386/div64.h | 52
-rw-r--r--  include/asm-i386/dma-mapping.h | 186
-rw-r--r--  include/asm-i386/dma.h | 297
-rw-r--r--  include/asm-i386/dmi.h | 11
-rw-r--r--  include/asm-i386/dwarf2.h | 61
-rw-r--r--  include/asm-i386/e820.h | 60
-rw-r--r--  include/asm-i386/edac.h | 18
-rw-r--r--  include/asm-i386/elf.h | 163
-rw-r--r--  include/asm-i386/emergency-restart.h | 6
-rw-r--r--  include/asm-i386/errno.h | 6
-rw-r--r--  include/asm-i386/fb.h | 17
-rw-r--r--  include/asm-i386/fcntl.h | 1
-rw-r--r--  include/asm-i386/fixmap.h | 157
-rw-r--r--  include/asm-i386/floppy.h | 284
-rw-r--r--  include/asm-i386/frame.i | 23
-rw-r--r--  include/asm-i386/futex.h | 135
-rw-r--r--  include/asm-i386/genapic.h | 127
-rw-r--r--  include/asm-i386/geode.h | 159
-rw-r--r--  include/asm-i386/hardirq.h | 23
-rw-r--r--  include/asm-i386/highmem.h | 85
-rw-r--r--  include/asm-i386/hpet.h | 90
-rw-r--r--  include/asm-i386/hw_irq.h | 66
-rw-r--r--  include/asm-i386/hypertransport.h | 42
-rw-r--r--  include/asm-i386/i387.h | 151
-rw-r--r--  include/asm-i386/i8253.h | 17
-rw-r--r--  include/asm-i386/i8259.h | 17
-rw-r--r--  include/asm-i386/ide.h | 78
-rw-r--r--  include/asm-i386/intel_arch_perfmon.h | 31
-rw-r--r--  include/asm-i386/io.h | 349
-rw-r--r--  include/asm-i386/io_apic.h | 155
-rw-r--r--  include/asm-i386/ioctl.h | 1
-rw-r--r--  include/asm-i386/ioctls.h | 87
-rw-r--r--  include/asm-i386/ipc.h | 1
-rw-r--r--  include/asm-i386/ipcbuf.h | 29
-rw-r--r--  include/asm-i386/irq.h | 48
-rw-r--r--  include/asm-i386/irq_regs.h | 29
-rw-r--r--  include/asm-i386/irqflags.h | 163
-rw-r--r--  include/asm-i386/ist.h | 34
-rw-r--r--  include/asm-i386/k8.h | 1
-rw-r--r--  include/asm-i386/kdebug.h | 33
-rw-r--r--  include/asm-i386/kexec.h | 99
-rw-r--r--  include/asm-i386/kmap_types.h | 30
-rw-r--r--  include/asm-i386/kprobes.h | 92
-rw-r--r--  include/asm-i386/ldt.h | 32
-rw-r--r--  include/asm-i386/linkage.h | 15
-rw-r--r--  include/asm-i386/local.h | 233
-rw-r--r--  include/asm-i386/mach-bigsmp/mach_apic.h | 158
-rw-r--r--  include/asm-i386/mach-bigsmp/mach_apicdef.h | 13
-rw-r--r--  include/asm-i386/mach-bigsmp/mach_ipi.h | 25
-rw-r--r--  include/asm-i386/mach-bigsmp/mach_mpspec.h | 8
-rw-r--r--  include/asm-i386/mach-default/apm.h | 75
-rw-r--r--  include/asm-i386/mach-default/bios_ebda.h | 15
-rw-r--r--  include/asm-i386/mach-default/do_timer.h | 16
-rw-r--r--  include/asm-i386/mach-default/entry_arch.h | 34
-rw-r--r--  include/asm-i386/mach-default/io_ports.h | 25
-rw-r--r--  include/asm-i386/mach-default/irq_vectors.h | 96
-rw-r--r--  include/asm-i386/mach-default/irq_vectors_limits.h | 16
-rw-r--r--  include/asm-i386/mach-default/mach_apic.h | 131
-rw-r--r--  include/asm-i386/mach-default/mach_apicdef.h | 13
-rw-r--r--  include/asm-i386/mach-default/mach_ipi.h | 54
-rw-r--r--  include/asm-i386/mach-default/mach_mpparse.h | 28
-rw-r--r--  include/asm-i386/mach-default/mach_mpspec.h | 12
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h | 61
-rw-r--r--  include/asm-i386/mach-default/mach_time.h | 111
-rw-r--r--  include/asm-i386/mach-default/mach_timer.h | 50
-rw-r--r--  include/asm-i386/mach-default/mach_traps.h | 41
-rw-r--r--  include/asm-i386/mach-default/mach_wakecpu.h | 42
-rw-r--r--  include/asm-i386/mach-default/pci-functions.h | 19
-rw-r--r--  include/asm-i386/mach-default/setup_arch.h | 7
-rw-r--r--  include/asm-i386/mach-default/smpboot_hooks.h | 44
-rw-r--r--  include/asm-i386/mach-es7000/mach_apic.h | 206
-rw-r--r--  include/asm-i386/mach-es7000/mach_apicdef.h | 13
-rw-r--r--  include/asm-i386/mach-es7000/mach_ipi.h | 24
-rw-r--r--  include/asm-i386/mach-es7000/mach_mpparse.h | 40
-rw-r--r--  include/asm-i386/mach-es7000/mach_mpspec.h | 8
-rw-r--r--  include/asm-i386/mach-es7000/mach_wakecpu.h | 59
-rw-r--r--  include/asm-i386/mach-generic/irq_vectors_limits.h | 14
-rw-r--r--  include/asm-i386/mach-generic/mach_apic.h | 33
-rw-r--r--  include/asm-i386/mach-generic/mach_apicdef.h | 11
-rw-r--r--  include/asm-i386/mach-generic/mach_ipi.h | 10
-rw-r--r--  include/asm-i386/mach-generic/mach_mpparse.h | 12
-rw-r--r--  include/asm-i386/mach-generic/mach_mpspec.h | 10
-rw-r--r--  include/asm-i386/mach-numaq/mach_apic.h | 149
-rw-r--r--  include/asm-i386/mach-numaq/mach_apicdef.h | 14
-rw-r--r--  include/asm-i386/mach-numaq/mach_ipi.h | 25
-rw-r--r--  include/asm-i386/mach-numaq/mach_mpparse.h | 29
-rw-r--r--  include/asm-i386/mach-numaq/mach_mpspec.h | 8
-rw-r--r--  include/asm-i386/mach-numaq/mach_wakecpu.h | 43
-rw-r--r--  include/asm-i386/mach-summit/irq_vectors_limits.h | 14
-rw-r--r--  include/asm-i386/mach-summit/mach_apic.h | 197
-rw-r--r--  include/asm-i386/mach-summit/mach_apicdef.h | 13
-rw-r--r--  include/asm-i386/mach-summit/mach_ipi.h | 25
-rw-r--r--  include/asm-i386/mach-summit/mach_mpparse.h | 121
-rw-r--r--  include/asm-i386/mach-summit/mach_mpspec.h | 9
-rw-r--r--  include/asm-i386/mach-visws/cobalt.h | 125
-rw-r--r--  include/asm-i386/mach-visws/entry_arch.h | 23
-rw-r--r--  include/asm-i386/mach-visws/irq_vectors.h | 62
-rw-r--r--  include/asm-i386/mach-visws/lithium.h | 53
-rw-r--r--  include/asm-i386/mach-visws/mach_apic.h | 103
-rw-r--r--  include/asm-i386/mach-visws/mach_apicdef.h | 12
-rw-r--r--  include/asm-i386/mach-visws/piix4.h | 107
-rw-r--r--  include/asm-i386/mach-visws/setup_arch.h | 8
-rw-r--r--  include/asm-i386/mach-visws/smpboot_hooks.h | 24
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h | 18
-rw-r--r--  include/asm-i386/mach-voyager/entry_arch.h | 26
-rw-r--r--  include/asm-i386/mach-voyager/irq_vectors.h | 79
-rw-r--r--  include/asm-i386/mach-voyager/setup_arch.h | 10
-rw-r--r--  include/asm-i386/math_emu.h | 36
-rw-r--r--  include/asm-i386/mc146818rtc.h | 97
-rw-r--r--  include/asm-i386/mca.h | 43
-rw-r--r--  include/asm-i386/mca_dma.h | 201
-rw-r--r--  include/asm-i386/mce.h | 11
-rw-r--r--  include/asm-i386/mman.h | 17
-rw-r--r--  include/asm-i386/mmu.h | 18
-rw-r--r--  include/asm-i386/mmu_context.h | 86
-rw-r--r--  include/asm-i386/mmx.h | 14
-rw-r--r--  include/asm-i386/mmzone.h | 145
-rw-r--r--  include/asm-i386/module.h | 75
-rw-r--r--  include/asm-i386/mpspec.h | 81
-rw-r--r--  include/asm-i386/mpspec_def.h | 186
-rw-r--r--  include/asm-i386/msgbuf.h | 31
-rw-r--r--  include/asm-i386/msidef.h | 47
-rw-r--r--  include/asm-i386/msr-index.h | 278
-rw-r--r--  include/asm-i386/msr.h | 161
-rw-r--r--  include/asm-i386/mtrr.h | 115
-rw-r--r--  include/asm-i386/mutex.h | 130
-rw-r--r--  include/asm-i386/namei.h | 17
-rw-r--r--  include/asm-i386/nmi.h | 64
-rw-r--r--  include/asm-i386/numa.h | 3
-rw-r--r--  include/asm-i386/numaq.h | 164
-rw-r--r--  include/asm-i386/page.h | 206
-rw-r--r--  include/asm-i386/param.h | 22
-rw-r--r--  include/asm-i386/paravirt.h | 1085
-rw-r--r--  include/asm-i386/parport.h | 18
-rw-r--r--  include/asm-i386/pci-direct.h | 1
-rw-r--r--  include/asm-i386/pci.h | 90
-rw-r--r--  include/asm-i386/percpu.h | 154
-rw-r--r--  include/asm-i386/pgalloc.h | 68
-rw-r--r--  include/asm-i386/pgtable-2level-defs.h | 20
-rw-r--r--  include/asm-i386/pgtable-2level.h | 86
-rw-r--r--  include/asm-i386/pgtable-3level-defs.h | 28
-rw-r--r--  include/asm-i386/pgtable-3level.h | 192
-rw-r--r--  include/asm-i386/pgtable.h | 512
-rw-r--r--  include/asm-i386/poll.h | 1
-rw-r--r--  include/asm-i386/posix_types.h | 82
-rw-r--r--  include/asm-i386/processor-cyrix.h | 30
-rw-r--r--  include/asm-i386/processor-flags.h | 91
-rw-r--r--  include/asm-i386/processor.h | 755
-rw-r--r--  include/asm-i386/ptrace-abi.h | 39
-rw-r--r--  include/asm-i386/ptrace.h | 63
-rw-r--r--  include/asm-i386/reboot.h | 20
-rw-r--r--  include/asm-i386/reboot_fixups.h | 6
-rw-r--r--  include/asm-i386/required-features.h | 55
-rw-r--r--  include/asm-i386/resource.h | 6
-rw-r--r--  include/asm-i386/resume-trace.h | 13
-rw-r--r--  include/asm-i386/rtc.h | 10
-rw-r--r--  include/asm-i386/rwlock.h | 25
-rw-r--r--  include/asm-i386/rwsem.h | 258
-rw-r--r--  include/asm-i386/scatterlist.h | 23
-rw-r--r--  include/asm-i386/seccomp.h | 16
-rw-r--r--  include/asm-i386/sections.h | 7
-rw-r--r--  include/asm-i386/segment.h | 148
-rw-r--r--  include/asm-i386/semaphore.h | 176
-rw-r--r--  include/asm-i386/sembuf.h | 25
-rw-r--r--  include/asm-i386/serial.h | 29
-rw-r--r--  include/asm-i386/setup.h | 92
-rw-r--r--  include/asm-i386/shmbuf.h | 42
-rw-r--r--  include/asm-i386/shmparam.h | 6
-rw-r--r--  include/asm-i386/sigcontext.h | 85
-rw-r--r--  include/asm-i386/siginfo.h | 6
-rw-r--r--  include/asm-i386/signal.h | 232
-rw-r--r--  include/asm-i386/smp.h | 182
-rw-r--r--  include/asm-i386/socket.h | 55
-rw-r--r--  include/asm-i386/sockios.h | 13
-rw-r--r--  include/asm-i386/sparsemem.h | 31
-rw-r--r--  include/asm-i386/spinlock.h | 221
-rw-r--r--  include/asm-i386/spinlock_types.h | 20
-rw-r--r--  include/asm-i386/srat.h | 37
-rw-r--r--  include/asm-i386/stacktrace.h | 1
-rw-r--r--  include/asm-i386/stat.h | 77
-rw-r--r--  include/asm-i386/statfs.h | 6
-rw-r--r--  include/asm-i386/string.h | 276
-rw-r--r--  include/asm-i386/suspend.h | 46
-rw-r--r--  include/asm-i386/sync_bitops.h | 156
-rw-r--r--  include/asm-i386/system.h | 313
-rw-r--r--  include/asm-i386/termbits.h | 198
-rw-r--r--  include/asm-i386/termios.h | 90
-rw-r--r--  include/asm-i386/therm_throt.h | 9
-rw-r--r--  include/asm-i386/thread_info.h | 180
-rw-r--r--  include/asm-i386/time.h | 44
-rw-r--r--  include/asm-i386/timer.h | 50
-rw-r--r--  include/asm-i386/timex.h | 22
-rw-r--r--  include/asm-i386/tlb.h | 20
-rw-r--r--  include/asm-i386/tlbflush.h | 175
-rw-r--r--  include/asm-i386/topology.h | 121
-rw-r--r--  include/asm-i386/tsc.h | 75
-rw-r--r--  include/asm-i386/types.h | 64
-rw-r--r--  include/asm-i386/uaccess.h | 590
-rw-r--r--  include/asm-i386/ucontext.h | 12
-rw-r--r--  include/asm-i386/unaligned.h | 37
-rw-r--r--  include/asm-i386/unistd.h | 373
-rw-r--r--  include/asm-i386/unwind.h | 13
-rw-r--r--  include/asm-i386/user.h | 121
-rw-r--r--  include/asm-i386/vga.h | 20
-rw-r--r--  include/asm-i386/vic.h | 61
-rw-r--r--  include/asm-i386/vm86.h | 215
-rw-r--r--  include/asm-i386/vmi.h | 263
-rw-r--r--  include/asm-i386/vmi_time.h | 98
-rw-r--r--  include/asm-i386/voyager.h | 517
-rw-r--r--  include/asm-i386/xen/hypercall.h | 413
-rw-r--r--  include/asm-i386/xen/hypervisor.h | 73
-rw-r--r--  include/asm-i386/xen/interface.h | 188
-rw-r--r--  include/asm-i386/xor.h | 883
243 files changed, 0 insertions, 22778 deletions
diff --git a/include/asm-i386/8253pit.h b/include/asm-i386/8253pit.h
deleted file mode 100644
index 96c7c3592daf..000000000000
--- a/include/asm-i386/8253pit.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * 8253/8254 Programmable Interval Timer
3 */
4
5#ifndef _8253PIT_H
6#define _8253PIT_H
7
8#include <asm/timex.h>
9
10#define PIT_TICK_RATE CLOCK_TICK_RATE
11
12#endif
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
deleted file mode 100644
index cbf6e8f1087b..000000000000
--- a/include/asm-i386/Kbuild
+++ /dev/null
@@ -1,12 +0,0 @@
1include include/asm-generic/Kbuild.asm
2
3header-y += boot.h
4header-y += debugreg.h
5header-y += ldt.h
6header-y += msr-index.h
7header-y += ptrace-abi.h
8header-y += ucontext.h
9
10unifdef-y += msr.h
11unifdef-y += mtrr.h
12unifdef-y += vm86.h
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
deleted file mode 100644
index 851a60f8258c..000000000000
--- a/include/asm-i386/a.out.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef __I386_A_OUT_H__
2#define __I386_A_OUT_H__
3
4struct exec
5{
6 unsigned long a_info; /* Use macros N_MAGIC, etc for access */
7 unsigned a_text; /* length of text, in bytes */
8 unsigned a_data; /* length of data, in bytes */
9 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
10 unsigned a_syms; /* length of symbol table data in file, in bytes */
11 unsigned a_entry; /* start address */
12 unsigned a_trsize; /* length of relocation info for text, in bytes */
13 unsigned a_drsize; /* length of relocation info for data, in bytes */
14};
15
16#define N_TRSIZE(a) ((a).a_trsize)
17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms)
19
20#ifdef __KERNEL__
21
22#define STACK_TOP TASK_SIZE
23#define STACK_TOP_MAX STACK_TOP
24
25#endif
26
27#endif /* __I386_A_OUT_H__ */
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
deleted file mode 100644
index 125179adf044..000000000000
--- a/include/asm-i386/acpi.h
+++ /dev/null
@@ -1,147 +0,0 @@
1/*
2 * asm-i386/acpi.h
3 *
4 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#ifndef _ASM_ACPI_H
27#define _ASM_ACPI_H
28
29#ifdef __KERNEL__
30
31#include <acpi/pdc_intel.h>
32
33#include <asm/system.h> /* defines cmpxchg */
34
35#define COMPILER_DEPENDENT_INT64 long long
36#define COMPILER_DEPENDENT_UINT64 unsigned long long
37
38/*
39 * Calling conventions:
40 *
41 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
42 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
43 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
44 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
45 */
46#define ACPI_SYSTEM_XFACE
47#define ACPI_EXTERNAL_XFACE
48#define ACPI_INTERNAL_XFACE
49#define ACPI_INTERNAL_VAR_XFACE
50
51/* Asm macros */
52
53#define ACPI_ASM_MACROS
54#define BREAKPOINT3
55#define ACPI_DISABLE_IRQS() local_irq_disable()
56#define ACPI_ENABLE_IRQS() local_irq_enable()
57#define ACPI_FLUSH_CPU_CACHE() wbinvd()
58
59int __acpi_acquire_global_lock(unsigned int *lock);
60int __acpi_release_global_lock(unsigned int *lock);
61
62#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
63 ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
64
65#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
66 ((Acq) = __acpi_release_global_lock(&facs->global_lock))
67
68/*
69 * Math helper asm macros
70 */
71#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
72 asm("divl %2;" \
73 :"=a"(q32), "=d"(r32) \
74 :"r"(d32), \
75 "0"(n_lo), "1"(n_hi))
76
77
78#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
79 asm("shrl $1,%2;" \
80 "rcrl $1,%3;" \
81 :"=r"(n_hi), "=r"(n_lo) \
82 :"0"(n_hi), "1"(n_lo))
83
84#ifdef CONFIG_X86_IO_APIC
85extern void check_acpi_pci(void);
86#else
87static inline void check_acpi_pci(void) { }
88#endif
89
90#ifdef CONFIG_ACPI
91extern int acpi_lapic;
92extern int acpi_ioapic;
93extern int acpi_noirq;
94extern int acpi_strict;
95extern int acpi_disabled;
96extern int acpi_ht;
97extern int acpi_pci_disabled;
98static inline void disable_acpi(void)
99{
100 acpi_disabled = 1;
101 acpi_ht = 0;
102 acpi_pci_disabled = 1;
103 acpi_noirq = 1;
104}
105
106/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
107#define FIX_ACPI_PAGES 4
108
109extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
110
111#ifdef CONFIG_X86_IO_APIC
112extern int acpi_skip_timer_override;
113extern int acpi_use_timer_override;
114#endif
115
116static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
117static inline void acpi_disable_pci(void)
118{
119 acpi_pci_disabled = 1;
120 acpi_noirq_set();
121}
122extern int acpi_irq_balance_set(char *str);
123
124/* routines for saving/restoring kernel state */
125extern int acpi_save_state_mem(void);
126extern void acpi_restore_state_mem(void);
127
128extern unsigned long acpi_wakeup_address;
129
130/* early initialization routine */
131extern void acpi_reserve_bootmem(void);
132
133#else /* !CONFIG_ACPI */
134
135#define acpi_lapic 0
136#define acpi_ioapic 0
137static inline void acpi_noirq_set(void) { }
138static inline void acpi_disable_pci(void) { }
139static inline void disable_acpi(void) { }
140
141#endif /* !CONFIG_ACPI */
142
143#define ARCH_HAS_POWER_INIT 1
144
145#endif /*__KERNEL__*/
146
147#endif /*_ASM_ACPI_H*/
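
For context, a minimal sketch of how the global-lock macros above are driven by a caller (illustrative only, not part of this commit; the example_ function and the facs_descriptor type are assumptions):

static int example_take_acpi_global_lock(struct facs_descriptor *facs)
{
	int acquired;

	/* Expands to: acquired = __acpi_acquire_global_lock(&facs->global_lock) */
	do {
		ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
	} while (!acquired);		/* retry until ownership is granted */

	/* ... touch hardware shared with the firmware ... */

	/* A non-zero result here means the firmware is waiting for the lock */
	ACPI_RELEASE_GLOBAL_LOCK(facs, acquired);
	return acquired;
}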
diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h
deleted file mode 100644
index 6af173dbf123..000000000000
--- a/include/asm-i386/agp.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef AGP_H
2#define AGP_H 1
3
4#include <asm/pgtable.h>
5#include <asm/cacheflush.h>
6
7/*
8 * Functions to keep the agpgart mappings coherent with the MMU.
9 * The GART gives the CPU a physical alias of pages in memory. The alias region is
10 * mapped uncacheable. Make sure there are no conflicting mappings
11 * with different cacheability attributes for the same page. This avoids
12 * data corruption on some CPUs.
13 */
14
15/* Caller's responsibility to call global_flush_tlb() for
16 * performance reasons */
17#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
18#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
19#define flush_agp_mappings() global_flush_tlb()
20
21/* Could use CLFLUSH here if the cpu supports it. But then it would
22 need to be called for each cacheline of the whole page so it may not be
23 worth it. Would need a page for it. */
24#define flush_agp_cache() wbinvd()
25
26/* Convert a physical address to an address suitable for the GART. */
27#define phys_to_gart(x) (x)
28#define gart_to_phys(x) (x)
29
30/* GATT allocation. Returns/accepts GATT kernel virtual address. */
31#define alloc_gatt_pages(order) \
32 ((char *)__get_free_pages(GFP_KERNEL, (order)))
33#define free_gatt_pages(table, order) \
34 free_pages((unsigned long)(table), (order))
35
36#endif
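
As the comment above notes, page-attribute changes are left unflushed for performance; a hypothetical driver batches them and flushes once (illustrative sketch, not part of this commit):

static void example_map_agp_pages(struct page **pages, int nr)
{
	int i;

	/* Each call rewrites one kernel mapping to PAGE_KERNEL_NOCACHE */
	for (i = 0; i < nr; i++)
		map_page_into_agp(pages[i]);

	/* One global_flush_tlb() for the whole batch */
	flush_agp_mappings();
}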
diff --git a/include/asm-i386/alternative-asm.i b/include/asm-i386/alternative-asm.i
deleted file mode 100644
index f0510209ccbe..000000000000
--- a/include/asm-i386/alternative-asm.i
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifdef CONFIG_SMP
2 .macro LOCK_PREFIX
31: lock
4 .section .smp_locks,"a"
5 .align 4
6 .long 1b
7 .previous
8 .endm
9#else
10 .macro LOCK_PREFIX
11 .endm
12#endif
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
deleted file mode 100644
index bda6c810c0f4..000000000000
--- a/include/asm-i386/alternative.h
+++ /dev/null
@@ -1,154 +0,0 @@
1#ifndef _I386_ALTERNATIVE_H
2#define _I386_ALTERNATIVE_H
3
4#include <asm/types.h>
5#include <linux/stddef.h>
6#include <linux/types.h>
7
8struct alt_instr {
9 u8 *instr; /* original instruction */
10 u8 *replacement;
11 u8 cpuid; /* cpuid bit set for replacement */
12 u8 instrlen; /* length of original instruction */
13 u8 replacementlen; /* length of new instruction, <= instrlen */
14 u8 pad;
15};
16
17extern void alternative_instructions(void);
18extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
19
20struct module;
21#ifdef CONFIG_SMP
22extern void alternatives_smp_module_add(struct module *mod, char *name,
23 void *locks, void *locks_end,
24 void *text, void *text_end);
25extern void alternatives_smp_module_del(struct module *mod);
26extern void alternatives_smp_switch(int smp);
27#else
28static inline void alternatives_smp_module_add(struct module *mod, char *name,
29 void *locks, void *locks_end,
30 void *text, void *text_end) {}
31static inline void alternatives_smp_module_del(struct module *mod) {}
32static inline void alternatives_smp_switch(int smp) {}
33#endif /* CONFIG_SMP */
34
35/*
36 * Alternative instructions for different CPU types or capabilities.
37 *
38 * This allows the use of optimized instructions even on generic binary
39 * kernels.
40 *
41 * The length of oldinstr must be greater than or equal to the length
42 * of newinstr. It can be padded with nops as needed.
43 *
44 * For non-barrier-like inlines please define new variants
45 * without volatile and memory clobber.
46 */
47#define alternative(oldinstr, newinstr, feature) \
48 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
49 ".section .altinstructions,\"a\"\n" \
50 " .align 4\n" \
51 " .long 661b\n" /* label */ \
52 " .long 663f\n" /* new instruction */ \
53 " .byte %c0\n" /* feature bit */ \
54 " .byte 662b-661b\n" /* sourcelen */ \
55 " .byte 664f-663f\n" /* replacementlen */ \
56 ".previous\n" \
57 ".section .altinstr_replacement,\"ax\"\n" \
58 "663:\n\t" newinstr "\n664:\n" /* replacement */\
59 ".previous" :: "i" (feature) : "memory")
60
61/*
62 * Alternative inline assembly with input.
63 *
64 * Peculiarities:
65 * No memory clobber here.
66 * Argument numbers start with 1.
67 * It is best to use constraints that are fixed size (like (%1) ... "r").
68 * If you use variable-sized constraints like "m" or "g" in the
69 * replacement, make sure to pad to the worst-case length.
70 */
71#define alternative_input(oldinstr, newinstr, feature, input...) \
72 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
73 ".section .altinstructions,\"a\"\n" \
74 " .align 4\n" \
75 " .long 661b\n" /* label */ \
76 " .long 663f\n" /* new instruction */ \
77 " .byte %c0\n" /* feature bit */ \
78 " .byte 662b-661b\n" /* sourcelen */ \
79 " .byte 664f-663f\n" /* replacementlen */ \
80 ".previous\n" \
81 ".section .altinstr_replacement,\"ax\"\n" \
82 "663:\n\t" newinstr "\n664:\n" /* replacement */\
83 ".previous" :: "i" (feature), ##input)
84
85/* Like alternative_input, but with a single output argument */
86#define alternative_io(oldinstr, newinstr, feature, output, input...) \
87 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
88 ".section .altinstructions,\"a\"\n" \
89 " .align 4\n" \
90 " .long 661b\n" /* label */ \
91 " .long 663f\n" /* new instruction */ \
92 " .byte %c[feat]\n" /* feature bit */ \
93 " .byte 662b-661b\n" /* sourcelen */ \
94 " .byte 664f-663f\n" /* replacementlen */ \
95 ".previous\n" \
96 ".section .altinstr_replacement,\"ax\"\n" \
97 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
98 ".previous" : output : [feat] "i" (feature), ##input)
99
100/*
101 * Use this macro if you need more than one output parameter
102 * in alternative_io.
103 */
104#define ASM_OUTPUT2(a, b) a, b
105
106/*
107 * Alternative inline assembly for SMP.
108 *
109 * The LOCK_PREFIX macro defined here replaces the LOCK and
110 * LOCK_PREFIX macros used everywhere in the source tree.
111 *
112 * SMP alternatives use the same data structures as the other
113 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
114 * UP system running a SMP kernel. The existing apply_alternatives()
115 * works fine for patching a SMP kernel for UP.
116 *
117 * The SMP alternative tables can be kept after boot and contain both
118 * UP and SMP versions of the instructions to allow switching back to
119 * SMP at runtime, when hotplugging in a new CPU, which is especially
120 * useful in virtualized environments.
121 *
122 * The very common lock prefix is handled as special case in a
123 * separate table which is a pure address list without replacement ptr
124 * and size information. That keeps the table sizes small.
125 */
126
127#ifdef CONFIG_SMP
128#define LOCK_PREFIX \
129 ".section .smp_locks,\"a\"\n" \
130 " .align 4\n" \
131 " .long 661f\n" /* address */ \
132 ".previous\n" \
133 "661:\n\tlock; "
134
135#else /* ! CONFIG_SMP */
136#define LOCK_PREFIX ""
137#endif
138
139struct paravirt_patch_site;
140#ifdef CONFIG_PARAVIRT
141void apply_paravirt(struct paravirt_patch_site *start,
142 struct paravirt_patch_site *end);
143#else
144static inline void
145apply_paravirt(struct paravirt_patch_site *start,
146 struct paravirt_patch_site *end)
147{}
148#define __parainstructions NULL
149#define __parainstructions_end NULL
150#endif
151
152extern void text_poke(void *addr, unsigned char *opcode, int len);
153
154#endif /* _I386_ALTERNATIVE_H */
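
For context, this is how alternative() and LOCK_PREFIX are consumed elsewhere in the tree of this era (paraphrased from asm-i386/system.h and the atomic helpers; treat the exact strings as illustrative):

/* Memory barrier: a plain locked add on old CPUs, mfence where SSE2 exists */
#define example_mb() \
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)

/* LOCK_PREFIX is patched away at boot when running a SMP kernel on UP */
static inline void example_locked_inc(int *p)
{
	__asm__ __volatile__(LOCK_PREFIX "incl %0" : "+m" (*p));
}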
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
deleted file mode 100644
index 4091b33dcb10..000000000000
--- a/include/asm-i386/apic.h
+++ /dev/null
@@ -1,126 +0,0 @@
1#ifndef __ASM_APIC_H
2#define __ASM_APIC_H
3
4#include <linux/pm.h>
5#include <linux/delay.h>
6#include <asm/fixmap.h>
7#include <asm/apicdef.h>
8#include <asm/processor.h>
9#include <asm/system.h>
10
11#define Dprintk(x...)
12
13/*
14 * Debugging macros
15 */
16#define APIC_QUIET 0
17#define APIC_VERBOSE 1
18#define APIC_DEBUG 2
19
20extern int apic_verbosity;
21
22/*
23 * Define the default level of output to be very little
24 * This can be turned up by using apic=verbose for more
25 * information and apic=debug for _lots_ of information.
26 * apic_verbosity is defined in apic.c
27 */
28#define apic_printk(v, s, a...) do { \
29 if ((v) <= apic_verbosity) \
30 printk(s, ##a); \
31 } while (0)
32
33
34extern void generic_apic_probe(void);
35
36#ifdef CONFIG_X86_LOCAL_APIC
37
38/*
39 * Basic functions accessing APICs.
40 */
41#ifdef CONFIG_PARAVIRT
42#include <asm/paravirt.h>
43#else
44#define apic_write native_apic_write
45#define apic_write_atomic native_apic_write_atomic
46#define apic_read native_apic_read
47#define setup_boot_clock setup_boot_APIC_clock
48#define setup_secondary_clock setup_secondary_APIC_clock
49#endif
50
51static __inline fastcall void native_apic_write(unsigned long reg,
52 unsigned long v)
53{
54 *((volatile unsigned long *)(APIC_BASE+reg)) = v;
55}
56
57static __inline fastcall void native_apic_write_atomic(unsigned long reg,
58 unsigned long v)
59{
60 xchg((volatile unsigned long *)(APIC_BASE+reg), v);
61}
62
63static __inline fastcall unsigned long native_apic_read(unsigned long reg)
64{
65 return *((volatile unsigned long *)(APIC_BASE+reg));
66}
67
68void apic_wait_icr_idle(void);
69unsigned long safe_apic_wait_icr_idle(void);
70int get_physical_broadcast(void);
71
72#ifdef CONFIG_X86_GOOD_APIC
73# define FORCE_READ_AROUND_WRITE 0
74# define apic_read_around(x)
75# define apic_write_around(x,y) apic_write((x),(y))
76#else
77# define FORCE_READ_AROUND_WRITE 1
78# define apic_read_around(x) apic_read(x)
79# define apic_write_around(x,y) apic_write_atomic((x),(y))
80#endif
81
82static inline void ack_APIC_irq(void)
83{
84 /*
85 * ack_APIC_irq() actually gets compiled as a single instruction:
86 * - a single rmw on Pentium/82489DX
87 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
88 * ... yummie.
89 */
90
91 /* Docs say use 0 for future compatibility */
92 apic_write_around(APIC_EOI, 0);
93}
94
95extern int lapic_get_maxlvt(void);
96extern void clear_local_APIC(void);
97extern void connect_bsp_APIC (void);
98extern void disconnect_bsp_APIC (int virt_wire_setup);
99extern void disable_local_APIC (void);
100extern void lapic_shutdown (void);
101extern int verify_local_APIC (void);
102extern void cache_APIC_registers (void);
103extern void sync_Arb_IDs (void);
104extern void init_bsp_APIC (void);
105extern void setup_local_APIC (void);
106extern void init_apic_mappings (void);
107extern void smp_local_timer_interrupt (void);
108extern void setup_boot_APIC_clock (void);
109extern void setup_secondary_APIC_clock (void);
110extern int APIC_init_uniprocessor (void);
111
112extern void enable_NMI_through_LVT0 (void * dummy);
113
114#define ARCH_APICTIMER_STOPS_ON_C3 1
115
116extern int timer_over_8254;
117extern int local_apic_timer_c2_ok;
118
119extern int local_apic_timer_disabled;
120
121#else /* !CONFIG_X86_LOCAL_APIC */
122static inline void lapic_shutdown(void) { }
123
124#endif /* !CONFIG_X86_LOCAL_APIC */
125
126#endif /* __ASM_APIC_H */
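
A short usage sketch of the accessors above (illustrative, not part of this commit; APIC_LVT0 and APIC_LVT_MASKED come from apicdef.h further down in this diff):

static inline void example_mask_lvt0(void)
{
	unsigned long v = apic_read(APIC_LVT0);

	apic_printk(APIC_VERBOSE, "masking LVT0 (was %08lx)\n", v);
	/* On !CONFIG_X86_GOOD_APIC this degrades to an atomic xchg-based write */
	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
}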
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
deleted file mode 100644
index 9f6995341fdc..000000000000
--- a/include/asm-i386/apicdef.h
+++ /dev/null
@@ -1,375 +0,0 @@
1#ifndef __ASM_APICDEF_H
2#define __ASM_APICDEF_H
3
4/*
5 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
6 *
7 * Alan Cox <Alan.Cox@linux.org>, 1995.
8 * Ingo Molnar <mingo@redhat.com>, 1999, 2000
9 */
10
11#define APIC_DEFAULT_PHYS_BASE 0xfee00000
12
13#define APIC_ID 0x20
14#define APIC_LVR 0x30
15#define APIC_LVR_MASK 0xFF00FF
16#define GET_APIC_VERSION(x) ((x)&0xFF)
17#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF)
18#define APIC_INTEGRATED(x) ((x)&0xF0)
19#define APIC_XAPIC(x) ((x) >= 0x14)
20#define APIC_TASKPRI 0x80
21#define APIC_TPRI_MASK 0xFF
22#define APIC_ARBPRI 0x90
23#define APIC_ARBPRI_MASK 0xFF
24#define APIC_PROCPRI 0xA0
25#define APIC_EOI 0xB0
26#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */
27#define APIC_RRR 0xC0
28#define APIC_LDR 0xD0
29#define APIC_LDR_MASK (0xFF<<24)
30#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF)
31#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
32#define APIC_ALL_CPUS 0xFF
33#define APIC_DFR 0xE0
34#define APIC_DFR_CLUSTER 0x0FFFFFFFul
35#define APIC_DFR_FLAT 0xFFFFFFFFul
36#define APIC_SPIV 0xF0
37#define APIC_SPIV_FOCUS_DISABLED (1<<9)
38#define APIC_SPIV_APIC_ENABLED (1<<8)
39#define APIC_ISR 0x100
40#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
41#define APIC_TMR 0x180
42#define APIC_IRR 0x200
43#define APIC_ESR 0x280
44#define APIC_ESR_SEND_CS 0x00001
45#define APIC_ESR_RECV_CS 0x00002
46#define APIC_ESR_SEND_ACC 0x00004
47#define APIC_ESR_RECV_ACC 0x00008
48#define APIC_ESR_SENDILL 0x00020
49#define APIC_ESR_RECVILL 0x00040
50#define APIC_ESR_ILLREGA 0x00080
51#define APIC_ICR 0x300
52#define APIC_DEST_SELF 0x40000
53#define APIC_DEST_ALLINC 0x80000
54#define APIC_DEST_ALLBUT 0xC0000
55#define APIC_ICR_RR_MASK 0x30000
56#define APIC_ICR_RR_INVALID 0x00000
57#define APIC_ICR_RR_INPROG 0x10000
58#define APIC_ICR_RR_VALID 0x20000
59#define APIC_INT_LEVELTRIG 0x08000
60#define APIC_INT_ASSERT 0x04000
61#define APIC_ICR_BUSY 0x01000
62#define APIC_DEST_LOGICAL 0x00800
63#define APIC_DM_FIXED 0x00000
64#define APIC_DM_LOWEST 0x00100
65#define APIC_DM_SMI 0x00200
66#define APIC_DM_REMRD 0x00300
67#define APIC_DM_NMI 0x00400
68#define APIC_DM_INIT 0x00500
69#define APIC_DM_STARTUP 0x00600
70#define APIC_DM_EXTINT 0x00700
71#define APIC_VECTOR_MASK 0x000FF
72#define APIC_ICR2 0x310
73#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
74#define SET_APIC_DEST_FIELD(x) ((x)<<24)
75#define APIC_LVTT 0x320
76#define APIC_LVTTHMR 0x330
77#define APIC_LVTPC 0x340
78#define APIC_LVT0 0x350
79#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
80#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
81#define SET_APIC_TIMER_BASE(x) (((x)<<18))
82#define APIC_TIMER_BASE_CLKIN 0x0
83#define APIC_TIMER_BASE_TMBASE 0x1
84#define APIC_TIMER_BASE_DIV 0x2
85#define APIC_LVT_TIMER_PERIODIC (1<<17)
86#define APIC_LVT_MASKED (1<<16)
87#define APIC_LVT_LEVEL_TRIGGER (1<<15)
88#define APIC_LVT_REMOTE_IRR (1<<14)
89#define APIC_INPUT_POLARITY (1<<13)
90#define APIC_SEND_PENDING (1<<12)
91#define APIC_MODE_MASK 0x700
92#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
93#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8))
94#define APIC_MODE_FIXED 0x0
95#define APIC_MODE_NMI 0x4
96#define APIC_MODE_EXTINT 0x7
97#define APIC_LVT1 0x360
98#define APIC_LVTERR 0x370
99#define APIC_TMICT 0x380
100#define APIC_TMCCT 0x390
101#define APIC_TDCR 0x3E0
102#define APIC_TDR_DIV_TMBASE (1<<2)
103#define APIC_TDR_DIV_1 0xB
104#define APIC_TDR_DIV_2 0x0
105#define APIC_TDR_DIV_4 0x1
106#define APIC_TDR_DIV_8 0x2
107#define APIC_TDR_DIV_16 0x3
108#define APIC_TDR_DIV_32 0x8
109#define APIC_TDR_DIV_64 0x9
110#define APIC_TDR_DIV_128 0xA
111
112#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
113
114#define MAX_IO_APICS 64
115
116/*
117 * the local APIC register structure, memory mapped. Not terribly well
118 * tested, but we might eventually use this one in the future - the
119 * problem why we cannot use it right now is the P5 APIC, it has an
120 * errata which cannot take 8-bit reads and writes, only 32-bit ones ...
121 */
122#define u32 unsigned int
123
124
125struct local_apic {
126
127/*000*/ struct { u32 __reserved[4]; } __reserved_01;
128
129/*010*/ struct { u32 __reserved[4]; } __reserved_02;
130
131/*020*/ struct { /* APIC ID Register */
132 u32 __reserved_1 : 24,
133 phys_apic_id : 4,
134 __reserved_2 : 4;
135 u32 __reserved[3];
136 } id;
137
138/*030*/ const
139 struct { /* APIC Version Register */
140 u32 version : 8,
141 __reserved_1 : 8,
142 max_lvt : 8,
143 __reserved_2 : 8;
144 u32 __reserved[3];
145 } version;
146
147/*040*/ struct { u32 __reserved[4]; } __reserved_03;
148
149/*050*/ struct { u32 __reserved[4]; } __reserved_04;
150
151/*060*/ struct { u32 __reserved[4]; } __reserved_05;
152
153/*070*/ struct { u32 __reserved[4]; } __reserved_06;
154
155/*080*/ struct { /* Task Priority Register */
156 u32 priority : 8,
157 __reserved_1 : 24;
158 u32 __reserved_2[3];
159 } tpr;
160
161/*090*/ const
162 struct { /* Arbitration Priority Register */
163 u32 priority : 8,
164 __reserved_1 : 24;
165 u32 __reserved_2[3];
166 } apr;
167
168/*0A0*/ const
169 struct { /* Processor Priority Register */
170 u32 priority : 8,
171 __reserved_1 : 24;
172 u32 __reserved_2[3];
173 } ppr;
174
175/*0B0*/ struct { /* End Of Interrupt Register */
176 u32 eoi;
177 u32 __reserved[3];
178 } eoi;
179
180/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
181
182/*0D0*/ struct { /* Logical Destination Register */
183 u32 __reserved_1 : 24,
184 logical_dest : 8;
185 u32 __reserved_2[3];
186 } ldr;
187
188/*0E0*/ struct { /* Destination Format Register */
189 u32 __reserved_1 : 28,
190 model : 4;
191 u32 __reserved_2[3];
192 } dfr;
193
194/*0F0*/ struct { /* Spurious Interrupt Vector Register */
195 u32 spurious_vector : 8,
196 apic_enabled : 1,
197 focus_cpu : 1,
198 __reserved_2 : 22;
199 u32 __reserved_3[3];
200 } svr;
201
202/*100*/ struct { /* In Service Register */
203/*170*/ u32 bitfield;
204 u32 __reserved[3];
205 } isr [8];
206
207/*180*/ struct { /* Trigger Mode Register */
208/*1F0*/ u32 bitfield;
209 u32 __reserved[3];
210 } tmr [8];
211
212/*200*/ struct { /* Interrupt Request Register */
213/*270*/ u32 bitfield;
214 u32 __reserved[3];
215 } irr [8];
216
217/*280*/ union { /* Error Status Register */
218 struct {
219 u32 send_cs_error : 1,
220 receive_cs_error : 1,
221 send_accept_error : 1,
222 receive_accept_error : 1,
223 __reserved_1 : 1,
224 send_illegal_vector : 1,
225 receive_illegal_vector : 1,
226 illegal_register_address : 1,
227 __reserved_2 : 24;
228 u32 __reserved_3[3];
229 } error_bits;
230 struct {
231 u32 errors;
232 u32 __reserved_3[3];
233 } all_errors;
234 } esr;
235
236/*290*/ struct { u32 __reserved[4]; } __reserved_08;
237
238/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
239
240/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
241
242/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
243
244/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
245
246/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
247
248/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
249
250/*300*/ struct { /* Interrupt Command Register 1 */
251 u32 vector : 8,
252 delivery_mode : 3,
253 destination_mode : 1,
254 delivery_status : 1,
255 __reserved_1 : 1,
256 level : 1,
257 trigger : 1,
258 __reserved_2 : 2,
259 shorthand : 2,
260 __reserved_3 : 12;
261 u32 __reserved_4[3];
262 } icr1;
263
264/*310*/ struct { /* Interrupt Command Register 2 */
265 union {
266 u32 __reserved_1 : 24,
267 phys_dest : 4,
268 __reserved_2 : 4;
269 u32 __reserved_3 : 24,
270 logical_dest : 8;
271 } dest;
272 u32 __reserved_4[3];
273 } icr2;
274
275/*320*/ struct { /* LVT - Timer */
276 u32 vector : 8,
277 __reserved_1 : 4,
278 delivery_status : 1,
279 __reserved_2 : 3,
280 mask : 1,
281 timer_mode : 1,
282 __reserved_3 : 14;
283 u32 __reserved_4[3];
284 } lvt_timer;
285
286/*330*/ struct { /* LVT - Thermal Sensor */
287 u32 vector : 8,
288 delivery_mode : 3,
289 __reserved_1 : 1,
290 delivery_status : 1,
291 __reserved_2 : 3,
292 mask : 1,
293 __reserved_3 : 15;
294 u32 __reserved_4[3];
295 } lvt_thermal;
296
297/*340*/ struct { /* LVT - Performance Counter */
298 u32 vector : 8,
299 delivery_mode : 3,
300 __reserved_1 : 1,
301 delivery_status : 1,
302 __reserved_2 : 3,
303 mask : 1,
304 __reserved_3 : 15;
305 u32 __reserved_4[3];
306 } lvt_pc;
307
308/*350*/ struct { /* LVT - LINT0 */
309 u32 vector : 8,
310 delivery_mode : 3,
311 __reserved_1 : 1,
312 delivery_status : 1,
313 polarity : 1,
314 remote_irr : 1,
315 trigger : 1,
316 mask : 1,
317 __reserved_2 : 15;
318 u32 __reserved_3[3];
319 } lvt_lint0;
320
321/*360*/ struct { /* LVT - LINT1 */
322 u32 vector : 8,
323 delivery_mode : 3,
324 __reserved_1 : 1,
325 delivery_status : 1,
326 polarity : 1,
327 remote_irr : 1,
328 trigger : 1,
329 mask : 1,
330 __reserved_2 : 15;
331 u32 __reserved_3[3];
332 } lvt_lint1;
333
334/*370*/ struct { /* LVT - Error */
335 u32 vector : 8,
336 __reserved_1 : 4,
337 delivery_status : 1,
338 __reserved_2 : 3,
339 mask : 1,
340 __reserved_3 : 15;
341 u32 __reserved_4[3];
342 } lvt_error;
343
344/*380*/ struct { /* Timer Initial Count Register */
345 u32 initial_count;
346 u32 __reserved_2[3];
347 } timer_icr;
348
349/*390*/ const
350 struct { /* Timer Current Count Register */
351 u32 curr_count;
352 u32 __reserved_2[3];
353 } timer_ccr;
354
355/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
356
357/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
358
359/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
360
361/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
362
363/*3E0*/ struct { /* Timer Divide Configuration Register */
364 u32 divisor : 4,
365 __reserved_1 : 28;
366 u32 __reserved_2[3];
367 } timer_dcr;
368
369/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
370
371} __attribute__ ((packed));
372
373#undef u32
374
375#endif
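
A sketch of how the version-register accessors above compose with apic_read() from apic.h (illustrative, not part of this commit):

static inline int example_lapic_maxlvt(void)
{
	unsigned long v = apic_read(APIC_LVR);

	/* 82489DX external APICs report no integrated-version info */
	if (!APIC_INTEGRATED(GET_APIC_VERSION(v)))
		return 0;
	return GET_APIC_MAXLVT(v);	/* bits 16-23: highest LVT entry */
}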
diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h
deleted file mode 100644
index a8c1fca9726d..000000000000
--- a/include/asm-i386/arch_hooks.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _ASM_ARCH_HOOKS_H
2#define _ASM_ARCH_HOOKS_H
3
4#include <linux/interrupt.h>
5
6/*
7 * linux/include/asm/arch_hooks.h
8 *
9 * define the architecture specific hooks
10 */
11
12/* these aren't arch hooks, they are generic routines
13 * that can be used by the hooks */
14extern void init_ISA_irqs(void);
15extern void apic_intr_init(void);
16extern void smp_intr_init(void);
17extern irqreturn_t timer_interrupt(int irq, void *dev_id);
18
19/* these are the defined hooks */
20extern void intr_init_hook(void);
21extern void pre_intr_init_hook(void);
22extern void pre_setup_arch_hook(void);
23extern void trap_init_hook(void);
24extern void time_init_hook(void);
25extern void mca_nmi_hook(void);
26
27extern int setup_early_printk(char *);
28extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
29
30#endif
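
For context, a subarchitecture supplies bodies for these hooks; a minimal sketch in the style of mach-default (illustrative and paraphrased, not part of this commit):

static struct irqaction example_irq0 = {
	.handler = timer_interrupt,	/* declared above */
	.name	 = "timer",
};

void example_time_init_hook(void)
{
	setup_irq(0, &example_irq0);	/* wire IRQ 0 to the timer handler */
}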
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
deleted file mode 100644
index 437aac801711..000000000000
--- a/include/asm-i386/atomic.h
+++ /dev/null
@@ -1,266 +0,0 @@
1#ifndef __ARCH_I386_ATOMIC__
2#define __ARCH_I386_ATOMIC__
3
4#include <linux/compiler.h>
5#include <asm/processor.h>
6#include <asm/cmpxchg.h>
7
8/*
9 * Atomic operations that C can't guarantee us. Useful for
10 * resource counting etc.
11 */
12
13/*
14 * Make sure gcc doesn't try to be clever and move things around
15 * on us. We need to use _exactly_ the address the user gave us,
16 * not some alias that contains the same information.
17 */
18typedef struct { int counter; } atomic_t;
19
20#define ATOMIC_INIT(i) { (i) }
21
22/**
23 * atomic_read - read atomic variable
24 * @v: pointer of type atomic_t
25 *
26 * Atomically reads the value of @v.
27 */
28#define atomic_read(v) ((v)->counter)
29
30/**
31 * atomic_set - set atomic variable
32 * @v: pointer of type atomic_t
33 * @i: required value
34 *
35 * Atomically sets the value of @v to @i.
36 */
37#define atomic_set(v,i) (((v)->counter) = (i))
38
39/**
40 * atomic_add - add integer to atomic variable
41 * @i: integer value to add
42 * @v: pointer of type atomic_t
43 *
44 * Atomically adds @i to @v.
45 */
46static __inline__ void atomic_add(int i, atomic_t *v)
47{
48 __asm__ __volatile__(
49 LOCK_PREFIX "addl %1,%0"
50 :"+m" (v->counter)
51 :"ir" (i));
52}
53
54/**
55 * atomic_sub - subtract integer from atomic variable
56 * @i: integer value to subtract
57 * @v: pointer of type atomic_t
58 *
59 * Atomically subtracts @i from @v.
60 */
61static __inline__ void atomic_sub(int i, atomic_t *v)
62{
63 __asm__ __volatile__(
64 LOCK_PREFIX "subl %1,%0"
65 :"+m" (v->counter)
66 :"ir" (i));
67}
68
69/**
70 * atomic_sub_and_test - subtract value from variable and test result
71 * @i: integer value to subtract
72 * @v: pointer of type atomic_t
73 *
74 * Atomically subtracts @i from @v and returns
75 * true if the result is zero, or false for all
76 * other cases.
77 */
78static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
79{
80 unsigned char c;
81
82 __asm__ __volatile__(
83 LOCK_PREFIX "subl %2,%0; sete %1"
84 :"+m" (v->counter), "=qm" (c)
85 :"ir" (i) : "memory");
86 return c;
87}
88
89/**
90 * atomic_inc - increment atomic variable
91 * @v: pointer of type atomic_t
92 *
93 * Atomically increments @v by 1.
94 */
95static __inline__ void atomic_inc(atomic_t *v)
96{
97 __asm__ __volatile__(
98 LOCK_PREFIX "incl %0"
99 :"+m" (v->counter));
100}
101
102/**
103 * atomic_dec - decrement atomic variable
104 * @v: pointer of type atomic_t
105 *
106 * Atomically decrements @v by 1.
107 */
108static __inline__ void atomic_dec(atomic_t *v)
109{
110 __asm__ __volatile__(
111 LOCK_PREFIX "decl %0"
112 :"+m" (v->counter));
113}
114
115/**
116 * atomic_dec_and_test - decrement and test
117 * @v: pointer of type atomic_t
118 *
119 * Atomically decrements @v by 1 and
120 * returns true if the result is 0, or false for all other
121 * cases.
122 */
123static __inline__ int atomic_dec_and_test(atomic_t *v)
124{
125 unsigned char c;
126
127 __asm__ __volatile__(
128 LOCK_PREFIX "decl %0; sete %1"
129 :"+m" (v->counter), "=qm" (c)
130 : : "memory");
131 return c != 0;
132}
133
134/**
135 * atomic_inc_and_test - increment and test
136 * @v: pointer of type atomic_t
137 *
138 * Atomically increments @v by 1
139 * and returns true if the result is zero, or false for all
140 * other cases.
141 */
142static __inline__ int atomic_inc_and_test(atomic_t *v)
143{
144 unsigned char c;
145
146 __asm__ __volatile__(
147 LOCK_PREFIX "incl %0; sete %1"
148 :"+m" (v->counter), "=qm" (c)
149 : : "memory");
150 return c != 0;
151}
152
153/**
154 * atomic_add_negative - add and test if negative
155 * @v: pointer of type atomic_t
156 * @i: integer value to add
157 *
158 * Atomically adds @i to @v and returns true
159 * if the result is negative, or false when
160 * result is greater than or equal to zero.
161 */
162static __inline__ int atomic_add_negative(int i, atomic_t *v)
163{
164 unsigned char c;
165
166 __asm__ __volatile__(
167 LOCK_PREFIX "addl %2,%0; sets %1"
168 :"+m" (v->counter), "=qm" (c)
169 :"ir" (i) : "memory");
170 return c;
171}
172
173/**
174 * atomic_add_return - add integer and return
175 * @v: pointer of type atomic_t
176 * @i: integer value to add
177 *
178 * Atomically adds @i to @v and returns @i + @v
179 */
180static __inline__ int atomic_add_return(int i, atomic_t *v)
181{
182 int __i;
183#ifdef CONFIG_M386
184 unsigned long flags;
185 if(unlikely(boot_cpu_data.x86 <= 3))
186 goto no_xadd;
187#endif
188 /* Modern 486+ processor */
189 __i = i;
190 __asm__ __volatile__(
191 LOCK_PREFIX "xaddl %0, %1"
192 :"+r" (i), "+m" (v->counter)
193 : : "memory");
194 return i + __i;
195
196#ifdef CONFIG_M386
197no_xadd: /* Legacy 386 processor */
198 local_irq_save(flags);
199 __i = atomic_read(v);
200 atomic_set(v, i + __i);
201 local_irq_restore(flags);
202 return i + __i;
203#endif
204}
205
206/**
207 * atomic_sub_return - subtract integer and return
208 * @v: pointer of type atomic_t
209 * @i: integer value to subtract
210 *
211 * Atomically subtracts @i from @v and returns @v - @i
212 */
213static __inline__ int atomic_sub_return(int i, atomic_t *v)
214{
215 return atomic_add_return(-i,v);
216}
217
218#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
219#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
220
221/**
222 * atomic_add_unless - add unless the number is already a given value
223 * @v: pointer of type atomic_t
224 * @a: the amount to add to v...
225 * @u: ...unless v is equal to u.
226 *
227 * Atomically adds @a to @v, so long as @v was not already @u.
228 * Returns non-zero if @v was not @u, and zero otherwise.
229 */
230static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
231{
232 int c, old;
233 c = atomic_read(v);
234 for (;;) {
235 if (unlikely(c == (u)))
236 break;
237 old = atomic_cmpxchg((v), c, c + (a));
238 if (likely(old == c))
239 break;
240 c = old;
241 }
242 return c != (u);
243}
244
245#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
246
247#define atomic_inc_return(v) (atomic_add_return(1,v))
248#define atomic_dec_return(v) (atomic_sub_return(1,v))
249
250/* These are x86-specific, used by some header files */
251#define atomic_clear_mask(mask, addr) \
252__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
253: : "r" (~(mask)),"m" (*addr) : "memory")
254
255#define atomic_set_mask(mask, addr) \
256__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
257: : "r" (mask),"m" (*(addr)) : "memory")
258
259/* Atomic operations are already serializing on x86 */
260#define smp_mb__before_atomic_dec() barrier()
261#define smp_mb__after_atomic_dec() barrier()
262#define smp_mb__before_atomic_inc() barrier()
263#define smp_mb__after_atomic_inc() barrier()
264
265#include <asm-generic/atomic.h>
266#endif
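
The cmpxchg loop in atomic_add_unless() above is what makes conditional reference-taking race-free; a usage sketch (illustrative, not part of this commit):

/* Take a reference only if at least one is still held (e.g. lookup paths) */
static inline int example_try_get(atomic_t *refcount)
{
	return atomic_inc_not_zero(refcount);	/* 0 if it already hit zero */
}

/* Drop a reference, releasing the object on the final put */
static inline void example_put(atomic_t *refcount, void (*release)(void))
{
	if (atomic_dec_and_test(refcount))
		release();
}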
diff --git a/include/asm-i386/auxvec.h b/include/asm-i386/auxvec.h
deleted file mode 100644
index 395e13016bfb..000000000000
--- a/include/asm-i386/auxvec.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __ASMi386_AUXVEC_H
2#define __ASMi386_AUXVEC_H
3
4/*
5 * Architecture-neutral AT_ values in 0-17, leave some room
6 * for more of them, start the x86-specific ones at 32.
7 */
8#define AT_SYSINFO 32
9#define AT_SYSINFO_EHDR 33
10
11#endif
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
deleted file mode 100644
index a20fe9822f60..000000000000
--- a/include/asm-i386/bitops.h
+++ /dev/null
@@ -1,423 +0,0 @@
1#ifndef _I386_BITOPS_H
2#define _I386_BITOPS_H
3
4/*
5 * Copyright 1992, Linus Torvalds.
6 */
7
8#include <linux/compiler.h>
9#include <asm/alternative.h>
10
11/*
12 * These have to be done with inline assembly: that way the bit-setting
13 * is guaranteed to be atomic. All bit operations return 0 if the bit
14 * was cleared before the operation and != 0 if it was not.
15 *
16 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
17 */
18
19#define ADDR (*(volatile long *) addr)
20
21/**
22 * set_bit - Atomically set a bit in memory
23 * @nr: the bit to set
24 * @addr: the address to start counting from
25 *
26 * This function is atomic and may not be reordered. See __set_bit()
27 * if you do not require the atomic guarantees.
28 *
29 * Note: there are no guarantees that this function will not be reordered
30 * on non-x86 architectures, so if you are writing portable code,
31 * make sure not to rely on its reordering guarantees.
32 *
33 * Note that @nr may be almost arbitrarily large; this function is not
34 * restricted to acting on a single-word quantity.
35 */
36static inline void set_bit(int nr, volatile unsigned long * addr)
37{
38 __asm__ __volatile__( LOCK_PREFIX
39 "btsl %1,%0"
40 :"+m" (ADDR)
41 :"Ir" (nr));
42}
43
44/**
45 * __set_bit - Set a bit in memory
46 * @nr: the bit to set
47 * @addr: the address to start counting from
48 *
49 * Unlike set_bit(), this function is non-atomic and may be reordered.
50 * If it's called on the same region of memory simultaneously, the effect
51 * may be that only one operation succeeds.
52 */
53static inline void __set_bit(int nr, volatile unsigned long * addr)
54{
55 __asm__(
56 "btsl %1,%0"
57 :"+m" (ADDR)
58 :"Ir" (nr));
59}
60
61/**
62 * clear_bit - Clears a bit in memory
63 * @nr: Bit to clear
64 * @addr: Address to start counting from
65 *
66 * clear_bit() is atomic and may not be reordered. However, it does
67 * not contain a memory barrier, so if it is used for locking purposes,
68 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
69 * in order to ensure changes are visible on other processors.
70 */
71static inline void clear_bit(int nr, volatile unsigned long * addr)
72{
73 __asm__ __volatile__( LOCK_PREFIX
74 "btrl %1,%0"
75 :"+m" (ADDR)
76 :"Ir" (nr));
77}
78
79static inline void __clear_bit(int nr, volatile unsigned long * addr)
80{
81 __asm__ __volatile__(
82 "btrl %1,%0"
83 :"+m" (ADDR)
84 :"Ir" (nr));
85}
86#define smp_mb__before_clear_bit() barrier()
87#define smp_mb__after_clear_bit() barrier()
88
89/**
90 * __change_bit - Toggle a bit in memory
91 * @nr: the bit to change
92 * @addr: the address to start counting from
93 *
94 * Unlike change_bit(), this function is non-atomic and may be reordered.
95 * If it's called on the same region of memory simultaneously, the effect
96 * may be that only one operation succeeds.
97 */
98static inline void __change_bit(int nr, volatile unsigned long * addr)
99{
100 __asm__ __volatile__(
101 "btcl %1,%0"
102 :"+m" (ADDR)
103 :"Ir" (nr));
104}
105
106/**
107 * change_bit - Toggle a bit in memory
108 * @nr: Bit to change
109 * @addr: Address to start counting from
110 *
111 * change_bit() is atomic and may not be reordered. It may be
112 * reordered on architectures other than x86.
113 * Note that @nr may be almost arbitrarily large; this function is not
114 * restricted to acting on a single-word quantity.
115 */
116static inline void change_bit(int nr, volatile unsigned long * addr)
117{
118 __asm__ __volatile__( LOCK_PREFIX
119 "btcl %1,%0"
120 :"+m" (ADDR)
121 :"Ir" (nr));
122}
123
124/**
125 * test_and_set_bit - Set a bit and return its old value
126 * @nr: Bit to set
127 * @addr: Address to count from
128 *
129 * This operation is atomic and cannot be reordered.
130 * It may be reordered on architectures other than x86.
131 * It also implies a memory barrier.
132 */
133static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
134{
135 int oldbit;
136
137 __asm__ __volatile__( LOCK_PREFIX
138 "btsl %2,%1\n\tsbbl %0,%0"
139 :"=r" (oldbit),"+m" (ADDR)
140 :"Ir" (nr) : "memory");
141 return oldbit;
142}
143
144/**
145 * __test_and_set_bit - Set a bit and return its old value
146 * @nr: Bit to set
147 * @addr: Address to count from
148 *
149 * This operation is non-atomic and can be reordered.
150 * If two instances of this operation race, one can appear to succeed
151 * but actually fail. You must protect multiple accesses with a lock.
152 */
153static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
154{
155 int oldbit;
156
157 __asm__(
158 "btsl %2,%1\n\tsbbl %0,%0"
159 :"=r" (oldbit),"+m" (ADDR)
160 :"Ir" (nr));
161 return oldbit;
162}
163
164/**
165 * test_and_clear_bit - Clear a bit and return its old value
166 * @nr: Bit to clear
167 * @addr: Address to count from
168 *
169 * This operation is atomic and cannot be reordered.
170 * It can be reordered on architectures other than x86.
171 * It also implies a memory barrier.
172 */
173static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
174{
175 int oldbit;
176
177 __asm__ __volatile__( LOCK_PREFIX
178 "btrl %2,%1\n\tsbbl %0,%0"
179 :"=r" (oldbit),"+m" (ADDR)
180 :"Ir" (nr) : "memory");
181 return oldbit;
182}
183
184/**
185 * __test_and_clear_bit - Clear a bit and return its old value
186 * @nr: Bit to clear
187 * @addr: Address to count from
188 *
189 * This operation is non-atomic and can be reordered.
190 * If two instances of this operation race, one can appear to succeed
191 * but actually fail. You must protect multiple accesses with a lock.
192 */
193static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
194{
195 int oldbit;
196
197 __asm__(
198 "btrl %2,%1\n\tsbbl %0,%0"
199 :"=r" (oldbit),"+m" (ADDR)
200 :"Ir" (nr));
201 return oldbit;
202}
203
204/* WARNING: non-atomic and it can be reordered! */
205static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
206{
207 int oldbit;
208
209 __asm__ __volatile__(
210 "btcl %2,%1\n\tsbbl %0,%0"
211 :"=r" (oldbit),"+m" (ADDR)
212 :"Ir" (nr) : "memory");
213 return oldbit;
214}
215
216/**
217 * test_and_change_bit - Change a bit and return its old value
218 * @nr: Bit to change
219 * @addr: Address to count from
220 *
221 * This operation is atomic and cannot be reordered.
222 * It also implies a memory barrier.
223 */
224static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
225{
226 int oldbit;
227
228 __asm__ __volatile__( LOCK_PREFIX
229 "btcl %2,%1\n\tsbbl %0,%0"
230 :"=r" (oldbit),"+m" (ADDR)
231 :"Ir" (nr) : "memory");
232 return oldbit;
233}
234
235#if 0 /* Fool kernel-doc since it doesn't do macros yet */
236/**
237 * test_bit - Determine whether a bit is set
238 * @nr: bit number to test
239 * @addr: Address to start counting from
240 */
241static int test_bit(int nr, const volatile void * addr);
242#endif
243
244static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
245{
246 return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
247}
248
249static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
250{
251 int oldbit;
252
253 __asm__ __volatile__(
254 "btl %2,%1\n\tsbbl %0,%0"
255 :"=r" (oldbit)
256 :"m" (ADDR),"Ir" (nr));
257 return oldbit;
258}
259
260#define test_bit(nr,addr) \
261(__builtin_constant_p(nr) ? \
262 constant_test_bit((nr),(addr)) : \
263 variable_test_bit((nr),(addr)))
264
265#undef ADDR
266
267/**
268 * find_first_zero_bit - find the first zero bit in a memory region
269 * @addr: The address to start the search at
270 * @size: The maximum size to search
271 *
272 * Returns the bit-number of the first zero bit, not the number of the byte
273 * containing a bit.
274 */
275static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
276{
277 int d0, d1, d2;
278 int res;
279
280 if (!size)
281 return 0;
282 /* This looks at memory. Mark it volatile to tell gcc not to move it around */
283 __asm__ __volatile__(
284 "movl $-1,%%eax\n\t"
285 "xorl %%edx,%%edx\n\t"
286 "repe; scasl\n\t"
287 "je 1f\n\t"
288 "xorl -4(%%edi),%%eax\n\t"
289 "subl $4,%%edi\n\t"
290 "bsfl %%eax,%%edx\n"
291 "1:\tsubl %%ebx,%%edi\n\t"
292 "shll $3,%%edi\n\t"
293 "addl %%edi,%%edx"
294 :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
295 :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
296 return res;
297}
298
299/**
300 * find_next_zero_bit - find the next zero bit in a memory region
301 * @addr: The address to base the search on
302 * @offset: The bitnumber to start searching at
303 * @size: The maximum size to search
304 */
305int find_next_zero_bit(const unsigned long *addr, int size, int offset);
306
307/**
308 * __ffs - find first set bit in word.
309 * @word: The word to search
310 *
311 * Undefined if no bit exists, so code should check against 0 first.
312 */
313static inline unsigned long __ffs(unsigned long word)
314{
315 __asm__("bsfl %1,%0"
316 :"=r" (word)
317 :"rm" (word));
318 return word;
319}
320
321/**
322 * find_first_bit - find the first set bit in a memory region
323 * @addr: The address to start the search at
324 * @size: The maximum size to search
325 *
326 * Returns the bit-number of the first set bit, not the number of the byte
327 * containing a bit.
328 */
329static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
330{
331 unsigned x = 0;
332
333 while (x < size) {
334 unsigned long val = *addr++;
335 if (val)
336 return __ffs(val) + x;
337 x += (sizeof(*addr)<<3);
338 }
339 return x;
340}
341
342/**
343 * find_next_bit - find the next set bit in a memory region
344 * @addr: The address to base the search on
345 * @offset: The bitnumber to start searching at
346 * @size: The maximum size to search
347 */
348int find_next_bit(const unsigned long *addr, int size, int offset);
349
350/**
351 * ffz - find first zero in word.
352 * @word: The word to search
353 *
354 * Undefined if no zero exists, so code should check against ~0UL first.
355 */
356static inline unsigned long ffz(unsigned long word)
357{
358 __asm__("bsfl %1,%0"
359 :"=r" (word)
360 :"r" (~word));
361 return word;
362}
363
364#ifdef __KERNEL__
365
366#include <asm-generic/bitops/sched.h>
367
368/**
369 * ffs - find first bit set
370 * @x: the word to search
371 *
372 * This is defined the same way as
373 * the libc and compiler builtin ffs routines; it therefore
374 * differs in spirit from the ffz() above (man ffs).
375 */
376static inline int ffs(int x)
377{
378 int r;
379
380 __asm__("bsfl %1,%0\n\t"
381 "jnz 1f\n\t"
382 "movl $-1,%0\n"
383 "1:" : "=r" (r) : "rm" (x));
384 return r+1;
385}
386
387/**
388 * fls - find last bit set
389 * @x: the word to search
390 *
391 * This is defined the same way as ffs().
392 */
393static inline int fls(int x)
394{
395 int r;
396
397 __asm__("bsrl %1,%0\n\t"
398 "jnz 1f\n\t"
399 "movl $-1,%0\n"
400 "1:" : "=r" (r) : "rm" (x));
401 return r+1;
402}
403
404#include <asm-generic/bitops/hweight.h>
405
406#endif /* __KERNEL__ */
407
408#include <asm-generic/bitops/fls64.h>
409
410#ifdef __KERNEL__
411
412#include <asm-generic/bitops/ext2-non-atomic.h>
413
414#define ext2_set_bit_atomic(lock,nr,addr) \
415 test_and_set_bit((nr),(unsigned long*)addr)
416#define ext2_clear_bit_atomic(lock,nr, addr) \
417 test_and_clear_bit((nr),(unsigned long*)addr)
418
419#include <asm-generic/bitops/minix.h>
420
421#endif /* __KERNEL__ */
422
423#endif /* _I386_BITOPS_H */
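
The test_bit() dispatch above picks constant_test_bit() when the bit number is a compile-time constant and the btl-based variable_test_bit() otherwise. Below is a minimal user-space sketch (editor's addition, not part of the header) of the same indexing arithmetic: nr >> 5 selects the 32-bit word, nr & 31 the bit within it. Names are illustrative only.

#include <stdio.h>
#include <stdint.h>

static int sketch_test_bit(int nr, const uint32_t *addr)
{
	/* same arithmetic as constant_test_bit() above */
	return ((UINT32_C(1) << (nr & 31)) & addr[nr >> 5]) != 0;
}

int main(void)
{
	uint32_t bitmap[2] = { 0, 0 };	/* a 64-bit bitmap, all clear */

	bitmap[1] |= UINT32_C(1) << 3;	/* set bit 35 = word 1, bit 3 */
	printf("bit 35: %d, bit 36: %d\n",
	       sketch_test_bit(35, bitmap),	/* prints 1 */
	       sketch_test_bit(36, bitmap));	/* prints 0 */
	return 0;
}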
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
deleted file mode 100644
index ed8affbf96cb..000000000000
--- a/include/asm-i386/boot.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _ASM_BOOT_H
2#define _ASM_BOOT_H
3
4/* Don't touch these, unless you really know what you're doing. */
5#define DEF_INITSEG 0x9000
6#define DEF_SYSSEG 0x1000
7#define DEF_SETUPSEG 0x9020
8#define DEF_SYSSIZE 0x7F00
9
10/* Internal svga startup constants */
11#define NORMAL_VGA 0xffff /* 80x25 mode */
12#define EXTENDED_VGA 0xfffe /* 80x50 mode */
13#define ASK_VGA 0xfffd /* ask for it at bootup */
14
15/* Physical address where kernel should be loaded. */
16#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
17 + (CONFIG_PHYSICAL_ALIGN - 1)) \
18 & ~(CONFIG_PHYSICAL_ALIGN - 1))
19
20#endif /* _ASM_BOOT_H */
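
LOAD_PHYSICAL_ADDR above rounds CONFIG_PHYSICAL_START up to the next multiple of CONFIG_PHYSICAL_ALIGN (a power of two). A standalone sketch of the arithmetic (editor's addition); the sample values are illustrative, not taken from any particular .config:

#include <stdio.h>

#define ROUND_UP(x, align)	(((x) + (align) - 1) & ~((align) - 1))

int main(void)
{
	unsigned long start = 0x123456;	/* hypothetical CONFIG_PHYSICAL_START */
	unsigned long align = 0x100000;	/* hypothetical CONFIG_PHYSICAL_ALIGN (1 MiB) */

	printf("load addr: %#lx\n", ROUND_UP(start, align));	/* prints 0x200000 */
	return 0;
}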
diff --git a/include/asm-i386/bootparam.h b/include/asm-i386/bootparam.h
deleted file mode 100644
index b91b01783e4b..000000000000
--- a/include/asm-i386/bootparam.h
+++ /dev/null
@@ -1,86 +0,0 @@
1#ifndef _ASM_BOOTPARAM_H
2#define _ASM_BOOTPARAM_H
3
4#include <linux/types.h>
5#include <linux/screen_info.h>
6#include <linux/apm_bios.h>
7#include <linux/edd.h>
8#include <asm/e820.h>
9#include <asm/ist.h>
10#include <video/edid.h>
11
12struct setup_header {
13 u8 setup_sects;
14 u16 root_flags;
15 u32 syssize;
16 u16 ram_size;
17 u16 vid_mode;
18 u16 root_dev;
19 u16 boot_flag;
20 u16 jump;
21 u32 header;
22 u16 version;
23 u32 realmode_swtch;
24 u16 start_sys;
25 u16 kernel_version;
26 u8 type_of_loader;
27 u8 loadflags;
28#define LOADED_HIGH 0x01
29#define CAN_USE_HEAP 0x80
30 u16 setup_move_size;
31 u32 code32_start;
32 u32 ramdisk_image;
33 u32 ramdisk_size;
34 u32 bootsect_kludge;
35 u16 heap_end_ptr;
36 u16 _pad1;
37 u32 cmd_line_ptr;
38 u32 initrd_addr_max;
39 u32 kernel_alignment;
40 u8 relocatable_kernel;
41} __attribute__((packed));
42
43struct sys_desc_table {
44 u16 length;
45 u8 table[14];
46};
47
48struct efi_info {
49 u32 _pad1;
50 u32 efi_systab;
51 u32 efi_memdesc_size;
52 u32 efi_memdesc_version;
53 u32 efi_memmap;
54 u32 efi_memmap_size;
55 u32 _pad2[2];
56};
57
58/* The so-called "zeropage" */
59struct boot_params {
60 struct screen_info screen_info; /* 0x000 */
61 struct apm_bios_info apm_bios_info; /* 0x040 */
62 u8 _pad2[12]; /* 0x054 */
63 struct ist_info ist_info; /* 0x060 */
64 u8 _pad3[16]; /* 0x070 */
65 u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
66 u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
67 struct sys_desc_table sys_desc_table; /* 0x0a0 */
68 u8 _pad4[144]; /* 0x0b0 */
69 struct edid_info edid_info; /* 0x140 */
70 struct efi_info efi_info; /* 0x1c0 */
71 u32 alt_mem_k; /* 0x1e0 */
72 u32 scratch; /* Scratch field! */ /* 0x1e4 */
73 u8 e820_entries; /* 0x1e8 */
74 u8 eddbuf_entries; /* 0x1e9 */
75 u8 edd_mbr_sig_buf_entries; /* 0x1ea */
76 u8 _pad6[6]; /* 0x1eb */
77 struct setup_header hdr; /* setup header */ /* 0x1f1 */
78 u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
79 u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
80 struct e820entry e820_map[E820MAX]; /* 0x2d0 */
81 u8 _pad8[48]; /* 0xcd0 */
82 struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
83 u8 _pad9[276]; /* 0xeec */
84} __attribute__((packed));
85
86#endif /* _ASM_BOOTPARAM_H */
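
The hex comments in struct boot_params above document fixed byte offsets into the zeropage. Such comments can be kept honest with compile-time offset checks; a sketch using a stand-in struct (editor's addition, not the real boot_params):

#include <stddef.h>

struct zeropage_sketch {
	unsigned char screen_info[0x40];	/* offset 0x000, stand-in field */
	unsigned int apm_marker;		/* offset 0x040 */
} __attribute__((packed));

/* Compilation fails if the documented offset drifts from the layout. */
_Static_assert(offsetof(struct zeropage_sketch, apm_marker) == 0x40,
	       "offset comment is stale");

int main(void) { return 0; }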
diff --git a/include/asm-i386/bug.h b/include/asm-i386/bug.h
deleted file mode 100644
index b0fd78ca2619..000000000000
--- a/include/asm-i386/bug.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef _I386_BUG_H
2#define _I386_BUG_H
3
4
5/*
6 * Tell the user there is some problem.
 7 * The offending file and line are encoded in the __bug_table section.
8 */
9
10#ifdef CONFIG_BUG
11#define HAVE_ARCH_BUG
12
13#ifdef CONFIG_DEBUG_BUGVERBOSE
14#define BUG() \
15 do { \
16 asm volatile("1:\tud2\n" \
17 ".pushsection __bug_table,\"a\"\n" \
18 "2:\t.long 1b, %c0\n" \
19 "\t.word %c1, 0\n" \
20 "\t.org 2b+%c2\n" \
21 ".popsection" \
22 : : "i" (__FILE__), "i" (__LINE__), \
23 "i" (sizeof(struct bug_entry))); \
24 for(;;) ; \
25 } while(0)
26
27#else
28#define BUG() \
29 do { \
30 asm volatile("ud2"); \
31 for(;;) ; \
32 } while(0)
33#endif
34#endif
35
36#include <asm-generic/bug.h>
37#endif
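
The BUG() asm above emits one record per call site into the __bug_table section: the address of the ud2 (.long 1b), the file pointer (%c0), the line number (.word %c1), a zero flags word, and .org padding up to sizeof(struct bug_entry). The layout it targets, shown as a C sketch (editor's addition; see the asm-generic/bug.h of the same era for the real definition):

struct bug_entry_sketch {
	unsigned long	bug_addr;	/* address of the ud2, i.e. label 1b */
	const char	*file;		/* %c0: __FILE__ */
	unsigned short	line;		/* %c1: __LINE__ */
	unsigned short	flags;		/* the trailing 0 in the .word */
};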
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
deleted file mode 100644
index d28979ff73be..000000000000
--- a/include/asm-i386/bugs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * This is included by init/main.c to check for architecture-dependent bugs.
3 *
4 * Needs:
5 * void check_bugs(void);
6 */
 7#ifndef _ASM_I386_BUGS_H
 8#define _ASM_I386_BUGS_H
9
10void check_bugs(void);
11
12#endif /* _ASM_I386_BUGS_H */
diff --git a/include/asm-i386/byteorder.h b/include/asm-i386/byteorder.h
deleted file mode 100644
index a45470a8b74a..000000000000
--- a/include/asm-i386/byteorder.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _I386_BYTEORDER_H
2#define _I386_BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8
9/* For avoiding bswap on i386 */
10#ifdef __KERNEL__
11#endif
12
13static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
14{
15#ifdef CONFIG_X86_BSWAP
16 __asm__("bswap %0" : "=r" (x) : "0" (x));
17#else
18 __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
19 "rorl $16,%0\n\t" /* swap words */
20 "xchgb %b0,%h0" /* swap higher bytes */
21 :"=q" (x)
22 : "0" (x));
23#endif
24 return x;
25}
26
27static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
28{
29 union {
30 struct { __u32 a,b; } s;
31 __u64 u;
32 } v;
33 v.u = val;
34#ifdef CONFIG_X86_BSWAP
35 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
36 : "=r" (v.s.a), "=r" (v.s.b)
37 : "0" (v.s.a), "1" (v.s.b));
38#else
39 v.s.a = ___arch__swab32(v.s.a);
40 v.s.b = ___arch__swab32(v.s.b);
41 asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
42#endif
43 return v.u;
44}
45
46/* Do not define swab16.  GCC is smart enough to recognize the C version and
47   convert it into a rotation or exchange. */
48
49#define __arch__swab64(x) ___arch__swab64(x)
50#define __arch__swab32(x) ___arch__swab32(x)
51
52#define __BYTEORDER_HAS_U64__
53
54#endif /* __GNUC__ */
55
56#include <linux/byteorder/little_endian.h>
57
58#endif /* _I386_BYTEORDER_H */
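
___arch__swab32() above has two asm paths (bswap on newer CPUs, xchg/rorl on plain i386). A portable C equivalent, useful as a cross-check (editor's addition); modern GCC usually recognizes this pattern and emits a single bswap anyway:

#include <stdio.h>

static unsigned int swab32_c(unsigned int x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	printf("%#x\n", swab32_c(0x12345678));	/* prints 0x78563412 */
	return 0;
}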
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
deleted file mode 100644
index 57c62f414158..000000000000
--- a/include/asm-i386/cache.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * include/asm-i386/cache.h
3 */
4#ifndef __ARCH_I386_CACHE_H
5#define __ARCH_I386_CACHE_H
6
7
8/* L1 cache line size */
9#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11
12#define __read_mostly __attribute__((__section__(".data.read_mostly")))
13
14#endif
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
deleted file mode 100644
index 74e03c8f2e51..000000000000
--- a/include/asm-i386/cacheflush.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef _I386_CACHEFLUSH_H
2#define _I386_CACHEFLUSH_H
3
4/* Keep includes the same across arches. */
5#include <linux/mm.h>
6
 7/* Caches aren't brain-dead on Intel. */
8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
11#define flush_cache_range(vma, start, end) do { } while (0)
12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
13#define flush_dcache_page(page) do { } while (0)
14#define flush_dcache_mmap_lock(mapping) do { } while (0)
15#define flush_dcache_mmap_unlock(mapping) do { } while (0)
16#define flush_icache_range(start, end) do { } while (0)
17#define flush_icache_page(vma,pg) do { } while (0)
18#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
19#define flush_cache_vmap(start, end) do { } while (0)
20#define flush_cache_vunmap(start, end) do { } while (0)
21
22#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 memcpy(dst, src, len)
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy(dst, src, len)
26
27void global_flush_tlb(void);
28int change_page_attr(struct page *page, int numpages, pgprot_t prot);
29
30#ifdef CONFIG_DEBUG_PAGEALLOC
31/* internal debugging function */
32void kernel_map_pages(struct page *page, int numpages, int enable);
33#endif
34
35#ifdef CONFIG_DEBUG_RODATA
36void mark_rodata_ro(void);
37#endif
38
39#endif /* _I386_CACHEFLUSH_H */
diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h
deleted file mode 100644
index 75194abbe8ee..000000000000
--- a/include/asm-i386/checksum.h
+++ /dev/null
@@ -1,191 +0,0 @@
1#ifndef _I386_CHECKSUM_H
2#define _I386_CHECKSUM_H
3
4#include <linux/in6.h>
5
6#include <asm/uaccess.h>
7
8/*
9 * computes the checksum of a memory block at buff, length len,
10 * and adds in "sum" (32-bit)
11 *
12 * returns a 32-bit number suitable for feeding into itself
13 * or csum_tcpudp_magic
14 *
15 * this function must be called with even lengths, except
16 * for the last fragment, which may be odd
17 *
18 * it's best to have buff aligned on a 32-bit boundary
19 */
20asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
21
22/*
23 * the same as csum_partial, but copies from src while it
24 * checksums, and handles user-space pointer exceptions correctly, when needed.
25 *
26 * here it is even more important to align src and dst on a 32-bit (or,
27 * even better, a 64-bit) boundary
28 */
29
30asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
31 int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
32
33/*
34 * Note: when you get a NULL pointer exception here this means someone
35 * passed in an incorrect kernel address to one of these functions.
36 *
37 * If you use these functions directly please don't forget the
38 * access_ok().
39 */
40static __inline__
41__wsum csum_partial_copy_nocheck (const void *src, void *dst,
42 int len, __wsum sum)
43{
44 return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
45}
46
47static __inline__
48__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
49 int len, __wsum sum, int *err_ptr)
50{
51 might_sleep();
52 return csum_partial_copy_generic((__force void *)src, dst,
53 len, sum, err_ptr, NULL);
54}
55
56/*
57 * This is a version of ip_compute_csum() optimized for IP headers,
58 * which always checksum on 4 octet boundaries.
59 *
60 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for Linux by
61 * Arnt Gulbrandsen.
62 */
63static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
64{
65 unsigned int sum;
66
67 __asm__ __volatile__(
68 "movl (%1), %0 ;\n"
69 "subl $4, %2 ;\n"
70 "jbe 2f ;\n"
71 "addl 4(%1), %0 ;\n"
72 "adcl 8(%1), %0 ;\n"
73 "adcl 12(%1), %0 ;\n"
74"1: adcl 16(%1), %0 ;\n"
75 "lea 4(%1), %1 ;\n"
76 "decl %2 ;\n"
77 "jne 1b ;\n"
78 "adcl $0, %0 ;\n"
79 "movl %0, %2 ;\n"
80 "shrl $16, %0 ;\n"
81 "addw %w2, %w0 ;\n"
82 "adcl $0, %0 ;\n"
83 "notl %0 ;\n"
84"2: ;\n"
85 /* Since the input registers which are loaded with iph and ihl
86 are modified, we must also specify them as outputs, or gcc
87 will assume they contain their original values. */
88 : "=r" (sum), "=r" (iph), "=r" (ihl)
89 : "1" (iph), "2" (ihl)
90 : "memory");
91 return (__force __sum16)sum;
92}
93
94/*
95 * Fold a partial checksum
96 */
97
98static inline __sum16 csum_fold(__wsum sum)
99{
100 __asm__(
101 "addl %1, %0 ;\n"
102 "adcl $0xffff, %0 ;\n"
103 : "=r" (sum)
104 : "r" ((__force u32)sum << 16),
105 "0" ((__force u32)sum & 0xffff0000)
106 );
107 return (__force __sum16)(~(__force u32)sum >> 16);
108}
109
110static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
111 unsigned short len,
112 unsigned short proto,
113 __wsum sum)
114{
115 __asm__(
116 "addl %1, %0 ;\n"
117 "adcl %2, %0 ;\n"
118 "adcl %3, %0 ;\n"
119 "adcl $0, %0 ;\n"
120 : "=r" (sum)
121 : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
122 return sum;
123}
124
125/*
126 * computes the checksum of the TCP/UDP pseudo-header
127 * returns a 16-bit checksum, already complemented
128 */
129static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
130 unsigned short len,
131 unsigned short proto,
132 __wsum sum)
133{
134 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
135}
136
137/*
138 * this routine is used for miscellaneous IP-like checksums, mainly
139 * in icmp.c
140 */
141
142static inline __sum16 ip_compute_csum(const void *buff, int len)
143{
144 return csum_fold (csum_partial(buff, len, 0));
145}
146
147#define _HAVE_ARCH_IPV6_CSUM
148static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
149 const struct in6_addr *daddr,
150 __u32 len, unsigned short proto,
151 __wsum sum)
152{
153 __asm__(
154 "addl 0(%1), %0 ;\n"
155 "adcl 4(%1), %0 ;\n"
156 "adcl 8(%1), %0 ;\n"
157 "adcl 12(%1), %0 ;\n"
158 "adcl 0(%2), %0 ;\n"
159 "adcl 4(%2), %0 ;\n"
160 "adcl 8(%2), %0 ;\n"
161 "adcl 12(%2), %0 ;\n"
162 "adcl %3, %0 ;\n"
163 "adcl %4, %0 ;\n"
164 "adcl $0, %0 ;\n"
165 : "=&r" (sum)
166 : "r" (saddr), "r" (daddr),
167 "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
168
169 return csum_fold(sum);
170}
171
172/*
173 * Copy and checksum to user
174 */
175#define HAVE_CSUM_COPY_USER
176static __inline__ __wsum csum_and_copy_to_user(const void *src,
177 void __user *dst,
178 int len, __wsum sum,
179 int *err_ptr)
180{
181 might_sleep();
182 if (access_ok(VERIFY_WRITE, dst, len))
183 return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr);
184
185 if (len)
186 *err_ptr = -EFAULT;
187
188 return (__force __wsum)-1; /* invalid checksum */
189}
190
191#endif
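
csum_fold() above compresses the 32-bit running sum into a 16-bit ones'-complement checksum by adding the halves with carry. The equivalent carry-propagating C, as a sketch (editor's addition):

#include <stdio.h>

static unsigned short csum_fold_c(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold any resulting carry */
	return (unsigned short)~sum;
}

int main(void)
{
	/* 0x1a2b + 0x3c4d = 0x5678, complemented -> 0xa987 */
	printf("%#x\n", csum_fold_c(0x1a2b3c4d));
	return 0;
}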
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
deleted file mode 100644
index f86ede28f6dc..000000000000
--- a/include/asm-i386/cmpxchg.h
+++ /dev/null
@@ -1,289 +0,0 @@
1#ifndef __ASM_CMPXCHG_H
2#define __ASM_CMPXCHG_H
3
4#include <linux/bitops.h> /* for LOCK_PREFIX */
5
6/*
 7 * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
 8 * need to test for the feature in boot_cpu_data.
9 */
10
11#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
12
13struct __xchg_dummy { unsigned long a[100]; };
14#define __xg(x) ((struct __xchg_dummy *)(x))
15
16/*
17 * The semantics of CMPXCHG8B are a bit strange; this is why
18 * there is a loop and the loading of %%eax and %%edx has to
19 * be inside.  This inlines well in most cases; the cached
20 * cost is around ~38 cycles.  (In the future we might want
21 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
22 * might have an implicit FPU-save as a cost, so it's not
23 * clear which path to go.)
24 *
25 * cmpxchg8b must be used with the lock prefix here to allow
26 * the instruction to be executed atomically; see page 3-102
27 * of the instruction set reference 24319102.pdf.  We need
28 * the reader side to see the coherent 64-bit value.
29 */
30static inline void __set_64bit (unsigned long long * ptr,
31 unsigned int low, unsigned int high)
32{
33 __asm__ __volatile__ (
34 "\n1:\t"
35 "movl (%0), %%eax\n\t"
36 "movl 4(%0), %%edx\n\t"
37 LOCK_PREFIX "cmpxchg8b (%0)\n\t"
38 "jnz 1b"
39 : /* no outputs */
40 : "D"(ptr),
41 "b"(low),
42 "c"(high)
43 : "ax","dx","memory");
44}
45
46static inline void __set_64bit_constant (unsigned long long *ptr,
47 unsigned long long value)
48{
49 __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
50}
51#define ll_low(x) *(((unsigned int*)&(x))+0)
52#define ll_high(x) *(((unsigned int*)&(x))+1)
53
54static inline void __set_64bit_var (unsigned long long *ptr,
55 unsigned long long value)
56{
57 __set_64bit(ptr,ll_low(value), ll_high(value));
58}
59
60#define set_64bit(ptr,value) \
61(__builtin_constant_p(value) ? \
62 __set_64bit_constant(ptr, value) : \
63 __set_64bit_var(ptr, value) )
64
65#define _set_64bit(ptr,value) \
66(__builtin_constant_p(value) ? \
67 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
68 __set_64bit(ptr, ll_low(value), ll_high(value)) )
69
70/*
71 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
72 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
73 * but strictly the prototype is wrong: *ptr is an output argument. --ANK
74 */
75static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
76{
77 switch (size) {
78 case 1:
79 __asm__ __volatile__("xchgb %b0,%1"
80 :"=q" (x)
81 :"m" (*__xg(ptr)), "0" (x)
82 :"memory");
83 break;
84 case 2:
85 __asm__ __volatile__("xchgw %w0,%1"
86 :"=r" (x)
87 :"m" (*__xg(ptr)), "0" (x)
88 :"memory");
89 break;
90 case 4:
91 __asm__ __volatile__("xchgl %0,%1"
92 :"=r" (x)
93 :"m" (*__xg(ptr)), "0" (x)
94 :"memory");
95 break;
96 }
97 return x;
98}
99
100/*
101 * Atomic compare and exchange. Compare OLD with MEM, if identical,
102 * store NEW in MEM. Return the initial value in MEM. Success is
103 * indicated by comparing RETURN with OLD.
104 */
105
106#ifdef CONFIG_X86_CMPXCHG
107#define __HAVE_ARCH_CMPXCHG 1
108#define cmpxchg(ptr,o,n)\
109 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
110 (unsigned long)(n),sizeof(*(ptr))))
111#define sync_cmpxchg(ptr,o,n)\
112 ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
113 (unsigned long)(n),sizeof(*(ptr))))
114#define cmpxchg_local(ptr,o,n)\
115 ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
116 (unsigned long)(n),sizeof(*(ptr))))
117#endif
118
119static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
120 unsigned long new, int size)
121{
122 unsigned long prev;
123 switch (size) {
124 case 1:
125 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
126 : "=a"(prev)
127 : "q"(new), "m"(*__xg(ptr)), "0"(old)
128 : "memory");
129 return prev;
130 case 2:
131 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
132 : "=a"(prev)
133 : "r"(new), "m"(*__xg(ptr)), "0"(old)
134 : "memory");
135 return prev;
136 case 4:
137 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
138 : "=a"(prev)
139 : "r"(new), "m"(*__xg(ptr)), "0"(old)
140 : "memory");
141 return prev;
142 }
143 return old;
144}
145
146/*
147 * Always use locked operations when touching memory shared with a
148 * hypervisor, since the system may be SMP even if the guest kernel
149 * isn't.
150 */
151static inline unsigned long __sync_cmpxchg(volatile void *ptr,
152 unsigned long old,
153 unsigned long new, int size)
154{
155 unsigned long prev;
156 switch (size) {
157 case 1:
158 __asm__ __volatile__("lock; cmpxchgb %b1,%2"
159 : "=a"(prev)
160 : "q"(new), "m"(*__xg(ptr)), "0"(old)
161 : "memory");
162 return prev;
163 case 2:
164 __asm__ __volatile__("lock; cmpxchgw %w1,%2"
165 : "=a"(prev)
166 : "r"(new), "m"(*__xg(ptr)), "0"(old)
167 : "memory");
168 return prev;
169 case 4:
170 __asm__ __volatile__("lock; cmpxchgl %1,%2"
171 : "=a"(prev)
172 : "r"(new), "m"(*__xg(ptr)), "0"(old)
173 : "memory");
174 return prev;
175 }
176 return old;
177}
178
179static inline unsigned long __cmpxchg_local(volatile void *ptr,
180 unsigned long old, unsigned long new, int size)
181{
182 unsigned long prev;
183 switch (size) {
184 case 1:
185 __asm__ __volatile__("cmpxchgb %b1,%2"
186 : "=a"(prev)
187 : "q"(new), "m"(*__xg(ptr)), "0"(old)
188 : "memory");
189 return prev;
190 case 2:
191 __asm__ __volatile__("cmpxchgw %w1,%2"
192 : "=a"(prev)
193 : "r"(new), "m"(*__xg(ptr)), "0"(old)
194 : "memory");
195 return prev;
196 case 4:
197 __asm__ __volatile__("cmpxchgl %1,%2"
198 : "=a"(prev)
199 : "r"(new), "m"(*__xg(ptr)), "0"(old)
200 : "memory");
201 return prev;
202 }
203 return old;
204}
205
206#ifndef CONFIG_X86_CMPXCHG
207/*
208 * When building a kernel capable of running on an 80386, it may be
209 * necessary to simulate cmpxchg, since that CPU lacks the instruction.
210 * For that purpose we define a function for each of the sizes we support.
211 */
212
213extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
214extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
215extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
216
217static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
218 unsigned long new, int size)
219{
220 switch (size) {
221 case 1:
222 return cmpxchg_386_u8(ptr, old, new);
223 case 2:
224 return cmpxchg_386_u16(ptr, old, new);
225 case 4:
226 return cmpxchg_386_u32(ptr, old, new);
227 }
228 return old;
229}
230
231#define cmpxchg(ptr,o,n) \
232({ \
233 __typeof__(*(ptr)) __ret; \
234 if (likely(boot_cpu_data.x86 > 3)) \
235 __ret = __cmpxchg((ptr), (unsigned long)(o), \
236 (unsigned long)(n), sizeof(*(ptr))); \
237 else \
238 __ret = cmpxchg_386((ptr), (unsigned long)(o), \
239 (unsigned long)(n), sizeof(*(ptr))); \
240 __ret; \
241})
242#define cmpxchg_local(ptr,o,n) \
243({ \
244 __typeof__(*(ptr)) __ret; \
245 if (likely(boot_cpu_data.x86 > 3)) \
246 __ret = __cmpxchg_local((ptr), (unsigned long)(o), \
247 (unsigned long)(n), sizeof(*(ptr))); \
248 else \
249 __ret = cmpxchg_386((ptr), (unsigned long)(o), \
250 (unsigned long)(n), sizeof(*(ptr))); \
251 __ret; \
252})
253#endif
254
255static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
256 unsigned long long new)
257{
258 unsigned long long prev;
259 __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
260 : "=A"(prev)
261 : "b"((unsigned long)new),
262 "c"((unsigned long)(new >> 32)),
263 "m"(*__xg(ptr)),
264 "0"(old)
265 : "memory");
266 return prev;
267}
268
269static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
270 unsigned long long old, unsigned long long new)
271{
272 unsigned long long prev;
273 __asm__ __volatile__("cmpxchg8b %3"
274 : "=A"(prev)
275 : "b"((unsigned long)new),
276 "c"((unsigned long)(new >> 32)),
277 "m"(*__xg(ptr)),
278 "0"(old)
279 : "memory");
280 return prev;
281}
282
283#define cmpxchg64(ptr,o,n)\
284 ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
285 (unsigned long long)(n)))
286#define cmpxchg64_local(ptr,o,n)\
287 ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
288 (unsigned long long)(n)))
289#endif
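
The usual consumer of cmpxchg() above is a read-modify-write retry loop. A user-space sketch of that pattern (editor's addition), with GCC's __sync_val_compare_and_swap() standing in for the kernel primitive:

#include <stdio.h>

static int atomic_add_return_sketch(volatile int *p, int delta)
{
	int old, new;

	do {
		old = *p;
		new = old + delta;
		/* retry if another thread changed *p in the meantime */
	} while (__sync_val_compare_and_swap(p, old, new) != old);
	return new;
}

int main(void)
{
	volatile int v = 40;

	printf("%d\n", atomic_add_return_sketch(&v, 2));	/* prints 42 */
	return 0;
}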
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
deleted file mode 100644
index 9d914e1e4aad..000000000000
--- a/include/asm-i386/cpu.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASM_I386_CPU_H_
2#define _ASM_I386_CPU_H_
3
4#include <linux/device.h>
5#include <linux/cpu.h>
6#include <linux/topology.h>
7#include <linux/nodemask.h>
8#include <linux/percpu.h>
9
10struct i386_cpu {
11 struct cpu cpu;
12};
13extern int arch_register_cpu(int num);
14#ifdef CONFIG_HOTPLUG_CPU
15extern void arch_unregister_cpu(int);
16extern int enable_cpu_hotplug;
17#else
18#define enable_cpu_hotplug 0
19#endif
20
21DECLARE_PER_CPU(int, cpu_state);
22#endif /* _ASM_I386_CPU_H_ */
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
deleted file mode 100644
index 7b3aa28ebc6e..000000000000
--- a/include/asm-i386/cpufeature.h
+++ /dev/null
@@ -1,175 +0,0 @@
1/*
2 * cpufeature.h
3 *
4 * Defines x86 CPU feature bits
5 */
6
7#ifndef __ASM_I386_CPUFEATURE_H
8#define __ASM_I386_CPUFEATURE_H
9
10#ifndef __ASSEMBLY__
11#include <linux/bitops.h>
12#endif
13#include <asm/required-features.h>
14
15#define NCAPINTS 8 /* N 32-bit words worth of info */
16
17/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
18#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
19#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
20#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
21#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
22#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
23#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
24#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
25#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
26#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
27#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
28#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
29#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
30#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
31#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
32#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
33#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
34#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
35#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
36#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
37#define X86_FEATURE_DS (0*32+21) /* Debug Store */
38#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
39#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
40#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
41 /* of FPU context), and CR4.OSFXSR available */
42#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
43#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
44#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
45#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
46#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
47#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
48
49/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
50/* Don't duplicate feature flags which are redundant with Intel! */
51#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
52#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
53#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
54#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
55#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
56#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
57#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
58#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
59
60/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
61#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
62#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
63#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
64
65/* Other features, Linux-defined mapping, word 3 */
66/* This range is used for feature bits which conflict or are synthesized */
67#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
68#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
69#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
70#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
71/* cpu types for specific tunings: */
72#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
73#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
74#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
75#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
76#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
77#define X86_FEATURE_UP (3*32+ 9) /* SMP kernel running on UP */
78#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FDP */
79#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
80#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
81#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
82/* 14 free */
83#define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */
84#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
85
86/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
87#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
88#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
89#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
90#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
91#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
92#define X86_FEATURE_CID (4*32+10) /* Context ID */
93#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
94#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
95
96/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
97#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
98#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
99#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
100#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
101#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
102#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
103#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
104#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
105#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
106#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
107
108/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
109#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
110#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
111
112/*
113 * Auxiliary flags: Linux defined - For features scattered in various
114 * CPUID levels like 0x6, 0xA, etc.
115 */
116#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
117
118#define cpu_has(c, bit) \
119 (__builtin_constant_p(bit) && \
120 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
121 (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
122 (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
123 (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
124 (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
125 (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
126 (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
127 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
128 ? 1 : \
129 test_bit(bit, (c)->x86_capability))
130#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
131
132#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
133#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME)
134#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
135#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
136#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
137#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE)
138#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
139#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
140#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
141#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
142#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
143#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
144#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
145#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
146#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
147#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
148#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
149#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
150#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR)
151#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR)
152#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
153#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
154#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
155#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
156#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
157#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
158#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
159#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
160#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
161#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
162#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
163#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
164#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
165#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
166#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
167
168#endif /* __ASM_I386_CPUFEATURE_H */
169
170/*
171 * Local Variables:
172 * mode:c
173 * comment-column:42
174 * End:
175 */
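
cpu_has() above folds to a compile-time 1 whenever the queried bit is covered by the REQUIRED_MASK* words, so the dynamic test_bit() path vanishes entirely. The same __builtin_constant_p() dispatch in miniature (editor's sketch; demo names only, not the kernel macro):

#include <stdio.h>

#define REQUIRED_MASK_DEMO 0x00000010UL	/* pretend bit 4 is always present */

#define demo_has(bits, bit)					\
	(__builtin_constant_p(bit) &&				\
	 ((1UL << (bit)) & REQUIRED_MASK_DEMO) ? 1 :		\
	 (((bits) >> (bit)) & 1))

int main(void)
{
	unsigned long caps = 0x1;	/* runtime capability word */

	/* bit 4 folds to constant 1; bit 1 is tested at runtime */
	printf("%d %d\n", demo_has(caps, 4), demo_has(caps, 1));	/* prints 1 0 */
	return 0;
}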
diff --git a/include/asm-i386/cputime.h b/include/asm-i386/cputime.h
deleted file mode 100644
index 398ed7cd171d..000000000000
--- a/include/asm-i386/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __I386_CPUTIME_H
2#define __I386_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __I386_CPUTIME_H */
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
deleted file mode 100644
index d35248539912..000000000000
--- a/include/asm-i386/current.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _I386_CURRENT_H
2#define _I386_CURRENT_H
3
4#include <linux/compiler.h>
5#include <asm/percpu.h>
6
7struct task_struct;
8
9DECLARE_PER_CPU(struct task_struct *, current_task);
10static __always_inline struct task_struct *get_current(void)
11{
12 return x86_read_percpu(current_task);
13}
14
15#define current get_current()
16
17#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-i386/debugreg.h b/include/asm-i386/debugreg.h
deleted file mode 100644
index f0b2b06ae0f7..000000000000
--- a/include/asm-i386/debugreg.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _I386_DEBUGREG_H
2#define _I386_DEBUGREG_H
3
4
 5/* Register numbers for the specific debug registers.
 6   Registers 0-3 contain the addresses we wish to trap on. */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */
20#define DR_TRAP3 (0x8) /* db3 */
21
22#define DR_STEP (0x4000) /* single-step */
23#define DR_SWITCH (0x8000) /* task switch */
24
25/* Now define a bunch of things for manipulating the control register.
26 The top two bytes of the control register consist of 4 fields of 4
27 bits - each field corresponds to one of the four debug registers,
28 and indicates what types of access we trap on, and how large the data
29 field is that we are looking at */
30
31#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
32#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
33
34#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
35#define DR_RW_WRITE (0x1)
36#define DR_RW_READ (0x3)
37
38#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
39#define DR_LEN_2 (0x4)
40#define DR_LEN_4 (0xC)
41
42/* The low byte of the control register determines which registers are
43   enabled.  There are 4 fields of two bits.  One bit is "local", meaning
44   that the processor will reset the bit after a task switch, and the other
45   is "global", meaning that we have to explicitly reset the bit.  With Linux,
46   you can use either one, since we explicitly zero the register when we enter
47   kernel mode. */
48
49#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
50#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
51#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
52
53#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
54#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
55
56/* The second byte of the control register has a few special things.
57   We can slow the instruction pipeline for instructions coming via the
58   GDT or the LDT if we want to.  I am not sure why this is an advantage. */
59
60#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
61#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
62#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
63
64#endif
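
Putting the DR_* fields above together: a 4-byte write watchpoint on debug register 0, locally enabled, composes a DR7 control value like this (editor's sketch; the numeric literals mirror the macros above):

#include <stdio.h>

int main(void)
{
	unsigned int dr7 = 0;
	int regnum = 0;

	/* type/length field for register n lives at bit 16 + 4*n */
	dr7 |= (0x1 /* DR_RW_WRITE */ | 0xC /* DR_LEN_4 */)
		<< (16 /* DR_CONTROL_SHIFT */ + 4 /* DR_CONTROL_SIZE */ * regnum);
	/* local-enable bit for register n sits at bit 2*n */
	dr7 |= 1u << (2 /* DR_ENABLE_SIZE */ * regnum);

	printf("dr7 = %#x\n", dr7);	/* prints 0xd0001 */
	return 0;
}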
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
deleted file mode 100644
index 9ae5e3782ed8..000000000000
--- a/include/asm-i386/delay.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _I386_DELAY_H
2#define _I386_DELAY_H
3
4/*
5 * Copyright (C) 1993 Linus Torvalds
6 *
7 * Delay routines calling functions in arch/i386/lib/delay.c
8 */
9
10/* Undefined functions to get compile-time errors */
11extern void __bad_udelay(void);
12extern void __bad_ndelay(void);
13
14extern void __udelay(unsigned long usecs);
15extern void __ndelay(unsigned long nsecs);
16extern void __const_udelay(unsigned long usecs);
17extern void __delay(unsigned long loops);
18
19/* 0x10c7 is 2**32 / 1000000 (rounded up) */
20#define udelay(n) (__builtin_constant_p(n) ? \
21 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
22 __udelay(n))
23
24/* 0x5 is 2**32 / 1000000000 (rounded up) */
25#define ndelay(n) (__builtin_constant_p(n) ? \
26 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
27 __ndelay(n))
28
29void use_tsc_delay(void);
30
31#endif /* defined(_I386_DELAY_H) */
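
The magic 0x10c7 in udelay() above is ceil(2^32 / 10^6): multiplying microseconds by it turns __const_udelay()'s argument into a 32.32 fixed-point fraction of a second. A one-line verification (editor's addition):

#include <stdio.h>

int main(void)
{
	unsigned long long c = (0x100000000ULL + 1000000 - 1) / 1000000;

	printf("%#llx\n", c);	/* prints 0x10c7 */
	return 0;
}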
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
deleted file mode 100644
index c547403f341d..000000000000
--- a/include/asm-i386/desc.h
+++ /dev/null
@@ -1,244 +0,0 @@
1#ifndef __ARCH_DESC_H
2#define __ARCH_DESC_H
3
4#include <asm/ldt.h>
5#include <asm/segment.h>
6
7#ifndef __ASSEMBLY__
8
9#include <linux/preempt.h>
10#include <linux/smp.h>
11#include <linux/percpu.h>
12
13#include <asm/mmu.h>
14
15struct Xgt_desc_struct {
16 unsigned short size;
17 unsigned long address __attribute__((packed));
18 unsigned short pad;
19} __attribute__ ((packed));
20
21struct gdt_page
22{
23 struct desc_struct gdt[GDT_ENTRIES];
24} __attribute__((aligned(PAGE_SIZE)));
25DECLARE_PER_CPU(struct gdt_page, gdt_page);
26
27static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
28{
29 return per_cpu(gdt_page, cpu).gdt;
30}
31
32extern struct Xgt_desc_struct idt_descr;
33extern struct desc_struct idt_table[];
34extern void set_intr_gate(unsigned int irq, void * addr);
35
36static inline void pack_descriptor(__u32 *a, __u32 *b,
37 unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
38{
39 *a = ((base & 0xffff) << 16) | (limit & 0xffff);
40 *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
41 (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
42}
43
44static inline void pack_gate(__u32 *a, __u32 *b,
45 unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
46{
47 *a = (seg << 16) | (base & 0xffff);
48 *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
49}
50
51#define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */
52#define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */
53#define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */
54#define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */
55#define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */
56#define DESCTYPE_DPL3 0x60 /* DPL-3 */
57#define DESCTYPE_S 0x10 /* !system */
58
59#ifdef CONFIG_PARAVIRT
60#include <asm/paravirt.h>
61#else
62#define load_TR_desc() native_load_tr_desc()
63#define load_gdt(dtr) native_load_gdt(dtr)
64#define load_idt(dtr) native_load_idt(dtr)
65#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
66#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
67
68#define store_gdt(dtr) native_store_gdt(dtr)
69#define store_idt(dtr) native_store_idt(dtr)
70#define store_tr(tr) (tr = native_store_tr())
71#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
72
73#define load_TLS(t, cpu) native_load_tls(t, cpu)
74#define set_ldt native_set_ldt
75
76#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
77#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
78#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
79#endif
80
81static inline void write_dt_entry(struct desc_struct *dt,
82 int entry, u32 entry_low, u32 entry_high)
83{
84 dt[entry].a = entry_low;
85 dt[entry].b = entry_high;
86}
87
88static inline void native_set_ldt(const void *addr, unsigned int entries)
89{
90 if (likely(entries == 0))
91 __asm__ __volatile__("lldt %w0"::"q" (0));
92 else {
93 unsigned cpu = smp_processor_id();
94 __u32 a, b;
95
96 pack_descriptor(&a, &b, (unsigned long)addr,
97 entries * sizeof(struct desc_struct) - 1,
98 DESCTYPE_LDT, 0);
99 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
100 __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
101 }
102}
103
104
105static inline void native_load_tr_desc(void)
106{
107 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
108}
109
110static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
111{
112 asm volatile("lgdt %0"::"m" (*dtr));
113}
114
115static inline void native_load_idt(const struct Xgt_desc_struct *dtr)
116{
117 asm volatile("lidt %0"::"m" (*dtr));
118}
119
120static inline void native_store_gdt(struct Xgt_desc_struct *dtr)
121{
122 asm ("sgdt %0":"=m" (*dtr));
123}
124
125static inline void native_store_idt(struct Xgt_desc_struct *dtr)
126{
127 asm ("sidt %0":"=m" (*dtr));
128}
129
130static inline unsigned long native_store_tr(void)
131{
132 unsigned long tr;
133 asm ("str %0":"=r" (tr));
134 return tr;
135}
136
137static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
138{
139 unsigned int i;
140 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
141
142 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
143 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
144}
145
146static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
147{
148 __u32 a, b;
149 pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
150 write_idt_entry(idt_table, gate, a, b);
151}
152
153static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
154{
155 __u32 a, b;
156 pack_descriptor(&a, &b, (unsigned long)addr,
157 offsetof(struct tss_struct, __cacheline_filler) - 1,
158 DESCTYPE_TSS, 0);
159 write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
160}
161
162
163#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
164
165#define LDT_entry_a(info) \
166 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
167
168#define LDT_entry_b(info) \
169 (((info)->base_addr & 0xff000000) | \
170 (((info)->base_addr & 0x00ff0000) >> 16) | \
171 ((info)->limit & 0xf0000) | \
172 (((info)->read_exec_only ^ 1) << 9) | \
173 ((info)->contents << 10) | \
174 (((info)->seg_not_present ^ 1) << 15) | \
175 ((info)->seg_32bit << 22) | \
176 ((info)->limit_in_pages << 23) | \
177 ((info)->useable << 20) | \
178 0x7000)
179
180#define LDT_empty(info) (\
181 (info)->base_addr == 0 && \
182 (info)->limit == 0 && \
183 (info)->contents == 0 && \
184 (info)->read_exec_only == 1 && \
185 (info)->seg_32bit == 0 && \
186 (info)->limit_in_pages == 0 && \
187 (info)->seg_not_present == 1 && \
188 (info)->useable == 0 )
189
190static inline void clear_LDT(void)
191{
192 set_ldt(NULL, 0);
193}
194
195/*
196 * load one particular LDT into the current CPU
197 */
198static inline void load_LDT_nolock(mm_context_t *pc)
199{
200 set_ldt(pc->ldt, pc->size);
201}
202
203static inline void load_LDT(mm_context_t *pc)
204{
205 preempt_disable();
206 load_LDT_nolock(pc);
207 preempt_enable();
208}
209
210static inline unsigned long get_desc_base(unsigned long *desc)
211{
212 unsigned long base;
213 base = ((desc[0] >> 16) & 0x0000ffff) |
214 ((desc[1] << 16) & 0x00ff0000) |
215 (desc[1] & 0xff000000);
216 return base;
217}
218
219#else /* __ASSEMBLY__ */
220
221/*
222 * GET_DESC_BASE reads the descriptor base of the specified segment.
223 *
224 * Args:
225 * idx - descriptor index
226 * gdt - GDT pointer
227 * base - 32bit register to which the base will be written
228 * lo_w - lo word of the "base" register
229 * lo_b - lo byte of the "base" register
230 * hi_b - hi byte of the low word of the "base" register
231 *
232 * Example:
233 * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
234 * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
235 */
236#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
237 movb idx*8+4(gdt), lo_b; \
238 movb idx*8+7(gdt), hi_b; \
239 shll $16, base; \
240 movw idx*8+2(gdt), lo_w;
241
242#endif /* !__ASSEMBLY__ */
243
244#endif
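
pack_descriptor() above scatters base and limit across the two descriptor words in the split layout the 386 defined; get_desc_base() performs the inverse. A round-trip sketch in plain C with hypothetical field values (editor's addition):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x12345678, limit = 0xabcde;
	unsigned int type = 0x89, flags = 0xc;	/* hypothetical 32-bit TSS, 4K granularity */
	unsigned int a, b;

	/* pack, as pack_descriptor() does */
	a = ((base & 0xffff) << 16) | (limit & 0xffff);
	b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
	    (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);

	/* recover the base, as get_desc_base() does */
	unsigned long rebase = ((a >> 16) & 0xffff) |
			       ((b << 16) & 0xff0000) |
			       (b & 0xff000000);

	printf("%#lx\n", rebase);	/* prints 0x12345678 */
	return 0;
}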
diff --git a/include/asm-i386/device.h b/include/asm-i386/device.h
deleted file mode 100644
index 849604c70e6b..000000000000
--- a/include/asm-i386/device.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#ifndef _ASM_I386_DEVICE_H
7#define _ASM_I386_DEVICE_H
8
9struct dev_archdata {
10#ifdef CONFIG_ACPI
11 void *acpi_handle;
12#endif
13};
14
15#endif /* _ASM_I386_DEVICE_H */
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h
deleted file mode 100644
index 438e980068bd..000000000000
--- a/include/asm-i386/div64.h
+++ /dev/null
@@ -1,52 +0,0 @@
1#ifndef __I386_DIV64
2#define __I386_DIV64
3
4#include <linux/types.h>
5
6/*
7 * do_div() is NOT a C function. It wants to return
8 * two values (the quotient and the remainder), but
9 * since that doesn't work very well in C, what it
10 * does is:
11 *
12 * - modifies the 64-bit dividend _in_place_
13 * - returns the 32-bit remainder
14 *
15 * This ends up being the most efficient "calling
16 * convention" on x86.
17 */
18#define do_div(n,base) ({ \
19 unsigned long __upper, __low, __high, __mod, __base; \
20 __base = (base); \
21 asm("":"=a" (__low), "=d" (__high):"A" (n)); \
22 __upper = __high; \
23 if (__high) { \
24 __upper = __high % (__base); \
25 __high = __high / (__base); \
26 } \
27 asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
28 asm("":"=A" (n):"a" (__low),"d" (__high)); \
29 __mod; \
30})
31
32/*
33 * (long)X = ((long long)divs) / (long)div
34 * (long)rem = ((long long)divs) % (long)div
35 *
36 * Warning: this will raise an exception if X overflows.
37 */
38#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
39
40static inline long
41div_ll_X_l_rem(long long divs, long div, long *rem)
42{
43 long dum2;
44 __asm__("divl %2":"=a"(dum2), "=d"(*rem)
45 : "rm"(div), "A"(divs));
46
47 return dum2;
48
49}
50
51extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
52#endif
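
do_div() above divides a 64-bit value in place and hands back the 32-bit remainder. A portable stand-in with the same calling convention (editor's sketch; GNU statement expression, illustrative name):

#include <stdio.h>

#define do_div_sketch(n, base) ({				\
	unsigned int __rem = (unsigned int)((n) % (base));	\
	(n) /= (base);						\
	__rem;							\
})

int main(void)
{
	unsigned long long ns = 1234567890123ULL;
	unsigned int rem = do_div_sketch(ns, 1000000000u);

	printf("%llu s + %u ns\n", ns, rem);	/* prints 1234 s + 567890123 ns */
	return 0;
}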
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
deleted file mode 100644
index f1d72d177f68..000000000000
--- a/include/asm-i386/dma-mapping.h
+++ /dev/null
@@ -1,186 +0,0 @@
1#ifndef _ASM_I386_DMA_MAPPING_H
2#define _ASM_I386_DMA_MAPPING_H
3
4#include <linux/mm.h>
5
6#include <asm/cache.h>
7#include <asm/io.h>
8#include <asm/scatterlist.h>
9#include <asm/bug.h>
10
11#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
12#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
13
14void *dma_alloc_coherent(struct device *dev, size_t size,
15 dma_addr_t *dma_handle, gfp_t flag);
16
17void dma_free_coherent(struct device *dev, size_t size,
18 void *vaddr, dma_addr_t dma_handle);
19
20static inline dma_addr_t
21dma_map_single(struct device *dev, void *ptr, size_t size,
22 enum dma_data_direction direction)
23{
24 BUG_ON(!valid_dma_direction(direction));
25 WARN_ON(size == 0);
26 flush_write_buffers();
27 return virt_to_phys(ptr);
28}
29
30static inline void
31dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
32 enum dma_data_direction direction)
33{
34 BUG_ON(!valid_dma_direction(direction));
35}
36
37static inline int
38dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
39 enum dma_data_direction direction)
40{
41 int i;
42
43 BUG_ON(!valid_dma_direction(direction));
44 WARN_ON(nents == 0 || sg[0].length == 0);
45
46 for (i = 0; i < nents; i++ ) {
47 BUG_ON(!sg[i].page);
48
49 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
50 }
51
52 flush_write_buffers();
53 return nents;
54}
55
56static inline dma_addr_t
57dma_map_page(struct device *dev, struct page *page, unsigned long offset,
58 size_t size, enum dma_data_direction direction)
59{
60 BUG_ON(!valid_dma_direction(direction));
61 return page_to_phys(page) + offset;
62}
63
64static inline void
65dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
66 enum dma_data_direction direction)
67{
68 BUG_ON(!valid_dma_direction(direction));
69}
70
71
72static inline void
73dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
74 enum dma_data_direction direction)
75{
76 BUG_ON(!valid_dma_direction(direction));
77}
78
79static inline void
80dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
81 enum dma_data_direction direction)
82{
83}
84
85static inline void
86dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
87 enum dma_data_direction direction)
88{
89 flush_write_buffers();
90}
91
92static inline void
93dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
94 unsigned long offset, size_t size,
95 enum dma_data_direction direction)
96{
97}
98
99static inline void
100dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
101 unsigned long offset, size_t size,
102 enum dma_data_direction direction)
103{
104 flush_write_buffers();
105}
106
107static inline void
108dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
109 enum dma_data_direction direction)
110{
111}
112
113static inline void
114dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
115 enum dma_data_direction direction)
116{
117 flush_write_buffers();
118}
119
120static inline int
121dma_mapping_error(dma_addr_t dma_addr)
122{
123 return 0;
124}
125
126extern int forbid_dac;
127
128static inline int
129dma_supported(struct device *dev, u64 mask)
130{
131 /*
132 * we fall back to GFP_DMA when the mask isn't all 1s,
133 * so we can't guarantee allocations that must be
134 * within a tighter range than GFP_DMA.
135 */
136 if(mask < 0x00ffffff)
137 return 0;
138
139 /* Work around chipset bugs */
140 if (forbid_dac > 0 && mask > 0xffffffffULL)
141 return 0;
142
143 return 1;
144}
145
146static inline int
147dma_set_mask(struct device *dev, u64 mask)
148{
149 if(!dev->dma_mask || !dma_supported(dev, mask))
150 return -EIO;
151
152 *dev->dma_mask = mask;
153
154 return 0;
155}
156
157static inline int
158dma_get_cache_alignment(void)
159{
160 /* no easy way to get cache size on all x86, so return the
161 * maximum possible, to be safe */
162 return (1 << INTERNODE_CACHE_SHIFT);
163}
164
165#define dma_is_consistent(d, h) (1)
166
167static inline void
168dma_cache_sync(struct device *dev, void *vaddr, size_t size,
169 enum dma_data_direction direction)
170{
171 flush_write_buffers();
172}
173
174#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
175extern int
176dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
177 dma_addr_t device_addr, size_t size, int flags);
178
179extern void
180dma_release_declared_memory(struct device *dev);
181
182extern void *
183dma_mark_declared_memory_occupied(struct device *dev,
184 dma_addr_t device_addr, size_t size);
185
186#endif
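
On this port dma_map_single() boils down to virt_to_phys() plus a write-buffer flush: i386 has no IOMMU and DMA is cache-coherent, so the bus address is just the physical address behind the linear kernel mapping. A user-space sketch of that address arithmetic (editor's addition; PAGE_OFFSET value is the i386 default, names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET_SKETCH 0xC0000000u	/* i386 default kernel mapping base */

static uint32_t virt_to_phys_sketch(uint32_t vaddr)
{
	return vaddr - PAGE_OFFSET_SKETCH;	/* linear kernel mapping */
}

int main(void)
{
	uint32_t kva = 0xC1234000u;	/* hypothetical kernel virtual address */

	printf("bus addr: %#x\n", virt_to_phys_sketch(kva));	/* prints 0x1234000 */
	return 0;
}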
diff --git a/include/asm-i386/dma.h b/include/asm-i386/dma.h
deleted file mode 100644
index d23aac8e1a50..000000000000
--- a/include/asm-i386/dma.h
+++ /dev/null
@@ -1,297 +0,0 @@
1/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992.
6 */
7
8#ifndef _ASM_DMA_H
9#define _ASM_DMA_H
10
11#include <linux/spinlock.h> /* And spinlocks */
12#include <asm/io.h> /* need byte IO */
13#include <linux/delay.h>
14
15
16#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
17#define dma_outb outb_p
18#else
19#define dma_outb outb
20#endif
21
22#define dma_inb inb
23
24/*
25 * NOTES about DMA transfers:
26 *
27 * controller 1: channels 0-3, byte operations, ports 00-1F
28 * controller 2: channels 4-7, word operations, ports C0-DF
29 *
30 * - ALL registers are 8 bits only, regardless of transfer size
31 * - channel 4 is not used - cascades 1 into 2.
32 * - channels 0-3 are byte - addresses/counts are for physical bytes
33 * - channels 5-7 are word - addresses/counts are for physical words
34 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
35 * - transfer count loaded to registers is 1 less than actual count
36 * - controller 2 offsets are all even (2x offsets for controller 1)
37 * - page registers for 5-7 don't use data bit 0, represent 128K pages
38 * - page registers for 0-3 use bit 0, represent 64K pages
39 *
40 * DMA transfers are limited to the lower 16MB of _physical_ memory.
41 * Note that addresses loaded into registers must be _physical_ addresses,
42 * not logical addresses (which may differ if paging is active).
43 *
44 * Address mapping for channels 0-3:
45 *
46 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
47 * | ... | | ... | | ... |
48 * | ... | | ... | | ... |
49 * | ... | | ... | | ... |
50 * P7 ... P0 A7 ... A0 A7 ... A0
51 * | Page | Addr MSB | Addr LSB | (DMA registers)
52 *
53 * Address mapping for channels 5-7:
54 *
55 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
56 * | ... | \ \ ... \ \ \ ... \ \
57 * | ... | \ \ ... \ \ \ ... \ (not used)
58 * | ... | \ \ ... \ \ \ ... \
59 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
60 * | Page | Addr MSB | Addr LSB | (DMA registers)
61 *
62 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
63 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
64 * the hardware level, so odd-byte transfers aren't possible).
65 *
66 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
67 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
68 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
69 *
70 */
71
72#define MAX_DMA_CHANNELS 8
73
74/* The maximum address that we can perform a DMA transfer to on this platform */
75#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
76
77/* 8237 DMA controllers */
78#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
79#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
80
81/* DMA controller registers */
82#define DMA1_CMD_REG 0x08 /* command register (w) */
83#define DMA1_STAT_REG 0x08 /* status register (r) */
84#define DMA1_REQ_REG 0x09 /* request register (w) */
85#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
86#define DMA1_MODE_REG 0x0B /* mode register (w) */
87#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
88#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
89#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
90#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
91#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
92
93#define DMA2_CMD_REG 0xD0 /* command register (w) */
94#define DMA2_STAT_REG 0xD0 /* status register (r) */
95#define DMA2_REQ_REG 0xD2 /* request register (w) */
96#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
97#define DMA2_MODE_REG 0xD6 /* mode register (w) */
98#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
99#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
100#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
101#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
102#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
103
104#define DMA_ADDR_0 0x00 /* DMA address registers */
105#define DMA_ADDR_1 0x02
106#define DMA_ADDR_2 0x04
107#define DMA_ADDR_3 0x06
108#define DMA_ADDR_4 0xC0
109#define DMA_ADDR_5 0xC4
110#define DMA_ADDR_6 0xC8
111#define DMA_ADDR_7 0xCC
112
113#define DMA_CNT_0 0x01 /* DMA count registers */
114#define DMA_CNT_1 0x03
115#define DMA_CNT_2 0x05
116#define DMA_CNT_3 0x07
117#define DMA_CNT_4 0xC2
118#define DMA_CNT_5 0xC6
119#define DMA_CNT_6 0xCA
120#define DMA_CNT_7 0xCE
121
122#define DMA_PAGE_0 0x87 /* DMA page registers */
123#define DMA_PAGE_1 0x83
124#define DMA_PAGE_2 0x81
125#define DMA_PAGE_3 0x82
126#define DMA_PAGE_5 0x8B
127#define DMA_PAGE_6 0x89
128#define DMA_PAGE_7 0x8A
129
130#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
131#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
132#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
133
134#define DMA_AUTOINIT 0x10
135
136
137extern spinlock_t dma_spin_lock;
138
139static __inline__ unsigned long claim_dma_lock(void)
140{
141 unsigned long flags;
142 spin_lock_irqsave(&dma_spin_lock, flags);
143 return flags;
144}
145
146static __inline__ void release_dma_lock(unsigned long flags)
147{
148 spin_unlock_irqrestore(&dma_spin_lock, flags);
149}
150
151/* enable/disable a specific DMA channel */
152static __inline__ void enable_dma(unsigned int dmanr)
153{
154 if (dmanr<=3)
155 dma_outb(dmanr, DMA1_MASK_REG);
156 else
157 dma_outb(dmanr & 3, DMA2_MASK_REG);
158}
159
160static __inline__ void disable_dma(unsigned int dmanr)
161{
162 if (dmanr<=3)
163 dma_outb(dmanr | 4, DMA1_MASK_REG);
164 else
165 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
166}
167
168/* Clear the 'DMA Pointer Flip Flop'.
169 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
170 * Use this once to initialize the FF to a known state.
171 * After that, keep track of it. :-)
172 * --- In order to do that, the DMA routines below should ---
173 * --- only be used while holding the DMA lock ! ---
174 */
175static __inline__ void clear_dma_ff(unsigned int dmanr)
176{
177 if (dmanr<=3)
178 dma_outb(0, DMA1_CLEAR_FF_REG);
179 else
180 dma_outb(0, DMA2_CLEAR_FF_REG);
181}
182
183/* set mode (above) for a specific DMA channel */
184static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
185{
186 if (dmanr<=3)
187 dma_outb(mode | dmanr, DMA1_MODE_REG);
188 else
189 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
190}
191
192/* Set only the page register bits of the transfer address.
193 * This is used for successive transfers when we know the contents of
194 * the lower 16 bits of the DMA current address register, but a 64k boundary
195 * may have been crossed.
196 */
197static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
198{
199 switch(dmanr) {
200 case 0:
201 dma_outb(pagenr, DMA_PAGE_0);
202 break;
203 case 1:
204 dma_outb(pagenr, DMA_PAGE_1);
205 break;
206 case 2:
207 dma_outb(pagenr, DMA_PAGE_2);
208 break;
209 case 3:
210 dma_outb(pagenr, DMA_PAGE_3);
211 break;
212 case 5:
213 dma_outb(pagenr & 0xfe, DMA_PAGE_5);
214 break;
215 case 6:
216 dma_outb(pagenr & 0xfe, DMA_PAGE_6);
217 break;
218 case 7:
219 dma_outb(pagenr & 0xfe, DMA_PAGE_7);
220 break;
221 }
222}
223
224
225/* Set transfer address & page bits for specific DMA channel.
226 * Assumes dma flipflop is clear.
227 */
228static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
229{
230 set_dma_page(dmanr, a>>16);
231 if (dmanr <= 3) {
232 dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
233 dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
234 } else {
235 dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
236 dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
237 }
238}
239
240
241/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
242 * a specific DMA channel.
243 * You must ensure the parameters are valid.
244 * NOTE: from a manual: "the number of transfers is one more
245 * than the initial word count"! This is taken into account.
246 * Assumes dma flip-flop is clear.
247 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
248 */
249static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
250{
251 count--;
252 if (dmanr <= 3) {
253 dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
254 dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
255 } else {
256 dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
257 dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
258 }
259}
260
261
262/* Get DMA residue count. After a DMA transfer, this
263 * should return zero. Reading this while a DMA transfer is
264 * still in progress will return unpredictable results.
265 * If called before the channel has been used, it may return 1.
266 * Otherwise, it returns the number of _bytes_ left to transfer.
267 *
268 * Assumes DMA flip-flop is clear.
269 */
270static __inline__ int get_dma_residue(unsigned int dmanr)
271{
272 unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
273 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
274
275 /* using short to get 16-bit wrap around */
276 unsigned short count;
277
278 count = 1 + dma_inb(io_port);
279 count += dma_inb(io_port) << 8;
280
281 return (dmanr<=3)? count : (count<<1);
282}
283
284
285/* These are in kernel/dma.c: */
286extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
287extern void free_dma(unsigned int dmanr); /* release it again */
288
289/* From PCI */
290
291#ifdef CONFIG_PCI
292extern int isa_dma_bridge_buggy;
293#else
294#define isa_dma_bridge_buggy (0)
295#endif
296
297#endif /* _ASM_DMA_H */
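
The helpers above compose into one canonical sequence for arming a single ISA DMA transfer; the floppy driver's hard_dma_setup() further down in this patch uses exactly this order. A minimal sketch, assuming kernel context and treating the channel, buffer and size as placeholders:

static int example_isa_dma_read(unsigned int chan, char *buf, unsigned int size)
{
	unsigned long flags;

	flags = claim_dma_lock();	/* flip-flop state is only safe under the lock */
	disable_dma(chan);
	clear_dma_ff(chan);		/* start LSB/MSB accesses from a known state */
	set_dma_mode(chan, DMA_MODE_READ);
	set_dma_addr(chan, isa_virt_to_bus(buf));	/* bus/physical address, below 16MB */
	set_dma_count(chan, size);	/* bytes; must be even for channels 5-7 */
	enable_dma(chan);
	release_dma_lock(flags);
	return 0;
}
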
diff --git a/include/asm-i386/dmi.h b/include/asm-i386/dmi.h
deleted file mode 100644
index 38d4eeb7fc7e..000000000000
--- a/include/asm-i386/dmi.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_DMI_H
2#define _ASM_DMI_H 1
3
4#include <asm/io.h>
5
6/* Use early IO mappings for DMI because it's initialized early */
7#define dmi_ioremap bt_ioremap
8#define dmi_iounmap bt_iounmap
9#define dmi_alloc alloc_bootmem
10
11#endif
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
deleted file mode 100644
index 6d66398a307d..000000000000
--- a/include/asm-i386/dwarf2.h
+++ /dev/null
@@ -1,61 +0,0 @@
1#ifndef _DWARF2_H
2#define _DWARF2_H
3
4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files"
6#endif
7
8/*
9 Macros for dwarf2 CFI unwind table entries.
10 See "as.info" for details on these pseudo ops. Unfortunately
11 they are only supported in very new binutils, so define them
12 away for older versions.
13 */
14
15#ifdef CONFIG_UNWIND_INFO
16
17#define CFI_STARTPROC .cfi_startproc
18#define CFI_ENDPROC .cfi_endproc
19#define CFI_DEF_CFA .cfi_def_cfa
20#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
21#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
22#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
23#define CFI_OFFSET .cfi_offset
24#define CFI_REL_OFFSET .cfi_rel_offset
25#define CFI_REGISTER .cfi_register
26#define CFI_RESTORE .cfi_restore
27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined
30
31#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
32#define CFI_SIGNAL_FRAME .cfi_signal_frame
33#else
34#define CFI_SIGNAL_FRAME
35#endif
36
37#else
38
39/* Due to the structure of pre-existing code, don't use assembler line
40 comment character # to ignore the arguments. Instead, use a dummy macro. */
41.macro ignore a=0, b=0, c=0, d=0
42.endm
43
44#define CFI_STARTPROC ignore
45#define CFI_ENDPROC ignore
46#define CFI_DEF_CFA ignore
47#define CFI_DEF_CFA_REGISTER ignore
48#define CFI_DEF_CFA_OFFSET ignore
49#define CFI_ADJUST_CFA_OFFSET ignore
50#define CFI_OFFSET ignore
51#define CFI_REL_OFFSET ignore
52#define CFI_REGISTER ignore
53#define CFI_RESTORE ignore
54#define CFI_REMEMBER_STATE ignore
55#define CFI_RESTORE_STATE ignore
56#define CFI_UNDEFINED ignore
57#define CFI_SIGNAL_FRAME ignore
58
59#endif
60
61#endif
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
deleted file mode 100644
index cf67dbb1db79..000000000000
--- a/include/asm-i386/e820.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * structures and definitions for the int 15, ax=e820 memory map
3 * scheme.
4 *
5 * In a nutshell, arch/i386/boot/setup.S populates a scratch table
6 * in the empty_zero_block that contains a list of usable address/size
7 * pairs. In arch/i386/kernel/setup.c, this information is
8 * transferred into the e820map, and in arch/i386/mm/init.c, that
9 * new information is used to mark pages reserved or not.
10 *
11 */
12#ifndef __E820_HEADER
13#define __E820_HEADER
14
15#define E820MAP 0x2d0 /* our map */
16#define E820MAX 128 /* number of entries in E820MAP */
17#define E820NR 0x1e8 /* # entries in E820MAP */
18
19#define E820_RAM 1
20#define E820_RESERVED 2
21#define E820_ACPI 3
22#define E820_NVS 4
23
24#define HIGH_MEMORY (1024*1024)
25
26#ifndef __ASSEMBLY__
27
28struct e820entry {
29 u64 addr; /* start of memory segment */
30 u64 size; /* size of memory segment */
31 u32 type; /* type of memory segment */
32} __attribute__((packed));
33
34struct e820map {
35 u32 nr_map;
36 struct e820entry map[E820MAX];
37};
38
39extern struct e820map e820;
40
41extern int e820_all_mapped(unsigned long start, unsigned long end,
42 unsigned type);
43extern int e820_any_mapped(u64 start, u64 end, unsigned type);
44extern void find_max_pfn(void);
45extern void register_bootmem_low_pages(unsigned long max_low_pfn);
46extern void e820_register_memory(void);
47extern void limit_regions(unsigned long long size);
48extern void print_memory_map(char *who);
49
50#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
51extern void e820_mark_nosave_regions(void);
52#else
53static inline void e820_mark_nosave_regions(void)
54{
55}
56#endif
57
58#endif/*!__ASSEMBLY__*/
59
60#endif/*__E820_HEADER*/
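
A minimal sketch of walking the table declared above, assuming kernel context; the format string is illustrative, not the exact output of print_memory_map():

static void example_walk_e820(void)
{
	u32 i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *e = &e820.map[i];

		printk("e820: %016Lx - %016Lx type %u\n",
		       e->addr, e->addr + e->size, e->type);
	}
}
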
diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h
deleted file mode 100644
index 3e7dd0ab68ce..000000000000
--- a/include/asm-i386/edac.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_EDAC_H
2#define ASM_EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static __inline__ void atomic_scrub(void *va, u32 size)
7{
8 unsigned long *virt_addr = va;
9 u32 i;
10
11 for (i = 0; i < size / 4; i++, virt_addr++)
12 /* Very carefully read and write to memory atomically
13 * so we are interrupt, DMA and SMP safe.
14 */
15 __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
16}
17
18#endif
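
Usage sketch for the scrubber above, assuming kernel context; "page" is a hypothetical struct page pointer, and the size must be a multiple of 4 since the loop consumes one 32-bit word per iteration:

static void example_scrub_page(struct page *page)
{
	atomic_scrub(page_address(page), PAGE_SIZE);
}
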
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
deleted file mode 100644
index b32df3a332da..000000000000
--- a/include/asm-i386/elf.h
+++ /dev/null
@@ -1,163 +0,0 @@
1#ifndef __ASMi386_ELF_H
2#define __ASMi386_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10#include <asm/auxvec.h>
11
12#define R_386_NONE 0
13#define R_386_32 1
14#define R_386_PC32 2
15#define R_386_GOT32 3
16#define R_386_PLT32 4
17#define R_386_COPY 5
18#define R_386_GLOB_DAT 6
19#define R_386_JMP_SLOT 7
20#define R_386_RELATIVE 8
21#define R_386_GOTOFF 9
22#define R_386_GOTPC 10
23#define R_386_NUM 11
24
25typedef unsigned long elf_greg_t;
26
27#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
28typedef elf_greg_t elf_gregset_t[ELF_NGREG];
29
30typedef struct user_i387_struct elf_fpregset_t;
31typedef struct user_fxsr_struct elf_fpxregset_t;
32
33/*
34 * This is used to ensure we don't load something for the wrong architecture.
35 */
36#define elf_check_arch(x) \
37 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
38
39/*
40 * These are used to set parameters in the core dumps.
41 */
42#define ELF_CLASS ELFCLASS32
43#define ELF_DATA ELFDATA2LSB
44#define ELF_ARCH EM_386
45
46#ifdef __KERNEL__
47
48#include <asm/processor.h>
49#include <asm/system.h> /* for savesegment */
50#include <asm/desc.h>
51
52/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
53 contains a pointer to a function which might be registered using `atexit'.
54 This provides a means for the dynamic linker to call DT_FINI functions for
55 shared libraries that have been loaded before the code runs.
56
57 A value of 0 means we have no such handler.
58
59 We might as well make sure everything else is cleared too (except for %esp),
60 just to make things more deterministic.
61 */
62#define ELF_PLAT_INIT(_r, load_addr) do { \
63 _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \
64 _r->esi = 0; _r->edi = 0; _r->ebp = 0; \
65 _r->eax = 0; \
66} while (0)
67
68#define USE_ELF_CORE_DUMP
69#define ELF_EXEC_PAGESIZE 4096
70
71/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
72 use of this is to invoke "./ld.so someprog" to test out a new version of
73 the loader. We need to make sure that it is out of the way of the program
74 that it will "exec", and that there is sufficient room for the brk. */
75
76#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
77
78/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
79 now struct user_regs_struct; they are different) */
80
81#define ELF_CORE_COPY_REGS(pr_reg, regs) \
82 pr_reg[0] = regs->ebx; \
83 pr_reg[1] = regs->ecx; \
84 pr_reg[2] = regs->edx; \
85 pr_reg[3] = regs->esi; \
86 pr_reg[4] = regs->edi; \
87 pr_reg[5] = regs->ebp; \
88 pr_reg[6] = regs->eax; \
89 pr_reg[7] = regs->xds & 0xffff; \
90 pr_reg[8] = regs->xes & 0xffff; \
91 pr_reg[9] = regs->xfs & 0xffff; \
92 savesegment(gs,pr_reg[10]); \
93 pr_reg[11] = regs->orig_eax; \
94 pr_reg[12] = regs->eip; \
95 pr_reg[13] = regs->xcs & 0xffff; \
96 pr_reg[14] = regs->eflags; \
97 pr_reg[15] = regs->esp; \
98 pr_reg[16] = regs->xss & 0xffff;
99
100/* This yields a mask that user programs can use to figure out what
101 instruction set this CPU supports. This could be done in user space,
102 but it's not easy, and we've already done it here. */
103
104#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
105
106/* This yields a string that ld.so will use to load implementation
107 specific libraries for optimization. This is more specific in
108 intent than poking at uname or /proc/cpuinfo.
109
110 For the moment, we have only optimizations for the Intel generations,
111 but that could change... */
112
113#define ELF_PLATFORM (utsname()->machine)
114
115#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
116
117/*
118 * An executable for which elf_read_implies_exec() returns TRUE will
119 * have the READ_IMPLIES_EXEC personality flag set automatically.
120 */
121#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
122
123struct task_struct;
124
125extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
126extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
127extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
128
129#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
130#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
131#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
132
133#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
134#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
135#define VDSO_PRELINK 0
136
137#define VDSO_SYM(x) \
138 (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK)
139
140#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
141#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE)
142
143extern void __kernel_vsyscall;
144
145#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
146
147struct linux_binprm;
148
149#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
150extern int arch_setup_additional_pages(struct linux_binprm *bprm,
151 int executable_stack);
152
153extern unsigned int vdso_enabled;
154
155#define ARCH_DLINFO \
156do if (vdso_enabled) { \
157 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
158 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
159} while (0)
160
161#endif
162
163#endif
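
The two ARCH_DLINFO entries above land in every process's auxiliary vector, so userspace can read them back. A small demonstration, assuming a 32-bit Linux system and glibc 2.16+ for getauxval():

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* entry point of __kernel_vsyscall, from NEW_AUX_ENT(AT_SYSINFO, ...) */
	unsigned long entry = getauxval(AT_SYSINFO);
	/* base of the vdso image, from NEW_AUX_ENT(AT_SYSINFO_EHDR, ...) */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	printf("AT_SYSINFO      = %#lx\n", entry);
	printf("AT_SYSINFO_EHDR = %#lx\n", base);
	return 0;
}
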
diff --git a/include/asm-i386/emergency-restart.h b/include/asm-i386/emergency-restart.h
deleted file mode 100644
index 680c39563345..000000000000
--- a/include/asm-i386/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4extern void machine_emergency_restart(void);
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-i386/errno.h b/include/asm-i386/errno.h
deleted file mode 100644
index 969b34374728..000000000000
--- a/include/asm-i386/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_ERRNO_H
2#define _I386_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif
diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h
deleted file mode 100644
index d1c6297d4a61..000000000000
--- a/include/asm-i386/fb.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8extern int fb_is_primary_device(struct fb_info *info);
9
10static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
11 unsigned long off)
12{
13 if (boot_cpu_data.x86 > 3)
14 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
15}
16
17#endif /* _ASM_FB_H_ */
diff --git a/include/asm-i386/fcntl.h b/include/asm-i386/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/include/asm-i386/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fcntl.h>
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
deleted file mode 100644
index 249e753ac805..000000000000
--- a/include/asm-i386/fixmap.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */
12
13#ifndef _ASM_FIXMAP_H
14#define _ASM_FIXMAP_H
15
16
17/* used by vmalloc.c, vsyscall.lds.S.
18 *
19 * Leave one empty page between vmalloc'ed areas and
20 * the start of the fixmap.
21 */
22extern unsigned long __FIXADDR_TOP;
23#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
24#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
25
26#ifndef __ASSEMBLY__
27#include <linux/kernel.h>
28#include <asm/acpi.h>
29#include <asm/apicdef.h>
30#include <asm/page.h>
31#ifdef CONFIG_HIGHMEM
32#include <linux/threads.h>
33#include <asm/kmap_types.h>
34#endif
35
36/*
37 * Here we define all the compile-time 'special' virtual
38 * addresses. The point is to have a constant address at
39 * compile time, but to set the physical address only
40 * in the boot process. We allocate these special addresses
41 * from the end of virtual memory (0xfffff000) backwards.
42 * This also lets us do fail-safe vmalloc(): we
43 * can guarantee that these special addresses and
44 * vmalloc()-ed addresses never overlap.
45 *
46 * these 'compile-time allocated' memory buffers are
47 * fixed-size 4k pages. (or larger if used with an increment
48 * higher than 1). Use set_fixmap(idx, phys) to associate
49 * physical memory with fixmap indices.
50 *
51 * TLB entries of such buffers will not be flushed across
52 * task switches.
53 */
54enum fixed_addresses {
55 FIX_HOLE,
56 FIX_VDSO,
57 FIX_DBGP_BASE,
58 FIX_EARLYCON_MEM_BASE,
59#ifdef CONFIG_X86_LOCAL_APIC
60 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
61#endif
62#ifdef CONFIG_X86_IO_APIC
63 FIX_IO_APIC_BASE_0,
64 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
65#endif
66#ifdef CONFIG_X86_VISWS_APIC
67 FIX_CO_CPU, /* Cobalt timer */
68 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
69 FIX_LI_PCIA, /* Lithium PCI Bridge A */
70 FIX_LI_PCIB, /* Lithium PCI Bridge B */
71#endif
72#ifdef CONFIG_X86_F00F_BUG
73 FIX_F00F_IDT, /* Virtual mapping for IDT */
74#endif
75#ifdef CONFIG_X86_CYCLONE_TIMER
76 FIX_CYCLONE_TIMER, /*cyclone timer register*/
77#endif
78#ifdef CONFIG_HIGHMEM
79 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
80 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
81#endif
82#ifdef CONFIG_ACPI
83 FIX_ACPI_BEGIN,
84 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
85#endif
86#ifdef CONFIG_PCI_MMCONFIG
87 FIX_PCIE_MCFG,
88#endif
89#ifdef CONFIG_PARAVIRT
90 FIX_PARAVIRT_BOOTMAP,
91#endif
92 __end_of_permanent_fixed_addresses,
93 /* temporary boot-time mappings, used before ioremap() is functional */
94#define NR_FIX_BTMAPS 16
95 FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
96 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
97 FIX_WP_TEST,
98 __end_of_fixed_addresses
99};
100
101extern void __set_fixmap (enum fixed_addresses idx,
102 unsigned long phys, pgprot_t flags);
103extern void reserve_top_address(unsigned long reserve);
104
105#define set_fixmap(idx, phys) \
106 __set_fixmap(idx, phys, PAGE_KERNEL)
107/*
108 * Some hardware wants to get fixmapped without caching.
109 */
110#define set_fixmap_nocache(idx, phys) \
111 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
112
113#define clear_fixmap(idx) \
114 __set_fixmap(idx, 0, __pgprot(0))
115
116#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
117
118#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
119#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
120#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
121#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
122
123#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
124#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
125
126extern void __this_fixmap_does_not_exist(void);
127
128/*
129 * 'index to address' translation. If anyone tries to use the idx
130 * directly without translation, we catch the bug with a NULL-dereference
131 * kernel oops. Illegal ranges of incoming indices are caught too.
132 */
133static __always_inline unsigned long fix_to_virt(const unsigned int idx)
134{
135 /*
136 * this branch gets completely eliminated after inlining,
137 * except when someone tries to use fixaddr indices in an
138 * illegal way. (such as mixing up address types or using
139 * out-of-range indices).
140 *
141 * If it doesn't get removed, the linker will complain
142 * loudly with a reasonably clear error message..
143 */
144 if (idx >= __end_of_fixed_addresses)
145 __this_fixmap_does_not_exist();
146
147 return __fix_to_virt(idx);
148}
149
150static inline unsigned long virt_to_fix(const unsigned long vaddr)
151{
152 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
153 return __virt_to_fix(vaddr);
154}
155
156#endif /* !__ASSEMBLY__ */
157#endif
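
The index-to-address arithmetic is plain subtraction from the top. A runnable illustration, assuming the default FIXADDR_TOP of 0xfffff000 mentioned above and the i386 PAGE_SHIFT of 12; index 0 (FIX_HOLE) is the top page, index 1 (FIX_VDSO) the page below it, and so on downwards:

#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xfffff000UL
#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

int main(void)
{
	unsigned long idx;

	for (idx = 0; idx < 4; idx++)
		printf("fixmap index %lu -> %#lx\n", idx, __fix_to_virt(idx));
	return 0;
}
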
diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h
deleted file mode 100644
index 44ef2f55a8e9..000000000000
--- a/include/asm-i386/floppy.h
+++ /dev/null
@@ -1,284 +0,0 @@
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef __ASM_I386_FLOPPY_H
11#define __ASM_I386_FLOPPY_H
12
13#include <linux/vmalloc.h>
14
15
16/*
17 * The DMA channel used by the floppy controller cannot access data at
18 * addresses >= 16MB
19 *
20 * Went back to the 1MB limit, as some people had problems with the floppy
21 * driver otherwise. It doesn't matter much for performance anyway, as most
22 * floppy accesses go through the track buffer.
23 */
24#define _CROSS_64KB(a,s,vdma) \
25(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
26
27#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
28
29
30#define SW fd_routine[use_virtual_dma&1]
31#define CSW fd_routine[can_use_virtual_dma & 1]
32
33
34#define fd_inb(port) inb_p(port)
35#define fd_outb(value,port) outb_p(value,port)
36
37#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
38#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
39#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
40#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
41#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
42#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
43#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
44#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
45
46#define FLOPPY_CAN_FALLBACK_ON_NODMA
47
48static int virtual_dma_count;
49static int virtual_dma_residue;
50static char *virtual_dma_addr;
51static int virtual_dma_mode;
52static int doing_pdma;
53
54static irqreturn_t floppy_hardint(int irq, void *dev_id)
55{
56 register unsigned char st;
57
58#undef TRACE_FLPY_INT
59
60#ifdef TRACE_FLPY_INT
61 static int calls=0;
62 static int bytes=0;
63 static int dma_wait=0;
64#endif
65 if (!doing_pdma)
66 return floppy_interrupt(irq, dev_id);
67
68#ifdef TRACE_FLPY_INT
69 if(!calls)
70 bytes = virtual_dma_count;
71#endif
72
73 {
74 register int lcount;
75 register char *lptr;
76
77 st = 1;
78 for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
79 lcount; lcount--, lptr++) {
80 st=inb(virtual_dma_port+4) & 0xa0 ;
81 if(st != 0xa0)
82 break;
83 if(virtual_dma_mode)
84 outb_p(*lptr, virtual_dma_port+5);
85 else
86 *lptr = inb_p(virtual_dma_port+5);
87 }
88 virtual_dma_count = lcount;
89 virtual_dma_addr = lptr;
90 st = inb(virtual_dma_port+4);
91 }
92
93#ifdef TRACE_FLPY_INT
94 calls++;
95#endif
96 if(st == 0x20)
97 return IRQ_HANDLED;
98 if(!(st & 0x20)) {
99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count=0;
101#ifdef TRACE_FLPY_INT
102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait);
105 calls = 0;
106 dma_wait=0;
107#endif
108 doing_pdma = 0;
109 floppy_interrupt(irq, dev_id);
110 return IRQ_HANDLED;
111 }
112#ifdef TRACE_FLPY_INT
113 if(!virtual_dma_count)
114 dma_wait++;
115#endif
116 return IRQ_HANDLED;
117}
118
119static void fd_disable_dma(void)
120{
121 if(! (can_use_virtual_dma & 1))
122 disable_dma(FLOPPY_DMA);
123 doing_pdma = 0;
124 virtual_dma_residue += virtual_dma_count;
125 virtual_dma_count=0;
126}
127
128static int vdma_request_dma(unsigned int dmanr, const char * device_id)
129{
130 return 0;
131}
132
133static void vdma_nop(unsigned int dummy)
134{
135}
136
137
138static int vdma_get_dma_residue(unsigned int dummy)
139{
140 return virtual_dma_count + virtual_dma_residue;
141}
142
143
144static int fd_request_irq(void)
145{
146 if(can_use_virtual_dma)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,
148 IRQF_DISABLED, "floppy", NULL);
149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt,
151 IRQF_DISABLED, "floppy", NULL);
152
153}
154
155static unsigned long dma_mem_alloc(unsigned long size)
156{
157 return __get_dma_pages(GFP_KERNEL,get_order(size));
158}
159
160
161static unsigned long vdma_mem_alloc(unsigned long size)
162{
163 return (unsigned long) vmalloc(size);
164
165}
166
167#define nodma_mem_alloc(size) vdma_mem_alloc(size)
168
169static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
170{
171 if((unsigned int) addr >= (unsigned int) high_memory)
172 vfree((void *)addr);
173 else
174 free_pages(addr, get_order(size));
175}
176
177#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
178
179static void _fd_chose_dma_mode(char *addr, unsigned long size)
180{
181 if(can_use_virtual_dma == 2) {
182 if((unsigned int) addr >= (unsigned int) high_memory ||
183 isa_virt_to_bus(addr) >= 0x1000000 ||
184 _CROSS_64KB(addr, size, 0))
185 use_virtual_dma = 1;
186 else
187 use_virtual_dma = 0;
188 } else {
189 use_virtual_dma = can_use_virtual_dma & 1;
190 }
191}
192
193#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
194
195
196static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
197{
198 doing_pdma = 1;
199 virtual_dma_port = io;
200 virtual_dma_mode = (mode == DMA_MODE_WRITE);
201 virtual_dma_addr = addr;
202 virtual_dma_count = size;
203 virtual_dma_residue = 0;
204 return 0;
205}
206
207static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
208{
209#ifdef FLOPPY_SANITY_CHECK
210 if (CROSS_64KB(addr, size)) {
211 printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
212 return -1;
213 }
214#endif
215 /* actual, physical DMA */
216 doing_pdma = 0;
217 clear_dma_ff(FLOPPY_DMA);
218 set_dma_mode(FLOPPY_DMA,mode);
219 set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
220 set_dma_count(FLOPPY_DMA,size);
221 enable_dma(FLOPPY_DMA);
222 return 0;
223}
224
225static struct fd_routine_l {
226 int (*_request_dma)(unsigned int dmanr, const char * device_id);
227 void (*_free_dma)(unsigned int dmanr);
228 int (*_get_dma_residue)(unsigned int dummy);
229 unsigned long (*_dma_mem_alloc) (unsigned long size);
230 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
231} fd_routine[] = {
232 {
233 request_dma,
234 free_dma,
235 get_dma_residue,
236 dma_mem_alloc,
237 hard_dma_setup
238 },
239 {
240 vdma_request_dma,
241 vdma_nop,
242 vdma_get_dma_residue,
243 vdma_mem_alloc,
244 vdma_dma_setup
245 }
246};
247
248
249static int FDC1 = 0x3f0;
250static int FDC2 = -1;
251
252/*
253 * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
254 * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
255 * coincides with another rtc CMOS user. Paul G.
256 */
257#define FLOPPY0_TYPE ({ \
258 unsigned long flags; \
259 unsigned char val; \
260 spin_lock_irqsave(&rtc_lock, flags); \
261 val = (CMOS_READ(0x10) >> 4) & 15; \
262 spin_unlock_irqrestore(&rtc_lock, flags); \
263 val; \
264})
265
266#define FLOPPY1_TYPE ({ \
267 unsigned long flags; \
268 unsigned char val; \
269 spin_lock_irqsave(&rtc_lock, flags); \
270 val = CMOS_READ(0x10) & 15; \
271 spin_unlock_irqrestore(&rtc_lock, flags); \
272 val; \
273})
274
275#define N_FDC 2
276#define N_DRIVE 8
277
278#define FLOPPY_MOTOR_MASK 0xf0
279
280#define AUTO_DMA
281
282#define EXTRA_FLOPPY_PARAMS
283
284#endif /* __ASM_I386_FLOPPY_H */
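
The _CROSS_64KB() test above is the guard hard_dma_setup() relies on. A runnable illustration with the vdma parameter fixed to 0 and K_64 spelled out as 0x10000 (the constant comes from the floppy driver proper):

#include <stdio.h>

#define K_64 0x10000UL
#define CROSS_64KB(a, s) \
	(((unsigned long)(a) / K_64) != (((unsigned long)(a) + (s) - 1) / K_64))

int main(void)
{
	printf("%d\n", (int)CROSS_64KB(0xFFF0UL, 0x10)); /* 0: last byte is 0xFFFF */
	printf("%d\n", (int)CROSS_64KB(0xFFF0UL, 0x11)); /* 1: spills into 0x10000 */
	return 0;
}
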
diff --git a/include/asm-i386/frame.i b/include/asm-i386/frame.i
deleted file mode 100644
index 03620251ae17..000000000000
--- a/include/asm-i386/frame.i
+++ /dev/null
@@ -1,23 +0,0 @@
1#include <asm/dwarf2.h>
2
3/* The annotation hides the frame from the unwinder and makes it look
4 like an ordinary ebp save/restore. This avoids some special cases for
5 the frame pointer later */
6#ifdef CONFIG_FRAME_POINTER
7 .macro FRAME
8 pushl %ebp
9 CFI_ADJUST_CFA_OFFSET 4
10 CFI_REL_OFFSET ebp,0
11 movl %esp,%ebp
12 .endm
13 .macro ENDFRAME
14 popl %ebp
15 CFI_ADJUST_CFA_OFFSET -4
16 CFI_RESTORE ebp
17 .endm
18#else
19 .macro FRAME
20 .endm
21 .macro ENDFRAME
22 .endm
23#endif
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
deleted file mode 100644
index 438ef0ec7101..000000000000
--- a/include/asm-i386/futex.h
+++ /dev/null
@@ -1,135 +0,0 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/system.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11
12#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13 __asm__ __volatile ( \
14"1: " insn "\n" \
15"2: .section .fixup,\"ax\"\n\
163: mov %3, %1\n\
17 jmp 2b\n\
18 .previous\n\
19 .section __ex_table,\"a\"\n\
20 .align 8\n\
21 .long 1b,3b\n\
22 .previous" \
23 : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
24 : "i" (-EFAULT), "0" (oparg), "1" (0))
25
26#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
27 __asm__ __volatile ( \
28"1: movl %2, %0\n\
29 movl %0, %3\n" \
30 insn "\n" \
31"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
32 jnz 1b\n\
333: .section .fixup,\"ax\"\n\
344: mov %5, %1\n\
35 jmp 3b\n\
36 .previous\n\
37 .section __ex_table,\"a\"\n\
38 .align 8\n\
39 .long 1b,4b,2b,4b\n\
40 .previous" \
41 : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
42 "=&r" (tem) \
43 : "r" (oparg), "i" (-EFAULT), "1" (0))
44
45static inline int
46futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
47{
48 int op = (encoded_op >> 28) & 7;
49 int cmp = (encoded_op >> 24) & 15;
50 int oparg = (encoded_op << 8) >> 20;
51 int cmparg = (encoded_op << 20) >> 20;
52 int oldval = 0, ret, tem;
53 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
54 oparg = 1 << oparg;
55
56 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
57 return -EFAULT;
58
59 pagefault_disable();
60
61 if (op == FUTEX_OP_SET)
62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
63 else {
64#ifndef CONFIG_X86_BSWAP
65 if (boot_cpu_data.x86 == 3)
66 ret = -ENOSYS;
67 else
68#endif
69 switch (op) {
70 case FUTEX_OP_ADD:
71 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
72 oldval, uaddr, oparg);
73 break;
74 case FUTEX_OP_OR:
75 __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr,
76 oparg);
77 break;
78 case FUTEX_OP_ANDN:
79 __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr,
80 ~oparg);
81 break;
82 case FUTEX_OP_XOR:
83 __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr,
84 oparg);
85 break;
86 default:
87 ret = -ENOSYS;
88 }
89 }
90
91 pagefault_enable();
92
93 if (!ret) {
94 switch (cmp) {
95 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
96 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
97 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
98 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
99 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
100 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
101 default: ret = -ENOSYS;
102 }
103 }
104 return ret;
105}
106
107static inline int
108futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
109{
110 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
111 return -EFAULT;
112
113 __asm__ __volatile__(
114 "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
115
116 "2: .section .fixup, \"ax\" \n"
117 "3: mov %2, %0 \n"
118 " jmp 2b \n"
119 " .previous \n"
120
121 " .section __ex_table, \"a\" \n"
122 " .align 8 \n"
123 " .long 1b,3b \n"
124 " .previous \n"
125
126 : "=a" (oldval), "+m" (*uaddr)
127 : "i" (-EFAULT), "r" (newval), "0" (oldval)
128 : "memory"
129 );
130
131 return oldval;
132}
133
134#endif
135#endif
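
The first four lines of futex_atomic_op_inuser() unpack encoded_op into four fields; the left-then-right shifts deliberately sign-extend the two 12-bit argument fields, exactly as the kernel writes it. A runnable illustration with a hypothetical encoding:

#include <stdio.h>

int main(void)
{
	int encoded_op = (1 << 28) | (3 << 24) | (5 << 12) | 7;
	int op = (encoded_op >> 28) & 7;	/* FUTEX_OP_* */
	int cmp = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_* */
	int oparg = (encoded_op << 8) >> 20;	/* 12-bit signed operand */
	int cmparg = (encoded_op << 20) >> 20;	/* 12-bit signed compare arg */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;	/* prints: op=1 cmp=3 oparg=5 cmparg=7 */
}
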
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
deleted file mode 100644
index 33e3ffe1766c..000000000000
--- a/include/asm-i386/genapic.h
+++ /dev/null
@@ -1,127 +0,0 @@
1#ifndef _ASM_GENAPIC_H
2#define _ASM_GENAPIC_H 1
3
4#include <asm/mpspec.h>
5
6/*
7 * Generic APIC driver interface.
8 *
9 * A straightforward mapping of the APIC-related parts of the
10 * x86 subarchitecture interface to a dynamic object.
11 *
12 * This is used by the "generic" x86 subarchitecture.
13 *
14 * Copyright 2003 Andi Kleen, SuSE Labs.
15 */
16
17struct mpc_config_translation;
18struct mpc_config_bus;
19struct mp_config_table;
20struct mpc_config_processor;
21
22struct genapic {
23 char *name;
24 int (*probe)(void);
25
26 int (*apic_id_registered)(void);
27 cpumask_t (*target_cpus)(void);
28 int int_delivery_mode;
29 int int_dest_mode;
30 int ESR_DISABLE;
31 int apic_destination_logical;
32 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
33 unsigned long (*check_apicid_present)(int apicid);
34 int no_balance_irq;
35 int no_ioapic_check;
36 void (*init_apic_ldr)(void);
37 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
38
39 void (*setup_apic_routing)(void);
40 int (*multi_timer_check)(int apic, int irq);
41 int (*apicid_to_node)(int logical_apicid);
42 int (*cpu_to_logical_apicid)(int cpu);
43 int (*cpu_present_to_apicid)(int mps_cpu);
44 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
45 int (*mpc_apic_id)(struct mpc_config_processor *m,
46 struct mpc_config_translation *t);
47 void (*setup_portio_remap)(void);
48 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
49 void (*enable_apic_mode)(void);
50 u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
51
52 /* mpparse */
53 void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *,
54 struct mpc_config_translation *);
55 void (*mpc_oem_pci_bus)(struct mpc_config_bus *,
56 struct mpc_config_translation *);
57
58 /* When one of the next two hooks returns 1 the genapic
59 is switched to this. Essentially they are additional probe
60 functions. */
61 int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
62 char *productid);
63 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
64
65 unsigned (*get_apic_id)(unsigned long x);
66 unsigned long apic_id_mask;
67 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
68
69#ifdef CONFIG_SMP
70 /* ipi */
71 void (*send_IPI_mask)(cpumask_t mask, int vector);
72 void (*send_IPI_allbutself)(int vector);
73 void (*send_IPI_all)(int vector);
74#endif
75};
76
77#define APICFUNC(x) .x = x,
78
79/* More functions could probably be marked IPIFUNC and save some space
80 in UP GENERICARCH kernels, but I don't have the nerve right now
81 to untangle this mess. -AK */
82#ifdef CONFIG_SMP
83#define IPIFUNC(x) APICFUNC(x)
84#else
85#define IPIFUNC(x)
86#endif
87
88#define APIC_INIT(aname, aprobe) { \
89 .name = aname, \
90 .probe = aprobe, \
91 .int_delivery_mode = INT_DELIVERY_MODE, \
92 .int_dest_mode = INT_DEST_MODE, \
93 .no_balance_irq = NO_BALANCE_IRQ, \
94 .ESR_DISABLE = esr_disable, \
95 .apic_destination_logical = APIC_DEST_LOGICAL, \
96 APICFUNC(apic_id_registered) \
97 APICFUNC(target_cpus) \
98 APICFUNC(check_apicid_used) \
99 APICFUNC(check_apicid_present) \
100 APICFUNC(init_apic_ldr) \
101 APICFUNC(ioapic_phys_id_map) \
102 APICFUNC(setup_apic_routing) \
103 APICFUNC(multi_timer_check) \
104 APICFUNC(apicid_to_node) \
105 APICFUNC(cpu_to_logical_apicid) \
106 APICFUNC(cpu_present_to_apicid) \
107 APICFUNC(apicid_to_cpu_present) \
108 APICFUNC(mpc_apic_id) \
109 APICFUNC(setup_portio_remap) \
110 APICFUNC(check_phys_apicid_present) \
111 APICFUNC(mpc_oem_bus_info) \
112 APICFUNC(mpc_oem_pci_bus) \
113 APICFUNC(mps_oem_check) \
114 APICFUNC(get_apic_id) \
115 .apic_id_mask = APIC_ID_MASK, \
116 APICFUNC(cpu_mask_to_apicid) \
117 APICFUNC(acpi_madt_oem_check) \
118 IPIFUNC(send_IPI_mask) \
119 IPIFUNC(send_IPI_allbutself) \
120 IPIFUNC(send_IPI_all) \
121 APICFUNC(enable_apic_mode) \
122 APICFUNC(phys_pkg_id) \
123 }
124
125extern struct genapic *genapic;
126
127#endif
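
APIC_INIT() exists so each subarchitecture can instantiate its driver in one line. A hedged sketch; the "example" names are invented, and the probe routine plus constants such as INT_DELIVERY_MODE are expected to be in scope from the subarch's mach-*/ headers:

static int probe_example(void)
{
	return 0;	/* return 1 to claim this machine */
}

struct genapic apic_example = APIC_INIT("example", probe_example);
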
diff --git a/include/asm-i386/geode.h b/include/asm-i386/geode.h
deleted file mode 100644
index 6da4bbbea3dc..000000000000
--- a/include/asm-i386/geode.h
+++ /dev/null
@@ -1,159 +0,0 @@
1/*
2 * AMD Geode definitions
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public License
7 * as published by the Free Software Foundation.
8 */
9
10#ifndef _ASM_GEODE_H_
11#define _ASM_GEODE_H_
12
13#include <asm/processor.h>
14#include <linux/io.h>
15
16/* Generic southbridge functions */
17
18#define GEODE_DEV_PMS 0
19#define GEODE_DEV_ACPI 1
20#define GEODE_DEV_GPIO 2
21#define GEODE_DEV_MFGPT 3
22
23extern int geode_get_dev_base(unsigned int dev);
24
25/* Useful macros */
26#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
27#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
28#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
29#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
30
31/* MSRS */
32
33#define GX_GLCP_SYS_RSTPLL 0x4C000014
34
35#define MSR_LBAR_SMB 0x5140000B
36#define MSR_LBAR_GPIO 0x5140000C
37#define MSR_LBAR_MFGPT 0x5140000D
38#define MSR_LBAR_ACPI 0x5140000E
39#define MSR_LBAR_PMS 0x5140000F
40
41#define MSR_PIC_YSEL_LOW 0x51400020
42#define MSR_PIC_YSEL_HIGH 0x51400021
43#define MSR_PIC_ZSEL_LOW 0x51400022
44#define MSR_PIC_ZSEL_HIGH 0x51400023
45
46#define MFGPT_IRQ_MSR 0x51400028
47#define MFGPT_NR_MSR 0x51400029
48
49/* Resource Sizes */
50
51#define LBAR_GPIO_SIZE 0xFF
52#define LBAR_MFGPT_SIZE 0x40
53#define LBAR_ACPI_SIZE 0x40
54#define LBAR_PMS_SIZE 0x80
55
56/* ACPI registers (PMS block) */
57
58/*
59 * PM1_EN is only valid for 16-bit reads when VSA is enabled.
60 * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
61 * with a 32 bit read at offset 0x0
62 */
63
64#define PM1_STS 0x00
65#define PM1_EN 0x02
66#define PM1_CNT 0x08
67#define PM2_CNT 0x0C
68#define PM_TMR 0x10
69#define PM_GPE0_STS 0x18
70#define PM_GPE0_EN 0x1C
71
72/* PMC registers (PMS block) */
73
74#define PM_SSD 0x00
75#define PM_SCXA 0x04
76#define PM_SCYA 0x08
77#define PM_OUT_SLPCTL 0x0C
78#define PM_SCLK 0x10
79#define PM_SED 0x1
80#define PM_SCXD 0x18
81#define PM_SCYD 0x1C
82#define PM_IN_SLPCTL 0x20
83#define PM_WKD 0x30
84#define PM_WKXD 0x34
85#define PM_RD 0x38
86#define PM_WKXA 0x3C
87#define PM_FSD 0x40
88#define PM_TSD 0x44
89#define PM_PSD 0x48
90#define PM_NWKD 0x4C
91#define PM_AWKD 0x50
92#define PM_SSC 0x54
93
94/* GPIO */
95
96#define GPIO_OUTPUT_VAL 0x00
97#define GPIO_OUTPUT_ENABLE 0x04
98#define GPIO_OUTPUT_OPEN_DRAIN 0x08
99#define GPIO_OUTPUT_INVERT 0x0C
100#define GPIO_OUTPUT_AUX1 0x10
101#define GPIO_OUTPUT_AUX2 0x14
102#define GPIO_PULL_UP 0x18
103#define GPIO_PULL_DOWN 0x1C
104#define GPIO_INPUT_ENABLE 0x20
105#define GPIO_INPUT_INVERT 0x24
106#define GPIO_INPUT_FILTER 0x28
107#define GPIO_INPUT_EVENT_COUNT 0x2C
108#define GPIO_READ_BACK 0x30
109#define GPIO_INPUT_AUX1 0x34
110#define GPIO_EVENTS_ENABLE 0x38
111#define GPIO_LOCK_ENABLE 0x3C
112#define GPIO_POSITIVE_EDGE_EN 0x40
113#define GPIO_NEGATIVE_EDGE_EN 0x44
114#define GPIO_POSITIVE_EDGE_STS 0x48
115#define GPIO_NEGATIVE_EDGE_STS 0x4C
116
117#define GPIO_MAP_X 0xE0
118#define GPIO_MAP_Y 0xE4
119#define GPIO_MAP_Z 0xE8
120#define GPIO_MAP_W 0xEC
121
122extern void geode_gpio_set(unsigned int, unsigned int);
123extern void geode_gpio_clear(unsigned int, unsigned int);
124extern int geode_gpio_isset(unsigned int, unsigned int);
125extern void geode_gpio_setup_event(unsigned int, int, int);
126extern void geode_gpio_set_irq(unsigned int, unsigned int);
127
128static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
129{
130 geode_gpio_setup_event(gpio, pair, 0);
131}
132
133static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
134{
135 geode_gpio_setup_event(gpio, pair, 1);
136}
137
138/* Specific geode tests */
139
140static inline int is_geode_gx(void)
141{
142 return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) &&
143 (boot_cpu_data.x86 == 5) &&
144 (boot_cpu_data.x86_model == 5));
145}
146
147static inline int is_geode_lx(void)
148{
149 return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
150 (boot_cpu_data.x86 == 5) &&
151 (boot_cpu_data.x86_model == 10));
152}
153
154static inline int is_geode(void)
155{
156 return (is_geode_gx() || is_geode_lx());
157}
158
159#endif
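
A minimal sketch of peeking at the GPIO read-back register through the base-address helpers above, assuming kernel context; whether a 32-bit inl() is appropriate for a given register is board-specific:

static u32 example_gpio_read_back(void)
{
	int base = geode_gpio_base();

	if (!base)
		return 0;	/* southbridge absent or not probed yet */
	return inl(base + GPIO_READ_BACK);
}
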
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
deleted file mode 100644
index 0e358dc405f8..000000000000
--- a/include/asm-i386/hardirq.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef __ASM_HARDIRQ_H
2#define __ASM_HARDIRQ_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned long idle_timestamp;
10 unsigned int __nmi_count; /* arch dependent */
11 unsigned int apic_timer_irqs; /* arch dependent */
12} ____cacheline_aligned irq_cpustat_t;
13
14DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
15extern irq_cpustat_t irq_stat[];
16
17#define __ARCH_IRQ_STAT
18#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
19
20void ack_bad_irq(unsigned int irq);
21#include <linux/irq_cpustat.h>
22
23#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
deleted file mode 100644
index 13cdcd66fff2..000000000000
--- a/include/asm-i386/highmem.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * highmem.h: virtual kernel memory mappings for high memory
3 *
4 * Used in CONFIG_HIGHMEM systems for memory pages which
5 * are not addressable by direct kernel virtual addresses.
6 *
7 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
8 * Gerhard.Wichert@pdb.siemens.de
9 *
10 *
11 * Redesigned the x86 32-bit VM architecture to deal with
12 * up to 16 Terabyte physical memory. With current x86 CPUs
13 * we now support up to 64 Gigabytes physical RAM.
14 *
15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
16 */
17
18#ifndef _ASM_HIGHMEM_H
19#define _ASM_HIGHMEM_H
20
21#ifdef __KERNEL__
22
23#include <linux/interrupt.h>
24#include <linux/threads.h>
25#include <asm/kmap_types.h>
26#include <asm/tlbflush.h>
27#include <asm/paravirt.h>
28
29/* declarations for highmem.c */
30extern unsigned long highstart_pfn, highend_pfn;
31
32extern pte_t *kmap_pte;
33extern pgprot_t kmap_prot;
34extern pte_t *pkmap_page_table;
35
36/*
37 * Right now we initialize only a single pte table. It can be extended
38 * easily; subsequent pte tables have to be allocated in one physical
39 * chunk of RAM.
40 */
41#ifdef CONFIG_X86_PAE
42#define LAST_PKMAP 512
43#else
44#define LAST_PKMAP 1024
45#endif
46/*
47 * Ordering is:
48 *
49 * FIXADDR_TOP
50 * fixed_addresses
51 * FIXADDR_START
52 * temp fixed addresses
53 * FIXADDR_BOOT_START
54 * Persistent kmap area
55 * PKMAP_BASE
56 * VMALLOC_END
57 * Vmalloc area
58 * VMALLOC_START
59 * high_memory
60 */
61#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
62#define LAST_PKMAP_MASK (LAST_PKMAP-1)
63#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
64#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
65
66extern void * FASTCALL(kmap_high(struct page *page));
67extern void FASTCALL(kunmap_high(struct page *page));
68
69void *kmap(struct page *page);
70void kunmap(struct page *page);
71void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
72void *kmap_atomic(struct page *page, enum km_type type);
73void kunmap_atomic(void *kvaddr, enum km_type type);
74void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
75struct page *kmap_atomic_to_page(void *ptr);
76
77#ifndef CONFIG_PARAVIRT
78#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
79#endif
80
81#define flush_cache_kmaps() do { } while (0)
82
83#endif /* __KERNEL__ */
84
85#endif /* _ASM_HIGHMEM_H */
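
Usage sketch for the atomic mapping flavour declared above, assuming kernel context; kmap() may sleep, so kmap_atomic() is the variant usable with preemption disabled, and the km_type slot must match on unmap:

static void example_zero_highpage(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER0);

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr, KM_USER0);
}
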
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
deleted file mode 100644
index c82dc7ed96b3..000000000000
--- a/include/asm-i386/hpet.h
+++ /dev/null
@@ -1,90 +0,0 @@
1
2#ifndef _I386_HPET_H
3#define _I386_HPET_H
4
5#ifdef CONFIG_HPET_TIMER
6
7/*
8 * Documentation on HPET can be found at:
9 * http://www.intel.com/ial/home/sp/pcmmspec.htm
10 * ftp://download.intel.com/ial/home/sp/mmts098.pdf
11 */
12
13#define HPET_MMAP_SIZE 1024
14
15#define HPET_ID 0x000
16#define HPET_PERIOD 0x004
17#define HPET_CFG 0x010
18#define HPET_STATUS 0x020
19#define HPET_COUNTER 0x0f0
20#define HPET_T0_CFG 0x100
21#define HPET_T0_CMP 0x108
22#define HPET_T0_ROUTE 0x110
23#define HPET_T1_CFG 0x120
24#define HPET_T1_CMP 0x128
25#define HPET_T1_ROUTE 0x130
26#define HPET_T2_CFG 0x140
27#define HPET_T2_CMP 0x148
28#define HPET_T2_ROUTE 0x150
29
30#define HPET_ID_REV 0x000000ff
31#define HPET_ID_NUMBER 0x00001f00
32#define HPET_ID_64BIT 0x00002000
33#define HPET_ID_LEGSUP 0x00008000
34#define HPET_ID_VENDOR 0xffff0000
35#define HPET_ID_NUMBER_SHIFT 8
36#define HPET_ID_VENDOR_SHIFT 16
37
38#define HPET_ID_VENDOR_8086 0x8086
39
40#define HPET_CFG_ENABLE 0x001
41#define HPET_CFG_LEGACY 0x002
42#define HPET_LEGACY_8254 2
43#define HPET_LEGACY_RTC 8
44
45#define HPET_TN_LEVEL 0x0002
46#define HPET_TN_ENABLE 0x0004
47#define HPET_TN_PERIODIC 0x0008
48#define HPET_TN_PERIODIC_CAP 0x0010
49#define HPET_TN_64BIT_CAP 0x0020
50#define HPET_TN_SETVAL 0x0040
51#define HPET_TN_32BIT 0x0100
52#define HPET_TN_ROUTE 0x3e00
53#define HPET_TN_FSB 0x4000
54#define HPET_TN_FSB_CAP 0x8000
55#define HPET_TN_ROUTE_SHIFT 9
56
57/* Max HPET Period is 10^8 femto sec as in HPET spec */
58#define HPET_MAX_PERIOD 100000000UL
59/*
60 * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
61 * then the 32-bit HPET counter wraps around in less than 0.5 sec.
62 */
63#define HPET_MIN_PERIOD 100000UL
64
65/* hpet memory map physical address */
66extern unsigned long hpet_address;
67extern int is_hpet_enabled(void);
68extern int hpet_enable(void);
69
70#ifdef CONFIG_HPET_EMULATE_RTC
71
72#include <linux/interrupt.h>
73
74extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
75extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
76extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
77 unsigned char sec);
78extern int hpet_set_periodic_freq(unsigned long freq);
79extern int hpet_rtc_dropped_irq(void);
80extern int hpet_rtc_timer_init(void);
81extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
82
83#endif /* CONFIG_HPET_EMULATE_RTC */
84
85#else
86
87static inline int hpet_enable(void) { return 0; }
88
89#endif /* CONFIG_HPET_TIMER */
90#endif /* _I386_HPET_H */
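
A runnable decode of a sample HPET_ID word using the masks above; 0x8086a201 is a made-up value, not read from real hardware, and the NUMBER field holds the index of the last timer, hence the +1:

#include <stdio.h>

#define HPET_ID_REV		0x000000ff
#define HPET_ID_NUMBER		0x00001f00
#define HPET_ID_64BIT		0x00002000
#define HPET_ID_LEGSUP		0x00008000
#define HPET_ID_VENDOR		0xffff0000
#define HPET_ID_NUMBER_SHIFT	8
#define HPET_ID_VENDOR_SHIFT	16

int main(void)
{
	unsigned int id = 0x8086a201;

	printf("revision : %u\n", id & HPET_ID_REV);
	printf("timers   : %u\n", ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1);
	printf("64-bit   : %s\n", (id & HPET_ID_64BIT) ? "yes" : "no");
	printf("legacy   : %s\n", (id & HPET_ID_LEGSUP) ? "yes" : "no");
	printf("vendor   : 0x%04x\n", (id & HPET_ID_VENDOR) >> HPET_ID_VENDOR_SHIFT);
	return 0;
}
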
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
deleted file mode 100644
index 0bedbdf5e907..000000000000
--- a/include/asm-i386/hw_irq.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * linux/include/asm/hw_irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * moved some of the old arch/i386/kernel/irq.h to here. VY
10 *
11 * IRQ/IPI changes taken from work by Thomas Radke
12 * <tomsoft@informatik.tu-chemnitz.de>
13 */
14
15#include <linux/profile.h>
16#include <asm/atomic.h>
17#include <asm/irq.h>
18#include <asm/sections.h>
19
20#define NMI_VECTOR 0x02
21
22/*
23 * Various low-level irq details needed by irq.c, process.c,
24 * time.c, io_apic.c and smp.c
25 *
26 * Interrupt entry/exit code at both C and assembly level
27 */
28
29extern void (*interrupt[NR_IRQS])(void);
30
31#ifdef CONFIG_SMP
32fastcall void reschedule_interrupt(void);
33fastcall void invalidate_interrupt(void);
34fastcall void call_function_interrupt(void);
35#endif
36
37#ifdef CONFIG_X86_LOCAL_APIC
38fastcall void apic_timer_interrupt(void);
39fastcall void error_interrupt(void);
40fastcall void spurious_interrupt(void);
41fastcall void thermal_interrupt(void);
42#define platform_legacy_irq(irq) ((irq) < 16)
43#endif
44
45void disable_8259A_irq(unsigned int irq);
46void enable_8259A_irq(unsigned int irq);
47int i8259A_irq_pending(unsigned int irq);
48void make_8259A_irq(unsigned int irq);
49void init_8259A(int aeoi);
50void FASTCALL(send_IPI_self(int vector));
51void init_VISWS_APIC_irqs(void);
52void setup_IO_APIC(void);
53void disable_IO_APIC(void);
54void print_IO_APIC(void);
55int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
56void send_IPI(int dest, int vector);
57void setup_ioapic_dest(void);
58
59extern unsigned long io_apic_irqs;
60
61extern atomic_t irq_err_count;
62extern atomic_t irq_mis_count;
63
64#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
65
66#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-i386/hypertransport.h b/include/asm-i386/hypertransport.h
deleted file mode 100644
index c16c6ff4bdd7..000000000000
--- a/include/asm-i386/hypertransport.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef ASM_HYPERTRANSPORT_H
2#define ASM_HYPERTRANSPORT_H
3
4/*
5 * Constants for x86 Hypertransport Interrupts.
6 */
7
8#define HT_IRQ_LOW_BASE 0xf8000000
9
10#define HT_IRQ_LOW_VECTOR_SHIFT 16
11#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
12#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
13
14#define HT_IRQ_LOW_DEST_ID_SHIFT 8
15#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
16#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
17
18#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
19#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
20
21#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
22#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
23
24
25#define HT_IRQ_LOW_MT_FIXED 0x0000000
26#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
27#define HT_IRQ_LOW_MT_SMI 0x0000008
28#define HT_IRQ_LOW_MT_NMI 0x000000c
29#define HT_IRQ_LOW_MT_INIT 0x0000010
30#define HT_IRQ_LOW_MT_STARTUP 0x0000014
31#define HT_IRQ_LOW_MT_EXTINT 0x0000018
32#define HT_IRQ_LOW_MT_LINT1 0x000008c
33#define HT_IRQ_LOW_MT_LINT0 0x0000098
34
35#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
36
37
38#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
39#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
40#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
41
42#endif /* ASM_HYPERTRANSPORT_H */
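
A runnable composition of the low message word from the field macros above; vector 0x31 delivered physical/fixed/edge to APIC ID 0x02 is an arbitrary example:

#include <stdio.h>

#define HT_IRQ_LOW_BASE			0xf8000000
#define HT_IRQ_LOW_VECTOR_SHIFT		16
#define HT_IRQ_LOW_VECTOR_MASK		0x00ff0000
#define HT_IRQ_LOW_VECTOR(v)	(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
#define HT_IRQ_LOW_DEST_ID_SHIFT	8
#define HT_IRQ_LOW_DEST_ID_MASK		0x0000ff00
#define HT_IRQ_LOW_DEST_ID(v)	(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
#define HT_IRQ_LOW_DM_PHYSICAL		0x0000000
#define HT_IRQ_LOW_RQEOI_EDGE		0x0000000
#define HT_IRQ_LOW_MT_FIXED		0x0000000

int main(void)
{
	unsigned int low = HT_IRQ_LOW_BASE |
			   HT_IRQ_LOW_VECTOR(0x31) |
			   HT_IRQ_LOW_DEST_ID(0x02) |
			   HT_IRQ_LOW_DM_PHYSICAL |
			   HT_IRQ_LOW_RQEOI_EDGE |
			   HT_IRQ_LOW_MT_FIXED;

	printf("low = 0x%08x\n", low);	/* 0xf8310200 */
	return 0;
}
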
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
deleted file mode 100644
index cdd1e248e3b4..000000000000
--- a/include/asm-i386/i387.h
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * include/asm-i386/i387.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 *
6 * Pentium III FXSR, SSE support
7 * General FPU state handling cleanups
8 * Gareth Hughes <gareth@valinux.com>, May 2000
9 */
10
11#ifndef __ASM_I386_I387_H
12#define __ASM_I386_I387_H
13
14#include <linux/sched.h>
15#include <linux/init.h>
16#include <linux/kernel_stat.h>
17#include <asm/processor.h>
18#include <asm/sigcontext.h>
19#include <asm/user.h>
20
21extern void mxcsr_feature_mask_init(void);
22extern void init_fpu(struct task_struct *);
23
24/*
25 * FPU lazy state save handling...
26 */
27
28/*
29 * The "nop" is needed to make the instructions the same
30 * length.
31 */
32#define restore_fpu(tsk) \
33 alternative_input( \
34 "nop ; frstor %1", \
35 "fxrstor %1", \
36 X86_FEATURE_FXSR, \
37 "m" ((tsk)->thread.i387.fxsave))
38
39extern void kernel_fpu_begin(void);
40#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
41
42/* We need a safe address that is cheap to find and that is already
43 in L1 during context switch. The best choices are unfortunately
44 different for UP and SMP */
45#ifdef CONFIG_SMP
46#define safe_address (__per_cpu_offset[0])
47#else
48#define safe_address (kstat_cpu(0).cpustat.user)
49#endif
50
51/*
52 * These must be called with preempt disabled
53 */
54static inline void __save_init_fpu( struct task_struct *tsk )
55{
56 /* Use more nops than strictly needed in case the compiler
57 varies the code */
58 alternative_input(
59 "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
60 "fxsave %[fx]\n"
61 "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
62 X86_FEATURE_FXSR,
63 [fx] "m" (tsk->thread.i387.fxsave),
64 [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory");
65 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
66 is pending. Clear the x87 state here by setting it to fixed
67 values. safe_address is a random variable that should be in L1 */
68 alternative_input(
69 GENERIC_NOP8 GENERIC_NOP2,
70 "emms\n\t" /* clear stack tags */
71 "fildl %[addr]", /* set F?P to defined value */
72 X86_FEATURE_FXSAVE_LEAK,
73 [addr] "m" (safe_address));
74 task_thread_info(tsk)->status &= ~TS_USEDFPU;
75}
76
77#define __unlazy_fpu( tsk ) do { \
78 if (task_thread_info(tsk)->status & TS_USEDFPU) { \
79 __save_init_fpu(tsk); \
80 stts(); \
81 } else \
82 tsk->fpu_counter = 0; \
83} while (0)
84
85#define __clear_fpu( tsk ) \
86do { \
87 if (task_thread_info(tsk)->status & TS_USEDFPU) { \
88 asm volatile("fnclex ; fwait"); \
89 task_thread_info(tsk)->status &= ~TS_USEDFPU; \
90 stts(); \
91 } \
92} while (0)
93
94
95/*
96 * These disable preemption on their own and are safe
97 */
98static inline void save_init_fpu( struct task_struct *tsk )
99{
100 preempt_disable();
101 __save_init_fpu(tsk);
102 stts();
103 preempt_enable();
104}
105
106#define unlazy_fpu( tsk ) do { \
107 preempt_disable(); \
108 __unlazy_fpu(tsk); \
109 preempt_enable(); \
110} while (0)
111
112#define clear_fpu( tsk ) do { \
113 preempt_disable(); \
114 __clear_fpu( tsk ); \
115 preempt_enable(); \
116} while (0)
117
118/*
119 * FPU state interaction...
120 */
121extern unsigned short get_fpu_cwd( struct task_struct *tsk );
122extern unsigned short get_fpu_swd( struct task_struct *tsk );
123extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
124extern asmlinkage void math_state_restore(void);
125
126/*
127 * Signal frame handlers...
128 */
129extern int save_i387( struct _fpstate __user *buf );
130extern int restore_i387( struct _fpstate __user *buf );
131
132/*
133 * ptrace request handlers...
134 */
135extern int get_fpregs( struct user_i387_struct __user *buf,
136 struct task_struct *tsk );
137extern int set_fpregs( struct task_struct *tsk,
138 struct user_i387_struct __user *buf );
139
140extern int get_fpxregs( struct user_fxsr_struct __user *buf,
141 struct task_struct *tsk );
142extern int set_fpxregs( struct task_struct *tsk,
143 struct user_fxsr_struct __user *buf );
144
145/*
146 * FPU state for core dumps...
147 */
148extern int dump_fpu( struct pt_regs *regs,
149 struct user_i387_struct *fpu );
150
151#endif /* __ASM_I386_I387_H */
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
deleted file mode 100644
index 7577d058d86e..000000000000
--- a/include/asm-i386/i8253.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef __ASM_I8253_H__
2#define __ASM_I8253_H__
3
4#include <linux/clockchips.h>
5
6/* i8253A PIT registers */
7#define PIT_MODE 0x43
8#define PIT_CH0 0x40
9#define PIT_CH2 0x42
10
11extern spinlock_t i8253_lock;
12
13extern struct clock_event_device *global_clock_event;
14
15extern void setup_pit_timer(void);
16
17#endif /* __ASM_I8253_H__ */
diff --git a/include/asm-i386/i8259.h b/include/asm-i386/i8259.h
deleted file mode 100644
index 29d8f9a6b3fc..000000000000
--- a/include/asm-i386/i8259.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef __ASM_I8259_H__
2#define __ASM_I8259_H__
3
4extern unsigned int cached_irq_mask;
5
6#define __byte(x,y) (((unsigned char *) &(y))[x])
7#define cached_master_mask (__byte(0, cached_irq_mask))
8#define cached_slave_mask (__byte(1, cached_irq_mask))
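/*
 * Illustrative sketch (the helper is hypothetical, and outb is assumed
 * from <asm/io.h>): masking slave IRQ 10 sets bit 2 of byte 1 of
 * cached_irq_mask, i.e. bit 10 of the full word.
 */
static inline void example_mask_irq10(void)
{
	cached_slave_mask |= 1 << (10 - 8);	/* bit 10 of cached_irq_mask */
	outb(cached_slave_mask, 0xa1);		/* slave PIC IMR */
}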
9
10extern spinlock_t i8259A_lock;
11
12extern void init_8259A(int auto_eoi);
13extern void enable_8259A_irq(unsigned int irq);
14extern void disable_8259A_irq(unsigned int irq);
15extern unsigned int startup_8259A_irq(unsigned int irq);
16
17#endif /* __ASM_I8259_H__ */
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
deleted file mode 100644
index e7817a3d6578..000000000000
--- a/include/asm-i386/ide.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * linux/include/asm-i386/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7/*
8 * This file contains the i386 architecture specific IDE code.
9 */
10
11#ifndef __ASMi386_IDE_H
12#define __ASMi386_IDE_H
13
14#ifdef __KERNEL__
15
16
17#ifndef MAX_HWIFS
18# ifdef CONFIG_BLK_DEV_IDEPCI
19#define MAX_HWIFS 10
20# else
21#define MAX_HWIFS 6
22# endif
23#endif
24
25#define IDE_ARCH_OBSOLETE_DEFAULTS
26
27static __inline__ int ide_default_irq(unsigned long base)
28{
29 switch (base) {
30 case 0x1f0: return 14;
31 case 0x170: return 15;
32 case 0x1e8: return 11;
33 case 0x168: return 10;
34 case 0x1e0: return 8;
35 case 0x160: return 12;
36 default:
37 return 0;
38 }
39}
40
41static __inline__ unsigned long ide_default_io_base(int index)
42{
43 /*
44 * If PCI is present then it is not safe to poke around
45 * the other legacy IDE ports. Only 0x1f0 and 0x170 are
46 * defined compatibility mode ports for PCI. A user can
47 * override this using ide= but we must default safe.
48 */
49 if (no_pci_devices()) {
50 switch(index) {
51 case 2: return 0x1e8;
52 case 3: return 0x168;
53 case 4: return 0x1e0;
54 case 5: return 0x160;
55 }
56 }
57 switch (index) {
58 case 0: return 0x1f0;
59 case 1: return 0x170;
60 default:
61 return 0;
62 }
63}
64
65#define IDE_ARCH_OBSOLETE_INIT
66#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
67
68#ifdef CONFIG_BLK_DEV_IDEPCI
69#define ide_init_default_irq(base) (0)
70#else
71#define ide_init_default_irq(base) ide_default_irq(base)
72#endif
73
74#include <asm-generic/ide_iops.h>
75
76#endif /* __KERNEL__ */
77
78#endif /* __ASMi386_IDE_H */
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
deleted file mode 100644
index b52cd60a075b..000000000000
--- a/include/asm-i386/intel_arch_perfmon.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef X86_INTEL_ARCH_PERFMON_H
2#define X86_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
18#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
19 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
20
21union cpuid10_eax {
22 struct {
23 unsigned int version_id:8;
24 unsigned int num_counters:8;
25 unsigned int bit_width:8;
26 unsigned int mask_length:8;
27 } split;
28 unsigned int full;
29};
30
31#endif /* X86_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
deleted file mode 100644
index e8e0bd641120..000000000000
--- a/include/asm-i386/io.h
+++ /dev/null
@@ -1,349 +0,0 @@
1#ifndef _ASM_IO_H
2#define _ASM_IO_H
3
4#include <linux/string.h>
5#include <linux/compiler.h>
6
7/*
8 * This file contains the definitions for the x86 IO instructions
9 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
10 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
11 * versions of the single-IO instructions (inb_p/inw_p/..).
12 *
13 * This file is not meant to be obfuscating: it's just complicated
14 * to (a) handle it all in a way that makes gcc able to optimize it
15 * as well as possible and (b) trying to avoid writing the same thing
16 * over and over again with slight variations and possibly making a
17 * mistake somewhere.
18 */
19
20/*
21 * Thanks to James van Artsdalen for a better timing-fix than
22 * the two short jumps: using outb's to a nonexistent port seems
23 * to guarantee better timings even on fast machines.
24 *
25 * On the other hand, I'd like to be sure of a non-existent port:
26 * I feel a bit unsafe about using 0x80 (should be safe, though)
27 *
28 * Linus
29 */
30
31 /*
32 * A bit simplified and optimized by Jan Hubicka
33 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
34 *
35 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
36 * isa_read[wl] and isa_write[wl] fixed
37 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
38 */
39
40#define IO_SPACE_LIMIT 0xffff
41
42#define XQUAD_PORTIO_BASE 0xfe400000
43#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
44
45#ifdef __KERNEL__
46
47#include <asm-generic/iomap.h>
48
49#include <linux/vmalloc.h>
50
51/*
52 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
53 * access
54 */
55#define xlate_dev_mem_ptr(p) __va(p)
56
57/*
58 * Convert a virtual cached pointer to an uncached pointer
59 */
60#define xlate_dev_kmem_ptr(p) p
61
62/**
63 * virt_to_phys - map virtual addresses to physical
64 * @address: address to remap
65 *
66 * The returned physical address is the physical (CPU) mapping for
67 * the memory address given. It is only valid to use this function on
68 * addresses directly mapped or allocated via kmalloc.
69 *
70 * This function does not give bus mappings for DMA transfers. In
71 * almost all conceivable cases a device driver should not be using
72 * this function
73 */
74
75static inline unsigned long virt_to_phys(volatile void * address)
76{
77 return __pa(address);
78}
79
80/**
81 * phys_to_virt - map physical address to virtual
82 * @address: address to remap
83 *
84 * The returned virtual address is a current CPU mapping for
85 * the memory address given. It is only valid to use this function on
86 * addresses that have a kernel mapping
87 *
88 * This function does not handle bus mappings for DMA transfers. In
89 * almost all conceivable cases a device driver should not be using
90 * this function
91 */
92
93static inline void * phys_to_virt(unsigned long address)
94{
95 return __va(address);
96}
97
98/*
99 * Change "struct page" to physical address.
100 */
101#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
102
103extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
104
105/**
106 * ioremap - map bus memory into CPU space
107 * @offset: bus address of the memory
108 * @size: size of the resource to map
109 *
110 * ioremap performs a platform specific sequence of operations to
111 * make bus memory CPU accessible via the readb/readw/readl/writeb/
112 * writew/writel functions and the other mmio helpers. The returned
113 * address is not guaranteed to be usable directly as a virtual
114 * address.
115 *
116 * If the area you are trying to map is a PCI BAR you should have a
117 * look at pci_iomap().
118 */
119
120static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
121{
122 return __ioremap(offset, size, 0);
123}
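/*
 * Driver-side usage sketch -- the physical address, size and register
 * offset below are invented:
 */
static inline void example_mmio_access(void)
{
	void __iomem *regs = ioremap(0xfebf0000, 0x1000);

	if (regs) {
		unsigned int status = readl(regs + 0x04);	/* MMIO read */

		writel(status | 1, regs + 0x04);		/* MMIO write */
		iounmap(regs);
	}
}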
124
125extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
126extern void iounmap(volatile void __iomem *addr);
127
128/*
129 * bt_ioremap() and bt_iounmap() are for temporary early boot-time
130 * mappings, before the real ioremap() is functional.
131 * A boot-time mapping is currently limited to at most 16 pages.
132 */
133extern void *bt_ioremap(unsigned long offset, unsigned long size);
134extern void bt_iounmap(void *addr, unsigned long size);
135extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
136
137/* Use early IO mappings for DMI because it's initialized early */
138#define dmi_ioremap bt_ioremap
139#define dmi_iounmap bt_iounmap
140#define dmi_alloc alloc_bootmem
141
142/*
143 * ISA I/O bus memory addresses are 1:1 with the physical address.
144 */
145#define isa_virt_to_bus virt_to_phys
146#define isa_page_to_bus page_to_phys
147#define isa_bus_to_virt phys_to_virt
148
149/*
150 * However PCI ones are not necessarily 1:1 and therefore these interfaces
151 * are forbidden in portable PCI drivers.
152 *
153 * Allow them on x86 for legacy drivers, though.
154 */
155#define virt_to_bus virt_to_phys
156#define bus_to_virt phys_to_virt
157
158/*
159 * readX/writeX() are used to access memory mapped devices. On some
160 * architectures the memory mapped IO stuff needs to be accessed
161 * differently. On the x86 architecture, we just read/write the
162 * memory location directly.
163 */
164
165static inline unsigned char readb(const volatile void __iomem *addr)
166{
167 return *(volatile unsigned char __force *) addr;
168}
169static inline unsigned short readw(const volatile void __iomem *addr)
170{
171 return *(volatile unsigned short __force *) addr;
172}
173static inline unsigned int readl(const volatile void __iomem *addr)
174{
175 return *(volatile unsigned int __force *) addr;
176}
177#define readb_relaxed(addr) readb(addr)
178#define readw_relaxed(addr) readw(addr)
179#define readl_relaxed(addr) readl(addr)
180#define __raw_readb readb
181#define __raw_readw readw
182#define __raw_readl readl
183
184static inline void writeb(unsigned char b, volatile void __iomem *addr)
185{
186 *(volatile unsigned char __force *) addr = b;
187}
188static inline void writew(unsigned short b, volatile void __iomem *addr)
189{
190 *(volatile unsigned short __force *) addr = b;
191}
192static inline void writel(unsigned int b, volatile void __iomem *addr)
193{
194 *(volatile unsigned int __force *) addr = b;
195}
196#define __raw_writeb writeb
197#define __raw_writew writew
198#define __raw_writel writel
199
200#define mmiowb()
201
202static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
203{
204 memset((void __force *) addr, val, count);
205}
206static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
207{
208 __memcpy(dst, (void __force *) src, count);
209}
210static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
211{
212 __memcpy((void __force *) dst, src, count);
213}
214
215/*
216 * ISA space is 'always mapped' on a typical x86 system, no need to
217 * explicitly ioremap() it. The fact that the ISA IO space is mapped
218 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
219 * are physical addresses. The following constant pointer can be
220 * used as the IO-area pointer (it can be iounmapped as well, so the
221 * analogy with PCI is quite close):
222 */
223#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
224
225/*
226 * Cache management
227 *
228 * This is needed for two cases:
229 * 1. Out of order aware processors
230 * 2. Accidentally out of order processors (PPro errata #51)
231 */
232
233#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
234
235static inline void flush_write_buffers(void)
236{
237 __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
238}
239
240#define dma_cache_inv(_start,_size) flush_write_buffers()
241#define dma_cache_wback(_start,_size) flush_write_buffers()
242#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
243
244#else
245
246/* Nothing to do */
247
248#define dma_cache_inv(_start,_size) do { } while (0)
249#define dma_cache_wback(_start,_size) do { } while (0)
250#define dma_cache_wback_inv(_start,_size) do { } while (0)
251#define flush_write_buffers()
252
253#endif
254
255#endif /* __KERNEL__ */
256
257static inline void native_io_delay(void)
258{
259 asm volatile("outb %%al,$0x80" : : : "memory");
260}
261
262#if defined(CONFIG_PARAVIRT)
263#include <asm/paravirt.h>
264#else
265
266static inline void slow_down_io(void) {
267 native_io_delay();
268#ifdef REALLY_SLOW_IO
269 native_io_delay();
270 native_io_delay();
271 native_io_delay();
272#endif
273}
274
275#endif
276
277#ifdef CONFIG_X86_NUMAQ
278extern void *xquad_portio; /* Where the IO area was mapped */
279#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
280#define __BUILDIO(bwl,bw,type) \
281static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
282 if (xquad_portio) \
283 write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
284 else \
285 out##bwl##_local(value, port); \
286} \
287static inline void out##bwl(unsigned type value, int port) { \
288 out##bwl##_quad(value, port, 0); \
289} \
290static inline unsigned type in##bwl##_quad(int port, int quad) { \
291 if (xquad_portio) \
292 return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
293 else \
294 return in##bwl##_local(port); \
295} \
296static inline unsigned type in##bwl(int port) { \
297 return in##bwl##_quad(port, 0); \
298}
299#else
300#define __BUILDIO(bwl,bw,type) \
301static inline void out##bwl(unsigned type value, int port) { \
302 out##bwl##_local(value, port); \
303} \
304static inline unsigned type in##bwl(int port) { \
305 return in##bwl##_local(port); \
306}
307#endif
308
309
310#define BUILDIO(bwl,bw,type) \
311static inline void out##bwl##_local(unsigned type value, int port) { \
312 __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
313} \
314static inline unsigned type in##bwl##_local(int port) { \
315 unsigned type value; \
316 __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
317 return value; \
318} \
319static inline void out##bwl##_local_p(unsigned type value, int port) { \
320 out##bwl##_local(value, port); \
321 slow_down_io(); \
322} \
323static inline unsigned type in##bwl##_local_p(int port) { \
324 unsigned type value = in##bwl##_local(port); \
325 slow_down_io(); \
326 return value; \
327} \
328__BUILDIO(bwl,bw,type) \
329static inline void out##bwl##_p(unsigned type value, int port) { \
330 out##bwl(value, port); \
331 slow_down_io(); \
332} \
333static inline unsigned type in##bwl##_p(int port) { \
334 unsigned type value = in##bwl(port); \
335 slow_down_io(); \
336 return value; \
337} \
338static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
339 __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
340} \
341static inline void ins##bwl(int port, void *addr, unsigned long count) { \
342 __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
343}
344
345BUILDIO(b,b,char)
346BUILDIO(w,w,short)
347BUILDIO(l,,int)
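/*
 * Approximate expansion sketch: for the byte case on a non-NUMAQ build,
 * BUILDIO(b,b,char) boils down to (with the _local indirection folded):
 *
 *	static inline void outb(unsigned char value, int port) {
 *		asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
 *	}
 *	static inline unsigned char inb(int port) {
 *		unsigned char value;
 *		asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
 *		return value;
 *	}
 *
 * with the _p variants additionally calling slow_down_io() afterwards.
 */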
348
349#endif
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
deleted file mode 100644
index dbe734ddf2af..000000000000
--- a/include/asm-i386/io_apic.h
+++ /dev/null
@@ -1,155 +0,0 @@
1#ifndef __ASM_IO_APIC_H
2#define __ASM_IO_APIC_H
3
4#include <asm/types.h>
5#include <asm/mpspec.h>
6#include <asm/apicdef.h>
7
8/*
9 * Intel IO-APIC support for SMP and UP systems.
10 *
11 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
12 */
13
14#ifdef CONFIG_X86_IO_APIC
15
16/*
17 * The structure of the IO-APIC:
18 */
19union IO_APIC_reg_00 {
20 u32 raw;
21 struct {
22 u32 __reserved_2 : 14,
23 LTS : 1,
24 delivery_type : 1,
25 __reserved_1 : 8,
26 ID : 8;
27 } __attribute__ ((packed)) bits;
28};
29
30union IO_APIC_reg_01 {
31 u32 raw;
32 struct {
33 u32 version : 8,
34 __reserved_2 : 7,
35 PRQ : 1,
36 entries : 8,
37 __reserved_1 : 8;
38 } __attribute__ ((packed)) bits;
39};
40
41union IO_APIC_reg_02 {
42 u32 raw;
43 struct {
44 u32 __reserved_2 : 24,
45 arbitration : 4,
46 __reserved_1 : 4;
47 } __attribute__ ((packed)) bits;
48};
49
50union IO_APIC_reg_03 {
51 u32 raw;
52 struct {
53 u32 boot_DT : 1,
54 __reserved_1 : 31;
55 } __attribute__ ((packed)) bits;
56};
57
58/*
59 * # of IO-APICs and # of IRQ routing registers
60 */
61extern int nr_ioapics;
62extern int nr_ioapic_registers[MAX_IO_APICS];
63
64enum ioapic_irq_destination_types {
65 dest_Fixed = 0,
66 dest_LowestPrio = 1,
67 dest_SMI = 2,
68 dest__reserved_1 = 3,
69 dest_NMI = 4,
70 dest_INIT = 5,
71 dest__reserved_2 = 6,
72 dest_ExtINT = 7
73};
74
75struct IO_APIC_route_entry {
76 __u32 vector : 8,
77 delivery_mode : 3, /* 000: FIXED
78 * 001: lowest prio
79 * 111: ExtINT
80 */
81 dest_mode : 1, /* 0: physical, 1: logical */
82 delivery_status : 1,
83 polarity : 1,
84 irr : 1,
85 trigger : 1, /* 0: edge, 1: level */
86 mask : 1, /* 0: enabled, 1: disabled */
87 __reserved_2 : 15;
88
89 union { struct { __u32
90 __reserved_1 : 24,
91 physical_dest : 4,
92 __reserved_2 : 4;
93 } physical;
94
95 struct { __u32
96 __reserved_1 : 24,
97 logical_dest : 8;
98 } logical;
99 } dest;
100
101} __attribute__ ((packed));
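/*
 * Fill-in sketch (vector and destination values are invented; real code
 * would then program the IO-APIC redirection table with the entry):
 */
static inline void example_route_entry(void)
{
	struct IO_APIC_route_entry entry;

	memset(&entry, 0, sizeof(entry));	/* assumes <linux/string.h> */
	entry.vector = 0x31;			/* IDT vector to raise */
	entry.delivery_mode = dest_LowestPrio;
	entry.dest_mode = 1;			/* logical destination */
	entry.trigger = 1;			/* level triggered */
	entry.mask = 0;				/* enabled */
	entry.dest.logical.logical_dest = 0x01;	/* e.g. CPU0 */
}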
102
103/*
104 * MP-BIOS irq configuration table structures:
105 */
106
107/* I/O APIC entries */
108extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
109
110/* # of MP IRQ source entries */
111extern int mp_irq_entries;
112
113/* MP IRQ source entries */
114extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
115
116/* non-0 if default (table-less) MP configuration */
117extern int mpc_default_type;
118
119/* Older SiS APIC requires we rewrite the index register */
120extern int sis_apic_bug;
121
122/* 1 if "noapic" boot option passed */
123extern int skip_ioapic_setup;
124
125static inline void disable_ioapic_setup(void)
126{
127 skip_ioapic_setup = 1;
128}
129
130static inline int ioapic_setup_disabled(void)
131{
132 return skip_ioapic_setup;
133}
134
135/*
136 * If we use the IO-APIC for IRQ routing, disable automatic
137 * assignment of PCI IRQ's.
138 */
139#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
140
141#ifdef CONFIG_ACPI
142extern int io_apic_get_unique_id (int ioapic, int apic_id);
143extern int io_apic_get_version (int ioapic);
144extern int io_apic_get_redir_entries (int ioapic);
145extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low);
146extern int timer_uses_ioapic_pin_0;
147#endif /* CONFIG_ACPI */
148
149extern int (*ioapic_renumber_irq)(int ioapic, int irq);
150
151#else /* !CONFIG_X86_IO_APIC */
152#define io_apic_assign_pci_irqs 0
153#endif
154
155#endif
diff --git a/include/asm-i386/ioctl.h b/include/asm-i386/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/include/asm-i386/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h
deleted file mode 100644
index ef5878762dc9..000000000000
--- a/include/asm-i386/ioctls.h
+++ /dev/null
@@ -1,87 +0,0 @@
1#ifndef __ARCH_I386_IOCTLS_H__
2#define __ARCH_I386_IOCTLS_H__
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56
57#define FIONCLEX 0x5450
58#define FIOCLEX 0x5451
59#define FIOASYNC 0x5452
60#define TIOCSERCONFIG 0x5453
61#define TIOCSERGWILD 0x5454
62#define TIOCSERSWILD 0x5455
63#define TIOCGLCKTRMIOS 0x5456
64#define TIOCSLCKTRMIOS 0x5457
65#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
66#define TIOCSERGETLSR 0x5459 /* Get line status register */
67#define TIOCSERGETMULTI 0x545A /* Get multiport config */
68#define TIOCSERSETMULTI 0x545B /* Set multiport config */
69
70#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
71#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
72#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
73#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
74#define FIOQSIZE 0x5460
75
76/* Used for packet mode */
77#define TIOCPKT_DATA 0
78#define TIOCPKT_FLUSHREAD 1
79#define TIOCPKT_FLUSHWRITE 2
80#define TIOCPKT_STOP 4
81#define TIOCPKT_START 8
82#define TIOCPKT_NOSTOP 16
83#define TIOCPKT_DOSTOP 32
84
85#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
86
87#endif
diff --git a/include/asm-i386/ipc.h b/include/asm-i386/ipc.h
deleted file mode 100644
index a46e3d9c2a3f..000000000000
--- a/include/asm-i386/ipc.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipc.h>
diff --git a/include/asm-i386/ipcbuf.h b/include/asm-i386/ipcbuf.h
deleted file mode 100644
index 0dcad4f84c2a..000000000000
--- a/include/asm-i386/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __i386_IPCBUF_H__
2#define __i386_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid32_t uid;
18 __kernel_gid32_t gid;
19 __kernel_uid32_t cuid;
20 __kernel_gid32_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned short __pad2;
25 unsigned long __unused1;
26 unsigned long __unused2;
27};
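/* Size sketch, assuming i386's 4-byte key/uid/gid types and 2-byte
 * __kernel_mode_t: 5*4 + 4*2 + 2*4 = 36 bytes, where __pad1 leaves room
 * to widen mode to 32 bits and __pad2 does the same for seq. */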
28
29#endif /* __i386_IPCBUF_H__ */
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
deleted file mode 100644
index 36f310632c49..000000000000
--- a/include/asm-i386/irq.h
+++ /dev/null
@@ -1,48 +0,0 @@
1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H
3
4/*
5 * linux/include/asm/irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 */
12
13#include <linux/sched.h>
14/* include comes from machine specific directory */
15#include "irq_vectors.h"
16#include <asm/thread_info.h>
17
18static __inline__ int irq_canonicalize(int irq)
19{
20 return ((irq == 2) ? 9 : irq);
21}
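/* Illustration: IRQ 2 is the 8259A cascade line, so a device routed there
 * shows up as IRQ 9 -- irq_canonicalize(2) == 9; everything else passes
 * through unchanged. */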
22
23#ifdef CONFIG_X86_LOCAL_APIC
24# define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
25#endif
26
27#ifdef CONFIG_4KSTACKS
28 extern void irq_ctx_init(int cpu);
29 extern void irq_ctx_exit(int cpu);
30# define __ARCH_HAS_DO_SOFTIRQ
31#else
32# define irq_ctx_init(cpu) do { } while (0)
33# define irq_ctx_exit(cpu) do { } while (0)
34#endif
35
36#ifdef CONFIG_IRQBALANCE
37extern int irqbalance_disable(char *str);
38#endif
39
40#ifdef CONFIG_HOTPLUG_CPU
41extern void fixup_irqs(cpumask_t map);
42#endif
43
44unsigned int do_IRQ(struct pt_regs *regs);
45void init_IRQ(void);
46void __init native_init_IRQ(void);
47
48#endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h
deleted file mode 100644
index 3368b20c0b48..000000000000
--- a/include/asm-i386/irq_regs.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Per-cpu current frame pointer - the location of the last exception frame on
3 * the stack, stored in the per-cpu area.
4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_I386_IRQ_REGS_H
8#define _ASM_I386_IRQ_REGS_H
9
10#include <asm/percpu.h>
11
12DECLARE_PER_CPU(struct pt_regs *, irq_regs);
13
14static inline struct pt_regs *get_irq_regs(void)
15{
16 return x86_read_percpu(irq_regs);
17}
18
19static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
20{
21 struct pt_regs *old_regs;
22
23 old_regs = get_irq_regs();
24 x86_write_percpu(irq_regs, new_regs);
25
26 return old_regs;
27}
28
29#endif /* _ASM_I386_IRQ_REGS_H */
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
deleted file mode 100644
index eff8585cb741..000000000000
--- a/include/asm-i386/irqflags.h
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * include/asm-i386/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12#include <asm/processor-flags.h>
13
14#ifndef __ASSEMBLY__
15static inline unsigned long native_save_fl(void)
16{
17 unsigned long f;
18 asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
19 return f;
20}
21
22static inline void native_restore_fl(unsigned long f)
23{
24 asm volatile("pushl %0 ; popfl": /* no output */
25 :"g" (f)
26 :"memory", "cc");
27}
28
29static inline void native_irq_disable(void)
30{
31 asm volatile("cli": : :"memory");
32}
33
34static inline void native_irq_enable(void)
35{
36 asm volatile("sti": : :"memory");
37}
38
39static inline void native_safe_halt(void)
40{
41 asm volatile("sti; hlt": : :"memory");
42}
43
44static inline void native_halt(void)
45{
46 asm volatile("hlt": : :"memory");
47}
48#endif /* __ASSEMBLY__ */
49
50#ifdef CONFIG_PARAVIRT
51#include <asm/paravirt.h>
52#else
53#ifndef __ASSEMBLY__
54
55static inline unsigned long __raw_local_save_flags(void)
56{
57 return native_save_fl();
58}
59
60static inline void raw_local_irq_restore(unsigned long flags)
61{
62 native_restore_fl(flags);
63}
64
65static inline void raw_local_irq_disable(void)
66{
67 native_irq_disable();
68}
69
70static inline void raw_local_irq_enable(void)
71{
72 native_irq_enable();
73}
74
75/*
76 * Used in the idle loop; sti takes one instruction cycle
77 * to complete:
78 */
79static inline void raw_safe_halt(void)
80{
81 native_safe_halt();
82}
83
84/*
85 * Used when interrupts are already enabled or to
86 * shut down the processor:
87 */
88static inline void halt(void)
89{
90 native_halt();
91}
92
93/*
94 * For spinlocks, etc:
95 */
96static inline unsigned long __raw_local_irq_save(void)
97{
98 unsigned long flags = __raw_local_save_flags();
99
100 raw_local_irq_disable();
101
102 return flags;
103}
104
105#else
106#define DISABLE_INTERRUPTS(clobbers) cli
107#define ENABLE_INTERRUPTS(clobbers) sti
108#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
109#define INTERRUPT_RETURN iret
110#define GET_CR0_INTO_EAX movl %cr0, %eax
111#endif /* __ASSEMBLY__ */
112#endif /* CONFIG_PARAVIRT */
113
114#ifndef __ASSEMBLY__
115#define raw_local_save_flags(flags) \
116 do { (flags) = __raw_local_save_flags(); } while (0)
117
118#define raw_local_irq_save(flags) \
119 do { (flags) = __raw_local_irq_save(); } while (0)
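/* Usage sketch (the critical-section body is invented): */
static inline void example_irq_critical(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* save IF, then disable interrupts */
	/* ... touch data shared with an interrupt handler ... */
	raw_local_irq_restore(flags);	/* restore the previous IF state */
}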
120
121static inline int raw_irqs_disabled_flags(unsigned long flags)
122{
123 return !(flags & X86_EFLAGS_IF);
124}
125
126static inline int raw_irqs_disabled(void)
127{
128 unsigned long flags = __raw_local_save_flags();
129
130 return raw_irqs_disabled_flags(flags);
131}
132#endif /* __ASSEMBLY__ */
133
134/*
135 * Do the CPU's IRQ-state tracing from assembly code. We call a
136 * C function, so save all the C-clobbered registers:
137 */
138#ifdef CONFIG_TRACE_IRQFLAGS
139
140# define TRACE_IRQS_ON \
141 pushl %eax; \
142 pushl %ecx; \
143 pushl %edx; \
144 call trace_hardirqs_on; \
145 popl %edx; \
146 popl %ecx; \
147 popl %eax;
148
149# define TRACE_IRQS_OFF \
150 pushl %eax; \
151 pushl %ecx; \
152 pushl %edx; \
153 call trace_hardirqs_off; \
154 popl %edx; \
155 popl %ecx; \
156 popl %eax;
157
158#else
159# define TRACE_IRQS_ON
160# define TRACE_IRQS_OFF
161#endif
162
163#endif
diff --git a/include/asm-i386/ist.h b/include/asm-i386/ist.h
deleted file mode 100644
index ef2003ebc6f9..000000000000
--- a/include/asm-i386/ist.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef _ASM_IST_H
2#define _ASM_IST_H
3
4/*
5 * Include file for the interface to IST BIOS
6 * Copyright 2002 Andy Grover <andrew.grover@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19
20#ifdef __KERNEL__
21
22#include <linux/types.h>
23
24struct ist_info {
25 u32 signature;
26 u32 command;
27 u32 event;
28 u32 perf_level;
29};
30
31extern struct ist_info ist_info;
32
33#endif /* __KERNEL__ */
34#endif /* _ASM_IST_H */
diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h
deleted file mode 100644
index dfd88a6e6040..000000000000
--- a/include/asm-i386/k8.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-x86_64/k8.h>
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
deleted file mode 100644
index a185b5f73e7f..000000000000
--- a/include/asm-i386/kdebug.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef _I386_KDEBUG_H
2#define _I386_KDEBUG_H 1
3
4/*
5 * Aug-05 2004 Ported by Prasanna S Panchamukhi <prasanna@in.ibm.com>
6 * from x86_64 architecture.
7 */
8#include <linux/notifier.h>
9
10struct pt_regs;
11
12extern int register_page_fault_notifier(struct notifier_block *);
13extern int unregister_page_fault_notifier(struct notifier_block *);
14
15
16/* Grossly misnamed. */
17enum die_val {
18 DIE_OOPS = 1,
19 DIE_INT3,
20 DIE_DEBUG,
21 DIE_PANIC,
22 DIE_NMI,
23 DIE_DIE,
24 DIE_NMIWATCHDOG,
25 DIE_KERNELDEBUG,
26 DIE_TRAP,
27 DIE_GPF,
28 DIE_CALL,
29 DIE_NMI_IPI,
30 DIE_PAGE_FAULT,
31};
32
33#endif
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
deleted file mode 100644
index 4b9dc9e6b701..000000000000
--- a/include/asm-i386/kexec.h
+++ /dev/null
@@ -1,99 +0,0 @@
1#ifndef _I386_KEXEC_H
2#define _I386_KEXEC_H
3
4#define PA_CONTROL_PAGE 0
5#define VA_CONTROL_PAGE 1
6#define PA_PGD 2
7#define VA_PGD 3
8#define PA_PTE_0 4
9#define VA_PTE_0 5
10#define PA_PTE_1 6
11#define VA_PTE_1 7
12#ifdef CONFIG_X86_PAE
13#define PA_PMD_0 8
14#define VA_PMD_0 9
15#define PA_PMD_1 10
16#define VA_PMD_1 11
17#define PAGES_NR 12
18#else
19#define PAGES_NR 8
20#endif
21
22#ifndef __ASSEMBLY__
23
24#include <asm/ptrace.h>
25#include <asm/string.h>
26
27/*
28 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
29 * i.e. the highest page that is mapped directly into kernel memory,
30 * so that kmap is not required.
31 */
32
33/* Maximum physical address we can use pages from */
34#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
35/* Maximum address we can reach in physical address mode */
36#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
37/* Maximum address we can use for the control code buffer */
38#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
39
40#define KEXEC_CONTROL_CODE_SIZE 4096
41
42/* The native architecture */
43#define KEXEC_ARCH KEXEC_ARCH_386
44
45/* We can also handle crash dumps from a 64-bit kernel. */
46#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
47
48/* The CPU does not save ss and esp on the stack if execution is already
49 * running in kernel mode when the NMI occurs. This code
50 * fixes that up.
51 */
52static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
53 struct pt_regs *oldregs)
54{
55 memcpy(newregs, oldregs, sizeof(*newregs));
56 newregs->esp = (unsigned long)&(oldregs->esp);
57 __asm__ __volatile__(
58 "xorl %%eax, %%eax\n\t"
59 "movw %%ss, %%ax\n\t"
60 :"=a"(newregs->xss));
61}
62
63/*
64 * This function captures the register state if we arrive via panic;
65 * otherwise it just fixes up ss and esp when coming from a kernel-mode
66 * exception.
67 */
68static inline void crash_setup_regs(struct pt_regs *newregs,
69 struct pt_regs *oldregs)
70{
71 if (oldregs)
72 crash_fixup_ss_esp(newregs, oldregs);
73 else {
74 __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
75 __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
76 __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
77 __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
78 __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
79 __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
80 __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
81 __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
82 __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
83 __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
84 __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
85 __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
86 __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
87
88 newregs->eip = (unsigned long)current_text_addr();
89 }
90}
91asmlinkage NORET_TYPE void
92relocate_kernel(unsigned long indirection_page,
93 unsigned long control_page,
94 unsigned long start_address,
95 unsigned int has_pae) ATTRIB_NORET;
96
97#endif /* __ASSEMBLY__ */
98
99#endif /* _I386_KEXEC_H */
diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
deleted file mode 100644
index 806aae3c5338..000000000000
--- a/include/asm-i386/kmap_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H
3
4
5#ifdef CONFIG_DEBUG_HIGHMEM
6# define D(n) __KM_FENCE_##n ,
7#else
8# define D(n)
9#endif
10
11enum km_type {
12D(0) KM_BOUNCE_READ,
13D(1) KM_SKB_SUNRPC_DATA,
14D(2) KM_SKB_DATA_SOFTIRQ,
15D(3) KM_USER0,
16D(4) KM_USER1,
17D(5) KM_BIO_SRC_IRQ,
18D(6) KM_BIO_DST_IRQ,
19D(7) KM_PTE0,
20D(8) KM_PTE1,
21D(9) KM_IRQ0,
22D(10) KM_IRQ1,
23D(11) KM_SOFTIRQ0,
24D(12) KM_SOFTIRQ1,
25D(13) KM_TYPE_NR
26};
27
28#undef D
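/*
 * Expansion sketch: with CONFIG_DEBUG_HIGHMEM each D(n) emits a
 * __KM_FENCE_n enumerator, so the list reads
 *	__KM_FENCE_0, KM_BOUNCE_READ, __KM_FENCE_1, KM_SKB_SUNRPC_DATA, ...
 * giving every kmap type a guard slot; without it D(n) expands to nothing
 * and the types are simply numbered 0, 1, 2, ...
 */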
29
30#endif
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
deleted file mode 100644
index 06f7303c30ca..000000000000
--- a/include/asm-i386/kprobes.h
+++ /dev/null
@@ -1,92 +0,0 @@
1#ifndef _ASM_KPROBES_H
2#define _ASM_KPROBES_H
3/*
4 * Kernel Probes (KProbes)
5 * include/asm-i386/kprobes.h
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * Copyright (C) IBM Corporation, 2002, 2004
22 *
23 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
24 * Probes initial implementation ( includes suggestions from
25 * Rusty Russell).
26 */
27#include <linux/types.h>
28#include <linux/ptrace.h>
29
30#define __ARCH_WANT_KPROBES_INSN_SLOT
31
32struct kprobe;
33struct pt_regs;
34
35typedef u8 kprobe_opcode_t;
36#define BREAKPOINT_INSTRUCTION 0xcc
37#define RELATIVEJUMP_INSTRUCTION 0xe9
38#define MAX_INSN_SIZE 16
39#define MAX_STACK_SIZE 64
40#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
41 (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
42 ? (MAX_STACK_SIZE) \
43 : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
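/* In other words (paraphrase): MIN_STACK_SIZE(ADDR) is
 * min(MAX_STACK_SIZE, bytes between ADDR and the top of the current
 * thread's stack), so a copy of that size cannot run off the stack. */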
44
45#define ARCH_SUPPORTS_KRETPROBES
46#define ARCH_INACTIVE_KPROBE_COUNT 0
47#define flush_insn_slot(p) do { } while (0)
48
49void arch_remove_kprobe(struct kprobe *p);
50void kretprobe_trampoline(void);
51
52/* Architecture-specific copy of the original instruction */
53struct arch_specific_insn {
54 /* copy of the original instruction */
55 kprobe_opcode_t *insn;
56 /*
57	 * If this flag is not 0, this kprobe can be boosted when its
58	 * post_handler and break_handler are not set.
59 */
60 int boostable;
61};
62
63struct prev_kprobe {
64 struct kprobe *kp;
65 unsigned long status;
66 unsigned long old_eflags;
67 unsigned long saved_eflags;
68};
69
70/* per-cpu kprobe control block */
71struct kprobe_ctlblk {
72 unsigned long kprobe_status;
73 unsigned long kprobe_old_eflags;
74 unsigned long kprobe_saved_eflags;
75 long *jprobe_saved_esp;
76 struct pt_regs jprobe_saved_regs;
77 kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
78 struct prev_kprobe prev_kprobe;
79};
80
81/* Traps 3 and 1 are interrupt gates for kprobes. So, restore the status of IF,
82 * if necessary, before executing the original int3/int1 (trap) handler.
83 */
84static inline void restore_interrupts(struct pt_regs *regs)
85{
86 if (regs->eflags & IF_MASK)
87 local_irq_enable();
88}
89
90extern int kprobe_exceptions_notify(struct notifier_block *self,
91 unsigned long val, void *data);
92#endif /* _ASM_KPROBES_H */
diff --git a/include/asm-i386/ldt.h b/include/asm-i386/ldt.h
deleted file mode 100644
index e9d3de1dee6c..000000000000
--- a/include/asm-i386/ldt.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * ldt.h
3 *
4 * Definitions of structures used with the modify_ldt system call.
5 */
6#ifndef _LINUX_LDT_H
7#define _LINUX_LDT_H
8
9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192
11/* The size of each LDT entry. */
12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15struct user_desc {
16 unsigned int entry_number;
17 unsigned long base_addr;
18 unsigned int limit;
19 unsigned int seg_32bit:1;
20 unsigned int contents:2;
21 unsigned int read_exec_only:1;
22 unsigned int limit_in_pages:1;
23 unsigned int seg_not_present:1;
24 unsigned int useable:1;
25};
26
27#define MODIFY_LDT_CONTENTS_DATA 0
28#define MODIFY_LDT_CONTENTS_STACK 1
29#define MODIFY_LDT_CONTENTS_CODE 2
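/*
 * User-space usage sketch (slot, base and limit are invented): install a
 * small 32-bit data segment through the modify_ldt system call.
 */
#include <sys/syscall.h>
#include <unistd.h>

static int example_install_ldt_entry(void)
{
	struct user_desc desc = {
		.entry_number	= 0,		/* invented LDT slot */
		.base_addr	= 0x1000,	/* invented base */
		.limit		= 0xfff,
		.seg_32bit	= 1,
		.contents	= MODIFY_LDT_CONTENTS_DATA,
	};

	return syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)); /* 1 = write */
}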
30
31#endif /* !__ASSEMBLY__ */
32#endif
diff --git a/include/asm-i386/linkage.h b/include/asm-i386/linkage.h
deleted file mode 100644
index f4a6ebac0247..000000000000
--- a/include/asm-i386/linkage.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H
3
4#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
5#define FASTCALL(x) x __attribute__((regparm(3)))
6#define fastcall __attribute__((regparm(3)))
7
8#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
9
10#ifdef CONFIG_X86_ALIGNMENT_16
11#define __ALIGN .align 16,0x90
12#define __ALIGN_STR ".align 16,0x90"
13#endif
14
15#endif
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
deleted file mode 100644
index 6e85975b9ed2..000000000000
--- a/include/asm-i386/local.h
+++ /dev/null
@@ -1,233 +0,0 @@
1#ifndef _ARCH_I386_LOCAL_H
2#define _ARCH_I386_LOCAL_H
3
4#include <linux/percpu.h>
5#include <asm/system.h>
6#include <asm/atomic.h>
7
8typedef struct
9{
10 atomic_long_t a;
11} local_t;
12
13#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
14
15#define local_read(l) atomic_long_read(&(l)->a)
16#define local_set(l,i) atomic_long_set(&(l)->a, (i))
17
18static __inline__ void local_inc(local_t *l)
19{
20 __asm__ __volatile__(
21 "incl %0"
22 :"+m" (l->a.counter));
23}
24
25static __inline__ void local_dec(local_t *l)
26{
27 __asm__ __volatile__(
28 "decl %0"
29 :"+m" (l->a.counter));
30}
31
32static __inline__ void local_add(long i, local_t *l)
33{
34 __asm__ __volatile__(
35 "addl %1,%0"
36 :"+m" (l->a.counter)
37 :"ir" (i));
38}
39
40static __inline__ void local_sub(long i, local_t *l)
41{
42 __asm__ __volatile__(
43 "subl %1,%0"
44 :"+m" (l->a.counter)
45 :"ir" (i));
46}
47
48/**
49 * local_sub_and_test - subtract value from variable and test result
50 * @i: integer value to subtract
51 * @l: pointer of type local_t
52 *
53 * Atomically subtracts @i from @l and returns
54 * true if the result is zero, or false for all
55 * other cases.
56 */
57static __inline__ int local_sub_and_test(long i, local_t *l)
58{
59 unsigned char c;
60
61 __asm__ __volatile__(
62 "subl %2,%0; sete %1"
63 :"+m" (l->a.counter), "=qm" (c)
64 :"ir" (i) : "memory");
65 return c;
66}
67
68/**
69 * local_dec_and_test - decrement and test
70 * @l: pointer of type local_t
71 *
72 * Atomically decrements @l by 1 and
73 * returns true if the result is 0, or false for all other
74 * cases.
75 */
76static __inline__ int local_dec_and_test(local_t *l)
77{
78 unsigned char c;
79
80 __asm__ __volatile__(
81 "decl %0; sete %1"
82 :"+m" (l->a.counter), "=qm" (c)
83 : : "memory");
84 return c != 0;
85}
86
87/**
88 * local_inc_and_test - increment and test
89 * @l: pointer of type local_t
90 *
91 * Atomically increments @l by 1
92 * and returns true if the result is zero, or false for all
93 * other cases.
94 */
95static __inline__ int local_inc_and_test(local_t *l)
96{
97 unsigned char c;
98
99 __asm__ __volatile__(
100 "incl %0; sete %1"
101 :"+m" (l->a.counter), "=qm" (c)
102 : : "memory");
103 return c != 0;
104}
105
106/**
107 * local_add_negative - add and test if negative
108 * @l: pointer of type local_t
109 * @i: integer value to add
110 *
111 * Atomically adds @i to @l and returns true
112 * if the result is negative, or false when
113 * result is greater than or equal to zero.
114 */
115static __inline__ int local_add_negative(long i, local_t *l)
116{
117 unsigned char c;
118
119 __asm__ __volatile__(
120 "addl %2,%0; sets %1"
121 :"+m" (l->a.counter), "=qm" (c)
122 :"ir" (i) : "memory");
123 return c;
124}
125
126/**
127 * local_add_return - add and return
128 * @l: pointer of type local_t
129 * @i: integer value to add
130 *
131 * Atomically adds @i to @l and returns @i + @l
132 */
133static __inline__ long local_add_return(long i, local_t *l)
134{
135 long __i;
136#ifdef CONFIG_M386
137 unsigned long flags;
138 if(unlikely(boot_cpu_data.x86 <= 3))
139 goto no_xadd;
140#endif
141 /* Modern 486+ processor */
142 __i = i;
143 __asm__ __volatile__(
144 "xaddl %0, %1;"
145 :"+r" (i), "+m" (l->a.counter)
146 : : "memory");
147 return i + __i;
148
149#ifdef CONFIG_M386
150no_xadd: /* Legacy 386 processor */
151 local_irq_save(flags);
152 __i = local_read(l);
153 local_set(l, i + __i);
154 local_irq_restore(flags);
155 return i + __i;
156#endif
157}
158
159static __inline__ long local_sub_return(long i, local_t *l)
160{
161 return local_add_return(-i,l);
162}
163
164#define local_inc_return(l) (local_add_return(1,l))
165#define local_dec_return(l) (local_sub_return(1,l))
166
167#define local_cmpxchg(l, o, n) \
168 (cmpxchg_local(&((l)->a.counter), (o), (n)))
169/* Always has a lock prefix */
170#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
171
172/**
173 * local_add_unless - add unless the number is a given value
174 * @l: pointer of type local_t
175 * @a: the amount to add to l...
176 * @u: ...unless l is equal to u.
177 *
178 * Atomically adds @a to @l, so long as it was not @u.
179 * Returns non-zero if @l was not @u, and zero otherwise.
180 */
181#define local_add_unless(l, a, u) \
182({ \
183 long c, old; \
184 c = local_read(l); \
185 for (;;) { \
186 if (unlikely(c == (u))) \
187 break; \
188 old = local_cmpxchg((l), c, c + (a)); \
189 if (likely(old == c)) \
190 break; \
191 c = old; \
192 } \
193 c != (u); \
194})
195#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
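/* Usage sketch (refcount-style; the counter is hypothetical): succeed only
 * while the count has not already dropped to zero. */
static inline int example_try_get(local_t *refs)
{
	return local_inc_not_zero(refs);	/* non-zero on success */
}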
196
197/* On x86, these are no better than the atomic variants. */
198#define __local_inc(l) local_inc(l)
199#define __local_dec(l) local_dec(l)
200#define __local_add(i,l) local_add((i),(l))
201#define __local_sub(i,l) local_sub((i),(l))
202
203/* Use these for per-cpu local_t variables: on some archs they are
204 * much more efficient than these naive implementations. Note they take
205 * a variable, not an address.
206 */
207
208/* Preemption must be disabled for the cpu-local counters; otherwise we could
209 still access a variable of a previous CPU in a non-atomic way. */
210#define cpu_local_wrap_v(l) \
211 ({ local_t res__; \
212 preempt_disable(); \
213 res__ = (l); \
214 preempt_enable(); \
215 res__; })
216#define cpu_local_wrap(l) \
217 ({ preempt_disable(); \
218 l; \
219 preempt_enable(); })
220
221#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
222#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
223#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
224#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
225#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
226#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
227
228#define __cpu_local_inc(l) cpu_local_inc(l)
229#define __cpu_local_dec(l) cpu_local_dec(l)
230#define __cpu_local_add(i, l) cpu_local_add((i), (l))
231#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
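/* Per-cpu usage sketch -- the counter is invented; note the macros take
 * the variable itself, not its address, as noted above. */
DECLARE_PER_CPU(local_t, example_stat);

static inline void example_count_event(void)
{
	cpu_local_inc(example_stat);	/* preemption-safe increment */
}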
232
233#endif /* _ARCH_I386_LOCAL_H */
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h
deleted file mode 100644
index ebd319f838ab..000000000000
--- a/include/asm-i386/mach-bigsmp/mach_apic.h
+++ /dev/null
@@ -1,158 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4
5extern u8 bios_cpu_apicid[];
6
7#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
8#define esr_disable (1)
9
10static inline int apic_id_registered(void)
11{
12 return (1);
13}
14
15/* Round-robin the irqs among the online cpus */
16static inline cpumask_t target_cpus(void)
17{
18 static unsigned long cpu = NR_CPUS;
19 do {
20 if (cpu >= NR_CPUS)
21 cpu = first_cpu(cpu_online_map);
22 else
23 cpu = next_cpu(cpu, cpu_online_map);
24 } while (cpu >= NR_CPUS);
25 return cpumask_of_cpu(cpu);
26}
27
28#undef APIC_DEST_LOGICAL
29#define APIC_DEST_LOGICAL 0
30#define TARGET_CPUS (target_cpus())
31#define APIC_DFR_VALUE (APIC_DFR_FLAT)
32#define INT_DELIVERY_MODE (dest_Fixed)
33#define INT_DEST_MODE (0) /* phys delivery to target proc */
34#define NO_BALANCE_IRQ (0)
35#define WAKE_SECONDARY_VIA_INIT
36
37
38static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
39{
40 return (0);
41}
42
43static inline unsigned long check_apicid_present(int bit)
44{
45 return (1);
46}
47
48static inline unsigned long calculate_ldr(int cpu)
49{
50 unsigned long val, id;
51 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
52 id = xapic_phys_to_log_apicid(cpu);
53 val |= SET_APIC_LOGICAL_ID(id);
54 return val;
55}
56
57/*
58 * Set up the logical destination ID.
59 *
60 * Intel recommends to set DFR, LDR and TPR before enabling
61 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
62 * document number 292116). So here it goes...
63 */
64static inline void init_apic_ldr(void)
65{
66 unsigned long val;
67 int cpu = smp_processor_id();
68
69 apic_write_around(APIC_DFR, APIC_DFR_VALUE);
70 val = calculate_ldr(cpu);
71 apic_write_around(APIC_LDR, val);
72}
73
74static inline void setup_apic_routing(void)
75{
76 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
77 "Physflat", nr_ioapics);
78}
79
80static inline int multi_timer_check(int apic, int irq)
81{
82 return (0);
83}
84
85static inline int apicid_to_node(int logical_apicid)
86{
87 return (0);
88}
89
90static inline int cpu_present_to_apicid(int mps_cpu)
91{
92 if (mps_cpu < NR_CPUS)
93 return (int) bios_cpu_apicid[mps_cpu];
94
95 return BAD_APICID;
96}
97
98static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
99{
100 return physid_mask_of_physid(phys_apicid);
101}
102
103extern u8 cpu_2_logical_apicid[];
104/* Mapping from cpu number to logical apicid */
105static inline int cpu_to_logical_apicid(int cpu)
106{
107 if (cpu >= NR_CPUS)
108 return BAD_APICID;
109 return cpu_physical_id(cpu);
110}
111
112static inline int mpc_apic_id(struct mpc_config_processor *m,
113 struct mpc_config_translation *translation_record)
114{
115 printk("Processor #%d %ld:%ld APIC version %d\n",
116 m->mpc_apicid,
117 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
118 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
119 m->mpc_apicver);
120 return m->mpc_apicid;
121}
122
123static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
124{
125 /* For clustered we don't have a good way to do this yet - hack */
126 return physids_promote(0xFFL);
127}
128
129static inline void setup_portio_remap(void)
130{
131}
132
133static inline void enable_apic_mode(void)
134{
135}
136
137static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
138{
139 return (1);
140}
141
142/* As we are using a single CPU as the destination, pick only one CPU here */
143static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
144{
145 int cpu;
146 int apicid;
147
148 cpu = first_cpu(cpumask);
149 apicid = cpu_to_logical_apicid(cpu);
150 return apicid;
151}
152
153static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
154{
155 return cpuid_apic >> index_msb;
156}
157
158#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h
deleted file mode 100644
index a58ab5a75c8c..000000000000
--- a/include/asm-i386/mach-bigsmp/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-i386/mach-bigsmp/mach_ipi.h b/include/asm-i386/mach-bigsmp/mach_ipi.h
deleted file mode 100644
index 9404c535b7ec..000000000000
--- a/include/asm-i386/mach-bigsmp/mach_ipi.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18}
19
20static inline void send_IPI_all(int vector)
21{
22 send_IPI_mask(cpu_online_map, vector);
23}
24
25#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-bigsmp/mach_mpspec.h b/include/asm-i386/mach-bigsmp/mach_mpspec.h
deleted file mode 100644
index 6b5dadcf1d0e..000000000000
--- a/include/asm-i386/mach-bigsmp/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#define MAX_MP_BUSSES 32
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-default/apm.h b/include/asm-i386/mach-default/apm.h
deleted file mode 100644
index 1f730b8bd1fd..000000000000
--- a/include/asm-i386/mach-default/apm.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * include/asm-i386/mach-default/apm.h
3 *
4 * Machine specific APM BIOS functions for generic.
5 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
6 */
7
8#ifndef _ASM_APM_H
9#define _ASM_APM_H
10
11#ifdef APM_ZERO_SEGS
12# define APM_DO_ZERO_SEGS \
13 "pushl %%ds\n\t" \
14 "pushl %%es\n\t" \
15 "xorl %%edx, %%edx\n\t" \
16 "mov %%dx, %%ds\n\t" \
17 "mov %%dx, %%es\n\t" \
18 "mov %%dx, %%fs\n\t" \
19 "mov %%dx, %%gs\n\t"
20# define APM_DO_POP_SEGS \
21 "popl %%es\n\t" \
22 "popl %%ds\n\t"
23#else
24# define APM_DO_ZERO_SEGS
25# define APM_DO_POP_SEGS
26#endif
27
28static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
29 u32 *eax, u32 *ebx, u32 *ecx,
30 u32 *edx, u32 *esi)
31{
32 /*
33 * N.B. We do NOT need a cld after the BIOS call
34 * because we always save and restore the flags.
35 */
36 __asm__ __volatile__(APM_DO_ZERO_SEGS
37 "pushl %%edi\n\t"
38 "pushl %%ebp\n\t"
39 "lcall *%%cs:apm_bios_entry\n\t"
40 "setc %%al\n\t"
41 "popl %%ebp\n\t"
42 "popl %%edi\n\t"
43 APM_DO_POP_SEGS
44 : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx),
45 "=S" (*esi)
46 : "a" (func), "b" (ebx_in), "c" (ecx_in)
47 : "memory", "cc");
48}
49
50static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
51 u32 ecx_in, u32 *eax)
52{
53 int cx, dx, si;
54 u8 error;
55
56 /*
57 * N.B. We do NOT need a cld after the BIOS call
58 * because we always save and restore the flags.
59 */
60 __asm__ __volatile__(APM_DO_ZERO_SEGS
61 "pushl %%edi\n\t"
62 "pushl %%ebp\n\t"
63 "lcall *%%cs:apm_bios_entry\n\t"
64 "setc %%bl\n\t"
65 "popl %%ebp\n\t"
66 "popl %%edi\n\t"
67 APM_DO_POP_SEGS
68 : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx),
69 "=S" (si)
70 : "a" (func), "b" (ebx_in), "c" (ecx_in)
71 : "memory", "cc");
72 return error;
73}
74
75#endif /* _ASM_APM_H */
diff --git a/include/asm-i386/mach-default/bios_ebda.h b/include/asm-i386/mach-default/bios_ebda.h
deleted file mode 100644
index 9cbd9a668af8..000000000000
--- a/include/asm-i386/mach-default/bios_ebda.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _MACH_BIOS_EBDA_H
2#define _MACH_BIOS_EBDA_H
3
4/*
5 * there is a real-mode segmented pointer pointing to the
6 * 4K EBDA area at 0x40E.
7 */
8static inline unsigned int get_bios_ebda(void)
9{
10 unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
11 address <<= 4;
12 return address; /* 0 means none */
13}
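/* Worked example (value invented): if the word at 0x40E reads 0x9FC0,
 * the EBDA starts at 0x9FC0 << 4 == 0x9FC00, just below 640K. */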
14
15#endif /* _MACH_BIOS_EBDA_H */
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
deleted file mode 100644
index 23ecda0b28a0..000000000000
--- a/include/asm-i386/mach-default/do_timer.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
3
4#include <asm/i8259.h>
5#include <asm/i8253.h>
6
7/**
8 * do_timer_interrupt_hook - hook into timer tick
9 *
10 * Call the PIT clock event handler. See asm/i8253.h.
11 **/
12
13static inline void do_timer_interrupt_hook(void)
14{
15 global_clock_event->event_handler(global_clock_event);
16}
diff --git a/include/asm-i386/mach-default/entry_arch.h b/include/asm-i386/mach-default/entry_arch.h
deleted file mode 100644
index bc861469bdba..000000000000
--- a/include/asm-i386/mach-default/entry_arch.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * This file is designed to contain the BUILD_INTERRUPT specifications for
3 * all of the extra named interrupt vectors used by the architecture.
4 * Usually this is the Inter Process Interrupts (IPIs)
5 */
6
7/*
8 * The following vectors are part of the Linux architecture, there
9 * is no hardware IRQ pin equivalent for them, they are triggered
10 * through the ICC by us (IPIs)
11 */
12#ifdef CONFIG_X86_SMP
13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
14BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
15BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
16#endif
17
18/*
19 * Every Pentium local APIC has two 'local interrupts', each with a
20 * soft-definable vector attached: one of
21 * them is a timer interrupt, the other is error-counter
22 * overflow. Linux uses the local APIC timer interrupt to get
23 * a much simpler SMP time architecture:
24 */
25#ifdef CONFIG_X86_LOCAL_APIC
26BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
27BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
28BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
29
30#ifdef CONFIG_X86_MCE_P4THERMAL
31BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
32#endif
33
34#endif
diff --git a/include/asm-i386/mach-default/io_ports.h b/include/asm-i386/mach-default/io_ports.h
deleted file mode 100644
index 48540ba97166..000000000000
--- a/include/asm-i386/mach-default/io_ports.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * arch/i386/mach-generic/io_ports.h
3 *
4 * Machine specific IO port address definition for generic.
5 * Written by Osamu Tomita <tomita@cinet.co.jp>
6 */
7#ifndef _MACH_IO_PORTS_H
8#define _MACH_IO_PORTS_H
9
10/* i8259A PIC registers */
11#define PIC_MASTER_CMD 0x20
12#define PIC_MASTER_IMR 0x21
13#define PIC_MASTER_ISR PIC_MASTER_CMD
14#define PIC_MASTER_POLL PIC_MASTER_ISR
15#define PIC_MASTER_OCW3 PIC_MASTER_ISR
16#define PIC_SLAVE_CMD 0xa0
17#define PIC_SLAVE_IMR 0xa1
18
19/* i8259A PIC related values */
20#define PIC_CASCADE_IR 2
21#define MASTER_ICW4_DEFAULT 0x01
22#define SLAVE_ICW4_DEFAULT 0x01
23#define PIC_ICW4_AEOI 2
24
25#endif /* !_MACH_IO_PORTS_H */
diff --git a/include/asm-i386/mach-default/irq_vectors.h b/include/asm-i386/mach-default/irq_vectors.h
deleted file mode 100644
index 881c63ca61ad..000000000000
--- a/include/asm-i386/mach-default/irq_vectors.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * This file should contain #defines for all of the interrupt vector
3 * numbers used by this architecture.
4 *
5 * In addition, there are some standard defines:
6 *
7 * FIRST_EXTERNAL_VECTOR:
8 * The first free place for external interrupts
9 *
10 * SYSCALL_VECTOR:
11 * The IRQ vector through which a syscall makes the
12 * user-to-kernel transition.
13 *
14 * TIMER_IRQ:
15 * The IRQ number the timer interrupt comes in at.
16 *
17 * NR_IRQS:
18 * The total number of interrupt vectors (including all the
19 * architecture specific interrupts) needed.
20 *
21 */
22#ifndef _ASM_IRQ_VECTORS_H
23#define _ASM_IRQ_VECTORS_H
24
25/*
26 * IDT vectors usable for external interrupt sources start
27 * at 0x20:
28 */
29#define FIRST_EXTERNAL_VECTOR 0x20
30
31#define SYSCALL_VECTOR 0x80
32
33/*
34 * Vectors 0x20-0x2f are used for ISA interrupts.
35 */
36
37/*
38 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
39 *
40 * Some of the following vectors are 'rare'; they are merged
41 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
42 * TLB, reschedule and local APIC vectors are performance-critical.
43 *
44 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
45 */
46#define SPURIOUS_APIC_VECTOR 0xff
47#define ERROR_APIC_VECTOR 0xfe
48#define INVALIDATE_TLB_VECTOR 0xfd
49#define RESCHEDULE_VECTOR 0xfc
50#define CALL_FUNCTION_VECTOR 0xfb
51
52#define THERMAL_APIC_VECTOR 0xf0
53/*
54 * Local APIC timer IRQ vector is on a different priority level,
55 * to work around the 'lost local interrupt if more than 2 IRQ
56 * sources per level' errata.
57 */
58#define LOCAL_TIMER_VECTOR 0xef
59
60/*
61 * First APIC vector available to drivers (vectors 0x30-0xee);
62 * we start at 0x31 to spread vectors evenly across priority
63 * levels (0x80 is the syscall vector).
64 */
65#define FIRST_DEVICE_VECTOR 0x31
66#define FIRST_SYSTEM_VECTOR 0xef
67
68#define TIMER_IRQ 0
69
70/*
71 * 16 8259A IRQs, 208 potential APIC interrupt sources.
72 * Right now the APIC is mostly only used for SMP.
73 * 256 vectors is an architectural limit. (We can
74 * theoretically have more than 256 devices, but they
75 * will have to use shared interrupts.)
76 * Since vectors 0x00-0x1f are used/reserved by the CPU,
77 * the usable vector space is 0x20-0xff (224 vectors).
78 */
79
80/*
81 * i386 processors support at most 256 vectors. For
82 * processors other than i386, NR_VECTORS should be
83 * changed accordingly.
84 */
85#define NR_VECTORS 256
86
87#include "irq_vectors_limits.h"
88
89#define FPU_IRQ 13
90
91#define FIRST_VM86_IRQ 3
92#define LAST_VM86_IRQ 15
93#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
94
95
96#endif /* _ASM_IRQ_VECTORS_H */
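
The vector layout above is pure arithmetic: CPU exceptions reserve 0x00-0x1f, devices get 0x31 up to the first system vector minus the syscall slot, and the SMP IPIs sit at the top. A small sketch recomputing those counts from the constants in the header (values copied from the file above, not read from a running kernel):

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR	0x20
#define SYSCALL_VECTOR		0x80
#define FIRST_DEVICE_VECTOR	0x31
#define FIRST_SYSTEM_VECTOR	0xef
#define NR_VECTORS		256

int main(void)
{
	/* Vectors 0x00-0x1f are reserved by the CPU. */
	int usable = NR_VECTORS - FIRST_EXTERNAL_VECTOR;

	/* Device vectors run from 0x31 up to (but not including) 0xef,
	 * with 0x80 carved out for syscalls. */
	int device = FIRST_SYSTEM_VECTOR - FIRST_DEVICE_VECTOR - 1;

	printf("usable vectors: %d\n", usable);	/* 224 */
	printf("device vectors: %d\n", device);	/* 189 */
	return 0;
}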
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
deleted file mode 100644
index a90c7a60109f..000000000000
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H
3
4#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
5#define NR_IRQS 224
6# if (224 >= 32 * NR_CPUS)
7# define NR_IRQ_VECTORS NR_IRQS
8# else
9# define NR_IRQ_VECTORS (32 * NR_CPUS)
10# endif
11#else
12#define NR_IRQS 16
13#define NR_IRQ_VECTORS NR_IRQS
14#endif
15
16#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
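
The preprocessor conditional above simply takes the larger of NR_IRQS and 32 vectors per CPU. The same selection in plain C, with NR_CPUS as an assumed compile-time knob:

#include <stdio.h>

#define NR_CPUS	8	/* assumed config value, for illustration only */
#define NR_IRQS	224

int main(void)
{
	/* Mirrors the #if: use NR_IRQS unless 32 * NR_CPUS exceeds it. */
	int nr_irq_vectors = (NR_IRQS >= 32 * NR_CPUS) ? NR_IRQS
						       : 32 * NR_CPUS;

	printf("NR_CPUS=%d -> NR_IRQ_VECTORS=%d\n", NR_CPUS, nr_irq_vectors);
	return 0;	/* with 8 CPUs: 32*8=256 > 224, so 256 */
}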
diff --git a/include/asm-i386/mach-default/mach_apic.h b/include/asm-i386/mach-default/mach_apic.h
deleted file mode 100644
index 6db1c3babe9a..000000000000
--- a/include/asm-i386/mach-default/mach_apic.h
+++ /dev/null
@@ -1,131 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#include <mach_apicdef.h>
5#include <asm/smp.h>
6
7#define APIC_DFR_VALUE (APIC_DFR_FLAT)
8
9static inline cpumask_t target_cpus(void)
10{
11#ifdef CONFIG_SMP
12 return cpu_online_map;
13#else
14 return cpumask_of_cpu(0);
15#endif
16}
17#define TARGET_CPUS (target_cpus())
18
19#define NO_BALANCE_IRQ (0)
20#define esr_disable (0)
21
22#define INT_DELIVERY_MODE dest_LowestPrio
23#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
24
25static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
26{
27 return physid_isset(apicid, bitmap);
28}
29
30static inline unsigned long check_apicid_present(int bit)
31{
32 return physid_isset(bit, phys_cpu_present_map);
33}
34
35/*
36 * Set up the logical destination ID.
37 *
38 * Intel recommends setting DFR, LDR and TPR before enabling
39 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
40 * document number 292116). So here it goes...
41 */
42static inline void init_apic_ldr(void)
43{
44 unsigned long val;
45
46 apic_write_around(APIC_DFR, APIC_DFR_VALUE);
47 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
48 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
49 apic_write_around(APIC_LDR, val);
50}
51
52static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
53{
54 return phys_map;
55}
56
57static inline void setup_apic_routing(void)
58{
59 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
60 "Flat", nr_ioapics);
61}
62
63static inline int multi_timer_check(int apic, int irq)
64{
65 return 0;
66}
67
68static inline int apicid_to_node(int logical_apicid)
69{
70 return 0;
71}
72
73/* Mapping from cpu number to logical apicid */
74static inline int cpu_to_logical_apicid(int cpu)
75{
76 return 1 << cpu;
77}
78
79static inline int cpu_present_to_apicid(int mps_cpu)
80{
81 if (mps_cpu < get_physical_broadcast())
82 return mps_cpu;
83 else
84 return BAD_APICID;
85}
86
87static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
88{
89 return physid_mask_of_physid(phys_apicid);
90}
91
92static inline int mpc_apic_id(struct mpc_config_processor *m,
93 struct mpc_config_translation *translation_record)
94{
95 printk("Processor #%d %ld:%ld APIC version %d\n",
96 m->mpc_apicid,
97 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
98 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
99 m->mpc_apicver);
100 return (m->mpc_apicid);
101}
102
103static inline void setup_portio_remap(void)
104{
105}
106
107static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
108{
109 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
110}
111
112static inline int apic_id_registered(void)
113{
114 return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
115}
116
117static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
118{
119 return cpus_addr(cpumask)[0];
120}
121
122static inline void enable_apic_mode(void)
123{
124}
125
126static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
127{
128 return cpuid_apic >> index_msb;
129}
130
131#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-default/mach_apicdef.h b/include/asm-i386/mach-default/mach_apicdef.h
deleted file mode 100644
index 7bcb350c3ee8..000000000000
--- a/include/asm-i386/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
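
get_apic_id() above pulls the 4-bit physical APIC ID out of bits 24-27 of the APIC ID register. A standalone sketch of the same masking, fed a made-up register value rather than a real APIC read:

#include <stdio.h>

/* Extract the 4-bit APIC ID from bits 24-27 of the ID register,
 * as in the flat (mach-default) subarchitecture above. */
static unsigned get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xF;
}

int main(void)
{
	unsigned long reg = 0x05000000UL;	/* hypothetical register value */

	printf("APIC ID = %u\n", get_apic_id(reg));	/* 5 */
	return 0;
}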
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
deleted file mode 100644
index 0dba244c86db..000000000000
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4/* Avoid include hell */
5#define NMI_VECTOR 0x02
6
7void send_IPI_mask_bitmask(cpumask_t mask, int vector);
8void __send_IPI_shortcut(unsigned int shortcut, int vector);
9
10extern int no_broadcast;
11
12static inline void send_IPI_mask(cpumask_t mask, int vector)
13{
14 send_IPI_mask_bitmask(mask, vector);
15}
16
17static inline void __local_send_IPI_allbutself(int vector)
18{
19 if (no_broadcast || vector == NMI_VECTOR) {
20 cpumask_t mask = cpu_online_map;
21
22 cpu_clear(smp_processor_id(), mask);
23 send_IPI_mask(mask, vector);
24 } else
25 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
26}
27
28static inline void __local_send_IPI_all(int vector)
29{
30 if (no_broadcast || vector == NMI_VECTOR)
31 send_IPI_mask(cpu_online_map, vector);
32 else
33 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
34}
35
36static inline void send_IPI_allbutself(int vector)
37{
38 /*
39 * If there are no other CPUs in the system, broadcasting triggers
40 * an APIC send error, so avoid sending IPIs in that case.
41 */
42 if (!(num_online_cpus() > 1))
43 return;
44
45 __local_send_IPI_allbutself(vector);
46 return;
47}
48
49static inline void send_IPI_all(int vector)
50{
51 __local_send_IPI_all(vector);
52}
53
54#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-default/mach_mpparse.h b/include/asm-i386/mach-default/mach_mpparse.h
deleted file mode 100644
index 1d3832482580..000000000000
--- a/include/asm-i386/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef __ASM_MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H
3
4static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
5 struct mpc_config_translation *translation)
6{
7// Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
8}
9
10static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
11 struct mpc_config_translation *translation)
12{
13}
14
15static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
16 char *productid)
17{
18 return 0;
19}
20
21/* Hook from generic ACPI tables.c */
22static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
23{
24 return 0;
25}
26
27
28#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-default/mach_mpspec.h b/include/asm-i386/mach-default/mach_mpspec.h
deleted file mode 100644
index 51c9a9775932..000000000000
--- a/include/asm-i386/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#if CONFIG_BASE_SMALL == 0
7#define MAX_MP_BUSSES 256
8#else
9#define MAX_MP_BUSSES 32
10#endif
11
12#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
deleted file mode 100644
index e23fd9fbebb3..000000000000
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * arch/i386/mach-generic/mach_reboot.h
3 *
4 * Machine specific reboot functions for generic.
5 * Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp>
6 */
7#ifndef _MACH_REBOOT_H
8#define _MACH_REBOOT_H
9
10static inline void kb_wait(void)
11{
12 int i;
13
14 for (i = 0; i < 0x10000; i++)
15 if ((inb_p(0x64) & 0x02) == 0)
16 break;
17}
18
19static inline void mach_reboot(void)
20{
21 int i;
22
23 /* old method, works on most machines */
24 for (i = 0; i < 10; i++) {
25 kb_wait();
26 udelay(50);
27 outb(0xfe, 0x64); /* pulse reset low */
28 udelay(50);
29 }
30
31 /* New method: sets the "System flag" which, when set, indicates
32 * successful completion of the keyboard controller self-test (Basic
33 * Assurance Test, BAT). This is needed for some machines with no
34 * keyboard plugged in. This read-modify-write sequence sets only the
35 * system flag.
36 */
37 for (i = 0; i < 10; i++) {
38 int cmd;
39
40 outb(0x20, 0x64); /* read Controller Command Byte */
41 udelay(50);
42 kb_wait();
43 udelay(50);
44 cmd = inb(0x60);
45 udelay(50);
46 kb_wait();
47 udelay(50);
48 outb(0x60, 0x64); /* write Controller Command Byte */
49 udelay(50);
50 kb_wait();
51 udelay(50);
52 outb(cmd | 0x04, 0x60); /* set "System flag" */
53 udelay(50);
54 kb_wait();
55 udelay(50);
56 outb(0xfe, 0x64); /* pulse reset low */
57 udelay(50);
58 }
59}
60
61#endif /* !_MACH_REBOOT_H */
diff --git a/include/asm-i386/mach-default/mach_time.h b/include/asm-i386/mach-default/mach_time.h
deleted file mode 100644
index 31eb5de6f3dc..000000000000
--- a/include/asm-i386/mach-default/mach_time.h
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * include/asm-i386/mach-default/mach_time.h
3 *
4 * Machine specific set RTC function for generic.
5 * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
6 */
7#ifndef _MACH_TIME_H
8#define _MACH_TIME_H
9
10#include <linux/mc146818rtc.h>
11
12/* timing window for calling set_rtc_mmss(): within 500 ms of the second */
13/* used in arch/i386/time.c::do_timer_interrupt() */
14#define USEC_AFTER 500000
15#define USEC_BEFORE 500000
16
17/*
18 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
19 * called 500 ms after the second nowtime has started, because when
20 * nowtime is written into the registers of the CMOS clock, it will
21 * jump to the next second precisely 500 ms later. Check the Motorola
22 * MC146818A or Dallas DS12887 data sheet for details.
23 *
24 * BUG: This routine does not handle hour overflow properly; it just
25 * sets the minutes. Usually you'll only notice that after reboot!
26 */
27static inline int mach_set_rtc_mmss(unsigned long nowtime)
28{
29 int retval = 0;
30 int real_seconds, real_minutes, cmos_minutes;
31 unsigned char save_control, save_freq_select;
32
33 save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
34 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
35
36 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
37 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
38
39 cmos_minutes = CMOS_READ(RTC_MINUTES);
40 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
41 BCD_TO_BIN(cmos_minutes);
42
43 /*
44 * Since we're only adjusting minutes and seconds,
45 * don't interfere with hour overflow. This avoids
46 * messing with unknown time zones but requires your
47 * RTC not to be off by more than 15 minutes.
48 */
49 real_seconds = nowtime % 60;
50 real_minutes = nowtime / 60;
51 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
52 real_minutes += 30; /* correct for half hour time zone */
53 real_minutes %= 60;
54
55 if (abs(real_minutes - cmos_minutes) < 30) {
56 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
57 BIN_TO_BCD(real_seconds);
58 BIN_TO_BCD(real_minutes);
59 }
60 CMOS_WRITE(real_seconds,RTC_SECONDS);
61 CMOS_WRITE(real_minutes,RTC_MINUTES);
62 } else {
63 printk(KERN_WARNING
64 "set_rtc_mmss: can't update from %d to %d\n",
65 cmos_minutes, real_minutes);
66 retval = -1;
67 }
68
69 /* The following flags have to be released exactly in this order,
70 * otherwise the DS12887 (popular MC146818A clone with integrated
71 * battery and quartz) will not reset the oscillator and will not
72 * update precisely 500 ms later. You won't find this mentioned in
73 * the Dallas Semiconductor data sheets, but who believes data
74 * sheets anyway ... -- Markus Kuhn
75 */
76 CMOS_WRITE(save_control, RTC_CONTROL);
77 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
78
79 return retval;
80}
81
82static inline unsigned long mach_get_cmos_time(void)
83{
84 unsigned int year, mon, day, hour, min, sec;
85
86 do {
87 sec = CMOS_READ(RTC_SECONDS);
88 min = CMOS_READ(RTC_MINUTES);
89 hour = CMOS_READ(RTC_HOURS);
90 day = CMOS_READ(RTC_DAY_OF_MONTH);
91 mon = CMOS_READ(RTC_MONTH);
92 year = CMOS_READ(RTC_YEAR);
93 } while (sec != CMOS_READ(RTC_SECONDS));
94
95 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
96 BCD_TO_BIN(sec);
97 BCD_TO_BIN(min);
98 BCD_TO_BIN(hour);
99 BCD_TO_BIN(day);
100 BCD_TO_BIN(mon);
101 BCD_TO_BIN(year);
102 }
103
104 year += 1900;
105 if (year < 1970)
106 year += 100;
107
108 return mktime(year, mon, day, hour, min, sec);
109}
110
111#endif /* !_MACH_TIME_H */
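
Two bits of arithmetic in mach_set_rtc_mmss() are worth seeing in isolation: the BCD/binary conversion used for CMOS registers, and the rounding test that detects a half-hour time-zone offset. A self-contained sketch (the sample minute values are made up):

#include <stdio.h>
#include <stdlib.h>

/* Userspace equivalents of the kernel's BCD_TO_BIN/BIN_TO_BCD macros. */
static int bcd_to_bin(int v) { return (v & 0x0f) + (v >> 4) * 10; }
static int bin_to_bcd(int v) { return ((v / 10) << 4) | (v % 10); }

int main(void)
{
	int cmos_minutes = bcd_to_bin(0x59);	/* CMOS stores 0x59 == 59 */
	int real_minutes = 27;			/* hypothetical wall-clock minute */

	printf("0x59 BCD -> %d binary -> 0x%02x BCD\n",
	       cmos_minutes, bin_to_bcd(cmos_minutes));

	/* Same test as the kernel: if the clocks differ by roughly an odd
	 * multiple of 30 minutes, assume a half-hour time zone. */
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes = (real_minutes + 30) % 60;

	printf("adjusted minutes: %d\n", real_minutes);	/* 57 */
	return 0;
}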
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h
deleted file mode 100644
index 807992fd4171..000000000000
--- a/include/asm-i386/mach-default/mach_timer.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * include/asm-i386/mach-default/mach_timer.h
3 *
4 * Machine specific calibrate_tsc() for generic.
5 * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
6 */
7/* ------ Calibrate the TSC -------
8 * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
9 * Too much 64-bit arithmetic here to do this cleanly in C, and for
10 * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
11 * output busy loop as low as possible. We avoid reading the CTC registers
12 * directly because of the awkward 8-bit access mechanism of the 82C54
13 * device.
14 */
15#ifndef _MACH_TIMER_H
16#define _MACH_TIMER_H
17
18#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
19#define CALIBRATE_LATCH \
20 ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
21
22static inline void mach_prepare_counter(void)
23{
24 /* Set the Gate high, disable speaker */
25 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
26
27 /*
28 * Now let's take care of CTC channel 2
29 *
30 * Set the Gate high, program CTC channel 2 for mode 0,
31 * (interrupt on terminal count mode), binary count;
32 * load CALIBRATE_LATCH (LSB then MSB) to begin the countdown.
33 *
34 * Some devices need a delay here.
35 */
36 outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
37 outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
38 outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
39}
40
41static inline void mach_countup(unsigned long *count_p)
42{
43 unsigned long count = 0;
44 do {
45 count++;
46 } while ((inb_p(0x61) & 0x20) == 0);
47 *count_p = count;
48}
49
50#endif /* !_MACH_TIMER_H */
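
The latch value above comes straight from the PIT input clock: CLOCK_TICK_RATE on i386 is 1193182 Hz, so a 30 ms countdown needs roughly 35795 ticks, and the TSC frequency falls out of how many TSC cycles elapse while the counter runs. A sketch of that arithmetic (the TSC delta here is an invented sample, not a measurement):

#include <stdio.h>

#define CLOCK_TICK_RATE		1193182		/* PIT input clock, Hz */
#define CALIBRATE_TIME_MSEC	30
#define CALIBRATE_LATCH \
	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2) / 1000)

int main(void)
{
	/* Hypothetical TSC delta observed across the 30 ms window. */
	unsigned long long tsc_delta = 24000000ULL;

	printf("latch = %d PIT ticks\n", (int)CALIBRATE_LATCH);	/* 35795 */
	printf("TSC ~= %llu Hz\n",
	       tsc_delta * 1000ULL / CALIBRATE_TIME_MSEC);	/* 800 MHz */
	return 0;
}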
diff --git a/include/asm-i386/mach-default/mach_traps.h b/include/asm-i386/mach-default/mach_traps.h
deleted file mode 100644
index 625438b8a6eb..000000000000
--- a/include/asm-i386/mach-default/mach_traps.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * include/asm-i386/mach-default/mach_traps.h
3 *
4 * Machine specific NMI handling for generic.
5 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
6 */
7#ifndef _MACH_TRAPS_H
8#define _MACH_TRAPS_H
9
10#include <asm/mc146818rtc.h>
11
12static inline void clear_mem_error(unsigned char reason)
13{
14 reason = (reason & 0xf) | 4;
15 outb(reason, 0x61);
16}
17
18static inline unsigned char get_nmi_reason(void)
19{
20 return inb(0x61);
21}
22
23static inline void reassert_nmi(void)
24{
25 int old_reg = -1;
26
27 if (do_i_have_lock_cmos())
28 old_reg = current_lock_cmos_reg();
29 else
30 lock_cmos(0); /* register doesn't matter here */
31 outb(0x8f, 0x70);
32 inb(0x71); /* dummy */
33 outb(0x0f, 0x70);
34 inb(0x71); /* dummy */
35 if (old_reg >= 0)
36 outb(old_reg, 0x70);
37 else
38 unlock_cmos();
39}
40
41#endif /* !_MACH_TRAPS_H */
diff --git a/include/asm-i386/mach-default/mach_wakecpu.h b/include/asm-i386/mach-default/mach_wakecpu.h
deleted file mode 100644
index 3ebb17893aa5..000000000000
--- a/include/asm-i386/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef __ASM_MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H
3
4/*
5 * This file copes with machines that wake up secondary CPUs via the
6 * INIT, INIT, STARTUP sequence.
7 */
8
9#define WAKE_SECONDARY_VIA_INIT
10
11#define TRAMPOLINE_LOW phys_to_virt(0x467)
12#define TRAMPOLINE_HIGH phys_to_virt(0x469)
13
14#define boot_cpu_apicid boot_cpu_physical_apicid
15
16static inline void wait_for_init_deassert(atomic_t *deassert)
17{
18 while (!atomic_read(deassert))
19 cpu_relax();
20 return;
21}
22
23/* Nothing to do for most platforms, since cleared by the INIT cycle */
24static inline void smp_callin_clear_local_apic(void)
25{
26}
27
28static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
29{
30}
31
32static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
33{
34}
35
36#if APIC_DEBUG
37 #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
38#else
39 #define inquire_remote_apic(apicid) {}
40#endif
41
42#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/include/asm-i386/mach-default/pci-functions.h b/include/asm-i386/mach-default/pci-functions.h
deleted file mode 100644
index ed0bab427354..000000000000
--- a/include/asm-i386/mach-default/pci-functions.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * PCI BIOS function numbering for conventional PCI BIOS
3 * systems
4 */
5
6#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX
7#define PCIBIOS_PCI_BIOS_PRESENT 0xb101
8#define PCIBIOS_FIND_PCI_DEVICE 0xb102
9#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103
10#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
11#define PCIBIOS_READ_CONFIG_BYTE 0xb108
12#define PCIBIOS_READ_CONFIG_WORD 0xb109
13#define PCIBIOS_READ_CONFIG_DWORD 0xb10a
14#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b
15#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c
16#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d
17#define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e
18#define PCIBIOS_SET_PCI_HW_INT 0xb10f
19
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h
deleted file mode 100644
index 605e3ccb991b..000000000000
--- a/include/asm-i386/mach-default/setup_arch.h
+++ /dev/null
@@ -1,7 +0,0 @@
1/* Hook to call BIOS initialisation function */
2
3/* no action for generic */
4
5#ifndef ARCH_SETUP
6#define ARCH_SETUP
7#endif
diff --git a/include/asm-i386/mach-default/smpboot_hooks.h b/include/asm-i386/mach-default/smpboot_hooks.h
deleted file mode 100644
index 7f45f6311059..000000000000
--- a/include/asm-i386/mach-default/smpboot_hooks.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
2 * which needs to alter them. */
3
4static inline void smpboot_clear_io_apic_irqs(void)
5{
6 io_apic_irqs = 0;
7}
8
9static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
10{
11 CMOS_WRITE(0xa, 0xf);
12 local_flush_tlb();
13 Dprintk("1.\n");
14 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
15 Dprintk("2.\n");
16 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
17 Dprintk("3.\n");
18}
19
20static inline void smpboot_restore_warm_reset_vector(void)
21{
22 /*
23 * Install writable page 0 entry to set BIOS data area.
24 */
25 local_flush_tlb();
26
27 /*
28 * Paranoid: Set warm reset code and vector here back
29 * to default values.
30 */
31 CMOS_WRITE(0, 0xf);
32
33 *((volatile long *) phys_to_virt(0x467)) = 0;
34}
35
36static inline void smpboot_setup_io_apic(void)
37{
38 /*
39 * Here we can be sure that there is an IO-APIC in the system. Let's
40 * go and set it up:
41 */
42 if (!skip_ioapic_setup && nr_ioapics)
43 setup_IO_APIC();
44}
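
smpboot_setup_warm_reset_vector() above splits the trampoline entry point into the real-mode segment:offset pair the BIOS expects at 0x467/0x469. A sketch of the split and its reconstruction, on an assumed entry address:

#include <stdio.h>

int main(void)
{
	unsigned long start_eip = 0x9a000UL;	/* assumed trampoline address */

	/* Same split as smpboot_setup_warm_reset_vector(). */
	unsigned short seg = start_eip >> 4;	/* stored at 0x469 */
	unsigned short off = start_eip & 0xf;	/* stored at 0x467 */

	printf("entry 0x%05lx -> %04x:%04x -> linear 0x%05lx\n",
	       start_eip, seg, off,
	       ((unsigned long)seg << 4) + off);
	return 0;
}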
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
deleted file mode 100644
index caec64be516d..000000000000
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ /dev/null
@@ -1,206 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4extern u8 bios_cpu_apicid[];
5
6#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
7#define esr_disable (1)
8
9static inline int apic_id_registered(void)
10{
11 return (1);
12}
13
14static inline cpumask_t target_cpus(void)
15{
16#if defined CONFIG_ES7000_CLUSTERED_APIC
17 return CPU_MASK_ALL;
18#else
19 return cpumask_of_cpu(smp_processor_id());
20#endif
21}
22#define TARGET_CPUS (target_cpus())
23
24#if defined CONFIG_ES7000_CLUSTERED_APIC
25#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
26#define INT_DELIVERY_MODE (dest_LowestPrio)
27#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */
28#define NO_BALANCE_IRQ (1)
29#undef WAKE_SECONDARY_VIA_INIT
30#define WAKE_SECONDARY_VIA_MIP
31#else
32#define APIC_DFR_VALUE (APIC_DFR_FLAT)
33#define INT_DELIVERY_MODE (dest_Fixed)
34#define INT_DEST_MODE (0) /* phys delivery to target procs */
35#define NO_BALANCE_IRQ (0)
36#undef APIC_DEST_LOGICAL
37#define APIC_DEST_LOGICAL 0x0
38#define WAKE_SECONDARY_VIA_INIT
39#endif
40
41static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
42{
43 return 0;
44}
45static inline unsigned long check_apicid_present(int bit)
46{
47 return physid_isset(bit, phys_cpu_present_map);
48}
49
50#define apicid_cluster(apicid) (apicid & 0xF0)
51
52static inline unsigned long calculate_ldr(int cpu)
53{
54 unsigned long id;
55 id = xapic_phys_to_log_apicid(cpu);
56 return (SET_APIC_LOGICAL_ID(id));
57}
58
59/*
60 * Set up the logical destination ID.
61 *
62 * Intel recommends setting DFR, LDR and TPR before enabling
63 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
64 * document number 292116). So here it goes...
65 */
66static inline void init_apic_ldr(void)
67{
68 unsigned long val;
69 int cpu = smp_processor_id();
70
71 apic_write_around(APIC_DFR, APIC_DFR_VALUE);
72 val = calculate_ldr(cpu);
73 apic_write_around(APIC_LDR, val);
74}
75
76#ifndef CONFIG_X86_GENERICARCH
77extern void enable_apic_mode(void);
78#endif
79
80extern int apic_version [MAX_APICS];
81static inline void setup_apic_routing(void)
82{
83 int apic = bios_cpu_apicid[smp_processor_id()];
84 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
85 (apic_version[apic] == 0x14) ?
86 "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
87}
88
89static inline int multi_timer_check(int apic, int irq)
90{
91 return 0;
92}
93
94static inline int apicid_to_node(int logical_apicid)
95{
96 return 0;
97}
98
99
100static inline int cpu_present_to_apicid(int mps_cpu)
101{
102 if (!mps_cpu)
103 return boot_cpu_physical_apicid;
104 else if (mps_cpu < NR_CPUS)
105 return (int) bios_cpu_apicid[mps_cpu];
106 else
107 return BAD_APICID;
108}
109
110static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
111{
112 static int id = 0;
113 physid_mask_t mask;
114 mask = physid_mask_of_physid(id);
115 ++id;
116 return mask;
117}
118
119extern u8 cpu_2_logical_apicid[];
120/* Mapping from cpu number to logical apicid */
121static inline int cpu_to_logical_apicid(int cpu)
122{
123#ifdef CONFIG_SMP
124 if (cpu >= NR_CPUS)
125 return BAD_APICID;
126 return (int)cpu_2_logical_apicid[cpu];
127#else
128 return logical_smp_processor_id();
129#endif
130}
131
132static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
133{
134 printk("Processor #%d %ld:%ld APIC version %d\n",
135 m->mpc_apicid,
136 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
137 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
138 m->mpc_apicver);
139 return (m->mpc_apicid);
140}
141
142static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
143{
144 /* For clustered we don't have a good way to do this yet - hack */
145 return physids_promote(0xff);
146}
147
148
149static inline void setup_portio_remap(void)
150{
151}
152
153extern unsigned int boot_cpu_physical_apicid;
154static inline int check_phys_apicid_present(int cpu_physical_apicid)
155{
156 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
157 return (1);
158}
159
160static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
161{
162 int num_bits_set;
163 int cpus_found = 0;
164 int cpu;
165 int apicid;
166
167 num_bits_set = cpus_weight(cpumask);
168 /* Return id to all */
169 if (num_bits_set == NR_CPUS)
170#if defined CONFIG_ES7000_CLUSTERED_APIC
171 return 0xFF;
172#else
173 return cpu_to_logical_apicid(0);
174#endif
175 /*
176 * The CPUs in the mask must all be in the same APIC cluster. If they
177 * are not, return the default value of TARGET_CPUS.
178 */
179 cpu = first_cpu(cpumask);
180 apicid = cpu_to_logical_apicid(cpu);
181 while (cpus_found < num_bits_set) {
182 if (cpu_isset(cpu, cpumask)) {
183 int new_apicid = cpu_to_logical_apicid(cpu);
184 if (apicid_cluster(apicid) !=
185 apicid_cluster(new_apicid)){
186 printk ("%s: Not a valid mask!\n",__FUNCTION__);
187#if defined CONFIG_ES7000_CLUSTERED_APIC
188 return 0xFF;
189#else
190 return cpu_to_logical_apicid(0);
191#endif
192 }
193 apicid = new_apicid;
194 cpus_found++;
195 }
196 cpu++;
197 }
198 return apicid;
199}
200
201static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
202{
203 return cpuid_apic >> index_msb;
204}
205
206#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-es7000/mach_apicdef.h b/include/asm-i386/mach-es7000/mach_apicdef.h
deleted file mode 100644
index a58ab5a75c8c..000000000000
--- a/include/asm-i386/mach-es7000/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-i386/mach-es7000/mach_ipi.h b/include/asm-i386/mach-es7000/mach_ipi.h
deleted file mode 100644
index 5e61bd220b06..000000000000
--- a/include/asm-i386/mach-es7000/mach_ipi.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15 if (!cpus_empty(mask))
16 send_IPI_mask(mask, vector);
17}
18
19static inline void send_IPI_all(int vector)
20{
21 send_IPI_mask(cpu_online_map, vector);
22}
23
24#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
deleted file mode 100644
index 8aa10547b4b1..000000000000
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ /dev/null
@@ -1,40 +0,0 @@
1#ifndef __ASM_MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H
3
4#include <linux/acpi.h>
5
6static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
7 struct mpc_config_translation *translation)
8{
9 Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
10}
11
12static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
13 struct mpc_config_translation *translation)
14{
15}
16
17extern int parse_unisys_oem (char *oemptr);
18extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
19extern void setup_unisys(void);
20
21#ifndef CONFIG_X86_GENERICARCH
22extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
23extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
24 char *productid);
25#endif
26
27#ifdef CONFIG_ACPI
28
29static inline int es7000_check_dsdt(void)
30{
31 struct acpi_table_header header;
32	memset(&header, 0, sizeof(struct acpi_table_header));
33 acpi_get_table_header(ACPI_SIG_DSDT, 0, &header);
34 if (!strncmp(header.oem_id, "UNISYS", 6))
35 return 1;
36 return 0;
37}
38#endif
39
40#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-es7000/mach_mpspec.h b/include/asm-i386/mach-es7000/mach_mpspec.h
deleted file mode 100644
index b1f5039d4506..000000000000
--- a/include/asm-i386/mach-es7000/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#define MAX_MP_BUSSES 256
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-es7000/mach_wakecpu.h b/include/asm-i386/mach-es7000/mach_wakecpu.h
deleted file mode 100644
index 84ff58314501..000000000000
--- a/include/asm-i386/mach-es7000/mach_wakecpu.h
+++ /dev/null
@@ -1,59 +0,0 @@
1#ifndef __ASM_MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H
3
4/*
5 * This file copes with machines that wake up secondary CPUs via the
6 * INIT, INIT, STARTUP sequence.
7 */
8
9#ifdef CONFIG_ES7000_CLUSTERED_APIC
10#define WAKE_SECONDARY_VIA_MIP
11#else
12#define WAKE_SECONDARY_VIA_INIT
13#endif
14
15#ifdef WAKE_SECONDARY_VIA_MIP
16extern int es7000_start_cpu(int cpu, unsigned long eip);
17static inline int
18wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
19{
20 int boot_error = 0;
21 boot_error = es7000_start_cpu(phys_apicid, start_eip);
22 return boot_error;
23}
24#endif
25
26#define TRAMPOLINE_LOW phys_to_virt(0x467)
27#define TRAMPOLINE_HIGH phys_to_virt(0x469)
28
29#define boot_cpu_apicid boot_cpu_physical_apicid
30
31static inline void wait_for_init_deassert(atomic_t *deassert)
32{
33#ifdef WAKE_SECONDARY_VIA_INIT
34 while (!atomic_read(deassert))
35 cpu_relax();
36#endif
37 return;
38}
39
40/* Nothing to do for most platforms, since cleared by the INIT cycle */
41static inline void smp_callin_clear_local_apic(void)
42{
43}
44
45static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
46{
47}
48
49static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
50{
51}
52
53#if APIC_DEBUG
54 #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
55#else
56 #define inquire_remote_apic(apicid) {}
57#endif
58
59#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/include/asm-i386/mach-generic/irq_vectors_limits.h b/include/asm-i386/mach-generic/irq_vectors_limits.h
deleted file mode 100644
index 890ce3f5e09a..000000000000
--- a/include/asm-i386/mach-generic/irq_vectors_limits.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H
3
4/*
5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
6 * even with uni-proc kernels, so use a big array.
7 *
8 * This value should be the same in both the generic and summit subarches.
9 * Change one, change 'em both.
10 */
11#define NR_IRQS 224
12#define NR_IRQ_VECTORS 1024
13
14#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-i386/mach-generic/mach_apic.h b/include/asm-i386/mach-generic/mach_apic.h
deleted file mode 100644
index a236e7021528..000000000000
--- a/include/asm-i386/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#include <asm/genapic.h>
5
6#define esr_disable (genapic->ESR_DISABLE)
7#define NO_BALANCE_IRQ (genapic->no_balance_irq)
8#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
9#define INT_DEST_MODE (genapic->int_dest_mode)
10#undef APIC_DEST_LOGICAL
11#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
12#define TARGET_CPUS (genapic->target_cpus())
13#define apic_id_registered (genapic->apic_id_registered)
14#define init_apic_ldr (genapic->init_apic_ldr)
15#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
16#define setup_apic_routing (genapic->setup_apic_routing)
17#define multi_timer_check (genapic->multi_timer_check)
18#define apicid_to_node (genapic->apicid_to_node)
19#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
20#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
21#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
22#define mpc_apic_id (genapic->mpc_apic_id)
23#define setup_portio_remap (genapic->setup_portio_remap)
24#define check_apicid_present (genapic->check_apicid_present)
25#define check_phys_apicid_present (genapic->check_phys_apicid_present)
26#define check_apicid_used (genapic->check_apicid_used)
27#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
28#define enable_apic_mode (genapic->enable_apic_mode)
29#define phys_pkg_id (genapic->phys_pkg_id)
30
31extern void generic_bigsmp_probe(void);
32
33#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-generic/mach_apicdef.h b/include/asm-i386/mach-generic/mach_apicdef.h
deleted file mode 100644
index 28ed98972ca8..000000000000
--- a/include/asm-i386/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _GENAPIC_MACH_APICDEF_H
2#define _GENAPIC_MACH_APICDEF_H 1
3
4#ifndef APIC_DEFINITION
5#include <asm/genapic.h>
6
7#define GET_APIC_ID (genapic->get_apic_id)
8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif
10
11#endif
diff --git a/include/asm-i386/mach-generic/mach_ipi.h b/include/asm-i386/mach-generic/mach_ipi.h
deleted file mode 100644
index 441b0fe3ed1d..000000000000
--- a/include/asm-i386/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _MACH_IPI_H
2#define _MACH_IPI_H 1
3
4#include <asm/genapic.h>
5
6#define send_IPI_mask (genapic->send_IPI_mask)
7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all)
9
10#endif
diff --git a/include/asm-i386/mach-generic/mach_mpparse.h b/include/asm-i386/mach-generic/mach_mpparse.h
deleted file mode 100644
index dbd9fce54f4d..000000000000
--- a/include/asm-i386/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _MACH_MPPARSE_H
2#define _MACH_MPPARSE_H 1
3
4#include <asm/genapic.h>
5
6#define mpc_oem_bus_info (genapic->mpc_oem_bus_info)
7#define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus)
8
9int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid);
10int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
11
12#endif
diff --git a/include/asm-i386/mach-generic/mach_mpspec.h b/include/asm-i386/mach-generic/mach_mpspec.h
deleted file mode 100644
index 9ef0b941bb22..000000000000
--- a/include/asm-i386/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
8#define MAX_MP_BUSSES 260
9
10#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h
deleted file mode 100644
index 5e5e7dd2692e..000000000000
--- a/include/asm-i386/mach-numaq/mach_apic.h
+++ /dev/null
@@ -1,149 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#include <asm/io.h>
5#include <linux/mmzone.h>
6#include <linux/nodemask.h>
7
8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
9
10static inline cpumask_t target_cpus(void)
11{
12 return CPU_MASK_ALL;
13}
14
15#define TARGET_CPUS (target_cpus())
16
17#define NO_BALANCE_IRQ (1)
18#define esr_disable (1)
19
20#define INT_DELIVERY_MODE dest_LowestPrio
21#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
22
23#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap)
24#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map)
25#define apicid_cluster(apicid) (apicid & 0xF0)
26
27static inline int apic_id_registered(void)
28{
29 return 1;
30}
31
32static inline void init_apic_ldr(void)
33{
34 /* Already done in NUMA-Q firmware */
35}
36
37static inline void setup_apic_routing(void)
38{
39 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
40 "NUMA-Q", nr_ioapics);
41}
42
43/*
44 * Skip adding the timer int on secondary nodes, which causes
45 * a small but painful rift in the time-space continuum.
46 */
47static inline int multi_timer_check(int apic, int irq)
48{
49 return apic != 0 && irq == 0;
50}
51
52static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
53{
54 /* We don't have a good way to do this yet - hack */
55 return physids_promote(0xFUL);
56}
57
58/* Mapping from cpu number to logical apicid */
59extern u8 cpu_2_logical_apicid[];
60static inline int cpu_to_logical_apicid(int cpu)
61{
62 if (cpu >= NR_CPUS)
63 return BAD_APICID;
64 return (int)cpu_2_logical_apicid[cpu];
65}
66
67/*
68 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
69 * cpu to APIC ID relation to properly interact with the intelligent
70 * mode of the cluster controller.
71 */
72static inline int cpu_present_to_apicid(int mps_cpu)
73{
74 if (mps_cpu < 60)
75 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
76 else
77 return BAD_APICID;
78}
79
80static inline int generate_logical_apicid(int quad, int phys_apicid)
81{
82 return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
83}
84
85static inline int apicid_to_node(int logical_apicid)
86{
87 return logical_apicid >> 4;
88}
89
90static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
91{
92 int node = apicid_to_node(logical_apicid);
93 int cpu = __ffs(logical_apicid & 0xf);
94
95 return physid_mask_of_physid(cpu + 4*node);
96}
97
98static inline int mpc_apic_id(struct mpc_config_processor *m,
99 struct mpc_config_translation *translation_record)
100{
101 int quad = translation_record->trans_quad;
102 int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
103
104 printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n",
105 m->mpc_apicid,
106 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
107 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
108 m->mpc_apicver, quad, logical_apicid);
109 return logical_apicid;
110}
111
112static inline void setup_portio_remap(void)
113{
114 int num_quads = num_online_nodes();
115
116 if (num_quads <= 1)
117 return;
118
119 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
120 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
121 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
122 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
123}
124
125static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
126{
127 return (1);
128}
129
130static inline void enable_apic_mode(void)
131{
132}
133
134/*
135 * We use physical apicids here, not logical, so just return the default
136 * physical broadcast to stop people from breaking us
137 */
138static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
139{
140 return (int) 0xF;
141}
142
143/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
144static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
145{
146 return cpuid_apic >> index_msb;
147}
148
149#endif /* __ASM_MACH_APIC_H */
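
The NUMA-Q mapping above packs the quad number into the high nibble and a one-hot CPU bit into the low nibble. Reproducing cpu_present_to_apicid() and apicid_to_node() for a few CPU numbers makes the encoding visible; this is a standalone sketch, not kernel code:

#include <stdio.h>

/* NUMA-Q encoding: quad in the high nibble, one-hot CPU bit
 * (four CPUs per quad) in the low nibble, as in the header above. */
static int cpu_present_to_apicid(int mps_cpu)
{
	return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
}

static int apicid_to_node(int logical_apicid)
{
	return logical_apicid >> 4;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 8; cpu++) {
		int apicid = cpu_present_to_apicid(cpu);

		printf("cpu %d -> apicid 0x%02x (quad %d)\n",
		       cpu, apicid, apicid_to_node(apicid));
	}
	return 0;	/* CPUs 0-3 land in quad 0, CPUs 4-7 in quad 1 */
}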
diff --git a/include/asm-i386/mach-numaq/mach_apicdef.h b/include/asm-i386/mach-numaq/mach_apicdef.h
deleted file mode 100644
index bf439d0690f5..000000000000
--- a/include/asm-i386/mach-numaq/mach_apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4
5#define APIC_ID_MASK (0xF<<24)
6
7static inline unsigned get_apic_id(unsigned long x)
8{
9 return (((x)>>24)&0x0F);
10}
11
12#define GET_APIC_ID(x) get_apic_id(x)
13
14#endif
diff --git a/include/asm-i386/mach-numaq/mach_ipi.h b/include/asm-i386/mach-numaq/mach_ipi.h
deleted file mode 100644
index c6044488e9e6..000000000000
--- a/include/asm-i386/mach-numaq/mach_ipi.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18}
19
20static inline void send_IPI_all(int vector)
21{
22 send_IPI_mask(cpu_online_map, vector);
23}
24
25#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-numaq/mach_mpparse.h b/include/asm-i386/mach-numaq/mach_mpparse.h
deleted file mode 100644
index 51bbac8fc0c2..000000000000
--- a/include/asm-i386/mach-numaq/mach_mpparse.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __ASM_MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H
3
4static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
5 struct mpc_config_translation *translation)
6{
7 int quad = translation->trans_quad;
8 int local = translation->trans_local;
9
10 mp_bus_id_to_node[m->mpc_busid] = quad;
11 mp_bus_id_to_local[m->mpc_busid] = local;
12 printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad);
13}
14
15static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
16 struct mpc_config_translation *translation)
17{
18 int quad = translation->trans_quad;
19 int local = translation->trans_local;
20
21 quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
22}
23
24/* Hook from generic ACPI tables.c */
25static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
26{
27}
28
29#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-numaq/mach_mpspec.h b/include/asm-i386/mach-numaq/mach_mpspec.h
deleted file mode 100644
index dffb09856f8f..000000000000
--- a/include/asm-i386/mach-numaq/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 512
5
6#define MAX_MP_BUSSES 32
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-numaq/mach_wakecpu.h b/include/asm-i386/mach-numaq/mach_wakecpu.h
deleted file mode 100644
index 00530041a991..000000000000
--- a/include/asm-i386/mach-numaq/mach_wakecpu.h
+++ /dev/null
@@ -1,43 +0,0 @@
1#ifndef __ASM_MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H
3
4/* This file copes with machines that wake up secondary CPUs via NMIs */
5
6#define WAKE_SECONDARY_VIA_NMI
7
8#define TRAMPOLINE_LOW phys_to_virt(0x8)
9#define TRAMPOLINE_HIGH phys_to_virt(0xa)
10
11#define boot_cpu_apicid boot_cpu_logical_apicid
12
13/* We don't do anything here because we use NMIs to boot instead */
14static inline void wait_for_init_deassert(atomic_t *deassert)
15{
16}
17
18/*
19 * Because we use NMIs rather than the INIT-STARTUP sequence to
20 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
21 */
22static inline void smp_callin_clear_local_apic(void)
23{
24 clear_local_APIC();
25}
26
27static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
28{
29 printk("Storing NMI vector\n");
30 *high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
31 *low = *((volatile unsigned short *) TRAMPOLINE_LOW);
32}
33
34static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
35{
36 printk("Restoring NMI vector\n");
37 *((volatile unsigned short *) TRAMPOLINE_HIGH) = *high;
38 *((volatile unsigned short *) TRAMPOLINE_LOW) = *low;
39}
40
41#define inquire_remote_apic(apicid) {}
42
43#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/include/asm-i386/mach-summit/irq_vectors_limits.h b/include/asm-i386/mach-summit/irq_vectors_limits.h
deleted file mode 100644
index 890ce3f5e09a..000000000000
--- a/include/asm-i386/mach-summit/irq_vectors_limits.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H
3
4/*
5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
6 * even with uni-proc kernels, so use a big array.
7 *
8 * This value should be the same in both the generic and summit subarches.
9 * Change one, change 'em both.
10 */
11#define NR_IRQS 224
12#define NR_IRQ_VECTORS 1024
13
14#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
deleted file mode 100644
index 732f776aab8e..000000000000
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ /dev/null
@@ -1,197 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#include <asm/smp.h>
5
6#define esr_disable (1)
7#define NO_BALANCE_IRQ (0)
8
9/* In clustered mode, the high nibble of APIC ID is a cluster number.
10 * The low nibble is a 4-bit bitmap. */
11#define XAPIC_DEST_CPUS_SHIFT 4
12#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
13#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
14
15#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
16
17static inline cpumask_t target_cpus(void)
18{
19 /* CPU_MASK_ALL (0xff) has undefined behaviour with
20 * dest_LowestPrio mode logical clustered APIC interrupt routing.
21 * Just start on CPU 0; IRQ balancing will spread the load.
22 */
23 return cpumask_of_cpu(0);
24}
25#define TARGET_CPUS (target_cpus())
26
27#define INT_DELIVERY_MODE (dest_LowestPrio)
28#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
29
30static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
31{
32 return 0;
33}
34
35/* We don't use the phys_cpu_present_map to indicate APIC ID presence */
36static inline unsigned long check_apicid_present(int bit)
37{
38 return 1;
39}
40
41#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
42
43extern u8 bios_cpu_apicid[];
44extern u8 cpu_2_logical_apicid[];
45
46static inline void init_apic_ldr(void)
47{
48 unsigned long val, id;
49 int count = 0;
50 u8 my_id = (u8)hard_smp_processor_id();
51 u8 my_cluster = (u8)apicid_cluster(my_id);
52#ifdef CONFIG_SMP
53 u8 lid;
54 int i;
55
56 /* Create logical APIC IDs by counting CPUs already in cluster. */
57 for (count = 0, i = NR_CPUS; --i >= 0; ) {
58 lid = cpu_2_logical_apicid[i];
59 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
60 ++count;
61 }
62#endif
63	/* We only have a 4-bit-wide bitmap in cluster mode. If a deranged
64 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
65 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
66 id = my_cluster | (1UL << count);
67 apic_write_around(APIC_DFR, APIC_DFR_VALUE);
68 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
69 val |= SET_APIC_LOGICAL_ID(id);
70 apic_write_around(APIC_LDR, val);
71}
72
73static inline int multi_timer_check(int apic, int irq)
74{
75 return 0;
76}
77
78static inline int apic_id_registered(void)
79{
80 return 1;
81}
82
83static inline void setup_apic_routing(void)
84{
85 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
86 nr_ioapics);
87}
88
89static inline int apicid_to_node(int logical_apicid)
90{
91#ifdef CONFIG_SMP
92 return apicid_2_node[hard_smp_processor_id()];
93#else
94 return 0;
95#endif
96}
97
98/* Mapping from cpu number to logical apicid */
99static inline int cpu_to_logical_apicid(int cpu)
100{
101#ifdef CONFIG_SMP
102 if (cpu >= NR_CPUS)
103 return BAD_APICID;
104 return (int)cpu_2_logical_apicid[cpu];
105#else
106 return logical_smp_processor_id();
107#endif
108}
109
110static inline int cpu_present_to_apicid(int mps_cpu)
111{
112 if (mps_cpu < NR_CPUS)
113 return (int)bios_cpu_apicid[mps_cpu];
114 else
115 return BAD_APICID;
116}
117
118static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
119{
120 /* For clustered we don't have a good way to do this yet - hack */
121 return physids_promote(0x0F);
122}
123
124static inline physid_mask_t apicid_to_cpu_present(int apicid)
125{
126 return physid_mask_of_physid(0);
127}
128
129static inline int mpc_apic_id(struct mpc_config_processor *m,
130 struct mpc_config_translation *translation_record)
131{
132 printk("Processor #%d %ld:%ld APIC version %d\n",
133 m->mpc_apicid,
134 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
135 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
136 m->mpc_apicver);
137 return (m->mpc_apicid);
138}
139
140static inline void setup_portio_remap(void)
141{
142}
143
144static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
145{
146 return 1;
147}
148
149static inline void enable_apic_mode(void)
150{
151}
152
153static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
154{
155 int num_bits_set;
156 int cpus_found = 0;
157 int cpu;
158 int apicid;
159
160 num_bits_set = cpus_weight(cpumask);
161 /* Return id to all */
162 if (num_bits_set == NR_CPUS)
163 return (int) 0xFF;
164 /*
165 * The CPUs in the mask must all be in the same APIC cluster. If they
166 * are not, return the default value of TARGET_CPUS.
167 */
168 cpu = first_cpu(cpumask);
169 apicid = cpu_to_logical_apicid(cpu);
170 while (cpus_found < num_bits_set) {
171 if (cpu_isset(cpu, cpumask)) {
172 int new_apicid = cpu_to_logical_apicid(cpu);
173 if (apicid_cluster(apicid) !=
174 apicid_cluster(new_apicid)){
175 printk ("%s: Not a valid mask!\n",__FUNCTION__);
176 return 0xFF;
177 }
178 apicid = apicid | new_apicid;
179 cpus_found++;
180 }
181 cpu++;
182 }
183 return apicid;
184}
185
186/* cpuid returns the value latched in the HW at reset, not the APIC ID
187 * register's value. For any box whose BIOS changes APIC IDs, like
188 * clustered APIC systems, we must use hard_smp_processor_id.
189 *
190 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
191 */
192static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
193{
194 return hard_smp_processor_id() >> index_msb;
195}
196
197#endif /* __ASM_MACH_APIC_H */
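
Summit's init_apic_ldr() hands out logical IDs by counting how many CPUs already sit in the caller's cluster and setting the next free bit in the 4-bit bitmap. A userspace sketch of that assignment over a made-up set of physical APIC IDs:

#include <stdio.h>

#define XAPIC_DEST_CLUSTER_MASK	0xF0
#define apicid_cluster(apicid)	((apicid) & XAPIC_DEST_CLUSTER_MASK)

int main(void)
{
	/* Hypothetical physical APIC IDs: two in cluster 0x10, one in 0x20. */
	unsigned char phys_ids[] = { 0x11, 0x12, 0x21 };
	unsigned char logical[3];
	int i, j;

	for (i = 0; i < 3; i++) {
		int count = 0;

		/* Count CPUs already assigned in this CPU's cluster. */
		for (j = 0; j < i; j++)
			if (apicid_cluster(logical[j]) ==
			    apicid_cluster(phys_ids[i]))
				count++;
		logical[i] = apicid_cluster(phys_ids[i]) | (1 << count);
		printf("phys 0x%02x -> logical 0x%02x\n",
		       phys_ids[i], logical[i]);
	}
	return 0;	/* 0x11->0x11, 0x12->0x12, 0x21->0x21 */
}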
diff --git a/include/asm-i386/mach-summit/mach_apicdef.h b/include/asm-i386/mach-summit/mach_apicdef.h
deleted file mode 100644
index a58ab5a75c8c..000000000000
--- a/include/asm-i386/mach-summit/mach_apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/include/asm-i386/mach-summit/mach_ipi.h b/include/asm-i386/mach-summit/mach_ipi.h
deleted file mode 100644
index 9404c535b7ec..000000000000
--- a/include/asm-i386/mach-summit/mach_ipi.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5
6static inline void send_IPI_mask(cpumask_t mask, int vector)
7{
8 send_IPI_mask_sequence(mask, vector);
9}
10
11static inline void send_IPI_allbutself(int vector)
12{
13 cpumask_t mask = cpu_online_map;
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18}
19
20static inline void send_IPI_all(int vector)
21{
22 send_IPI_mask(cpu_online_map, vector);
23}
24
25#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
deleted file mode 100644
index c2520539d934..000000000000
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ /dev/null
@@ -1,121 +0,0 @@
1#ifndef __ASM_MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H
3
4#include <mach_apic.h>
5#include <asm/tsc.h>
6
7extern int use_cyclone;
8
9#ifdef CONFIG_X86_SUMMIT_NUMA
10extern void setup_summit(void);
11#else
12#define setup_summit() {}
13#endif
14
15static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
16 struct mpc_config_translation *translation)
17{
18 Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
19}
20
21static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
22 struct mpc_config_translation *translation)
23{
24}
25
26static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
27 char *productid)
28{
29 if (!strncmp(oem, "IBM ENSW", 8) &&
30 (!strncmp(productid, "VIGIL SMP", 9)
31 || !strncmp(productid, "EXA", 3)
32 || !strncmp(productid, "RUTHLESS SMP", 12))){
33 mark_tsc_unstable("Summit based system");
34 use_cyclone = 1; /*enable cyclone-timer*/
35 setup_summit();
36 return 1;
37 }
38 return 0;
39}
40
41/* Hook from generic ACPI tables.c */
42static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
43{
44 if (!strncmp(oem_id, "IBM", 3) &&
45 (!strncmp(oem_table_id, "SERVIGIL", 8)
46 || !strncmp(oem_table_id, "EXA", 3))){
47 mark_tsc_unstable("Summit based system");
48 use_cyclone = 1; /*enable cyclone-timer*/
49 setup_summit();
50 return 1;
51 }
52 return 0;
53}
54
55struct rio_table_hdr {
56 unsigned char version; /* Version number of this data structure */
57 /* Version 3 adds chassis_num & WP_index */
58 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
59 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
60} __attribute__((packed));
61
62struct scal_detail {
63 unsigned char node_id; /* Scalability Node ID */
64 unsigned long CBAR; /* Address of 1MB register space */
65 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
66 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
67 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
68 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
69 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
70 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
71 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
72} __attribute__((packed));
73
74struct rio_detail {
75 unsigned char node_id; /* RIO Node ID */
76 unsigned long BBAR; /* Address of 1MB register space */
77 unsigned char type; /* Type of device */
78 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
79 /* For CYC: Node ID of Twister that owns this CYC */
80 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
81 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
82 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
83 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
84 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
85 /* For CYC: 0 */
86 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
87 /* = 0 : the XAPIC is not used, ie:*/
88 /* ints fwded to another XAPIC */
89 /* Bits1:7 Reserved */
90 /* For CYC: Bits0:7 Reserved */
91 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
92 /* lower slot numbers/PCI bus numbers */
93 /* For CYC: No meaning */
94 unsigned char chassis_num; /* 1 based Chassis number */
95 /* For LookOut WPEGs this field indicates the */
96 /* Expansion Chassis #, enumerated from Boot */
97 /* Node WPEG external port, then Boot Node CYC */
98 /* external port, then Next Vigil chassis WPEG */
99 /* external port, etc. */
100 /* Shared Lookouts have only 1 chassis number (the */
101 /* first one assigned) */
102} __attribute__((packed));
103
104
105typedef enum {
106 CompatTwister = 0, /* Compatibility Twister */
107 AltTwister = 1, /* Alternate Twister of internal 8-way */
108 CompatCyclone = 2, /* Compatibility Cyclone */
109 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
110 CompatWPEG = 4, /* Compatibility WPEG */
111 AltWPEG = 5, /* Second Planar WPEG */
112 LookOutAWPEG = 6, /* LookOut WPEG */
113 LookOutBWPEG = 7, /* LookOut WPEG */
114} node_type;
115
116static inline int is_WPEG(struct rio_detail *rio){
117 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
118 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
119}
120
121#endif /* __ASM_MACH_MPPARSE_H */
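Both checks above are plain prefix matches on the ID strings carried by the MP table and the ACPI MADT. A minimal stand-alone sketch of the same matching logic, with made-up test strings and no kernel dependencies:

#include <stdio.h>
#include <string.h>

/* Prefix-match helper mirroring the strncmp() chain in mps_oem_check(). */
static int is_summit(const char *oem, const char *productid)
{
        return !strncmp(oem, "IBM ENSW", 8) &&
               (!strncmp(productid, "VIGIL SMP", 9) ||
                !strncmp(productid, "EXA", 3) ||
                !strncmp(productid, "RUTHLESS SMP", 12));
}

int main(void)
{
        printf("%d\n", is_summit("IBM ENSW", "EXA4"));   /* 1: matches "EXA" */
        printf("%d\n", is_summit("IBM ENSW", "OTHER"));  /* 0 */
        return 0;
}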
diff --git a/include/asm-i386/mach-summit/mach_mpspec.h b/include/asm-i386/mach-summit/mach_mpspec.h
deleted file mode 100644
index bd765523511a..000000000000
--- a/include/asm-i386/mach-summit/mach_mpspec.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
7#define MAX_MP_BUSSES 260
8
9#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-visws/cobalt.h b/include/asm-i386/mach-visws/cobalt.h
deleted file mode 100644
index 33c36225a042..000000000000
--- a/include/asm-i386/mach-visws/cobalt.h
+++ /dev/null
@@ -1,125 +0,0 @@
1#ifndef __I386_SGI_COBALT_H
2#define __I386_SGI_COBALT_H
3
4#include <asm/fixmap.h>
5
6/*
7 * Cobalt SGI Visual Workstation system ASIC
8 */
9
10#define CO_CPU_NUM_PHYS 0x1e00
11#define CO_CPU_TAB_PHYS (CO_CPU_NUM_PHYS + 2)
12
13#define CO_CPU_MAX 4
14
15#define CO_CPU_PHYS 0xc2000000
16#define CO_APIC_PHYS 0xc4000000
17
18/* see set_fixmap() and asm/fixmap.h */
19#define CO_CPU_VADDR (fix_to_virt(FIX_CO_CPU))
20#define CO_APIC_VADDR (fix_to_virt(FIX_CO_APIC))
21
22/* Cobalt CPU registers -- relative to CO_CPU_VADDR, use co_cpu_*() */
23#define CO_CPU_REV 0x08
24#define CO_CPU_CTRL 0x10
25#define CO_CPU_STAT 0x20
26#define CO_CPU_TIMEVAL 0x30
27
28/* CO_CPU_CTRL bits */
29#define CO_CTRL_TIMERUN 0x04 /* 0 == disabled */
30#define CO_CTRL_TIMEMASK 0x08 /* 0 == unmasked */
31
32/* CO_CPU_STATUS bits */
33#define CO_STAT_TIMEINTR 0x02 /* (r) 1 == int pend, (w) 0 == clear */
34
35/* CO_CPU_TIMEVAL value */
36#define CO_TIME_HZ 100000000 /* Cobalt core rate */
37
38/* Cobalt APIC registers -- relative to CO_APIC_VADDR, use co_apic_*() */
39#define CO_APIC_HI(n) (((n) * 0x10) + 4)
40#define CO_APIC_LO(n) ((n) * 0x10)
41#define CO_APIC_ID 0x0ffc
42
43/* CO_APIC_ID bits */
44#define CO_APIC_ENABLE 0x00000100
45
46/* CO_APIC_LO bits */
47#define CO_APIC_MASK 0x00010000 /* 0 = enabled */
48#define CO_APIC_LEVEL 0x00008000 /* 0 = edge */
49
50/*
51 * Where things are physically wired to Cobalt
52 * #defines with no board _<type>_<rev>_ are common to all (thus far)
53 */
54#define CO_APIC_IDE0 4
55#define CO_APIC_IDE1 2 /* Only on 320 */
56
57#define CO_APIC_8259 12 /* serial, floppy, par-l-l */
58
59/* Lithium PCI Bridge A -- "the one with 82557 Ethernet" */
60#define CO_APIC_PCIA_BASE0 0 /* and 1 */ /* slot 0, line 0 */
61#define CO_APIC_PCIA_BASE123 5 /* and 6 */ /* slot 0, line 1 */
62
63#define CO_APIC_PIIX4_USB 7 /* this one is weird */
64
65/* Lithium PCI Bridge B -- "the one with PIIX4" */
66#define CO_APIC_PCIB_BASE0 8 /* and 9-12 */ /* slot 0, line 0 */
67#define CO_APIC_PCIB_BASE123 13 /* and 14, 15 */ /* slot 0, line 1 */
68
69#define CO_APIC_VIDOUT0 16
70#define CO_APIC_VIDOUT1 17
71#define CO_APIC_VIDIN0 18
72#define CO_APIC_VIDIN1 19
73
74#define CO_APIC_LI_AUDIO 22
75
76#define CO_APIC_AS 24
77#define CO_APIC_RE 25
78
79#define CO_APIC_CPU 28 /* Timer and Cache interrupt */
80#define CO_APIC_NMI 29
81#define CO_APIC_LAST CO_APIC_NMI
82
83/*
84 * This is how IRQs are assigned on the Visual Workstation.
85 * Legacy devices get IRQs 1-15 (the system clock is 0 and is CO_APIC_CPU).
86 * All other devices (including PCI) go to Cobalt and are IRQs 16 and up.
87 */
88#define CO_IRQ_APIC0 16 /* irq of apic entry 0 */
89#define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0)
90#define CO_IRQ(apic) (CO_IRQ_APIC0 + (apic)) /* apic ent to irq */
91#define CO_APIC(irq) ((irq) - CO_IRQ_APIC0) /* irq to apic ent */
92#define CO_IRQ_IDE0 14 /* knowledge of... */
93#define CO_IRQ_IDE1 15 /* ... ide driver defaults! */
94#define CO_IRQ_8259 CO_IRQ(CO_APIC_8259)
95
96#ifdef CONFIG_X86_VISWS_APIC
97extern __inline void co_cpu_write(unsigned long reg, unsigned long v)
98{
99 *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v;
100}
101
102extern __inline unsigned long co_cpu_read(unsigned long reg)
103{
104 return *((volatile unsigned long *)(CO_CPU_VADDR+reg));
105}
106
107extern __inline void co_apic_write(unsigned long reg, unsigned long v)
108{
109 *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v;
110}
111
112extern __inline unsigned long co_apic_read(unsigned long reg)
113{
114 return *((volatile unsigned long *)(CO_APIC_VADDR+reg));
115}
116#endif
117
118extern char visws_board_type;
119
120#define VISWS_320 0
121#define VISWS_540 1
122
123extern char visws_board_rev;
124
125#endif /* __I386_SGI_COBALT_H */
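The CO_IRQ()/CO_APIC() pair above is a fixed offset of CO_IRQ_APIC0 (16) between Linux IRQ numbers and Cobalt APIC entries. A quick arithmetic check, compilable on its own:

#include <stdio.h>

#define CO_IRQ_APIC0    16
#define CO_IRQ(apic)    (CO_IRQ_APIC0 + (apic))
#define CO_APIC(irq)    ((irq) - CO_IRQ_APIC0)
#define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0)
#define CO_APIC_8259    12

int main(void)
{
        int irq = CO_IRQ(CO_APIC_8259);         /* 16 + 12 = 28 */

        printf("8259 cascade: irq %d, apic entry %d, cobalt? %d\n",
               irq, CO_APIC(irq), IS_CO_APIC(irq));
        return 0;
}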
diff --git a/include/asm-i386/mach-visws/entry_arch.h b/include/asm-i386/mach-visws/entry_arch.h
deleted file mode 100644
index b183fa6d83d9..000000000000
--- a/include/asm-i386/mach-visws/entry_arch.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * The following vectors are part of the Linux architecture; there
3 * is no hardware IRQ pin equivalent for them. They are triggered
4 * through the ICC by us (IPIs)
5 */
6#ifdef CONFIG_X86_SMP
7BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
8BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
9BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
10#endif
11
12/*
13 * Every Pentium local APIC has two 'local interrupts', each with a
14 * soft-definable vector. One of them is a timer interrupt; the
15 * other is an error-counter overflow interrupt. Linux uses the
16 * local APIC timer interrupt to get a much simpler SMP time
17 * architecture:
18 */
19#ifdef CONFIG_X86_LOCAL_APIC
20BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
21BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
22BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
23#endif
diff --git a/include/asm-i386/mach-visws/irq_vectors.h b/include/asm-i386/mach-visws/irq_vectors.h
deleted file mode 100644
index cb572d8db505..000000000000
--- a/include/asm-i386/mach-visws/irq_vectors.h
+++ /dev/null
@@ -1,62 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_H
2#define _ASM_IRQ_VECTORS_H
3
4/*
5 * IDT vectors usable for external interrupt sources start
6 * at 0x20:
7 */
8#define FIRST_EXTERNAL_VECTOR 0x20
9
10#define SYSCALL_VECTOR 0x80
11
12/*
13 * Vectors 0x20-0x2f are used for ISA interrupts.
14 */
15
16/*
17 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
18 *
19 * some of the following vectors are 'rare'; they are merged
20 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
21 * TLB, reschedule and local APIC vectors are performance-critical.
22 *
23 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
24 */
25#define SPURIOUS_APIC_VECTOR 0xff
26#define ERROR_APIC_VECTOR 0xfe
27#define INVALIDATE_TLB_VECTOR 0xfd
28#define RESCHEDULE_VECTOR 0xfc
29#define CALL_FUNCTION_VECTOR 0xfb
30
31#define THERMAL_APIC_VECTOR 0xf0
32/*
33 * Local APIC timer IRQ vector is on a different priority level,
34 * to work around the 'lost local interrupt if more than 2 IRQ
35 * sources per level' erratum.
36 */
37#define LOCAL_TIMER_VECTOR 0xef
38
39/*
40 * First APIC vector available to drivers: (vectors 0x30-0xee)
41 * we start at 0x31 to spread out vectors evenly between priority
42 * levels. (0x80 is the syscall vector)
43 */
44#define FIRST_DEVICE_VECTOR 0x31
45#define FIRST_SYSTEM_VECTOR 0xef
46
47#define TIMER_IRQ 0
48
49/*
50 * IRQ definitions
51 */
52#define NR_VECTORS 256
53#define NR_IRQS 224
54#define NR_IRQ_VECTORS NR_IRQS
55
56#define FPU_IRQ 13
57
58#define FIRST_VM86_IRQ 3
59#define LAST_VM86_IRQ 15
60#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
61
62#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-i386/mach-visws/lithium.h b/include/asm-i386/mach-visws/lithium.h
deleted file mode 100644
index d443e68d0066..000000000000
--- a/include/asm-i386/mach-visws/lithium.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef __I386_SGI_LITHIUM_H
2#define __I386_SGI_LITHIUM_H
3
4#include <asm/fixmap.h>
5
6/*
7 * Lithium is the SGI Visual Workstation I/O ASIC
8 */
9
10#define LI_PCI_A_PHYS 0xfc000000 /* Enet is dev 3 */
11#define LI_PCI_B_PHYS 0xfd000000 /* PIIX4 is here */
12
13/* see set_fixmap() and asm/fixmap.h */
14#define LI_PCIA_VADDR (fix_to_virt(FIX_LI_PCIA))
15#define LI_PCIB_VADDR (fix_to_virt(FIX_LI_PCIB))
16
17/* Not a standard PCI? (not in linux/pci.h) */
18#define LI_PCI_BUSNUM 0x44 /* lo8: primary, hi8: sub */
19#define LI_PCI_INTEN 0x46
20
21/* LI_PCI_INTEN bits */
22#define LI_INTA_0 0x0001
23#define LI_INTA_1 0x0002
24#define LI_INTA_2 0x0004
25#define LI_INTA_3 0x0008
26#define LI_INTA_4 0x0010
27#define LI_INTB 0x0020
28#define LI_INTC 0x0040
29#define LI_INTD 0x0080
30
31/* More special purpose accessors... */
32extern __inline void li_pcia_write16(unsigned long reg, unsigned short v)
33{
34 *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v;
35}
36
37extern __inline unsigned short li_pcia_read16(unsigned long reg)
38{
39 return *((volatile unsigned short *)(LI_PCIA_VADDR+reg));
40}
41
42extern __inline void li_pcib_write16(unsigned long reg, unsigned short v)
43{
44 *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v;
45}
46
47extern __inline unsigned short li_pcib_read16(unsigned long reg)
48{
49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
50}
51
52#endif
53
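The li_pci*_read16/write16 helpers are the standard volatile-pointer MMIO idiom: cast base + reg to a volatile 16-bit pointer so the compiler emits a real load or store each time. A user-space sketch of the same pattern against an ordinary buffer standing in for the fixmapped Lithium window (the 0x44 offset echoes LI_PCI_BUSNUM):

#include <stdint.h>
#include <stdio.h>

static uint16_t fake_window[128];       /* stand-in for LI_PCIA_VADDR */

static inline void mmio_write16(void *base, unsigned long reg, uint16_t v)
{
        *(volatile uint16_t *)((char *)base + reg) = v;
}

static inline uint16_t mmio_read16(void *base, unsigned long reg)
{
        return *(volatile uint16_t *)((char *)base + reg);
}

int main(void)
{
        mmio_write16(fake_window, 0x44, 0x0100);
        printf("0x%04x\n", mmio_read16(fake_window, 0x44));
        return 0;
}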
diff --git a/include/asm-i386/mach-visws/mach_apic.h b/include/asm-i386/mach-visws/mach_apic.h
deleted file mode 100644
index efac6f0d139f..000000000000
--- a/include/asm-i386/mach-visws/mach_apic.h
+++ /dev/null
@@ -1,103 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#include <mach_apicdef.h>
5#include <asm/smp.h>
6
7#define APIC_DFR_VALUE (APIC_DFR_FLAT)
8
9#define no_balance_irq (0)
10#define esr_disable (0)
11
12#define INT_DELIVERY_MODE dest_LowestPrio
13#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
14
15#ifdef CONFIG_SMP
16 #define TARGET_CPUS cpu_online_map
17#else
18 #define TARGET_CPUS cpumask_of_cpu(0)
19#endif
20
21#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap)
22#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map)
23
24static inline int apic_id_registered(void)
25{
26 return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
27}
28
29/*
30 * Set up the logical destination ID.
31 *
32 * Intel recommends setting DFR, LDR and TPR before enabling
33 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
34 * document number 292116). So here it goes...
35 */
36static inline void init_apic_ldr(void)
37{
38 unsigned long val;
39
40 apic_write_around(APIC_DFR, APIC_DFR_VALUE);
41 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
42 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
43 apic_write_around(APIC_LDR, val);
44}
45
46static inline void summit_check(char *oem, char *productid)
47{
48}
49
50static inline void setup_apic_routing(void)
51{
52}
53
54static inline int apicid_to_node(int logical_apicid)
55{
56 return 0;
57}
58
59/* Mapping from cpu number to logical apicid */
60static inline int cpu_to_logical_apicid(int cpu)
61{
62 return 1 << cpu;
63}
64
65static inline int cpu_present_to_apicid(int mps_cpu)
66{
67 if (mps_cpu < get_physical_broadcast())
68 return mps_cpu;
69 else
70 return BAD_APICID;
71}
72
73static inline physid_mask_t apicid_to_cpu_present(int apicid)
74{
75 return physid_mask_of_physid(apicid);
76}
77
78#define WAKE_SECONDARY_VIA_INIT
79
80static inline void setup_portio_remap(void)
81{
82}
83
84static inline void enable_apic_mode(void)
85{
86}
87
88static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
89{
90 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
91}
92
93static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
94{
95 return cpus_addr(cpumask)[0];
96}
97
98static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
99{
100 return cpuid_apic >> index_msb;
101}
102
103#endif /* __ASM_MACH_APIC_H */
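In this flat logical mode every CPU owns one bit of the logical APIC ID (cpu_to_logical_apicid() returns 1 << cpu), so a multi-CPU destination is just the OR of the per-CPU bits — which is why cpu_mask_to_apicid() can simply return the first word of the cpumask. A sketch of that arithmetic, assuming at most 8 CPUs:

#include <stdio.h>

/* Flat logical mode: CPU n owns bit n of the 8-bit logical APIC ID. */
static unsigned int cpu_to_logical_apicid(int cpu)
{
        return 1u << cpu;
}

int main(void)
{
        unsigned int dest = 0;
        int cpu;

        for (cpu = 0; cpu < 3; cpu++)           /* target CPUs 0-2 */
                dest |= cpu_to_logical_apicid(cpu);
        printf("logical destination: 0x%02x\n", dest);  /* 0x07 */
        return 0;
}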
diff --git a/include/asm-i386/mach-visws/mach_apicdef.h b/include/asm-i386/mach-visws/mach_apicdef.h
deleted file mode 100644
index 826cfa97d778..000000000000
--- a/include/asm-i386/mach-visws/mach_apicdef.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xF);
9}
10#define GET_APIC_ID(x) get_apic_id(x)
11
12#endif
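get_apic_id() above just isolates bits 24-27 of the APIC ID register image. A worked example with a fabricated register value:

#include <stdio.h>

static unsigned get_apic_id(unsigned long x)
{
        return (x >> 24) & 0xF;
}

int main(void)
{
        unsigned long reg = 0x05000000;         /* hypothetical APIC_ID image */

        printf("apic id = %u\n", get_apic_id(reg));     /* 5 */
        return 0;
}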
diff --git a/include/asm-i386/mach-visws/piix4.h b/include/asm-i386/mach-visws/piix4.h
deleted file mode 100644
index 83ea4f46e419..000000000000
--- a/include/asm-i386/mach-visws/piix4.h
+++ /dev/null
@@ -1,107 +0,0 @@
1#ifndef __I386_SGI_PIIX_H
2#define __I386_SGI_PIIX_H
3
4/*
5 * PIIX4 as used on SGI Visual Workstations
6 */
7
8#define PIIX_PM_START 0x0F80
9
10#define SIO_GPIO_START 0x0FC0
11
12#define SIO_PM_START 0x0FC8
13
14#define PMBASE PIIX_PM_START
15#define GPIREG0 (PMBASE+0x30)
16#define GPIREG(x) (GPIREG0+((x)/8))
17#define GPIBIT(x) (1 << ((x)%8))
18
19#define PIIX_GPI_BD_ID1 18
20#define PIIX_GPI_BD_ID2 19
21#define PIIX_GPI_BD_ID3 20
22#define PIIX_GPI_BD_ID4 21
23#define PIIX_GPI_BD_REG GPIREG(PIIX_GPI_BD_ID1)
24#define PIIX_GPI_BD_MASK (GPIBIT(PIIX_GPI_BD_ID1) | \
25 GPIBIT(PIIX_GPI_BD_ID2) | \
26 GPIBIT(PIIX_GPI_BD_ID3) | \
27 GPIBIT(PIIX_GPI_BD_ID4) )
28
29#define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8)
30
31#define SIO_INDEX 0x2e
32#define SIO_DATA 0x2f
33
34#define SIO_DEV_SEL 0x7
35#define SIO_DEV_ENB 0x30
36#define SIO_DEV_MSB 0x60
37#define SIO_DEV_LSB 0x61
38
39#define SIO_GP_DEV 0x7
40
41#define SIO_GP_BASE SIO_GPIO_START
42#define SIO_GP_MSB (SIO_GP_BASE>>8)
43#define SIO_GP_LSB (SIO_GP_BASE&0xff)
44
45#define SIO_GP_DATA1 (SIO_GP_BASE+0)
46
47#define SIO_PM_DEV 0x8
48
49#define SIO_PM_BASE SIO_PM_START
50#define SIO_PM_MSB (SIO_PM_BASE>>8)
51#define SIO_PM_LSB (SIO_PM_BASE&0xff)
52#define SIO_PM_INDEX (SIO_PM_BASE+0)
53#define SIO_PM_DATA (SIO_PM_BASE+1)
54
55#define SIO_PM_FER2 0x1
56
57#define SIO_PM_GP_EN 0x80
58
59
60
61/*
62 * This is the dev/reg where generating a config cycle will
63 * result in a PCI special cycle.
64 */
65#define SPECIAL_DEV 0xff
66#define SPECIAL_REG 0x00
67
68/*
69 * PIIX4 needs to see a special cycle with the following data
70 * to be convinced the processor has gone into the stop grant
71 * state. PIIX4 insists on seeing this before it will power
72 * down a system.
73 */
74#define PIIX_SPECIAL_STOP 0x00120002
75
76#define PIIX4_RESET_PORT 0xcf9
77#define PIIX4_RESET_VAL 0x6
78
79#define PMSTS_PORT 0xf80 // 2 bytes PM Status
80#define PMEN_PORT 0xf82 // 2 bytes PM Enable
81#define PMCNTRL_PORT 0xf84 // 2 bytes PM Control
82
83#define PM_SUSPEND_ENABLE 0x2000 // start sequence to suspend state
84
85/*
86 * PMSTS and PMEN I/O bit definitions.
87 * (Bits are the same in both registers)
88 */
89#define PM_STS_RSM (1<<15) // Resume Status
90#define PM_STS_PWRBTNOR (1<<11) // Power Button Override
91#define PM_STS_RTC (1<<10) // RTC status
92#define PM_STS_PWRBTN (1<<8) // Power Button Pressed?
93#define PM_STS_GBL (1<<5) // Global Status
94#define PM_STS_BM (1<<4) // Bus Master Status
95#define PM_STS_TMROF (1<<0) // Timer Overflow Status.
96
97/*
98 * Stop clock GPI register
99 */
100#define PIIX_GPIREG0 (0xf80 + 0x30)
101
102/*
103 * Stop clock GPI bit in GPIREG0
104 */
105#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in
106
107#endif
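GPIREG()/GPIBIT() map a GPI pin number to its byte-wide register and bit within it: register = base + pin/8, bit = 1 << (pin % 8). For the board-ID pins 18-21 that gives register GPIREG0+2 and mask 0x3c, which PIIX_GPI_BD_SHIFT (18 % 8 = 2) then right-justifies. A sketch of the arithmetic (GPIREG0 is taken as the bare 0x30 offset here, not PMBASE+0x30):

#include <stdio.h>

#define GPIREG0         0x30
#define GPIREG(x)       (GPIREG0 + ((x) / 8))
#define GPIBIT(x)       (1 << ((x) % 8))

int main(void)
{
        unsigned int mask = GPIBIT(18) | GPIBIT(19) | GPIBIT(20) | GPIBIT(21);

        printf("board-id register: GPIREG0+%d\n", GPIREG(18) - GPIREG0);
        printf("board-id mask: 0x%02x, shift %d\n", mask, 18 % 8);
        return 0;
}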
diff --git a/include/asm-i386/mach-visws/setup_arch.h b/include/asm-i386/mach-visws/setup_arch.h
deleted file mode 100644
index 33f700ef6831..000000000000
--- a/include/asm-i386/mach-visws/setup_arch.h
+++ /dev/null
@@ -1,8 +0,0 @@
1/* Hook to call BIOS initialisation function */
2
3extern unsigned long sgivwfb_mem_phys;
4extern unsigned long sgivwfb_mem_size;
5
6/* no action for visws */
7
8#define ARCH_SETUP
diff --git a/include/asm-i386/mach-visws/smpboot_hooks.h b/include/asm-i386/mach-visws/smpboot_hooks.h
deleted file mode 100644
index d926471fa359..000000000000
--- a/include/asm-i386/mach-visws/smpboot_hooks.h
+++ /dev/null
@@ -1,24 +0,0 @@
1static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
2{
3 CMOS_WRITE(0xa, 0xf);
4 local_flush_tlb();
5 Dprintk("1.\n");
6 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
7 Dprintk("2.\n");
8 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
9 Dprintk("3.\n");
10}
11
12/* for visws do nothing for any of these */
13
14static inline void smpboot_clear_io_apic_irqs(void)
15{
16}
17
18static inline void smpboot_restore_warm_reset_vector(void)
19{
20}
21
22static inline void smpboot_setup_io_apic(void)
23{
24}
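The warm-reset vector is stored as a real-mode segment:offset pair: TRAMPOLINE_HIGH receives start_eip >> 4 (the segment) and TRAMPOLINE_LOW receives start_eip & 0xf (the offset), so segment * 16 + offset recovers the flat address. A quick check of that split with a hypothetical trampoline address:

#include <stdio.h>

int main(void)
{
        unsigned long start_eip = 0x9f000;      /* hypothetical trampoline */
        unsigned short seg = start_eip >> 4;    /* -> TRAMPOLINE_HIGH */
        unsigned short off = start_eip & 0xf;   /* -> TRAMPOLINE_LOW */

        printf("0x%lx == %04x:%04x (0x%lx)\n", start_eip, seg, off,
               (unsigned long)seg * 16 + off);
        return 0;
}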
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
deleted file mode 100644
index bc2b58926308..000000000000
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
3
4#include <asm/voyager.h>
5#include <asm/i8253.h>
6
7/**
8 * do_timer_interrupt_hook - hook into timer tick
9 *
10 * Call the PIT clock event handler (see asm/i8253.h), then the
11 * Voyager-specific timer work.
12 **/
13static inline void do_timer_interrupt_hook(void)
14{
15 global_clock_event->event_handler(global_clock_event);
16 voyager_timer_interrupt();
17}
18
diff --git a/include/asm-i386/mach-voyager/entry_arch.h b/include/asm-i386/mach-voyager/entry_arch.h
deleted file mode 100644
index 4a1e1e8c10b6..000000000000
--- a/include/asm-i386/mach-voyager/entry_arch.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2002
4 *
5 * Author: James.Bottomley@HansenPartnership.com
6 *
7 * linux/arch/i386/voyager/entry_arch.h
8 *
9 * This file builds the VIC and QIC CPI gates
10 */
11
12/* initialise the voyager interrupt gates
13 *
14 * This uses the macros in irq.h to set up assembly jump gates. The
15 * calls are then redirected to the same routine with smp_ prefixed */
16BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
17BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
18BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
19
20/* do all the QIC interrupts */
21BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
22BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
23BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
24BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
25BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
26
diff --git a/include/asm-i386/mach-voyager/irq_vectors.h b/include/asm-i386/mach-voyager/irq_vectors.h
deleted file mode 100644
index 165421f5821c..000000000000
--- a/include/asm-i386/mach-voyager/irq_vectors.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2002
4 *
5 * Author: James.Bottomley@HansenPartnership.com
6 *
7 * linux/arch/i386/voyager/irq_vectors.h
8 *
9 * This file provides definitions for the VIC and QIC CPIs
10 */
11
12#ifndef _ASM_IRQ_VECTORS_H
13#define _ASM_IRQ_VECTORS_H
14
15/*
16 * IDT vectors usable for external interrupt sources start
17 * at 0x20:
18 */
19#define FIRST_EXTERNAL_VECTOR 0x20
20
21#define SYSCALL_VECTOR 0x80
22
23/*
24 * Vectors 0x20-0x2f are used for ISA interrupts.
25 */
26
27/* These define the CPIs we use in linux */
28#define VIC_CPI_LEVEL0 0
29#define VIC_CPI_LEVEL1 1
30/* now the fake CPIs */
31#define VIC_TIMER_CPI 2
32#define VIC_INVALIDATE_CPI 3
33#define VIC_RESCHEDULE_CPI 4
34#define VIC_ENABLE_IRQ_CPI 5
35#define VIC_CALL_FUNCTION_CPI 6
36
37/* Now the QIC CPIs: Since we don't need the two initial levels,
38 * these are 1 less than the VIC CPIs */
39#define QIC_CPI_OFFSET 1
40#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
41#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
42#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
43#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
44#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
45
46#define VIC_START_FAKE_CPI VIC_TIMER_CPI
47#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
48
49/* this is the SYS_INT CPI. */
50#define VIC_SYS_INT 8
51#define VIC_CMN_INT 15
52
53/* This is the boot CPI for alternate processors. It gets overwritten
54 * by the above once the system has activated all available processors */
55#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
56#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
57
58#define NR_VECTORS 256
59#define NR_IRQS 224
60#define NR_IRQ_VECTORS NR_IRQS
61
62#define FPU_IRQ 13
63
64#define FIRST_VM86_IRQ 3
65#define LAST_VM86_IRQ 15
66#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
67
68#ifndef __ASSEMBLY__
69extern asmlinkage void vic_cpi_interrupt(void);
70extern asmlinkage void vic_sys_interrupt(void);
71extern asmlinkage void vic_cmn_interrupt(void);
72extern asmlinkage void qic_timer_interrupt(void);
73extern asmlinkage void qic_invalidate_interrupt(void);
74extern asmlinkage void qic_reschedule_interrupt(void);
75extern asmlinkage void qic_enable_irq_interrupt(void);
76extern asmlinkage void qic_call_function_interrupt(void);
77#endif /* !__ASSEMBLY__ */
78
79#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-i386/mach-voyager/setup_arch.h b/include/asm-i386/mach-voyager/setup_arch.h
deleted file mode 100644
index 84d01ad33459..000000000000
--- a/include/asm-i386/mach-voyager/setup_arch.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#include <asm/voyager.h>
2#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *)(PARAM+0x40))
3
4/* Hook to call BIOS initialisation function */
5
6/* for voyager, pass the voyager BIOS/SUS info area to the detection
7 * routines */
8
9#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO);
10
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
deleted file mode 100644
index a4b0aa3320e6..000000000000
--- a/include/asm-i386/math_emu.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef _I386_MATH_EMU_H
2#define _I386_MATH_EMU_H
3
4#include <asm/sigcontext.h>
5
6int restore_i387_soft(void *s387, struct _fpstate __user *buf);
7int save_i387_soft(void *s387, struct _fpstate __user *buf);
8
9/* This structure matches the layout of the data saved to the stack
10 following a device-not-present interrupt, part of which is saved
11 automatically by the 80386/80486.
12 */
13struct info {
14 long ___orig_eip;
15 long ___ebx;
16 long ___ecx;
17 long ___edx;
18 long ___esi;
19 long ___edi;
20 long ___ebp;
21 long ___eax;
22 long ___ds;
23 long ___es;
24 long ___fs;
25 long ___orig_eax;
26 long ___eip;
27 long ___cs;
28 long ___eflags;
29 long ___esp;
30 long ___ss;
31 long ___vm86_es; /* This and the following only in vm86 mode */
32 long ___vm86_ds;
33 long ___vm86_fs;
34 long ___vm86_gs;
35};
36#endif
diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h
deleted file mode 100644
index 1613b42eaf58..000000000000
--- a/include/asm-i386/mc146818rtc.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef _ASM_MC146818RTC_H
5#define _ASM_MC146818RTC_H
6
7#include <asm/io.h>
8#include <asm/system.h>
9#include <asm/processor.h>
10#include <linux/mc146818rtc.h>
11
12#ifndef RTC_PORT
13#define RTC_PORT(x) (0x70 + (x))
14#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
15#endif
16
17#ifdef __HAVE_ARCH_CMPXCHG
18/*
19 * This lock provides NMI-safe access to the CMOS/RTC registers. It has some
20 * special properties. It is owned by a CPU and stores the index register
21 * currently being accessed (if owned). The idea here is that it works
22 * like a normal lock (normally). However, in an NMI, the NMI code will
23 * first check to see if its CPU owns the lock, meaning that the NMI
24 * interrupted a read/write of the device. If it does, it goes ahead
25 * and performs the access and then restores the index register. If it does
26 * not, it locks normally.
27 *
28 * Note that since we are working with NMIs, we need this lock even in
29 * a non-SMP machine just to mark that the lock is owned.
30 *
31 * This only works with compare-and-swap. There is no other way to
32 * atomically claim the lock and set the owner.
33 */
34#include <linux/smp.h>
35extern volatile unsigned long cmos_lock;
36
37/*
38 * All of these below must be called with interrupts off, preempt
39 * disabled, etc.
40 */
41
42static inline void lock_cmos(unsigned char reg)
43{
44 unsigned long new;
45 new = ((smp_processor_id()+1) << 8) | reg;
46 for (;;) {
47 if (cmos_lock) {
48 cpu_relax();
49 continue;
50 }
51 if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0)
52 return;
53 }
54}
55
56static inline void unlock_cmos(void)
57{
58 cmos_lock = 0;
59}
60static inline int do_i_have_lock_cmos(void)
61{
62 return (cmos_lock >> 8) == (smp_processor_id()+1);
63}
64static inline unsigned char current_lock_cmos_reg(void)
65{
66 return cmos_lock & 0xff;
67}
68#define lock_cmos_prefix(reg) \
69 do { \
70 unsigned long cmos_flags; \
71 local_irq_save(cmos_flags); \
72 lock_cmos(reg)
73#define lock_cmos_suffix(reg) \
74 unlock_cmos(); \
75 local_irq_restore(cmos_flags); \
76 } while (0)
77#else
78#define lock_cmos_prefix(reg) do {} while (0)
79#define lock_cmos_suffix(reg) do {} while (0)
80#define lock_cmos(reg)
81#define unlock_cmos()
82#define do_i_have_lock_cmos() 0
83#define current_lock_cmos_reg() 0
84#endif
85
86/*
87 * The yet supported machines all access the RTC index register via
88 * an ISA port access but the way to access the date register differs ...
89 */
90#define CMOS_READ(addr) rtc_cmos_read(addr)
91#define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr)
92unsigned char rtc_cmos_read(unsigned char addr);
93void rtc_cmos_write(unsigned char val, unsigned char addr);
94
95#define RTC_IRQ 8
96
97#endif /* _ASM_MC146818RTC_H */
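The cmos_lock word encodes both the owner ((cpu + 1) << 8, so zero means unlocked) and the index register in use (low byte), and is claimed with one compare-and-swap — that single atomic word is what lets an NMI decide whether its own CPU was mid-access. A user-space sketch of the same idea using C11 atomics (single-threaded demo; the CPU number is faked):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong cmos_lock;

static void lock_cmos(unsigned cpu, unsigned char reg)
{
        unsigned long expected, new = ((cpu + 1) << 8) | reg;

        for (;;) {
                expected = 0;
                if (atomic_compare_exchange_weak(&cmos_lock, &expected, new))
                        return;         /* owner and reg claimed in one word */
        }
}

static int do_i_have_lock_cmos(unsigned cpu)
{
        return (atomic_load(&cmos_lock) >> 8) == cpu + 1;
}

int main(void)
{
        lock_cmos(0, 0x0a);                     /* CPU 0 takes index reg 0x0a */
        printf("owner match: %d, reg: 0x%02lx\n",
               do_i_have_lock_cmos(0), atomic_load(&cmos_lock) & 0xff);
        atomic_store(&cmos_lock, 0);            /* unlock_cmos() */
        return 0;
}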
diff --git a/include/asm-i386/mca.h b/include/asm-i386/mca.h
deleted file mode 100644
index 09adf2eac4dc..000000000000
--- a/include/asm-i386/mca.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Platform specific MCA defines */
4#ifndef _ASM_MCA_H
5#define _ASM_MCA_H
6
7/* Maximal number of MCA slots - actually, some machines have fewer, but
8 * they all have a sufficient number of POS registers to cover 8.
9 */
10#define MCA_MAX_SLOT_NR 8
11
12/* Most machines have only one MCA bus. The only multiple-bus machines
13 * I know of have at most two */
14#define MAX_MCA_BUSSES 2
15
16#define MCA_PRIMARY_BUS 0
17#define MCA_SECONDARY_BUS 1
18
19/* Dummy slot numbers on primary MCA for integrated functions */
20#define MCA_INTEGSCSI (MCA_MAX_SLOT_NR)
21#define MCA_INTEGVIDEO (MCA_MAX_SLOT_NR+1)
22#define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2)
23
24/* Dummy POS values for integrated functions */
25#define MCA_DUMMY_POS_START 0x10000
26#define MCA_INTEGSCSI_POS (MCA_DUMMY_POS_START+1)
27#define MCA_INTEGVIDEO_POS (MCA_DUMMY_POS_START+2)
28#define MCA_MOTHERBOARD_POS (MCA_DUMMY_POS_START+3)
29
30/* MCA registers */
31
32#define MCA_MOTHERBOARD_SETUP_REG 0x94
33#define MCA_ADAPTER_SETUP_REG 0x96
34#define MCA_POS_REG(n) (0x100+(n))
35
36#define MCA_ENABLED 0x01 /* POS 2, set if adapter enabled */
37
38/* Max number of adapters, including both slots and various integrated
39 * things.
40 */
41#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3)
42
43#endif
diff --git a/include/asm-i386/mca_dma.h b/include/asm-i386/mca_dma.h
deleted file mode 100644
index fbb1f3b71279..000000000000
--- a/include/asm-i386/mca_dma.h
+++ /dev/null
@@ -1,201 +0,0 @@
1#ifndef MCA_DMA_H
2#define MCA_DMA_H
3
4#include <asm/io.h>
5#include <linux/ioport.h>
6
7/*
8 * Microchannel specific DMA stuff. DMA on an MCA machine is fairly similar to
9 * standard PC dma, but it certainly has its quirks. DMA register addresses
10 * are in a different place and there are some added functions. Most of this
11 * should be pretty obvious on inspection. Note that the user must divide
12 * count by 2 when using 16-bit dma; that is not handled by these functions.
13 *
14 * Ramen Noodles are yummy.
15 *
16 * 1998 Tymm Twillman <tymm@computer.org>
17 */
18
19/*
20 * Registers that are used by the DMA controller; FN is the function register
21 * (tell the controller what to do) and EXE is the execution register (how
22 * to do it)
23 */
24
25#define MCA_DMA_REG_FN 0x18
26#define MCA_DMA_REG_EXE 0x1A
27
28/*
29 * Functions that the DMA controller can do
30 */
31
32#define MCA_DMA_FN_SET_IO 0x00
33#define MCA_DMA_FN_SET_ADDR 0x20
34#define MCA_DMA_FN_GET_ADDR 0x30
35#define MCA_DMA_FN_SET_COUNT 0x40
36#define MCA_DMA_FN_GET_COUNT 0x50
37#define MCA_DMA_FN_GET_STATUS 0x60
38#define MCA_DMA_FN_SET_MODE 0x70
39#define MCA_DMA_FN_SET_ARBUS 0x80
40#define MCA_DMA_FN_MASK 0x90
41#define MCA_DMA_FN_RESET_MASK 0xA0
42#define MCA_DMA_FN_MASTER_CLEAR 0xD0
43
44/*
45 * Modes (used with MCA_DMA_FN_SET_MODE in the function register)
46 *
47 * Note that the MODE_READ is read from memory (write to device), and
48 * MODE_WRITE is vice-versa.
49 */
50
51#define MCA_DMA_MODE_XFER 0x04 /* read by default */
52#define MCA_DMA_MODE_READ 0x04 /* same as XFER */
53#define MCA_DMA_MODE_WRITE 0x08 /* OR with MODE_XFER to use */
54#define MCA_DMA_MODE_IO 0x01 /* DMA from IO register */
55#define MCA_DMA_MODE_16 0x40 /* 16 bit xfers */
56
57
58/**
59 * mca_enable_dma - enable DMA on a channel
60 * @dmanr: DMA channel
61 *
62 * Enable the MCA bus DMA on a channel. This can be called from
63 * IRQ context.
64 */
65
66static __inline__ void mca_enable_dma(unsigned int dmanr)
67{
68 outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
69}
70
71/**
72 * mca_disable_dma - disable DMA on a channel
73 * @dmanr: DMA channel
74 *
75 * Disable the MCA bus DMA on a channel. This can be called from
76 * IRQ context.
77 */
78
79static __inline__ void mca_disable_dma(unsigned int dmanr)
80{
81 outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
82}
83
84/**
85 * mca_set_dma_addr - load a 24bit DMA address
86 * @dmanr: DMA channel
87 * @a: 24bit bus address
88 *
89 * Load the address register in the DMA controller. This has a 24bit
90 * limitation (16MB).
91 */
92
93static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
94{
95 outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
96 outb(a & 0xff, MCA_DMA_REG_EXE);
97 outb((a >> 8) & 0xff, MCA_DMA_REG_EXE);
98 outb((a >> 16) & 0xff, MCA_DMA_REG_EXE);
99}
100
101/**
102 * mca_get_dma_addr - read a 24bit DMA address
103 * @dmanr: DMA channel
104 *
105 * Read the address register in the DMA controller. This has a 24bit
106 * limitation (16MB). The return value is a bus address.
107 */
108
109static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
110{
111 unsigned int addr;
112
113 outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
114 addr = inb(MCA_DMA_REG_EXE);
115 addr |= inb(MCA_DMA_REG_EXE) << 8;
116 addr |= inb(MCA_DMA_REG_EXE) << 16;
117
118 return addr;
119}
120
121/**
122 * mca_set_dma_count - load a 16bit transfer count
123 * @dmanr: DMA channel
124 * @count: count
125 *
126 * Set the DMA count for this channel. This can be up to 64Kbytes.
127 * Setting a count of zero will not do what you expect.
128 */
129
130static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
131{
132 count--; /* transfers one more than count -- correct for this */
133
134 outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN);
135 outb(count & 0xff, MCA_DMA_REG_EXE);
136 outb((count >> 8) & 0xff, MCA_DMA_REG_EXE);
137}
138
139/**
140 * mca_get_dma_residue - get the remaining bytes to transfer
141 * @dmanr: DMA channel
142 *
143 * This function returns the number of bytes left to transfer
144 * on this DMA channel.
145 */
146
147static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
148{
149 unsigned short count;
150
151 outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN);
152 count = 1 + inb(MCA_DMA_REG_EXE);
153 count += inb(MCA_DMA_REG_EXE) << 8;
154
155 return count;
156}
157
158/**
159 * mca_set_dma_io - set the port for an I/O transfer
160 * @dmanr: DMA channel
161 * @io_addr: an I/O port number
162 *
163 * Unlike the ISA bus DMA controllers, DMA on the MCA bus can transfer
164 * with an I/O port target.
165 */
166
167static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
168{
169 /*
170 * DMA from a port address -- set the io address
171 */
172
173 outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
174 outb(io_addr & 0xff, MCA_DMA_REG_EXE);
175 outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE);
176}
177
178/**
179 * mca_set_dma_mode - set the DMA mode
180 * @dmanr: DMA channel
181 * @mode: mode to set
182 *
183 * The DMA controller supports several modes. The mode values you can
184 * set are:
185 *
186 * %MCA_DMA_MODE_READ when reading from the DMA device.
187 *
188 * %MCA_DMA_MODE_WRITE when writing to the DMA device.
189 *
190 * %MCA_DMA_MODE_IO to do DMA to or from an I/O port.
191 *
192 * %MCA_DMA_MODE_16 to do 16bit transfers.
193 */
194
195static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
196{
197 outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
198 outb(mode, MCA_DMA_REG_EXE);
199}
200
201#endif /* MCA_DMA_H */
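Putting the pieces together, a transfer is programmed as: set the mode, load the 24-bit address low byte first, load count-1 (the controller moves count+1 transfers), then unmask the channel. A hedged sketch of that register sequence with outb() replaced by a logging stub (channel, address and size are made up; for 16-bit mode the byte count is halved by the caller, as the header notes):

#include <stdio.h>

#define MCA_DMA_REG_FN          0x18
#define MCA_DMA_REG_EXE         0x1A
#define MCA_DMA_FN_SET_ADDR     0x20
#define MCA_DMA_FN_SET_COUNT    0x40
#define MCA_DMA_FN_SET_MODE     0x70
#define MCA_DMA_FN_RESET_MASK   0xA0
#define MCA_DMA_MODE_READ       0x04
#define MCA_DMA_MODE_16         0x40

static void outb(unsigned char v, unsigned short port)  /* logging stub */
{
        printf("outb(0x%02x, 0x%02x)\n", v, port);
}

int main(void)
{
        unsigned int chan = 5, addr = 0x123456;
        unsigned int xfers = 512 / 2;           /* 512 bytes, 16-bit mode */

        outb(MCA_DMA_FN_SET_MODE | chan, MCA_DMA_REG_FN);
        outb(MCA_DMA_MODE_READ | MCA_DMA_MODE_16, MCA_DMA_REG_EXE);
        outb(MCA_DMA_FN_SET_ADDR | chan, MCA_DMA_REG_FN);
        outb(addr & 0xff, MCA_DMA_REG_EXE);     /* low byte first */
        outb((addr >> 8) & 0xff, MCA_DMA_REG_EXE);
        outb((addr >> 16) & 0xff, MCA_DMA_REG_EXE);
        outb(MCA_DMA_FN_SET_COUNT | chan, MCA_DMA_REG_FN);
        outb((xfers - 1) & 0xff, MCA_DMA_REG_EXE);
        outb(((xfers - 1) >> 8) & 0xff, MCA_DMA_REG_EXE);
        outb(MCA_DMA_FN_RESET_MASK | chan, MCA_DMA_REG_FN);     /* go */
        return 0;
}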
diff --git a/include/asm-i386/mce.h b/include/asm-i386/mce.h
deleted file mode 100644
index d56d89742e8f..000000000000
--- a/include/asm-i386/mce.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifdef CONFIG_X86_MCE
2extern void mcheck_init(struct cpuinfo_x86 *c);
3#else
4#define mcheck_init(c) do {} while(0)
5#endif
6
7extern int mce_disabled;
8
9extern void stop_mce(void);
10extern void restart_mce(void);
11
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h
deleted file mode 100644
index 8fd9d7ab7faf..000000000000
--- a/include/asm-i386/mman.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef __I386_MMAN_H__
2#define __I386_MMAN_H__
3
4#include <asm-generic/mman.h>
5
6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
8#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
9#define MAP_LOCKED 0x2000 /* pages are locked */
10#define MAP_NORESERVE 0x4000 /* don't check for reservations */
11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
13
14#define MCL_CURRENT 1 /* lock all current mappings */
15#define MCL_FUTURE 2 /* lock all future mappings */
16
17#endif /* __I386_MMAN_H__ */
diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h
deleted file mode 100644
index 8358dd3df7aa..000000000000
--- a/include/asm-i386/mmu.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef __i386_MMU_H
2#define __i386_MMU_H
3
4#include <asm/semaphore.h>
5/*
6 * The i386 doesn't have an MMU context, but
7 * we put the segment information here.
8 *
9 * cpu_vm_mask is used to optimize ldt flushing.
10 */
11typedef struct {
12 int size;
13 struct semaphore sem;
14 void *ldt;
15 void *vdso;
16} mm_context_t;
17
18#endif
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
deleted file mode 100644
index 7eb0b0b1fb3c..000000000000
--- a/include/asm-i386/mmu_context.h
+++ /dev/null
@@ -1,86 +0,0 @@
1#ifndef __I386_SCHED_H
2#define __I386_SCHED_H
3
4#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/paravirt.h>
9#ifndef CONFIG_PARAVIRT
10#include <asm-generic/mm_hooks.h>
11
12static inline void paravirt_activate_mm(struct mm_struct *prev,
13 struct mm_struct *next)
14{
15}
16#endif /* !CONFIG_PARAVIRT */
17
18
19/*
20 * Used for LDT copy/destruction.
21 */
22int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
23void destroy_context(struct mm_struct *mm);
24
25
26static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
27{
28#ifdef CONFIG_SMP
29 unsigned cpu = smp_processor_id();
30 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
31 per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
32#endif
33}
34
35void leave_mm(unsigned long cpu);
36
37static inline void switch_mm(struct mm_struct *prev,
38 struct mm_struct *next,
39 struct task_struct *tsk)
40{
41 int cpu = smp_processor_id();
42
43 if (likely(prev != next)) {
44 /* stop flush ipis for the previous mm */
45 cpu_clear(cpu, prev->cpu_vm_mask);
46#ifdef CONFIG_SMP
47 per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
48 per_cpu(cpu_tlbstate, cpu).active_mm = next;
49#endif
50 cpu_set(cpu, next->cpu_vm_mask);
51
52 /* Re-load page tables */
53 load_cr3(next->pgd);
54
55 /*
56 * load the LDT, if the LDT is different:
57 */
58 if (unlikely(prev->context.ldt != next->context.ldt))
59 load_LDT_nolock(&next->context);
60 }
61#ifdef CONFIG_SMP
62 else {
63 per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
64 BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
65
66 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
67 /* We were in lazy tlb mode and leave_mm disabled
68 * tlb flush IPI delivery. We must reload %cr3.
69 */
70 load_cr3(next->pgd);
71 load_LDT_nolock(&next->context);
72 }
73 }
74#endif
75}
76
77#define deactivate_mm(tsk, mm) \
78 asm("movl %0,%%gs": :"r" (0));
79
80#define activate_mm(prev, next) \
81 do { \
82 paravirt_activate_mm(prev, next); \
83 switch_mm((prev),(next),NULL); \
84 } while(0);
85
86#endif
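switch_mm() above distinguishes three cases: a real mm switch (reload %cr3 and possibly the LDT), a same-mm switch where this CPU stayed TLBSTATE_OK (nothing to do), and a same-mm switch where the CPU had gone lazy and leave_mm() cleared its bit in cpu_vm_mask, disabling flush IPIs — in which case %cr3 must be reloaded to pick up the missed flushes. A toy single-CPU model of that decision, hardware bits stubbed out:

#include <stdio.h>

struct mm { int id; };

/* this CPU's bit in mm->cpu_vm_mask; leave_mm() clears it when lazy */
static int cpu_in_vm_mask = 1;

static void load_cr3(struct mm *mm)
{
        printf("reload cr3 for mm %d\n", mm->id);
}

static void switch_mm(struct mm *prev, struct mm *next)
{
        if (prev != next) {
                cpu_in_vm_mask = 1;
                load_cr3(next);         /* real switch: always reload */
        } else if (!cpu_in_vm_mask) {
                cpu_in_vm_mask = 1;     /* lazy: we missed flush IPIs */
                load_cr3(next);
        }                               /* else same mm, nothing to do */
}

int main(void)
{
        struct mm a = { 1 }, b = { 2 };

        switch_mm(&a, &b);      /* reloads cr3 */
        switch_mm(&b, &b);      /* silent */
        cpu_in_vm_mask = 0;     /* simulate leave_mm() in lazy mode */
        switch_mm(&b, &b);      /* reloads cr3 again */
        return 0;
}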
diff --git a/include/asm-i386/mmx.h b/include/asm-i386/mmx.h
deleted file mode 100644
index 46b71da99869..000000000000
--- a/include/asm-i386/mmx.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _ASM_MMX_H
2#define _ASM_MMX_H
3
4/*
5 * MMX 3Dnow! helper operations
6 */
7
8#include <linux/types.h>
9
10extern void *_mmx_memcpy(void *to, const void *from, size_t size);
11extern void mmx_clear_page(void *page);
12extern void mmx_copy_page(void *to, void *from);
13
14#endif
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
deleted file mode 100644
index 118e9812778f..000000000000
--- a/include/asm-i386/mmzone.h
+++ /dev/null
@@ -1,145 +0,0 @@
1/*
2 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
3 *
4 */
5
6#ifndef _ASM_MMZONE_H_
7#define _ASM_MMZONE_H_
8
9#include <asm/smp.h>
10
11#ifdef CONFIG_NUMA
12extern struct pglist_data *node_data[];
13#define NODE_DATA(nid) (node_data[nid])
14
15#ifdef CONFIG_X86_NUMAQ
16 #include <asm/numaq.h>
17#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */
18 #include <asm/srat.h>
19#endif
20
21extern int get_memcfg_numa_flat(void );
22/*
23 * This allows the kernel to be compiled for any one NUMA
24 * architecture and still fall back to the flat function if
25 * that architecture's probe fails.
26 */
27static inline void get_memcfg_numa(void)
28{
29#ifdef CONFIG_X86_NUMAQ
30 if (get_memcfg_numaq())
31 return;
32#elif defined(CONFIG_ACPI_SRAT)
33 if (get_memcfg_from_srat())
34 return;
35#endif
36
37 get_memcfg_numa_flat();
38}
39
40extern int early_pfn_to_nid(unsigned long pfn);
41extern void numa_kva_reserve(void);
42
43#else /* !CONFIG_NUMA */
44
45#define get_memcfg_numa get_memcfg_numa_flat
46#define get_zholes_size(n) (0)
47
48static inline void numa_kva_reserve(void)
49{
50}
51#endif /* CONFIG_NUMA */
52
53#ifdef CONFIG_DISCONTIGMEM
54
55/*
56 * generic node memory support, the following assumptions apply:
57 *
57 * 1) memory comes in 256MB contiguous chunks which are either present or not
58 * 2) we will not have more than 64GB in total
59 *
60 * for now assume that 64GB is the maximum amount of RAM for the whole system
61 * 64GB / 4096 bytes/page = 16777216 pages
63 */
64#define MAX_NR_PAGES 16777216
65#define MAX_ELEMENTS 256
66#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
67
68extern s8 physnode_map[];
69
70static inline int pfn_to_nid(unsigned long pfn)
71{
72#ifdef CONFIG_NUMA
73 return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]);
74#else
75 return 0;
76#endif
77}
78
79/*
80 * Following are macros that each NUMA implementation must define.
81 */
82
83#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
84#define node_end_pfn(nid) \
85({ \
86 pg_data_t *__pgdat = NODE_DATA(nid); \
87 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
88})
89
90/* XXX: FIXME -- wli */
91#define kern_addr_valid(kaddr) (0)
92
93#ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */
94#define pfn_valid(pfn) ((pfn) < num_physpages)
95#else
96static inline int pfn_valid(int pfn)
97{
98 int nid = pfn_to_nid(pfn);
99
100 if (nid >= 0)
101 return (pfn < node_end_pfn(nid));
102 return 0;
103}
104#endif /* CONFIG_X86_NUMAQ */
105
106#endif /* CONFIG_DISCONTIGMEM */
107
108#ifdef CONFIG_NEED_MULTIPLE_NODES
109
110/*
111 * Following are macros that are specific to this NUMA platform.
112 */
113#define reserve_bootmem(addr, size) \
114 reserve_bootmem_node(NODE_DATA(0), (addr), (size))
115#define alloc_bootmem(x) \
116 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
117#define alloc_bootmem_low(x) \
118 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
119#define alloc_bootmem_pages(x) \
120 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
121#define alloc_bootmem_low_pages(x) \
122 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
123#define alloc_bootmem_node(pgdat, x) \
124({ \
125 struct pglist_data __maybe_unused \
126 *__alloc_bootmem_node__pgdat = (pgdat); \
127 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
128 __pa(MAX_DMA_ADDRESS)); \
129})
130#define alloc_bootmem_pages_node(pgdat, x) \
131({ \
132 struct pglist_data __maybe_unused \
133 *__alloc_bootmem_node__pgdat = (pgdat); \
134 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
135 __pa(MAX_DMA_ADDRESS)) \
136})
137#define alloc_bootmem_low_pages_node(pgdat, x) \
138({ \
139 struct pglist_data __maybe_unused \
140 *__alloc_bootmem_node__pgdat = (pgdat); \
141 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
142})
143#endif /* CONFIG_NEED_MULTIPLE_NODES */
144
145#endif /* _ASM_MMZONE_H_ */
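physnode_map carves the 64GB maximum into 256 elements of 65536 pages each (16777216 / 256), i.e. one signed byte of node number per 256MB chunk, and pfn_to_nid() is a single divide and table lookup. A self-contained sketch with a hypothetical two-node layout:

#include <stdio.h>

#define MAX_NR_PAGES            16777216        /* 64GB / 4096 */
#define MAX_ELEMENTS            256
#define PAGES_PER_ELEMENT       (MAX_NR_PAGES / MAX_ELEMENTS)   /* 256MB */

static signed char physnode_map[MAX_ELEMENTS] = {
        /* hypothetical: first 512MB on node 0, next 512MB on node 1 */
        [0] = 0, [1] = 0, [2] = 1, [3] = 1,
};

static int pfn_to_nid(unsigned long pfn)
{
        return physnode_map[pfn / PAGES_PER_ELEMENT];
}

int main(void)
{
        printf("pfn 0x10000 -> node %d\n", pfn_to_nid(0x10000)); /* node 0 */
        printf("pfn 0x30000 -> node %d\n", pfn_to_nid(0x30000)); /* node 1 */
        return 0;
}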
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
deleted file mode 100644
index 7e5fda6c3976..000000000000
--- a/include/asm-i386/module.h
+++ /dev/null
@@ -1,75 +0,0 @@
1#ifndef _ASM_I386_MODULE_H
2#define _ASM_I386_MODULE_H
3
4/* x86 is simple */
5struct mod_arch_specific
6{
7};
8
9#define Elf_Shdr Elf32_Shdr
10#define Elf_Sym Elf32_Sym
11#define Elf_Ehdr Elf32_Ehdr
12
13#ifdef CONFIG_M386
14#define MODULE_PROC_FAMILY "386 "
15#elif defined CONFIG_M486
16#define MODULE_PROC_FAMILY "486 "
17#elif defined CONFIG_M586
18#define MODULE_PROC_FAMILY "586 "
19#elif defined CONFIG_M586TSC
20#define MODULE_PROC_FAMILY "586TSC "
21#elif defined CONFIG_M586MMX
22#define MODULE_PROC_FAMILY "586MMX "
23#elif defined CONFIG_MCORE2
24#define MODULE_PROC_FAMILY "CORE2 "
25#elif defined CONFIG_M686
26#define MODULE_PROC_FAMILY "686 "
27#elif defined CONFIG_MPENTIUMII
28#define MODULE_PROC_FAMILY "PENTIUMII "
29#elif defined CONFIG_MPENTIUMIII
30#define MODULE_PROC_FAMILY "PENTIUMIII "
31#elif defined CONFIG_MPENTIUMM
32#define MODULE_PROC_FAMILY "PENTIUMM "
33#elif defined CONFIG_MPENTIUM4
34#define MODULE_PROC_FAMILY "PENTIUM4 "
35#elif defined CONFIG_MK6
36#define MODULE_PROC_FAMILY "K6 "
37#elif defined CONFIG_MK7
38#define MODULE_PROC_FAMILY "K7 "
39#elif defined CONFIG_MK8
40#define MODULE_PROC_FAMILY "K8 "
41#elif defined CONFIG_X86_ELAN
42#define MODULE_PROC_FAMILY "ELAN "
43#elif defined CONFIG_MCRUSOE
44#define MODULE_PROC_FAMILY "CRUSOE "
45#elif defined CONFIG_MEFFICEON
46#define MODULE_PROC_FAMILY "EFFICEON "
47#elif defined CONFIG_MWINCHIPC6
48#define MODULE_PROC_FAMILY "WINCHIPC6 "
49#elif defined CONFIG_MWINCHIP2
50#define MODULE_PROC_FAMILY "WINCHIP2 "
51#elif defined CONFIG_MWINCHIP3D
52#define MODULE_PROC_FAMILY "WINCHIP3D "
53#elif defined CONFIG_MCYRIXIII
54#define MODULE_PROC_FAMILY "CYRIXIII "
55#elif defined CONFIG_MVIAC3_2
56#define MODULE_PROC_FAMILY "VIAC3-2 "
57#elif defined CONFIG_MVIAC7
58#define MODULE_PROC_FAMILY "VIAC7 "
59#elif defined CONFIG_MGEODEGX1
60#define MODULE_PROC_FAMILY "GEODEGX1 "
61#elif defined CONFIG_MGEODE_LX
62#define MODULE_PROC_FAMILY "GEODE "
63#else
64#error unknown processor family
65#endif
66
67#ifdef CONFIG_4KSTACKS
68#define MODULE_STACKSIZE "4KSTACKS "
69#else
70#define MODULE_STACKSIZE ""
71#endif
72
73#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
74
75#endif /* _ASM_I386_MODULE_H */
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
deleted file mode 100644
index f21349399d14..000000000000
--- a/include/asm-i386/mpspec.h
+++ /dev/null
@@ -1,81 +0,0 @@
1#ifndef __ASM_MPSPEC_H
2#define __ASM_MPSPEC_H
3
4#include <linux/cpumask.h>
5#include <asm/mpspec_def.h>
6#include <mach_mpspec.h>
7
8extern int mp_bus_id_to_type [MAX_MP_BUSSES];
9extern int mp_bus_id_to_node [MAX_MP_BUSSES];
10extern int mp_bus_id_to_local [MAX_MP_BUSSES];
11extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
12extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
13
14extern unsigned int def_to_bigsmp;
15extern unsigned int boot_cpu_physical_apicid;
16extern int smp_found_config;
17extern void find_smp_config (void);
18extern void get_smp_config (void);
19extern int nr_ioapics;
20extern int apic_version [MAX_APICS];
21extern int mp_irq_entries;
22extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
23extern int mpc_default_type;
24extern unsigned long mp_lapic_addr;
25extern int pic_mode;
26
27#ifdef CONFIG_ACPI
28extern void mp_register_lapic (u8 id, u8 enabled);
29extern void mp_register_lapic_address (u64 address);
30extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
31extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
32extern void mp_config_acpi_legacy_irqs (void);
33extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
34#endif /* CONFIG_ACPI */
35
36#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
37
38struct physid_mask
39{
40 unsigned long mask[PHYSID_ARRAY_SIZE];
41};
42
43typedef struct physid_mask physid_mask_t;
44
45#define physid_set(physid, map) set_bit(physid, (map).mask)
46#define physid_clear(physid, map) clear_bit(physid, (map).mask)
47#define physid_isset(physid, map) test_bit(physid, (map).mask)
48#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
49
50#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
51#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
52#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
53#define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS)
54#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
55#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
56#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
57#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
58#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
59#define physids_coerce(map) ((map).mask[0])
60
61#define physids_promote(physids) \
62 ({ \
63 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
64 __physid_mask.mask[0] = physids; \
65 __physid_mask; \
66 })
67
68#define physid_mask_of_physid(physid) \
69 ({ \
70 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
71 physid_set(physid, __physid_mask); \
72 __physid_mask; \
73 })
74
75#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
76#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
77
78extern physid_mask_t phys_cpu_present_map;
79
80#endif
81
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
deleted file mode 100644
index 13bafb16e7af..000000000000
--- a/include/asm-i386/mpspec_def.h
+++ /dev/null
@@ -1,186 +0,0 @@
1#ifndef __ASM_MPSPEC_DEF_H
2#define __ASM_MPSPEC_DEF_H
3
4/*
5 * Structure definitions for SMP machines following the
6 * Intel Multiprocessing Specification 1.1 and 1.4.
7 */
8
9/*
10 * This tag identifies where the SMP configuration
11 * information is.
12 */
13
14#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
15
16#define MAX_MPC_ENTRY 1024
17#define MAX_APICS 256
18
19struct intel_mp_floating
20{
21 char mpf_signature[4]; /* "_MP_" */
22 unsigned long mpf_physptr; /* Configuration table address */
23 unsigned char mpf_length; /* Our length (paragraphs) */
24 unsigned char mpf_specification;/* Specification version */
25 unsigned char mpf_checksum; /* Checksum (makes sum 0) */
26 unsigned char mpf_feature1; /* Standard or configuration ? */
27 unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
28 unsigned char mpf_feature3; /* Unused (0) */
29 unsigned char mpf_feature4; /* Unused (0) */
30 unsigned char mpf_feature5; /* Unused (0) */
31};
32
33struct mp_config_table
34{
35 char mpc_signature[4];
36#define MPC_SIGNATURE "PCMP"
37 unsigned short mpc_length; /* Size of table */
38 char mpc_spec; /* 0x01 */
39 char mpc_checksum;
40 char mpc_oem[8];
41 char mpc_productid[12];
42 unsigned long mpc_oemptr; /* 0 if not present */
43 unsigned short mpc_oemsize; /* 0 if not present */
44 unsigned short mpc_oemcount;
45 unsigned long mpc_lapic; /* APIC address */
46 unsigned long reserved;
47};
48
49/* Followed by entries */
50
51#define MP_PROCESSOR 0
52#define MP_BUS 1
53#define MP_IOAPIC 2
54#define MP_INTSRC 3
55#define MP_LINTSRC 4
56#define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */
57
58struct mpc_config_processor
59{
60 unsigned char mpc_type;
61 unsigned char mpc_apicid; /* Local APIC number */
62 unsigned char mpc_apicver; /* Its version */
63 unsigned char mpc_cpuflag;
64#define CPU_ENABLED 1 /* Processor is available */
65#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
66 unsigned long mpc_cpufeature;
67#define CPU_STEPPING_MASK 0x0F
68#define CPU_MODEL_MASK 0xF0
69#define CPU_FAMILY_MASK 0xF00
70 unsigned long mpc_featureflag; /* CPUID feature value */
71 unsigned long mpc_reserved[2];
72};
73
74struct mpc_config_bus
75{
76 unsigned char mpc_type;
77 unsigned char mpc_busid;
78 unsigned char mpc_bustype[6];
79};
80
81/* List of Bus Type string values, Intel MP Spec. */
82#define BUSTYPE_EISA "EISA"
83#define BUSTYPE_ISA "ISA"
84#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
85#define BUSTYPE_MCA "MCA"
86#define BUSTYPE_VL "VL" /* Local bus */
87#define BUSTYPE_PCI "PCI"
88#define BUSTYPE_PCMCIA "PCMCIA"
89#define BUSTYPE_CBUS "CBUS"
90#define BUSTYPE_CBUSII "CBUSII"
91#define BUSTYPE_FUTURE "FUTURE"
92#define BUSTYPE_MBI "MBI"
93#define BUSTYPE_MBII "MBII"
94#define BUSTYPE_MPI "MPI"
95#define BUSTYPE_MPSA "MPSA"
96#define BUSTYPE_NUBUS "NUBUS"
97#define BUSTYPE_TC "TC"
98#define BUSTYPE_VME "VME"
99#define BUSTYPE_XPRESS "XPRESS"
100
101struct mpc_config_ioapic
102{
103 unsigned char mpc_type;
104 unsigned char mpc_apicid;
105 unsigned char mpc_apicver;
106 unsigned char mpc_flags;
107#define MPC_APIC_USABLE 0x01
108 unsigned long mpc_apicaddr;
109};
110
111struct mpc_config_intsrc
112{
113 unsigned char mpc_type;
114 unsigned char mpc_irqtype;
115 unsigned short mpc_irqflag;
116 unsigned char mpc_srcbus;
117 unsigned char mpc_srcbusirq;
118 unsigned char mpc_dstapic;
119 unsigned char mpc_dstirq;
120};
121
122enum mp_irq_source_types {
123 mp_INT = 0,
124 mp_NMI = 1,
125 mp_SMI = 2,
126 mp_ExtINT = 3
127};
128
129#define MP_IRQDIR_DEFAULT 0
130#define MP_IRQDIR_HIGH 1
131#define MP_IRQDIR_LOW 3
132
133
134struct mpc_config_lintsrc
135{
136 unsigned char mpc_type;
137 unsigned char mpc_irqtype;
138 unsigned short mpc_irqflag;
139 unsigned char mpc_srcbusid;
140 unsigned char mpc_srcbusirq;
141 unsigned char mpc_destapic;
142#define MP_APIC_ALL 0xFF
143 unsigned char mpc_destapiclint;
144};
145
146struct mp_config_oemtable
147{
148 char oem_signature[4];
149#define MPC_OEM_SIGNATURE "_OEM"
150 unsigned short oem_length; /* Size of table */
151 char oem_rev; /* 0x01 */
152 char oem_checksum;
153 char mpc_oem[8];
154};
155
156struct mpc_config_translation
157{
158 unsigned char mpc_type;
159 unsigned char trans_len;
160 unsigned char trans_type;
161 unsigned char trans_quad;
162 unsigned char trans_global;
163 unsigned char trans_local;
164 unsigned short trans_reserved;
165};
166
167/*
168 * Default configurations
169 *
170 * 1 2 CPU ISA 82489DX
171 * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
172 * 3 2 CPU EISA 82489DX
173 * 4 2 CPU MCA 82489DX
174 * 5 2 CPU ISA+PCI
175 * 6 2 CPU EISA+PCI
176 * 7 2 CPU MCA+PCI
177 */
178
179enum mp_bustype {
180 MP_BUS_ISA = 1,
181 MP_BUS_EISA,
182 MP_BUS_PCI,
183 MP_BUS_MCA,
184};
185#endif
186
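The floating pointer structure is located by scanning low memory on 16-byte boundaries for the "_MP_" signature and verifying that all its bytes sum to zero modulo 256 (mpf_checksum is chosen to make that so). A self-contained sketch of the validation against a fabricated in-memory structure (mpf_physptr is declared as a fixed 32-bit type here so the 16-byte layout also holds on 64-bit hosts):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct intel_mp_floating {
        char mpf_signature[4];          /* "_MP_" */
        uint32_t mpf_physptr;
        unsigned char mpf_length;       /* in 16-byte paragraphs */
        unsigned char mpf_specification;
        unsigned char mpf_checksum;     /* makes the byte sum 0 */
        unsigned char mpf_feature1, mpf_feature2;
        unsigned char mpf_feature3, mpf_feature4, mpf_feature5;
};

static int mpf_valid(const struct intel_mp_floating *mpf)
{
        const unsigned char *p = (const unsigned char *)mpf;
        unsigned char sum = 0;
        unsigned int i;

        if (memcmp(mpf->mpf_signature, "_MP_", 4))
                return 0;
        for (i = 0; i < sizeof(*mpf); i++)
                sum += p[i];
        return sum == 0;
}

int main(void)
{
        struct intel_mp_floating mpf = { {'_','M','P','_'}, 0, 1, 4, 0 };
        unsigned char *p = (unsigned char *)&mpf, sum = 0;
        unsigned int i;

        for (i = 0; i < sizeof(mpf); i++)
                sum += p[i];
        mpf.mpf_checksum = (unsigned char)-sum;  /* force the sum to 0 */
        printf("valid: %d\n", mpf_valid(&mpf));  /* 1 */
        return 0;
}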
diff --git a/include/asm-i386/msgbuf.h b/include/asm-i386/msgbuf.h
deleted file mode 100644
index b8d659c157ae..000000000000
--- a/include/asm-i386/msgbuf.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _I386_MSGBUF_H
2#define _I386_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct msqid64_ds {
15 struct ipc64_perm msg_perm;
16 __kernel_time_t msg_stime; /* last msgsnd time */
17 unsigned long __unused1;
18 __kernel_time_t msg_rtime; /* last msgrcv time */
19 unsigned long __unused2;
20 __kernel_time_t msg_ctime; /* last change time */
21 unsigned long __unused3;
22 unsigned long msg_cbytes; /* current number of bytes on queue */
23 unsigned long msg_qnum; /* number of messages in queue */
24 unsigned long msg_qbytes; /* max number of bytes on queue */
25 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
26 __kernel_pid_t msg_lrpid; /* last receive pid */
27 unsigned long __unused4;
28 unsigned long __unused5;
29};
30
31#endif /* _I386_MSGBUF_H */
diff --git a/include/asm-i386/msidef.h b/include/asm-i386/msidef.h
deleted file mode 100644
index 5b8acddb70fb..000000000000
--- a/include/asm-i386/msidef.h
+++ /dev/null
@@ -1,47 +0,0 @@
1#ifndef ASM_MSIDEF_H
2#define ASM_MSIDEF_H
3
4/*
5 * Constants for Intel APIC based MSI messages.
6 */
7
8/*
9 * Shifts for MSI data
10 */
11
12#define MSI_DATA_VECTOR_SHIFT 0
13#define MSI_DATA_VECTOR_MASK 0x000000ff
14#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
15
16#define MSI_DATA_DELIVERY_MODE_SHIFT 8
17#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
18#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
19
20#define MSI_DATA_LEVEL_SHIFT 14
21#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
22#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
23
24#define MSI_DATA_TRIGGER_SHIFT 15
25#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
26#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
27
28/*
29 * Shift/mask fields for msi address
30 */
31
32#define MSI_ADDR_BASE_HI 0
33#define MSI_ADDR_BASE_LO 0xfee00000
34
35#define MSI_ADDR_DEST_MODE_SHIFT 2
36#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
37#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
38
39#define MSI_ADDR_REDIRECTION_SHIFT 3
40#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */
41#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */
42
43#define MSI_ADDR_DEST_ID_SHIFT 12
44#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
45#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
46
47#endif /* ASM_MSIDEF_H */
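An MSI message is nothing more than these fields OR'd together: the address word carries the 0xfee00000 base, the destination ID in bits 12-19 and the mode/redirection bits, while the data word carries the vector and delivery mode. A sketch composing a fixed, edge-triggered message to physical APIC ID 3 with a hypothetical vector 0x31:

#include <stdio.h>

#define MSI_ADDR_BASE_LO                0xfee00000
#define MSI_ADDR_DEST_MODE_PHYSICAL     (0 << 2)
#define MSI_ADDR_REDIRECTION_CPU        (0 << 3)
#define MSI_ADDR_DEST_ID(dest)          (((dest) << 12) & 0x00ffff0)
#define MSI_DATA_DELIVERY_FIXED         (0 << 8)
#define MSI_DATA_TRIGGER_EDGE           (0 << 15)
#define MSI_DATA_VECTOR(v)              ((v) & 0x000000ff)

int main(void)
{
        unsigned int addr = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_MODE_PHYSICAL |
                            MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID(3);
        unsigned int data = MSI_DATA_DELIVERY_FIXED | MSI_DATA_TRIGGER_EDGE |
                            MSI_DATA_VECTOR(0x31);

        printf("msi addr 0x%08x data 0x%08x\n", addr, data);
        /* prints 0xfee03000 / 0x00000031 */
        return 0;
}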
diff --git a/include/asm-i386/msr-index.h b/include/asm-i386/msr-index.h
deleted file mode 100644
index a02eb2991349..000000000000
--- a/include/asm-i386/msr-index.h
+++ /dev/null
@@ -1,278 +0,0 @@
1#ifndef __ASM_MSR_INDEX_H
2#define __ASM_MSR_INDEX_H
3
4/* CPU model specific register (MSR) numbers */
5
6/* x86-64 specific MSRs */
7#define MSR_EFER 0xc0000080 /* extended feature register */
8#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
9#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
10#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
11#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
12#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
13#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
14#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
15
16/* EFER bits: */
17#define _EFER_SCE 0 /* SYSCALL/SYSRET */
18#define _EFER_LME 8 /* Long mode enable */
19#define _EFER_LMA 10 /* Long mode active (read-only) */
20#define _EFER_NX 11 /* No execute enable */
21
22#define EFER_SCE (1<<_EFER_SCE)
23#define EFER_LME (1<<_EFER_LME)
24#define EFER_LMA (1<<_EFER_LMA)
25#define EFER_NX (1<<_EFER_NX)
26
27/* Intel MSRs. Some also available on other CPUs */
28#define MSR_IA32_PERFCTR0 0x000000c1
29#define MSR_IA32_PERFCTR1 0x000000c2
30#define MSR_FSB_FREQ 0x000000cd
31
32#define MSR_MTRRcap 0x000000fe
33#define MSR_IA32_BBL_CR_CTL 0x00000119
34
35#define MSR_IA32_SYSENTER_CS 0x00000174
36#define MSR_IA32_SYSENTER_ESP 0x00000175
37#define MSR_IA32_SYSENTER_EIP 0x00000176
38
39#define MSR_IA32_MCG_CAP 0x00000179
40#define MSR_IA32_MCG_STATUS 0x0000017a
41#define MSR_IA32_MCG_CTL 0x0000017b
42
43#define MSR_IA32_PEBS_ENABLE 0x000003f1
44#define MSR_IA32_DS_AREA 0x00000600
45#define MSR_IA32_PERF_CAPABILITIES 0x00000345
46
47#define MSR_MTRRfix64K_00000 0x00000250
48#define MSR_MTRRfix16K_80000 0x00000258
49#define MSR_MTRRfix16K_A0000 0x00000259
50#define MSR_MTRRfix4K_C0000 0x00000268
51#define MSR_MTRRfix4K_C8000 0x00000269
52#define MSR_MTRRfix4K_D0000 0x0000026a
53#define MSR_MTRRfix4K_D8000 0x0000026b
54#define MSR_MTRRfix4K_E0000 0x0000026c
55#define MSR_MTRRfix4K_E8000 0x0000026d
56#define MSR_MTRRfix4K_F0000 0x0000026e
57#define MSR_MTRRfix4K_F8000 0x0000026f
58#define MSR_MTRRdefType 0x000002ff
59
60#define MSR_IA32_DEBUGCTLMSR 0x000001d9
61#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
62#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
63#define MSR_IA32_LASTINTFROMIP 0x000001dd
64#define MSR_IA32_LASTINTTOIP 0x000001de
65
66#define MSR_IA32_MC0_CTL 0x00000400
67#define MSR_IA32_MC0_STATUS 0x00000401
68#define MSR_IA32_MC0_ADDR 0x00000402
69#define MSR_IA32_MC0_MISC 0x00000403
70
71#define MSR_P6_PERFCTR0 0x000000c1
72#define MSR_P6_PERFCTR1 0x000000c2
73#define MSR_P6_EVNTSEL0 0x00000186
74#define MSR_P6_EVNTSEL1 0x00000187
75
76/* K7/K8 MSRs. Not complete. See the architecture manual for a more
77 complete list. */
78#define MSR_K7_EVNTSEL0 0xc0010000
79#define MSR_K7_PERFCTR0 0xc0010004
80#define MSR_K7_EVNTSEL1 0xc0010001
81#define MSR_K7_PERFCTR1 0xc0010005
82#define MSR_K7_EVNTSEL2 0xc0010002
83#define MSR_K7_PERFCTR2 0xc0010006
84#define MSR_K7_EVNTSEL3 0xc0010003
85#define MSR_K7_PERFCTR3 0xc0010007
86#define MSR_K8_TOP_MEM1 0xc001001a
87#define MSR_K7_CLK_CTL 0xc001001b
88#define MSR_K8_TOP_MEM2 0xc001001d
89#define MSR_K8_SYSCFG 0xc0010010
90
91#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
92#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
93#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
94
95#define MSR_K7_HWCR 0xc0010015
96#define MSR_K8_HWCR 0xc0010015
97#define MSR_K7_FID_VID_CTL 0xc0010041
98#define MSR_K7_FID_VID_STATUS 0xc0010042
99#define MSR_K8_ENABLE_C1E 0xc0010055
100
101/* K6 MSRs */
102#define MSR_K6_EFER 0xc0000080
103#define MSR_K6_STAR 0xc0000081
104#define MSR_K6_WHCR 0xc0000082
105#define MSR_K6_UWCCR 0xc0000085
106#define MSR_K6_EPMR 0xc0000086
107#define MSR_K6_PSOR 0xc0000087
108#define MSR_K6_PFIR 0xc0000088
109
110/* Centaur-Hauls/IDT defined MSRs. */
111#define MSR_IDT_FCR1 0x00000107
112#define MSR_IDT_FCR2 0x00000108
113#define MSR_IDT_FCR3 0x00000109
114#define MSR_IDT_FCR4 0x0000010a
115
116#define MSR_IDT_MCR0 0x00000110
117#define MSR_IDT_MCR1 0x00000111
118#define MSR_IDT_MCR2 0x00000112
119#define MSR_IDT_MCR3 0x00000113
120#define MSR_IDT_MCR4 0x00000114
121#define MSR_IDT_MCR5 0x00000115
122#define MSR_IDT_MCR6 0x00000116
123#define MSR_IDT_MCR7 0x00000117
124#define MSR_IDT_MCR_CTRL 0x00000120
125
126/* VIA Cyrix defined MSRs*/
127#define MSR_VIA_FCR 0x00001107
128#define MSR_VIA_LONGHAUL 0x0000110a
129#define MSR_VIA_RNG 0x0000110b
130#define MSR_VIA_BCR2 0x00001147
131
132/* Transmeta defined MSRs */
133#define MSR_TMTA_LONGRUN_CTRL 0x80868010
134#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
135#define MSR_TMTA_LRTI_READOUT 0x80868018
136#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
137
138/* Intel defined MSRs. */
139#define MSR_IA32_P5_MC_ADDR 0x00000000
140#define MSR_IA32_P5_MC_TYPE 0x00000001
141#define MSR_IA32_TSC 0x00000010
142#define MSR_IA32_PLATFORM_ID 0x00000017
143#define MSR_IA32_EBL_CR_POWERON 0x0000002a
144
145#define MSR_IA32_APICBASE 0x0000001b
146#define MSR_IA32_APICBASE_BSP (1<<8)
147#define MSR_IA32_APICBASE_ENABLE (1<<11)
148#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
149
150#define MSR_IA32_UCODE_WRITE 0x00000079
151#define MSR_IA32_UCODE_REV 0x0000008b
152
153#define MSR_IA32_PERF_STATUS 0x00000198
154#define MSR_IA32_PERF_CTL 0x00000199
155
156#define MSR_IA32_MPERF 0x000000e7
157#define MSR_IA32_APERF 0x000000e8
158
159#define MSR_IA32_THERM_CONTROL 0x0000019a
160#define MSR_IA32_THERM_INTERRUPT 0x0000019b
161#define MSR_IA32_THERM_STATUS 0x0000019c
162#define MSR_IA32_MISC_ENABLE 0x000001a0
163
164/* Intel Model 6 */
165#define MSR_P6_EVNTSEL0 0x00000186
166#define MSR_P6_EVNTSEL1 0x00000187
167
168/* P4/Xeon+ specific */
169#define MSR_IA32_MCG_EAX 0x00000180
170#define MSR_IA32_MCG_EBX 0x00000181
171#define MSR_IA32_MCG_ECX 0x00000182
172#define MSR_IA32_MCG_EDX 0x00000183
173#define MSR_IA32_MCG_ESI 0x00000184
174#define MSR_IA32_MCG_EDI 0x00000185
175#define MSR_IA32_MCG_EBP 0x00000186
176#define MSR_IA32_MCG_ESP 0x00000187
177#define MSR_IA32_MCG_EFLAGS 0x00000188
178#define MSR_IA32_MCG_EIP 0x00000189
179#define MSR_IA32_MCG_RESERVED 0x0000018a
180
181/* Pentium IV performance counter MSRs */
182#define MSR_P4_BPU_PERFCTR0 0x00000300
183#define MSR_P4_BPU_PERFCTR1 0x00000301
184#define MSR_P4_BPU_PERFCTR2 0x00000302
185#define MSR_P4_BPU_PERFCTR3 0x00000303
186#define MSR_P4_MS_PERFCTR0 0x00000304
187#define MSR_P4_MS_PERFCTR1 0x00000305
188#define MSR_P4_MS_PERFCTR2 0x00000306
189#define MSR_P4_MS_PERFCTR3 0x00000307
190#define MSR_P4_FLAME_PERFCTR0 0x00000308
191#define MSR_P4_FLAME_PERFCTR1 0x00000309
192#define MSR_P4_FLAME_PERFCTR2 0x0000030a
193#define MSR_P4_FLAME_PERFCTR3 0x0000030b
194#define MSR_P4_IQ_PERFCTR0 0x0000030c
195#define MSR_P4_IQ_PERFCTR1 0x0000030d
196#define MSR_P4_IQ_PERFCTR2 0x0000030e
197#define MSR_P4_IQ_PERFCTR3 0x0000030f
198#define MSR_P4_IQ_PERFCTR4 0x00000310
199#define MSR_P4_IQ_PERFCTR5 0x00000311
200#define MSR_P4_BPU_CCCR0 0x00000360
201#define MSR_P4_BPU_CCCR1 0x00000361
202#define MSR_P4_BPU_CCCR2 0x00000362
203#define MSR_P4_BPU_CCCR3 0x00000363
204#define MSR_P4_MS_CCCR0 0x00000364
205#define MSR_P4_MS_CCCR1 0x00000365
206#define MSR_P4_MS_CCCR2 0x00000366
207#define MSR_P4_MS_CCCR3 0x00000367
208#define MSR_P4_FLAME_CCCR0 0x00000368
209#define MSR_P4_FLAME_CCCR1 0x00000369
210#define MSR_P4_FLAME_CCCR2 0x0000036a
211#define MSR_P4_FLAME_CCCR3 0x0000036b
212#define MSR_P4_IQ_CCCR0 0x0000036c
213#define MSR_P4_IQ_CCCR1 0x0000036d
214#define MSR_P4_IQ_CCCR2 0x0000036e
215#define MSR_P4_IQ_CCCR3 0x0000036f
216#define MSR_P4_IQ_CCCR4 0x00000370
217#define MSR_P4_IQ_CCCR5 0x00000371
218#define MSR_P4_ALF_ESCR0 0x000003ca
219#define MSR_P4_ALF_ESCR1 0x000003cb
220#define MSR_P4_BPU_ESCR0 0x000003b2
221#define MSR_P4_BPU_ESCR1 0x000003b3
222#define MSR_P4_BSU_ESCR0 0x000003a0
223#define MSR_P4_BSU_ESCR1 0x000003a1
224#define MSR_P4_CRU_ESCR0 0x000003b8
225#define MSR_P4_CRU_ESCR1 0x000003b9
226#define MSR_P4_CRU_ESCR2 0x000003cc
227#define MSR_P4_CRU_ESCR3 0x000003cd
228#define MSR_P4_CRU_ESCR4 0x000003e0
229#define MSR_P4_CRU_ESCR5 0x000003e1
230#define MSR_P4_DAC_ESCR0 0x000003a8
231#define MSR_P4_DAC_ESCR1 0x000003a9
232#define MSR_P4_FIRM_ESCR0 0x000003a4
233#define MSR_P4_FIRM_ESCR1 0x000003a5
234#define MSR_P4_FLAME_ESCR0 0x000003a6
235#define MSR_P4_FLAME_ESCR1 0x000003a7
236#define MSR_P4_FSB_ESCR0 0x000003a2
237#define MSR_P4_FSB_ESCR1 0x000003a3
238#define MSR_P4_IQ_ESCR0 0x000003ba
239#define MSR_P4_IQ_ESCR1 0x000003bb
240#define MSR_P4_IS_ESCR0 0x000003b4
241#define MSR_P4_IS_ESCR1 0x000003b5
242#define MSR_P4_ITLB_ESCR0 0x000003b6
243#define MSR_P4_ITLB_ESCR1 0x000003b7
244#define MSR_P4_IX_ESCR0 0x000003c8
245#define MSR_P4_IX_ESCR1 0x000003c9
246#define MSR_P4_MOB_ESCR0 0x000003aa
247#define MSR_P4_MOB_ESCR1 0x000003ab
248#define MSR_P4_MS_ESCR0 0x000003c0
249#define MSR_P4_MS_ESCR1 0x000003c1
250#define MSR_P4_PMH_ESCR0 0x000003ac
251#define MSR_P4_PMH_ESCR1 0x000003ad
252#define MSR_P4_RAT_ESCR0 0x000003bc
253#define MSR_P4_RAT_ESCR1 0x000003bd
254#define MSR_P4_SAAT_ESCR0 0x000003ae
255#define MSR_P4_SAAT_ESCR1 0x000003af
256#define MSR_P4_SSU_ESCR0 0x000003be
257#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
258
259#define MSR_P4_TBPU_ESCR0 0x000003c2
260#define MSR_P4_TBPU_ESCR1 0x000003c3
261#define MSR_P4_TC_ESCR0 0x000003c4
262#define MSR_P4_TC_ESCR1 0x000003c5
263#define MSR_P4_U2L_ESCR0 0x000003b0
264#define MSR_P4_U2L_ESCR1 0x000003b1
265
266/* Intel Core-based CPU performance counters */
267#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
268#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
269#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
270#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
271#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
272#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
273#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
274
275/* Geode defined MSRs */
276#define MSR_GEODE_BUSCONT_CONF0 0x00001900
277
278#endif /* __ASM_MSR_INDEX_H */
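
The MSR numbers and bit masks above pair with the accessors defined in msr.h (the next file removed below). A minimal sketch testing the local APIC global-enable bit, assuming the rdmsrl() accessor from that header; the function name is made up:

static int example_lapic_enabled(void)
{
	u64 apicbase;

	rdmsrl(MSR_IA32_APICBASE, apicbase);
	return !!(apicbase & MSR_IA32_APICBASE_ENABLE);
}
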
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
deleted file mode 100644
index df21ea049369..000000000000
--- a/include/asm-i386/msr.h
+++ /dev/null
@@ -1,161 +0,0 @@
1#ifndef __ASM_MSR_H
2#define __ASM_MSR_H
3
4#include <asm/msr-index.h>
5
6#ifdef __KERNEL__
7#ifndef __ASSEMBLY__
8
9#include <asm/errno.h>
10
11static inline unsigned long long native_read_msr(unsigned int msr)
12{
13 unsigned long long val;
14
15 asm volatile("rdmsr" : "=A" (val) : "c" (msr));
16 return val;
17}
18
19static inline unsigned long long native_read_msr_safe(unsigned int msr,
20 int *err)
21{
22 unsigned long long val;
23
24 asm volatile("2: rdmsr ; xorl %0,%0\n"
25 "1:\n\t"
26 ".section .fixup,\"ax\"\n\t"
27 "3: movl %3,%0 ; jmp 1b\n\t"
28 ".previous\n\t"
29 ".section __ex_table,\"a\"\n"
30 " .align 4\n\t"
31 " .long 2b,3b\n\t"
32 ".previous"
33 : "=r" (*err), "=A" (val)
34 : "c" (msr), "i" (-EFAULT));
35
36 return val;
37}
38
39static inline void native_write_msr(unsigned int msr, unsigned long long val)
40{
41 asm volatile("wrmsr" : : "c" (msr), "A"(val));
42}
43
44static inline int native_write_msr_safe(unsigned int msr,
45 unsigned long long val)
46{
47 int err;
48 asm volatile("2: wrmsr ; xorl %0,%0\n"
49 "1:\n\t"
50 ".section .fixup,\"ax\"\n\t"
51 "3: movl %4,%0 ; jmp 1b\n\t"
52 ".previous\n\t"
53 ".section __ex_table,\"a\"\n"
54 " .align 4\n\t"
55 " .long 2b,3b\n\t"
56 ".previous"
57 : "=a" (err)
58 : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
59 "i" (-EFAULT));
60 return err;
61}
62
63static inline unsigned long long native_read_tsc(void)
64{
65 unsigned long long val;
66 asm volatile("rdtsc" : "=A" (val));
67 return val;
68}
69
70static inline unsigned long long native_read_pmc(void)
71{
72 unsigned long long val;
73 asm volatile("rdpmc" : "=A" (val));
74 return val;
75}
76
77#ifdef CONFIG_PARAVIRT
78#include <asm/paravirt.h>
79#else
80#include <linux/errno.h>
81/*
82 * Access to machine-specific registers (available on 586 and better only)
83 * Note: the rd* operations modify the parameters directly (without using
84 * pointer indirection); this allows gcc to optimize better
85 */
86
87#define rdmsr(msr,val1,val2) \
88 do { \
89 u64 __val = native_read_msr(msr); \
90 (val1) = (u32)__val; \
91 (val2) = (u32)(__val >> 32); \
92 } while(0)
93
94static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
95{
96 native_write_msr(__msr, ((u64)__high << 32) | __low);
97}
98
99#define rdmsrl(msr,val) \
100 ((val) = native_read_msr(msr))
101
102#define wrmsrl(msr,val) native_write_msr(msr, val)
103
104/* wrmsr with exception handling */
105static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
106{
107 return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
108}
109
110/* rdmsr with exception handling */
111#define rdmsr_safe(msr,p1,p2) \
112 ({ \
113 int __err; \
114 u64 __val = native_read_msr_safe(msr, &__err); \
115 (*p1) = (u32)__val; \
116 (*p2) = (u32)(__val >> 32); \
117 __err; \
118 })
119
120#define rdtscl(low) \
121 ((low) = (u32)native_read_tsc())
122
123#define rdtscll(val) \
124 ((val) = native_read_tsc())
125
126#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
127
128#define rdpmc(counter,low,high) \
129 do { \
130 u64 _l = native_read_pmc(); \
131 (low) = (u32)_l; \
132 (high) = (u32)(_l >> 32); \
133 } while(0)
134#endif /* !CONFIG_PARAVIRT */
135
136#ifdef CONFIG_SMP
137void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
138void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
139int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
140int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
141#else /* CONFIG_SMP */
142static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
143{
144 rdmsr(msr_no, *l, *h);
145}
146static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
147{
148 wrmsr(msr_no, l, h);
149}
150static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
151{
152 return rdmsr_safe(msr_no, l, h);
153}
154static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
155{
156 return wrmsr_safe(msr_no, l, h);
157}
158#endif /* CONFIG_SMP */
159#endif
160#endif
161#endif /* __ASM_MSR_H */
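
For illustration, the split (low/high) and whole-64-bit accessors above would be used roughly as follows; MSR_IA32_MISC_ENABLE is taken from msr-index.h and the function name is made up:

static u64 example_msr_access(void)
{
	u32 lo, hi;
	u64 val;

	rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);	/* two 32-bit halves */
	rdmsrl(MSR_IA32_MISC_ENABLE, val);	/* same MSR, one 64-bit value */

	/* the _safe variant returns -EFAULT instead of faulting
	 * if the MSR does not exist */
	if (wrmsr_safe(MSR_IA32_MISC_ENABLE, lo, hi))
		return 0;
	return val;
}
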
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
deleted file mode 100644
index 7e9c7ccbdcfe..000000000000
--- a/include/asm-i386/mtrr.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef _LINUX_MTRR_H
24#define _LINUX_MTRR_H
25
26#include <linux/ioctl.h>
27#include <linux/errno.h>
28
29#define MTRR_IOCTL_BASE 'M'
30
31struct mtrr_sentry
32{
33 unsigned long base; /* Base address */
34 unsigned int size; /* Size of region */
35 unsigned int type; /* Type of region */
36};
37
38struct mtrr_gentry
39{
40 unsigned int regnum; /* Register number */
41 unsigned long base; /* Base address */
42 unsigned int size; /* Size of region */
43 unsigned int type; /* Type of region */
44};
45
46/* These are the various ioctls */
47#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
48#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
49#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
50#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
51#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
52#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
53#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
54#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
55#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
56#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
57
58/* These are the region types */
59#define MTRR_TYPE_UNCACHABLE 0
60#define MTRR_TYPE_WRCOMB 1
61/*#define MTRR_TYPE_ 2*/
62/*#define MTRR_TYPE_ 3*/
63#define MTRR_TYPE_WRTHROUGH 4
64#define MTRR_TYPE_WRPROT 5
65#define MTRR_TYPE_WRBACK 6
66#define MTRR_NUM_TYPES 7
67
68#ifdef __KERNEL__
69
70/* The following functions are for use by other drivers */
71# ifdef CONFIG_MTRR
72extern void mtrr_save_fixed_ranges(void *);
73extern void mtrr_save_state(void);
74extern int mtrr_add (unsigned long base, unsigned long size,
75 unsigned int type, char increment);
76extern int mtrr_add_page (unsigned long base, unsigned long size,
77 unsigned int type, char increment);
78extern int mtrr_del (int reg, unsigned long base, unsigned long size);
79extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
80extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
81extern void mtrr_ap_init(void);
82extern void mtrr_bp_init(void);
83# else
84#define mtrr_save_fixed_ranges(arg) do {} while (0)
85#define mtrr_save_state() do {} while (0)
86static __inline__ int mtrr_add (unsigned long base, unsigned long size,
87 unsigned int type, char increment)
88{
89 return -ENODEV;
90}
91static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
92 unsigned int type, char increment)
93{
94 return -ENODEV;
95}
96static __inline__ int mtrr_del (int reg, unsigned long base,
97 unsigned long size)
98{
99 return -ENODEV;
100}
101static __inline__ int mtrr_del_page (int reg, unsigned long base,
102 unsigned long size)
103{
104 return -ENODEV;
105}
106
107static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
108
109#define mtrr_ap_init() do {} while (0)
110#define mtrr_bp_init() do {} while (0)
111# endif
112
113#endif
114
115#endif /* _LINUX_MTRR_H */
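
The sentry structure and MTRRIOC_* numbers above are the user-space ioctl ABI exposed through /proc/mtrr. A minimal user-space sketch requesting a write-combining range (the base and size are hypothetical framebuffer values):

/* Sketch: mark 16MB at 0xe0000000 write-combining via /proc/mtrr.
 * Assumes this header is visible to user space as <asm/mtrr.h>. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct mtrr_sentry sentry = {
		.base = 0xe0000000,	/* example framebuffer base */
		.size = 0x01000000,	/* 16MB */
		.type = MTRR_TYPE_WRCOMB,
	};
	int fd = open("/proc/mtrr", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, MTRRIOC_ADD_ENTRY, &sentry);
	close(fd);
	return 0;
}
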
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
deleted file mode 100644
index 7a17d9e58ad6..000000000000
--- a/include/asm-i386/mutex.h
+++ /dev/null
@@ -1,130 +0,0 @@
1/*
2 * Assembly implementation of the mutex fastpath, based on atomic
3 * decrement/increment.
4 *
5 * started by Ingo Molnar:
6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */
9#ifndef _ASM_MUTEX_H
10#define _ASM_MUTEX_H
11
12#include "asm/alternative.h"
13
14/**
15 * __mutex_fastpath_lock - try to take the lock by moving the count
16 * from 1 to a 0 value
17 * @count: pointer of type atomic_t
18 * @fail_fn: function to call if the original value was not 1
19 *
20 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
21 * wasn't 1 originally. This function MUST leave the value lower than 1
22 * even when the "1" assertion wasn't true.
23 */
24#define __mutex_fastpath_lock(count, fail_fn) \
25do { \
26 unsigned int dummy; \
27 \
28 typecheck(atomic_t *, count); \
29 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
30 \
31 __asm__ __volatile__( \
32 LOCK_PREFIX " decl (%%eax) \n" \
33 " jns 1f \n" \
34 " call "#fail_fn" \n" \
35 "1: \n" \
36 \
37 :"=a" (dummy) \
38 : "a" (count) \
39 : "memory", "ecx", "edx"); \
40} while (0)
41
42
43/**
44 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
45 * from 1 to a 0 value
46 * @count: pointer of type atomic_t
47 * @fail_fn: function to call if the original value was not 1
48 *
49 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
50 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
51 * or anything the slow path function returns
52 */
53static inline int
54__mutex_fastpath_lock_retval(atomic_t *count,
55 int fastcall (*fail_fn)(atomic_t *))
56{
57 if (unlikely(atomic_dec_return(count) < 0))
58 return fail_fn(count);
59 else
60 return 0;
61}
62
63/**
64 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
65 * @count: pointer of type atomic_t
66 * @fail_fn: function to call if the original value was not 0
67 *
68 * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
69 * In the failure case, this function is allowed to either set the value
70 * to 1, or to set it to a value lower than 1.
71 *
72 * If the implementation sets it to a value of lower than 1, the
73 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
74 * to return 0 otherwise.
75 */
76#define __mutex_fastpath_unlock(count, fail_fn) \
77do { \
78 unsigned int dummy; \
79 \
80 typecheck(atomic_t *, count); \
81 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
82 \
83 __asm__ __volatile__( \
84 LOCK_PREFIX " incl (%%eax) \n" \
85 " jg 1f \n" \
86 " call "#fail_fn" \n" \
87 "1: \n" \
88 \
89 :"=a" (dummy) \
90 : "a" (count) \
91 : "memory", "ecx", "edx"); \
92} while (0)
93
94#define __mutex_slowpath_needs_to_unlock() 1
95
96/**
97 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
98 *
99 * @count: pointer of type atomic_t
100 * @fail_fn: fallback function
101 *
102 * Change the count from 1 to a value lower than 1, and return 0 (failure)
103 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
104 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
105 * Additionally, if the value was < 0 originally, this function must not set
106 * it to 0 on failure.
107 */
108static inline int
109__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
110{
111 /*
112 * We have two variants here. The cmpxchg based one is the best one
113 * because it never induces a false contention state. It is included
114 * here because architectures using the inc/dec algorithms over the
115 * xchg ones are much more likely to support cmpxchg natively.
116 *
117 * If not we fall back to the spinlock based variant - that is
118 * just as efficient (and simpler) as a 'destructive' probing of
119 * the mutex state would be.
120 */
121#ifdef __HAVE_ARCH_CMPXCHG
122 if (likely(atomic_cmpxchg(count, 1, 0) == 1))
123 return 1;
124 return 0;
125#else
126 return fail_fn(count);
127#endif
128}
129
130#endif
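
These macros are consumed by the generic mutex code rather than called directly; a sketch of the expected wiring, with __mutex_lock_slowpath standing for the generic slowpath in kernel/mutex.c:

static void example_mutex_lock(struct mutex *lock)
{
	/* count drops 1 -> 0 on the uncontended path; anything else
	 * diverts into the named slowpath function */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}
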
diff --git a/include/asm-i386/namei.h b/include/asm-i386/namei.h
deleted file mode 100644
index 814865088617..000000000000
--- a/include/asm-i386/namei.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $
2 * linux/include/asm-i386/namei.h
3 *
4 * Included from linux/fs/namei.c
5 */
6
7#ifndef __I386_NAMEI_H
8#define __I386_NAMEI_H
9
10/* This dummy routine may be changed to something useful
11 * for /usr/gnemul/ emulation stuff.
12 * Look at asm-sparc/namei.h for details.
13 */
14
15#define __emul_prefix() NULL
16
17#endif /* __I386_NAMEI_H */
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
deleted file mode 100644
index 70a958a8e381..000000000000
--- a/include/asm-i386/nmi.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * linux/include/asm-i386/nmi.h
3 */
4#ifndef ASM_NMI_H
5#define ASM_NMI_H
6
7#include <linux/pm.h>
8#include <asm/irq.h>
9
10#ifdef ARCH_HAS_NMI_WATCHDOG
11
12/**
13 * do_nmi_callback
14 *
15 * Check to see if a callback exists and execute it. Return 1
16 * if a handler exists and the NMI was handled successfully.
17 */
18int do_nmi_callback(struct pt_regs *regs, int cpu);
19
20extern int nmi_watchdog_enabled;
21extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
22extern int avail_to_resrv_perfctr_nmi(unsigned int);
23extern int reserve_perfctr_nmi(unsigned int);
24extern void release_perfctr_nmi(unsigned int);
25extern int reserve_evntsel_nmi(unsigned int);
26extern void release_evntsel_nmi(unsigned int);
27
28extern void setup_apic_nmi_watchdog (void *);
29extern void stop_apic_nmi_watchdog (void *);
30extern void disable_timer_nmi_watchdog(void);
31extern void enable_timer_nmi_watchdog(void);
32extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
33
34extern atomic_t nmi_active;
35extern unsigned int nmi_watchdog;
36#define NMI_DISABLED -1
37#define NMI_NONE 0
38#define NMI_IO_APIC 1
39#define NMI_LOCAL_APIC 2
40#define NMI_INVALID 3
41#define NMI_DEFAULT NMI_DISABLED
42
43struct ctl_table;
44struct file;
45extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
46 void __user *, size_t *, loff_t *);
47extern int unknown_nmi_panic;
48
49void __trigger_all_cpu_backtrace(void);
50#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
51
52#endif
53
54void lapic_watchdog_stop(void);
55int lapic_watchdog_init(unsigned nmi_hz);
56int lapic_wd_event(unsigned nmi_hz);
57unsigned lapic_adjust_nmi_hz(unsigned hz);
58int lapic_watchdog_ok(void);
59void disable_lapic_nmi_watchdog(void);
60void enable_lapic_nmi_watchdog(void);
61void stop_nmi(void);
62void restart_nmi(void);
63
64#endif /* ASM_NMI_H */
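
The reserve/release pairs above arbitrate the performance-counter MSRs between the NMI watchdog and other profiling users. A minimal sketch, with MSR_P6_PERFCTR0 from msr-index.h and the function name made up:

static int example_claim_counter(void)
{
	if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0))
		return -EBUSY;	/* already claimed, e.g. by the watchdog */

	/* ... program and read the counter here ... */

	release_perfctr_nmi(MSR_P6_PERFCTR0);
	return 0;
}
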
diff --git a/include/asm-i386/numa.h b/include/asm-i386/numa.h
deleted file mode 100644
index 96fcb157db1d..000000000000
--- a/include/asm-i386/numa.h
+++ /dev/null
@@ -1,3 +0,0 @@
1
2int pxm_to_nid(int pxm);
3
diff --git a/include/asm-i386/numaq.h b/include/asm-i386/numaq.h
deleted file mode 100644
index 38f710dc37f2..000000000000
--- a/include/asm-i386/numaq.h
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * Written by: Patricia Gaughen, IBM Corporation
3 *
4 * Copyright (C) 2002, IBM Corp.
5 *
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16 * NON INFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * Send feedback to <gone@us.ibm.com>
24 */
25
26#ifndef NUMAQ_H
27#define NUMAQ_H
28
29#ifdef CONFIG_X86_NUMAQ
30
31extern int get_memcfg_numaq(void);
32
33/*
34 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the layout of the quad-private system configuration area.
35 */
36#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */
37
38/*
39 * Communication area for each processor on lynxer-processor tests.
40 *
41 * NOTE: If you change the size of this eachproc structure you need
42 * to change the definition for EACH_QUAD_SIZE.
43 */
44struct eachquadmem {
45 unsigned int priv_mem_start; /* Starting address of this */
46 /* quad's private memory. */
47 /* This is always 0. */
48 /* In MB. */
49 unsigned int priv_mem_size; /* Size of this quad's */
50 /* private memory. */
51 /* In MB. */
52 unsigned int low_shrd_mem_strp_start;/* Starting address of this */
53 /* quad's low shared block */
54 /* (untranslated). */
55 /* In MB. */
56 unsigned int low_shrd_mem_start; /* Starting address of this */
57 /* quad's low shared memory */
58 /* (untranslated). */
59 /* In MB. */
60 unsigned int low_shrd_mem_size; /* Size of this quad's low */
61 /* shared memory. */
62 /* In MB. */
63 unsigned int lmmio_copb_start; /* Starting address of this */
64 /* quad's local memory */
65 /* mapped I/O in the */
66 /* compatibility OPB. */
67 /* In MB. */
68 unsigned int lmmio_copb_size; /* Size of this quad's local */
69 /* memory mapped I/O in the */
70 /* compatibility OPB. */
71 /* In MB. */
72 unsigned int lmmio_nopb_start; /* Starting address of this */
73 /* quad's local memory */
74 /* mapped I/O in the */
75 /* non-compatibility OPB. */
76 /* In MB. */
77 unsigned int lmmio_nopb_size; /* Size of this quad's local */
78 /* memory mapped I/O in the */
79 /* non-compatibility OPB. */
80 /* In MB. */
81 unsigned int io_apic_0_start; /* Starting address of I/O */
82 /* APIC 0. */
83 unsigned int io_apic_0_sz; /* Size I/O APIC 0. */
84 unsigned int io_apic_1_start; /* Starting address of I/O */
85 /* APIC 1. */
86 unsigned int io_apic_1_sz; /* Size I/O APIC 1. */
87 unsigned int hi_shrd_mem_start; /* Starting address of this */
88 /* quad's high shared memory.*/
89 /* In MB. */
90 unsigned int hi_shrd_mem_size; /* Size of this quad's high */
91 /* shared memory. */
92 /* In MB. */
93 unsigned int mps_table_addr; /* Address of this quad's */
94 /* MPS tables from BIOS, */
95 /* in system space.*/
96 unsigned int lcl_MDC_pio_addr; /* Port-I/O address for */
97 /* local access of MDC. */
98 unsigned int rmt_MDC_mmpio_addr; /* MM-Port-I/O address for */
99 /* remote access of MDC. */
100 unsigned int mm_port_io_start; /* Starting address of this */
101 /* quad's memory mapped Port */
102 /* I/O space. */
103 unsigned int mm_port_io_size; /* Size of this quad's memory*/
104 /* mapped Port I/O space. */
105 unsigned int mm_rmt_io_apic_start; /* Starting address of this */
106 /* quad's memory mapped */
107 /* remote I/O APIC space. */
108 unsigned int mm_rmt_io_apic_size; /* Size of this quad's memory*/
109 /* mapped remote I/O APIC */
110 /* space. */
111 unsigned int mm_isa_start; /* Starting address of this */
112 /* quad's memory mapped ISA */
113 /* space (contains MDC */
114 /* memory space). */
115 unsigned int mm_isa_size; /* Size of this quad's memory*/
116 /* mapped ISA space (contains*/
117 /* MDC memory space). */
118 unsigned int rmt_qmi_addr; /* Remote addr to access QMI.*/
119 unsigned int lcl_qmi_addr; /* Local addr to access QMI. */
120};
121
122/*
123 * Note: This structure must NOT be changed unless the multiproc and
124 * OS are changed to reflect the new structure.
125 */
126struct sys_cfg_data {
127 unsigned int quad_id;
128 unsigned int bsp_proc_id; /* Boot Strap Processor in this quad. */
129 unsigned int scd_version; /* Version number of this table. */
130 unsigned int first_quad_id;
131 unsigned int quads_present31_0; /* 1 bit for each quad */
132 unsigned int quads_present63_32; /* 1 bit for each quad */
133 unsigned int config_flags;
134 unsigned int boot_flags;
135 unsigned int csr_start_addr; /* Absolute value (not in MB) */
136 unsigned int csr_size; /* Absolute value (not in MB) */
137 unsigned int lcl_apic_start_addr; /* Absolute value (not in MB) */
138 unsigned int lcl_apic_size; /* Absolute value (not in MB) */
139 unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */
140 unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
141 /* may not be totally populated */
142 unsigned int split_mem_enbl; /* 0 for no low shared memory */
143 unsigned int mmio_sz; /* Size of total system memory mapped I/O */
144 /* (in MB). */
145 unsigned int quad_spin_lock; /* Spare location used for quad */
146 /* bringup. */
147 unsigned int nonzero55; /* For checksumming. */
148 unsigned int nonzeroaa; /* For checksumming. */
149 unsigned int scd_magic_number;
150 unsigned int system_type;
151 unsigned int checksum;
152 /*
153 * memory configuration area for each quad
154 */
155 struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
156};
157
158static inline unsigned long *get_zholes_size(int nid)
159{
160 return NULL;
161}
162#endif /* CONFIG_X86_NUMAQ */
163#endif /* NUMAQ_H */
164
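
The two quads_present words in struct sys_cfg_data form a 64-quad presence bitmap. A sketch of testing one quad ID against it, assuming scd points at the table at SYS_CFG_DATA_PRIV_ADDR:

static int example_quad_present(struct sys_cfg_data *scd, int quad)
{
	if (quad < 32)
		return (scd->quads_present31_0 >> quad) & 1;
	return (scd->quads_present63_32 >> (quad - 32)) & 1;
}
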
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
deleted file mode 100644
index 80ecc66b6d86..000000000000
--- a/include/asm-i386/page.h
+++ /dev/null
@@ -1,206 +0,0 @@
1#ifndef _I386_PAGE_H
2#define _I386_PAGE_H
3
4/* PAGE_SHIFT determines the page size */
5#define PAGE_SHIFT 12
6#define PAGE_SIZE (1UL << PAGE_SHIFT)
7#define PAGE_MASK (~(PAGE_SIZE-1))
8
9#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
10#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
11
12#ifdef __KERNEL__
13#ifndef __ASSEMBLY__
14
15#ifdef CONFIG_X86_USE_3DNOW
16
17#include <asm/mmx.h>
18
19#define clear_page(page) mmx_clear_page((void *)(page))
20#define copy_page(to,from) mmx_copy_page(to,from)
21
22#else
23
24/*
25 * On older X86 processors it's not a win to use MMX here, it seems.
26 * Maybe the K6-III?
27 */
28
29#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
30#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
31
32#endif
33
34#define clear_user_page(page, vaddr, pg) clear_page(page)
35#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
36
37#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
38 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
39#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
40
41/*
42 * These are used to make use of C type-checking..
43 */
44extern int nx_enabled;
45
46#ifdef CONFIG_X86_PAE
47typedef struct { unsigned long pte_low, pte_high; } pte_t;
48typedef struct { unsigned long long pmd; } pmd_t;
49typedef struct { unsigned long long pgd; } pgd_t;
50typedef struct { unsigned long long pgprot; } pgprot_t;
51
52static inline unsigned long long native_pgd_val(pgd_t pgd)
53{
54 return pgd.pgd;
55}
56
57static inline unsigned long long native_pmd_val(pmd_t pmd)
58{
59 return pmd.pmd;
60}
61
62static inline unsigned long long native_pte_val(pte_t pte)
63{
64 return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
65}
66
67static inline pgd_t native_make_pgd(unsigned long long val)
68{
69 return (pgd_t) { val };
70}
71
72static inline pmd_t native_make_pmd(unsigned long long val)
73{
74 return (pmd_t) { val };
75}
76
77static inline pte_t native_make_pte(unsigned long long val)
78{
79 return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
80}
81
82#ifndef CONFIG_PARAVIRT
83#define pmd_val(x) native_pmd_val(x)
84#define __pmd(x) native_make_pmd(x)
85#endif
86
87#define HPAGE_SHIFT 21
88#include <asm-generic/pgtable-nopud.h>
89#else /* !CONFIG_X86_PAE */
90typedef struct { unsigned long pte_low; } pte_t;
91typedef struct { unsigned long pgd; } pgd_t;
92typedef struct { unsigned long pgprot; } pgprot_t;
93#define boot_pte_t pte_t /* or would you rather have a typedef */
94
95static inline unsigned long native_pgd_val(pgd_t pgd)
96{
97 return pgd.pgd;
98}
99
100static inline unsigned long native_pte_val(pte_t pte)
101{
102 return pte.pte_low;
103}
104
105static inline pgd_t native_make_pgd(unsigned long val)
106{
107 return (pgd_t) { val };
108}
109
110static inline pte_t native_make_pte(unsigned long val)
111{
112 return (pte_t) { .pte_low = val };
113}
114
115#define HPAGE_SHIFT 22
116#include <asm-generic/pgtable-nopmd.h>
117#endif /* CONFIG_X86_PAE */
118
119#define PTE_MASK PAGE_MASK
120
121#ifdef CONFIG_HUGETLB_PAGE
122#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
123#define HPAGE_MASK (~(HPAGE_SIZE - 1))
124#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
125#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
126#endif
127
128#define pgprot_val(x) ((x).pgprot)
129#define __pgprot(x) ((pgprot_t) { (x) } )
130
131#ifndef CONFIG_PARAVIRT
132#define pgd_val(x) native_pgd_val(x)
133#define __pgd(x) native_make_pgd(x)
134#define pte_val(x) native_pte_val(x)
135#define __pte(x) native_make_pte(x)
136#endif
137
138#endif /* !__ASSEMBLY__ */
139
140/* to align the pointer to the (next) page boundary */
141#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
142
143/*
144 * This handles the memory map.. We could make this a config
145 * option, but too many people screw it up, and too few need
146 * it.
147 *
148 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
149 * a virtual address space of one gigabyte, which limits the
150 * amount of physical memory you can use to about 950MB.
151 *
152 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
153 * and CONFIG_HIGHMEM64G options in the kernel configuration.
154 */
155
156#ifndef __ASSEMBLY__
157
158struct vm_area_struct;
159
160/*
161 * This much address space is reserved for vmalloc() and iomap()
162 * as well as fixmap mappings.
163 */
164extern unsigned int __VMALLOC_RESERVE;
165
166extern int sysctl_legacy_va_layout;
167
168extern int page_is_ram(unsigned long pagenr);
169
170#endif /* __ASSEMBLY__ */
171
172#ifdef __ASSEMBLY__
173#define __PAGE_OFFSET CONFIG_PAGE_OFFSET
174#else
175#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
176#endif
177
178
179#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
180#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
181#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
182#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
183/* __pa_symbol should be used for C visible symbols.
184 This seems to be the official gcc blessed way to do such arithmetic. */
185#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
186#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
187#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
188#ifdef CONFIG_FLATMEM
189#define pfn_valid(pfn) ((pfn) < max_mapnr)
190#endif /* CONFIG_FLATMEM */
191#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
192
193#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
194
195#define VM_DATA_DEFAULT_FLAGS \
196 (VM_READ | VM_WRITE | \
197 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
198 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
199
200#include <asm-generic/memory_model.h>
201#include <asm-generic/page.h>
202
203#define __HAVE_ARCH_GATE_AREA 1
204#endif /* __KERNEL__ */
205
206#endif /* _I386_PAGE_H */
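
Because the kernel's linear mapping is a constant __PAGE_OFFSET shift, the __pa()/__va() conversions above are plain arithmetic. A worked sketch with the default 0xC0000000 offset (the function name is made up):

static unsigned long example_linear_mapping(void)
{
	void *vaddr = __va(0x00100000);		/* == (void *)0xc0100000 */
	unsigned long paddr = __pa(vaddr);	/* == 0x00100000 again */

	/* PAGE_ALIGN rounds up to the next 4K boundary */
	return PAGE_ALIGN(paddr + 5000);	/* == 0x00102000 */
}
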
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h
deleted file mode 100644
index 21b32466fcdc..000000000000
--- a/include/asm-i386/param.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASMi386_PARAM_H
2#define _ASMi386_PARAM_H
3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
7# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif
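
HZ and USER_HZ differ so that user-visible tick counts stay at 100 per second whatever CONFIG_HZ is; values are rescaled at the kernel/user boundary. A sketch of that rescaling, assuming HZ is a whole multiple of USER_HZ (true for the common CONFIG_HZ choices):

static inline unsigned long example_jiffies_to_user_ticks(unsigned long j)
{
	return j / (HZ / USER_HZ);
}
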
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
deleted file mode 100644
index 9fa3fa9e62d1..000000000000
--- a/include/asm-i386/paravirt.h
+++ /dev/null
@@ -1,1085 +0,0 @@
1#ifndef __ASM_PARAVIRT_H
2#define __ASM_PARAVIRT_H
3/* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */
5
6#ifdef CONFIG_PARAVIRT
7#include <asm/page.h>
8
9/* Bitmask of what can be clobbered: usually at least eax. */
10#define CLBR_NONE 0x0
11#define CLBR_EAX 0x1
12#define CLBR_ECX 0x2
13#define CLBR_EDX 0x4
14#define CLBR_ANY 0x7
15
16#ifndef __ASSEMBLY__
17#include <linux/types.h>
18#include <linux/cpumask.h>
19#include <asm/kmap_types.h>
20
21struct page;
22struct thread_struct;
23struct Xgt_desc_struct;
24struct tss_struct;
25struct mm_struct;
26struct desc_struct;
27
28/* Lazy mode for batching updates / context switch */
29enum paravirt_lazy_mode {
30 PARAVIRT_LAZY_NONE = 0,
31 PARAVIRT_LAZY_MMU = 1,
32 PARAVIRT_LAZY_CPU = 2,
33 PARAVIRT_LAZY_FLUSH = 3,
34};
35
36struct paravirt_ops
37{
38 unsigned int kernel_rpl;
39 int shared_kernel_pmd;
40 int paravirt_enabled;
41 const char *name;
42
43 /*
44 * Patch may replace one of the defined code sequences with arbitrary
45 * code, subject to the same register constraints. This generally
46 * means the code is not free to clobber any registers other than EAX.
47 * The patch function should return the number of bytes of code
48 * generated, as we nop pad the rest in generic code.
49 */
50 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
51 unsigned long addr, unsigned len);
52
53 /* Basic arch-specific setup */
54 void (*arch_setup)(void);
55 char *(*memory_setup)(void);
56 void (*post_allocator_init)(void);
57
58 void (*init_IRQ)(void);
59 void (*time_init)(void);
60
61 /*
62 * Called before/after init_mm pagetable setup. setup_start
63 * may reset %cr3, and may pre-install parts of the pagetable;
64 * pagetable setup is expected to preserve any existing
65 * mapping.
66 */
67 void (*pagetable_setup_start)(pgd_t *pgd_base);
68 void (*pagetable_setup_done)(pgd_t *pgd_base);
69
70 /* Print a banner to identify the environment */
71 void (*banner)(void);
72
73 /* Get and set time of day */
74 unsigned long (*get_wallclock)(void);
75 int (*set_wallclock)(unsigned long);
76
77 /* cpuid emulation, mostly so that caps bits can be disabled */
78 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
79 unsigned int *ecx, unsigned int *edx);
80
81 /* hooks for various privileged instructions */
82 unsigned long (*get_debugreg)(int regno);
83 void (*set_debugreg)(int regno, unsigned long value);
84
85 void (*clts)(void);
86
87 unsigned long (*read_cr0)(void);
88 void (*write_cr0)(unsigned long);
89
90 unsigned long (*read_cr2)(void);
91 void (*write_cr2)(unsigned long);
92
93 unsigned long (*read_cr3)(void);
94 void (*write_cr3)(unsigned long);
95
96 unsigned long (*read_cr4_safe)(void);
97 unsigned long (*read_cr4)(void);
98 void (*write_cr4)(unsigned long);
99
100 /*
101 * Get/set interrupt state. save_fl and restore_fl are only
102 * expected to use X86_EFLAGS_IF; all other bits
103 * returned from save_fl are undefined, and may be ignored by
104 * restore_fl.
105 */
106 unsigned long (*save_fl)(void);
107 void (*restore_fl)(unsigned long);
108 void (*irq_disable)(void);
109 void (*irq_enable)(void);
110 void (*safe_halt)(void);
111 void (*halt)(void);
112
113 void (*wbinvd)(void);
114
115 /* MSR, PMC and TSC operations.
116 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
117 u64 (*read_msr)(unsigned int msr, int *err);
118 int (*write_msr)(unsigned int msr, u64 val);
119
120 u64 (*read_tsc)(void);
121 u64 (*read_pmc)(void);
122 unsigned long long (*sched_clock)(void);
123 unsigned long (*get_cpu_khz)(void);
124
125 /* Segment descriptor handling */
126 void (*load_tr_desc)(void);
127 void (*load_gdt)(const struct Xgt_desc_struct *);
128 void (*load_idt)(const struct Xgt_desc_struct *);
129 void (*store_gdt)(struct Xgt_desc_struct *);
130 void (*store_idt)(struct Xgt_desc_struct *);
131 void (*set_ldt)(const void *desc, unsigned entries);
132 unsigned long (*store_tr)(void);
133 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
134 void (*write_ldt_entry)(struct desc_struct *,
135 int entrynum, u32 low, u32 high);
136 void (*write_gdt_entry)(struct desc_struct *,
137 int entrynum, u32 low, u32 high);
138 void (*write_idt_entry)(struct desc_struct *,
139 int entrynum, u32 low, u32 high);
140 void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
141
142 void (*set_iopl_mask)(unsigned mask);
143 void (*io_delay)(void);
144
145 /*
146 * Hooks for intercepting the creation/use/destruction of an
147 * mm_struct.
148 */
149 void (*activate_mm)(struct mm_struct *prev,
150 struct mm_struct *next);
151 void (*dup_mmap)(struct mm_struct *oldmm,
152 struct mm_struct *mm);
153 void (*exit_mmap)(struct mm_struct *mm);
154
155#ifdef CONFIG_X86_LOCAL_APIC
156 /*
157 * Direct APIC operations, principally for VMI. Ideally
158 * these shouldn't be in this interface.
159 */
160 void (*apic_write)(unsigned long reg, unsigned long v);
161 void (*apic_write_atomic)(unsigned long reg, unsigned long v);
162 unsigned long (*apic_read)(unsigned long reg);
163 void (*setup_boot_clock)(void);
164 void (*setup_secondary_clock)(void);
165
166 void (*startup_ipi_hook)(int phys_apicid,
167 unsigned long start_eip,
168 unsigned long start_esp);
169#endif
170
171 /* TLB operations */
172 void (*flush_tlb_user)(void);
173 void (*flush_tlb_kernel)(void);
174 void (*flush_tlb_single)(unsigned long addr);
175 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
176 unsigned long va);
177
178 /* Hooks for allocating/releasing pagetable pages */
179 void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
180 void (*alloc_pd)(u32 pfn);
181 void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
182 void (*release_pt)(u32 pfn);
183 void (*release_pd)(u32 pfn);
184
185 /* Pagetable manipulation functions */
186 void (*set_pte)(pte_t *ptep, pte_t pteval);
187 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
188 pte_t *ptep, pte_t pteval);
189 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
190 void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
191 void (*pte_update_defer)(struct mm_struct *mm,
192 unsigned long addr, pte_t *ptep);
193
194#ifdef CONFIG_HIGHPTE
195 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
196#endif
197
198#ifdef CONFIG_X86_PAE
199 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
200 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
201 void (*set_pud)(pud_t *pudp, pud_t pudval);
202 void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
203 void (*pmd_clear)(pmd_t *pmdp);
204
205 unsigned long long (*pte_val)(pte_t);
206 unsigned long long (*pmd_val)(pmd_t);
207 unsigned long long (*pgd_val)(pgd_t);
208
209 pte_t (*make_pte)(unsigned long long pte);
210 pmd_t (*make_pmd)(unsigned long long pmd);
211 pgd_t (*make_pgd)(unsigned long long pgd);
212#else
213 unsigned long (*pte_val)(pte_t);
214 unsigned long (*pgd_val)(pgd_t);
215
216 pte_t (*make_pte)(unsigned long pte);
217 pgd_t (*make_pgd)(unsigned long pgd);
218#endif
219
220 /* Set deferred update mode, used for batching operations. */
221 void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
222
223 /* These two are jumped to, not actually called. */
224 void (*irq_enable_sysexit)(void);
225 void (*iret)(void);
226};
227
228extern struct paravirt_ops paravirt_ops;
229
230#define PARAVIRT_PATCH(x) \
231 (offsetof(struct paravirt_ops, x) / sizeof(void *))
232
233#define paravirt_type(type) \
234 [paravirt_typenum] "i" (PARAVIRT_PATCH(type))
235#define paravirt_clobber(clobber) \
236 [paravirt_clobber] "i" (clobber)
237
238/*
239 * Generate some code, and mark it as patchable by the
240 * apply_paravirt() alternate instruction patcher.
241 */
242#define _paravirt_alt(insn_string, type, clobber) \
243 "771:\n\t" insn_string "\n" "772:\n" \
244 ".pushsection .parainstructions,\"a\"\n" \
245 " .long 771b\n" \
246 " .byte " type "\n" \
247 " .byte 772b-771b\n" \
248 " .short " clobber "\n" \
249 ".popsection\n"
250
251/* Generate patchable code, with the default asm parameters. */
252#define paravirt_alt(insn_string) \
253 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
254
255unsigned paravirt_patch_nop(void);
256unsigned paravirt_patch_ignore(unsigned len);
257unsigned paravirt_patch_call(void *insnbuf,
258 const void *target, u16 tgt_clobbers,
259 unsigned long addr, u16 site_clobbers,
260 unsigned len);
261unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
262 unsigned long addr, unsigned len);
263unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
264 unsigned long addr, unsigned len);
265
266unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
267 const char *start, const char *end);
268
269int paravirt_disable_iospace(void);
270
271/*
272 * This generates an indirect call based on the operation type number.
273 * The type number, computed in PARAVIRT_PATCH, is derived from the
274 * offset into the paravirt_ops structure, and can therefore be freely
275 * converted back into a structure offset.
276 */
277#define PARAVIRT_CALL "call *(paravirt_ops+%c[paravirt_typenum]*4);"
278
279/*
280 * These macros are intended to wrap calls into a paravirt_ops
281 * operation, so that they can be later identified and patched at
282 * runtime.
283 *
284 * Normally, a call to a pv_op function is a simple indirect call:
285 * (paravirt_ops.operations)(args...).
286 *
287 * Unfortunately, this is a relatively slow operation for modern CPUs,
288 * because it cannot necessarily determine what the destination
289 * address is. In this case, the address is a runtime constant, so at
290 * the very least we can patch the call to be a simple direct call, or
291 * ideally, patch an inline implementation into the callsite. (Direct
292 * calls are essentially free, because the call and return addresses
293 * are completely predictable.)
294 *
295 * These macros rely on the standard gcc "regparm(3)" calling
296 * convention, in which the first three arguments are placed in %eax,
297 * %edx, %ecx (in that order), and the remaining arguments are placed
298 * on the stack. All caller-save registers (eax,edx,ecx) are expected
299 * to be modified (either clobbered or used for return values).
300 *
301 * The call instruction itself is marked by placing its start address
302 * and size into the .parainstructions section, so that
303 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
304 * appropriate patching under the control of the backend paravirt_ops
305 * implementation.
306 *
307 * Unfortunately there's no way to get gcc to generate the args setup
308 * for the call, and then allow the call itself to be generated by an
309 * inline asm. Because of this, we must do the complete arg setup and
310 * return value handling from within these macros. This is fairly
311 * cumbersome.
312 *
313 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
314 * It could be extended to more arguments, but there would be little
315 * to be gained from that. For each number of arguments, there are
316 * the two VCALL and CALL variants for void and non-void functions.
317 *
318 * When there is a return value, the invoker of the macro must specify
319 * the return type. The macro then uses sizeof() on that type to
320 * determine whether it's a 32- or 64-bit value, and places the return
321 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
322 * 64-bit).
323 *
324 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
325 * in low,high order.
326 *
327 * Small structures are passed and returned in registers. The macro
328 * calling convention can't directly deal with this, so the wrapper
329 * functions must handle it.
330 *
331 * These PVOP_* macros are only defined within this header. This
332 * means that all uses must be wrapped in inline functions. This also
333 * makes sure the incoming and outgoing types are always correct.
334 */
335#define __PVOP_CALL(rettype, op, pre, post, ...) \
336 ({ \
337 rettype __ret; \
338 unsigned long __eax, __edx, __ecx; \
339 if (sizeof(rettype) > sizeof(unsigned long)) { \
340 asm volatile(pre \
341 paravirt_alt(PARAVIRT_CALL) \
342 post \
343 : "=a" (__eax), "=d" (__edx), \
344 "=c" (__ecx) \
345 : paravirt_type(op), \
346 paravirt_clobber(CLBR_ANY), \
347 ##__VA_ARGS__ \
348 : "memory", "cc"); \
349 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
350 } else { \
351 asm volatile(pre \
352 paravirt_alt(PARAVIRT_CALL) \
353 post \
354 : "=a" (__eax), "=d" (__edx), \
355 "=c" (__ecx) \
356 : paravirt_type(op), \
357 paravirt_clobber(CLBR_ANY), \
358 ##__VA_ARGS__ \
359 : "memory", "cc"); \
360 __ret = (rettype)__eax; \
361 } \
362 __ret; \
363 })
364#define __PVOP_VCALL(op, pre, post, ...) \
365 ({ \
366 unsigned long __eax, __edx, __ecx; \
367 asm volatile(pre \
368 paravirt_alt(PARAVIRT_CALL) \
369 post \
370 : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
371 : paravirt_type(op), \
372 paravirt_clobber(CLBR_ANY), \
373 ##__VA_ARGS__ \
374 : "memory", "cc"); \
375 })
376
377#define PVOP_CALL0(rettype, op) \
378 __PVOP_CALL(rettype, op, "", "")
379#define PVOP_VCALL0(op) \
380 __PVOP_VCALL(op, "", "")
381
382#define PVOP_CALL1(rettype, op, arg1) \
383 __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)))
384#define PVOP_VCALL1(op, arg1) \
385 __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)))
386
387#define PVOP_CALL2(rettype, op, arg1, arg2) \
388 __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
389#define PVOP_VCALL2(op, arg1, arg2) \
390 __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
391
392#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
393 __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), \
394 "1"((u32)(arg2)), "2"((u32)(arg3)))
395#define PVOP_VCALL3(op, arg1, arg2, arg3) \
396 __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1"((u32)(arg2)), \
397 "2"((u32)(arg3)))
398
399#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
400 __PVOP_CALL(rettype, op, \
401 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
402 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
403 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
404#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
405 __PVOP_VCALL(op, \
406 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
407 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
408 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
409
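
As the comment block above requires, each PVOP_* use must be wrapped in a typed inline function, exactly as the accessors that follow do. A sketch for a hypothetical one-argument operation (example_op is NOT a real paravirt_ops member; it only illustrates the convention):

static inline unsigned long paravirt_example_op(unsigned long arg)
{
	return PVOP_CALL1(unsigned long, example_op, arg);
}
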
410static inline int paravirt_enabled(void)
411{
412 return paravirt_ops.paravirt_enabled;
413}
414
415static inline void load_esp0(struct tss_struct *tss,
416 struct thread_struct *thread)
417{
418 PVOP_VCALL2(load_esp0, tss, thread);
419}
420
421#define ARCH_SETUP paravirt_ops.arch_setup();
422static inline unsigned long get_wallclock(void)
423{
424 return PVOP_CALL0(unsigned long, get_wallclock);
425}
426
427static inline int set_wallclock(unsigned long nowtime)
428{
429 return PVOP_CALL1(int, set_wallclock, nowtime);
430}
431
432static inline void (*choose_time_init(void))(void)
433{
434 return paravirt_ops.time_init;
435}
436
437/* The paravirtualized CPUID instruction. */
438static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
439 unsigned int *ecx, unsigned int *edx)
440{
441 PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
442}
443
444/*
445 * These special macros can be used to get or set a debugging register
446 */
447static inline unsigned long paravirt_get_debugreg(int reg)
448{
449 return PVOP_CALL1(unsigned long, get_debugreg, reg);
450}
451#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
452static inline void set_debugreg(unsigned long val, int reg)
453{
454 PVOP_VCALL2(set_debugreg, reg, val);
455}
456
457static inline void clts(void)
458{
459 PVOP_VCALL0(clts);
460}
461
462static inline unsigned long read_cr0(void)
463{
464 return PVOP_CALL0(unsigned long, read_cr0);
465}
466
467static inline void write_cr0(unsigned long x)
468{
469 PVOP_VCALL1(write_cr0, x);
470}
471
472static inline unsigned long read_cr2(void)
473{
474 return PVOP_CALL0(unsigned long, read_cr2);
475}
476
477static inline void write_cr2(unsigned long x)
478{
479 PVOP_VCALL1(write_cr2, x);
480}
481
482static inline unsigned long read_cr3(void)
483{
484 return PVOP_CALL0(unsigned long, read_cr3);
485}
486
487static inline void write_cr3(unsigned long x)
488{
489 PVOP_VCALL1(write_cr3, x);
490}
491
492static inline unsigned long read_cr4(void)
493{
494 return PVOP_CALL0(unsigned long, read_cr4);
495}
496static inline unsigned long read_cr4_safe(void)
497{
498 return PVOP_CALL0(unsigned long, read_cr4_safe);
499}
500
501static inline void write_cr4(unsigned long x)
502{
503 PVOP_VCALL1(write_cr4, x);
504}
505
506static inline void raw_safe_halt(void)
507{
508 PVOP_VCALL0(safe_halt);
509}
510
511static inline void halt(void)
512{
513 PVOP_VCALL0(safe_halt);
514}
515
516static inline void wbinvd(void)
517{
518 PVOP_VCALL0(wbinvd);
519}
520
521#define get_kernel_rpl() (paravirt_ops.kernel_rpl)
522
523static inline u64 paravirt_read_msr(unsigned msr, int *err)
524{
525 return PVOP_CALL2(u64, read_msr, msr, err);
526}
527static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
528{
529 return PVOP_CALL3(int, write_msr, msr, low, high);
530}
531
532/* These should all do BUG_ON(_err), but our headers are too tangled. */
533#define rdmsr(msr,val1,val2) do { \
534 int _err; \
535 u64 _l = paravirt_read_msr(msr, &_err); \
536 val1 = (u32)_l; \
537 val2 = _l >> 32; \
538} while(0)
539
540#define wrmsr(msr,val1,val2) do { \
541 paravirt_write_msr(msr, val1, val2); \
542} while(0)
543
544#define rdmsrl(msr,val) do { \
545 int _err; \
546 val = paravirt_read_msr(msr, &_err); \
547} while(0)
548
549#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
550#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
551
552/* rdmsr with exception handling */
553#define rdmsr_safe(msr,a,b) ({ \
554 int _err; \
555 u64 _l = paravirt_read_msr(msr, &_err); \
556 (*a) = (u32)_l; \
557 (*b) = _l >> 32; \
558 _err; })
559
560
561static inline u64 paravirt_read_tsc(void)
562{
563 return PVOP_CALL0(u64, read_tsc);
564}
565
566#define rdtscl(low) do { \
567 u64 _l = paravirt_read_tsc(); \
568 low = (int)_l; \
569} while(0)
570
571#define rdtscll(val) (val = paravirt_read_tsc())
572
573static inline unsigned long long paravirt_sched_clock(void)
574{
575 return PVOP_CALL0(unsigned long long, sched_clock);
576}
577#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
578
579#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) /* MSR 0x10 is the TSC */
580
581static inline unsigned long long paravirt_read_pmc(int counter)
582{
583 return PVOP_CALL1(u64, read_pmc, counter);
584}
585
586#define rdpmc(counter,low,high) do { \
587 u64 _l = paravirt_read_pmc(counter); \
588 low = (u32)_l; \
589 high = _l >> 32; \
590} while(0)
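/*
 * Like rdmsr, rdpmc splits the 64-bit counter value into two 32-bit
 * halves. Illustrative sketch for performance counter 0:
 *
 *	u32 lo, hi;
 *	rdpmc(0, lo, hi);
 *
 * lo receives the low 32 bits of the counter, hi the high 32.
 */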
591
592static inline void load_TR_desc(void)
593{
594 PVOP_VCALL0(load_tr_desc);
595}
596static inline void load_gdt(const struct Xgt_desc_struct *dtr)
597{
598 PVOP_VCALL1(load_gdt, dtr);
599}
600static inline void load_idt(const struct Xgt_desc_struct *dtr)
601{
602 PVOP_VCALL1(load_idt, dtr);
603}
604static inline void set_ldt(const void *addr, unsigned entries)
605{
606 PVOP_VCALL2(set_ldt, addr, entries);
607}
608static inline void store_gdt(struct Xgt_desc_struct *dtr)
609{
610 PVOP_VCALL1(store_gdt, dtr);
611}
612static inline void store_idt(struct Xgt_desc_struct *dtr)
613{
614 PVOP_VCALL1(store_idt, dtr);
615}
616static inline unsigned long paravirt_store_tr(void)
617{
618 return PVOP_CALL0(unsigned long, store_tr);
619}
620#define store_tr(tr) ((tr) = paravirt_store_tr())
621static inline void load_TLS(struct thread_struct *t, unsigned cpu)
622{
623 PVOP_VCALL2(load_tls, t, cpu);
624}
625static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
626{
627 PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
628}
629static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
630{
631 PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
632}
633static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
634{
635 PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
636}
637static inline void set_iopl_mask(unsigned mask)
638{
639 PVOP_VCALL1(set_iopl_mask, mask);
640}
641
642/* The paravirtualized I/O functions */
643static inline void slow_down_io(void) {
644 paravirt_ops.io_delay();
645#ifdef REALLY_SLOW_IO
646 paravirt_ops.io_delay();
647 paravirt_ops.io_delay();
648 paravirt_ops.io_delay();
649#endif
650}
651
652#ifdef CONFIG_X86_LOCAL_APIC
653/*
654 * Basic functions accessing APICs.
655 */
656static inline void apic_write(unsigned long reg, unsigned long v)
657{
658 PVOP_VCALL2(apic_write, reg, v);
659}
660
661static inline void apic_write_atomic(unsigned long reg, unsigned long v)
662{
663 PVOP_VCALL2(apic_write_atomic, reg, v);
664}
665
666static inline unsigned long apic_read(unsigned long reg)
667{
668 return PVOP_CALL1(unsigned long, apic_read, reg);
669}
670
671static inline void setup_boot_clock(void)
672{
673 PVOP_VCALL0(setup_boot_clock);
674}
675
676static inline void setup_secondary_clock(void)
677{
678 PVOP_VCALL0(setup_secondary_clock);
679}
680#endif
681
682static inline void paravirt_post_allocator_init(void)
683{
684 if (paravirt_ops.post_allocator_init)
685 (*paravirt_ops.post_allocator_init)();
686}
687
688static inline void paravirt_pagetable_setup_start(pgd_t *base)
689{
690 if (paravirt_ops.pagetable_setup_start)
691 (*paravirt_ops.pagetable_setup_start)(base);
692}
693
694static inline void paravirt_pagetable_setup_done(pgd_t *base)
695{
696 if (paravirt_ops.pagetable_setup_done)
697 (*paravirt_ops.pagetable_setup_done)(base);
698}
699
700#ifdef CONFIG_SMP
701static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
702 unsigned long start_esp)
703{
704 PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp);
705}
706#endif
707
708static inline void paravirt_activate_mm(struct mm_struct *prev,
709 struct mm_struct *next)
710{
711 PVOP_VCALL2(activate_mm, prev, next);
712}
713
714static inline void arch_dup_mmap(struct mm_struct *oldmm,
715 struct mm_struct *mm)
716{
717 PVOP_VCALL2(dup_mmap, oldmm, mm);
718}
719
720static inline void arch_exit_mmap(struct mm_struct *mm)
721{
722 PVOP_VCALL1(exit_mmap, mm);
723}
724
725static inline void __flush_tlb(void)
726{
727 PVOP_VCALL0(flush_tlb_user);
728}
729static inline void __flush_tlb_global(void)
730{
731 PVOP_VCALL0(flush_tlb_kernel);
732}
733static inline void __flush_tlb_single(unsigned long addr)
734{
735 PVOP_VCALL1(flush_tlb_single, addr);
736}
737
738static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
739 unsigned long va)
740{
741 PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
742}
743
744static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
745{
746 PVOP_VCALL2(alloc_pt, mm, pfn);
747}
748static inline void paravirt_release_pt(unsigned pfn)
749{
750 PVOP_VCALL1(release_pt, pfn);
751}
752
753static inline void paravirt_alloc_pd(unsigned pfn)
754{
755 PVOP_VCALL1(alloc_pd, pfn);
756}
757
758static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
759 unsigned start, unsigned count)
760{
761 PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count);
762}
763static inline void paravirt_release_pd(unsigned pfn)
764{
765 PVOP_VCALL1(release_pd, pfn);
766}
767
768#ifdef CONFIG_HIGHPTE
769static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
770{
771 unsigned long ret;
772 ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type);
773 return (void *)ret;
774}
775#endif
776
777static inline void pte_update(struct mm_struct *mm, unsigned long addr,
778 pte_t *ptep)
779{
780 PVOP_VCALL3(pte_update, mm, addr, ptep);
781}
782
783static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
784 pte_t *ptep)
785{
786 PVOP_VCALL3(pte_update_defer, mm, addr, ptep);
787}
788
789#ifdef CONFIG_X86_PAE
790static inline pte_t __pte(unsigned long long val)
791{
792 unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte,
793 val, val >> 32);
794 return (pte_t) { ret, ret >> 32 };
795}
796
797static inline pmd_t __pmd(unsigned long long val)
798{
799 return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) };
800}
801
802static inline pgd_t __pgd(unsigned long long val)
803{
804 return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) };
805}
806
807static inline unsigned long long pte_val(pte_t x)
808{
809 return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high);
810}
811
812static inline unsigned long long pmd_val(pmd_t x)
813{
814 return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32);
815}
816
817static inline unsigned long long pgd_val(pgd_t x)
818{
819 return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32);
820}
821
822static inline void set_pte(pte_t *ptep, pte_t pteval)
823{
824 PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high);
825}
826
827static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
828 pte_t *ptep, pte_t pteval)
829{
830 /* 5 arg words: too many for the PVOP_CALL macros, so call directly */
831 paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
832}
833
834static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
835{
836 PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high);
837}
838
839static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
840 pte_t *ptep, pte_t pte)
841{
842 /* 5 arg words: too many for the PVOP_CALL macros, so call directly */
843 paravirt_ops.set_pte_present(mm, addr, ptep, pte);
844}
845
846static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
847{
848 PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32);
849}
850
851static inline void set_pud(pud_t *pudp, pud_t pudval)
852{
853 PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32);
854}
855
856static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
857{
858 PVOP_VCALL3(pte_clear, mm, addr, ptep);
859}
860
861static inline void pmd_clear(pmd_t *pmdp)
862{
863 PVOP_VCALL1(pmd_clear, pmdp);
864}
865
866#else /* !CONFIG_X86_PAE */
867
868static inline pte_t __pte(unsigned long val)
869{
870 return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) };
871}
872
873static inline pgd_t __pgd(unsigned long val)
874{
875 return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) };
876}
877
878static inline unsigned long pte_val(pte_t x)
879{
880 return PVOP_CALL1(unsigned long, pte_val, x.pte_low);
881}
882
883static inline unsigned long pgd_val(pgd_t x)
884{
885 return PVOP_CALL1(unsigned long, pgd_val, x.pgd);
886}
887
888static inline void set_pte(pte_t *ptep, pte_t pteval)
889{
890 PVOP_VCALL2(set_pte, ptep, pteval.pte_low);
891}
892
893static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
894 pte_t *ptep, pte_t pteval)
895{
896 PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low);
897}
898
899static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
900{
901 PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd);
902}
903#endif /* CONFIG_X86_PAE */
904
905#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
906static inline void arch_enter_lazy_cpu_mode(void)
907{
908 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU);
909}
910
911static inline void arch_leave_lazy_cpu_mode(void)
912{
913 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
914}
915
916static inline void arch_flush_lazy_cpu_mode(void)
917{
918 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
919}
920
921
922#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
923static inline void arch_enter_lazy_mmu_mode(void)
924{
925 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU);
926}
927
928static inline void arch_leave_lazy_mmu_mode(void)
929{
930 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
931}
932
933static inline void arch_flush_lazy_mmu_mode(void)
934{
935 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
936}
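/*
 * Typical batching pattern (an illustrative sketch, not code from this
 * header): bracketing a run of page-table updates lets a hypervisor
 * coalesce them into a single hypercall:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for each pte in the range:
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */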
937
938void _paravirt_nop(void);
939#define paravirt_nop ((void *)_paravirt_nop)
940
941/* These all sit in the .parainstructions section to tell us what to patch. */
942struct paravirt_patch_site {
943 u8 *instr; /* original instructions */
944 u8 instrtype; /* type of this instruction */
945 u8 len; /* length of original instruction */
946 u16 clobbers; /* what registers you may clobber */
947};
948
949extern struct paravirt_patch_site __parainstructions[],
950 __parainstructions_end[];
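/*
 * Illustrative sketch of how these records are consumed (the actual
 * walker lives in the arch patching code, not in this header):
 *
 *	struct paravirt_patch_site *p;
 *	for (p = __parainstructions; p < __parainstructions_end; p++)
 *		paravirt_ops.patch(p->instrtype, p->clobbers,
 *				   p->instr, p->len);
 */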
951
952static inline unsigned long __raw_local_save_flags(void)
953{
954 unsigned long f;
955
956 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
957 PARAVIRT_CALL
958 "popl %%edx; popl %%ecx")
959 : "=a"(f)
960 : paravirt_type(save_fl),
961 paravirt_clobber(CLBR_EAX)
962 : "memory", "cc");
963 return f;
964}
965
966static inline void raw_local_irq_restore(unsigned long f)
967{
968 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
969 PARAVIRT_CALL
970 "popl %%edx; popl %%ecx")
971 : "=a"(f)
972 : "0"(f),
973 paravirt_type(restore_fl),
974 paravirt_clobber(CLBR_EAX)
975 : "memory", "cc");
976}
977
978static inline void raw_local_irq_disable(void)
979{
980 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
981 PARAVIRT_CALL
982 "popl %%edx; popl %%ecx")
983 :
984 : paravirt_type(irq_disable),
985 paravirt_clobber(CLBR_EAX)
986 : "memory", "eax", "cc");
987}
988
989static inline void raw_local_irq_enable(void)
990{
991 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
992 PARAVIRT_CALL
993 "popl %%edx; popl %%ecx")
994 :
995 : paravirt_type(irq_enable),
996 paravirt_clobber(CLBR_EAX)
997 : "memory", "eax", "cc");
998}
999
1000static inline unsigned long __raw_local_irq_save(void)
1001{
1002 unsigned long f;
1003
1004 f = __raw_local_save_flags();
1005 raw_local_irq_disable();
1006 return f;
1007}
1008
1009#define CLI_STRING \
1010 _paravirt_alt("pushl %%ecx; pushl %%edx;" \
1011 "call *paravirt_ops+%c[paravirt_cli_type]*4;" \
1012 "popl %%edx; popl %%ecx", \
1013 "%c[paravirt_cli_type]", "%c[paravirt_clobber]")
1014
1015#define STI_STRING \
1016 _paravirt_alt("pushl %%ecx; pushl %%edx;" \
1017 "call *paravirt_ops+%c[paravirt_sti_type]*4;" \
1018 "popl %%edx; popl %%ecx", \
1019 "%c[paravirt_sti_type]", "%c[paravirt_clobber]")
1020
1021#define CLI_STI_CLOBBERS , "%eax"
1022#define CLI_STI_INPUT_ARGS \
1023 , \
1024 [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)), \
1025 [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)), \
1026 paravirt_clobber(CLBR_EAX)
1027
1028/* Make sure as little as possible of this mess escapes. */
1029#undef PARAVIRT_CALL
1030#undef __PVOP_CALL
1031#undef __PVOP_VCALL
1032#undef PVOP_VCALL0
1033#undef PVOP_CALL0
1034#undef PVOP_VCALL1
1035#undef PVOP_CALL1
1036#undef PVOP_VCALL2
1037#undef PVOP_CALL2
1038#undef PVOP_VCALL3
1039#undef PVOP_CALL3
1040#undef PVOP_VCALL4
1041#undef PVOP_CALL4
1042
1043#else /* __ASSEMBLY__ */
1044
1045#define PARA_PATCH(off) ((off) / 4)
1046
1047#define PARA_SITE(ptype, clobbers, ops) \
1048771:; \
1049 ops; \
1050772:; \
1051 .pushsection .parainstructions,"a"; \
1052 .long 771b; \
1053 .byte ptype; \
1054 .byte 772b-771b; \
1055 .short clobbers; \
1056 .popsection
1057
1058#define INTERRUPT_RETURN \
1059 PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE, \
1060 jmp *%cs:paravirt_ops+PARAVIRT_iret)
1061
1062#define DISABLE_INTERRUPTS(clobbers) \
1063 PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers, \
1064 pushl %eax; pushl %ecx; pushl %edx; \
1065 call *%cs:paravirt_ops+PARAVIRT_irq_disable; \
1066 popl %edx; popl %ecx; popl %eax)
1067
1068#define ENABLE_INTERRUPTS(clobbers) \
1069 PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers, \
1070 pushl %eax; pushl %ecx; pushl %edx; \
1071 call *%cs:paravirt_ops+PARAVIRT_irq_enable; \
1072 popl %edx; popl %ecx; popl %eax)
1073
1074#define ENABLE_INTERRUPTS_SYSEXIT \
1075 PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE, \
1076 jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
1077
1078#define GET_CR0_INTO_EAX \
1079 push %ecx; push %edx; \
1080 call *paravirt_ops+PARAVIRT_read_cr0; \
1081 pop %edx; pop %ecx
1082
1083#endif /* __ASSEMBLY__ */
1084#endif /* CONFIG_PARAVIRT */
1085#endif /* __ASM_PARAVIRT_H */
diff --git a/include/asm-i386/parport.h b/include/asm-i386/parport.h
deleted file mode 100644
index fa0e321e498e..000000000000
--- a/include/asm-i386/parport.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * parport.h: ia32-specific parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_I386_PARPORT_H
10#define _ASM_I386_PARPORT_H 1
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
14{
15 return parport_pc_find_isa_ports (autoirq, autodma);
16}
17
18#endif /* !(_ASM_I386_PARPORT_H) */
diff --git a/include/asm-i386/pci-direct.h b/include/asm-i386/pci-direct.h
deleted file mode 100644
index 4f6738b08206..000000000000
--- a/include/asm-i386/pci-direct.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "asm-x86_64/pci-direct.h"
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
deleted file mode 100644
index 4fcacc711385..000000000000
--- a/include/asm-i386/pci.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef __i386_PCI_H
2#define __i386_PCI_H
3
4
5#ifdef __KERNEL__
6
7struct pci_sysdata {
8 int node; /* NUMA node */
9};
10
11/* scan a bus after allocating a pci_sysdata for it */
12extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
13
14#include <linux/mm.h> /* for struct page */
15
16/* Can be used to override the logic in pci_scan_bus for skipping
17 already-configured bus numbers - to be used for buggy BIOSes
18 or architectures with incomplete PCI setup by the loader */
19
20#ifdef CONFIG_PCI
21extern unsigned int pcibios_assign_all_busses(void);
22#else
23#define pcibios_assign_all_busses() 0
24#endif
25#define pcibios_scan_all_fns(a, b) 0
26
27extern unsigned long pci_mem_start;
28#define PCIBIOS_MIN_IO 0x1000
29#define PCIBIOS_MIN_MEM (pci_mem_start)
30
31#define PCIBIOS_MIN_CARDBUS_IO 0x4000
32
33void pcibios_config_init(void);
34struct pci_bus * pcibios_scan_root(int bus);
35
36void pcibios_set_master(struct pci_dev *dev);
37void pcibios_penalize_isa_irq(int irq, int active);
38struct irq_routing_table *pcibios_get_irq_routing_table(void);
39int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
40
41/* Dynamic DMA mapping stuff.
42 * i386 has everything mapped statically.
43 */
44
45#include <linux/types.h>
46#include <linux/slab.h>
47#include <asm/scatterlist.h>
48#include <linux/string.h>
49#include <asm/io.h>
50
51struct pci_dev;
52
53/* The PCI address space does equal the physical memory
54 * address space. The networking and block device layers use
55 * this boolean for bounce buffer decisions.
56 */
57#define PCI_DMA_BUS_IS_PHYS (1)
58
59/* pci_unmap_{page,single} is a nop so... */
60#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
61#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
62#define pci_unmap_addr(PTR, ADDR_NAME) (0)
63#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
64#define pci_unmap_len(PTR, LEN_NAME) (0)
65#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
66
67#define HAVE_PCI_MMAP
68extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
69 enum pci_mmap_state mmap_state, int write_combine);
70
71
72#ifdef CONFIG_PCI
73static inline void pci_dma_burst_advice(struct pci_dev *pdev,
74 enum pci_dma_burst_strategy *strat,
75 unsigned long *strategy_parameter)
76{
77 *strat = PCI_DMA_BURST_INFINITY;
78 *strategy_parameter = ~0UL;
79}
80#endif
81
82#endif /* __KERNEL__ */
83
84/* implement the pci_ DMA API in terms of the generic device dma_ one */
85#include <asm-generic/pci-dma-compat.h>
86
87/* generic pci stuff */
88#include <asm-generic/pci.h>
89
90#endif /* __i386_PCI_H */
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
deleted file mode 100644
index a7ebd436f3cc..000000000000
--- a/include/asm-i386/percpu.h
+++ /dev/null
@@ -1,154 +0,0 @@
1#ifndef __ARCH_I386_PERCPU__
2#define __ARCH_I386_PERCPU__
3
4#ifdef __ASSEMBLY__
5
6/*
7 * PER_CPU finds an address of a per-cpu variable.
8 *
9 * Args:
10 * var - variable name
11 * reg - 32bit register
12 *
13 * The resulting address is stored in the "reg" argument.
14 *
15 * Example:
16 * PER_CPU(cpu_gdt_descr, %ebx)
17 */
18#ifdef CONFIG_SMP
19#define PER_CPU(var, reg) \
20 movl %fs:per_cpu__##this_cpu_off, reg; \
21 lea per_cpu__##var(reg), reg
22#define PER_CPU_VAR(var) %fs:per_cpu__##var
23#else /* ! SMP */
24#define PER_CPU(var, reg) \
25 movl $per_cpu__##var, reg
26#define PER_CPU_VAR(var) per_cpu__##var
27#endif /* SMP */
28
29#else /* ...!ASSEMBLY */
30
43#ifdef CONFIG_SMP
44/* Same as generic implementation except for optimized local access. */
45#define __GENERIC_PER_CPU
46
47/* This is used for other cpus to find our section. */
48extern unsigned long __per_cpu_offset[];
49
50#define per_cpu_offset(x) (__per_cpu_offset[x])
51
52/* Separate out the type, so (int[3], foo) works. */
53#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
54#define DEFINE_PER_CPU(type, name) \
55 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
56
57#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
58 __attribute__((__section__(".data.percpu.shared_aligned"))) \
59 __typeof__(type) per_cpu__##name \
60 ____cacheline_aligned_in_smp
61
62/* We can use this directly for local CPU (faster). */
63DECLARE_PER_CPU(unsigned long, this_cpu_off);
64
65/* var is in discarded region: offset to particular copy we want */
66#define per_cpu(var, cpu) (*({ \
67 extern int simple_identifier_##var(void); \
68 RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
69
70#define __raw_get_cpu_var(var) (*({ \
71 extern int simple_identifier_##var(void); \
72 RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off)); \
73}))
74
75#define __get_cpu_var(var) __raw_get_cpu_var(var)
76
77/* A macro to avoid #include hell... */
78#define percpu_modcopy(pcpudst, src, size) \
79do { \
80 unsigned int __i; \
81 for_each_possible_cpu(__i) \
82 memcpy((pcpudst)+__per_cpu_offset[__i], \
83 (src), (size)); \
84} while (0)
85
86#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
87#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
88
89/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
90#define __percpu_seg "%%fs:"
91#else /* !SMP */
92#include <asm-generic/percpu.h>
93#define __percpu_seg ""
94#endif /* SMP */
95
96/* For arch-specific code, we can use direct single-insn ops (they
97 * don't give an lvalue though). */
98extern void __bad_percpu_size(void);
99
100#define percpu_to_op(op,var,val) \
101 do { \
102 typedef typeof(var) T__; \
103 if (0) { T__ tmp__; tmp__ = (val); } \
104 switch (sizeof(var)) { \
105 case 1: \
106 asm(op "b %1,"__percpu_seg"%0" \
107 : "+m" (var) \
108 :"ri" ((T__)val)); \
109 break; \
110 case 2: \
111 asm(op "w %1,"__percpu_seg"%0" \
112 : "+m" (var) \
113 :"ri" ((T__)val)); \
114 break; \
115 case 4: \
116 asm(op "l %1,"__percpu_seg"%0" \
117 : "+m" (var) \
118 :"ri" ((T__)val)); \
119 break; \
120 default: __bad_percpu_size(); \
121 } \
122 } while (0)
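/*
 * Note on the "if (0)" arm above: it never executes; it exists purely
 * so the compiler type-checks that (val) is assignable to the per-cpu
 * variable's type before the value is fed to the asm.
 */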
123
124#define percpu_from_op(op,var) \
125 ({ \
126 typeof(var) ret__; \
127 switch (sizeof(var)) { \
128 case 1: \
129 asm(op "b "__percpu_seg"%1,%0" \
130 : "=r" (ret__) \
131 : "m" (var)); \
132 break; \
133 case 2: \
134 asm(op "w "__percpu_seg"%1,%0" \
135 : "=r" (ret__) \
136 : "m" (var)); \
137 break; \
138 case 4: \
139 asm(op "l "__percpu_seg"%1,%0" \
140 : "=r" (ret__) \
141 : "m" (var)); \
142 break; \
143 default: __bad_percpu_size(); \
144 } \
145 ret__; })
146
147#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
148#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
149#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
150#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
151#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
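/*
 * Usage sketch (illustrative; assumes a DEFINE_PER_CPU(int, foo)
 * elsewhere). On SMP each line expands to a single %fs-relative
 * instruction:
 *
 *	x86_write_percpu(foo, 1);
 *	v = x86_read_percpu(foo);
 */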
152#endif /* !__ASSEMBLY__ */
153
154#endif /* __ARCH_I386_PERCPU__ */
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
deleted file mode 100644
index f2fc33ceb9f2..000000000000
--- a/include/asm-i386/pgalloc.h
+++ /dev/null
@@ -1,68 +0,0 @@
1#ifndef _I386_PGALLOC_H
2#define _I386_PGALLOC_H
3
4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */
6
7#ifdef CONFIG_PARAVIRT
8#include <asm/paravirt.h>
9#else
10#define paravirt_alloc_pt(mm, pfn) do { } while (0)
11#define paravirt_alloc_pd(pfn) do { } while (0)
13#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
14#define paravirt_release_pt(pfn) do { } while (0)
15#define paravirt_release_pd(pfn) do { } while (0)
16#endif
17
18#define pmd_populate_kernel(mm, pmd, pte) \
19do { \
20 paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \
21 set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
22} while (0)
23
24#define pmd_populate(mm, pmd, pte) \
25do { \
26 paravirt_alloc_pt(mm, page_to_pfn(pte)); \
27 set_pmd(pmd, __pmd(_PAGE_TABLE + \
28 ((unsigned long long)page_to_pfn(pte) << \
29 (unsigned long long) PAGE_SHIFT))); \
30} while (0)
31
32/*
33 * Allocate and free page tables.
34 */
35extern pgd_t *pgd_alloc(struct mm_struct *);
36extern void pgd_free(pgd_t *pgd);
37
38extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
39extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
40
41static inline void pte_free_kernel(pte_t *pte)
42{
43 free_page((unsigned long)pte);
44}
45
46static inline void pte_free(struct page *pte)
47{
48 __free_page(pte);
49}
50
51
52#define __pte_free_tlb(tlb,pte) \
53do { \
54 paravirt_release_pt(page_to_pfn(pte)); \
55 tlb_remove_page((tlb),(pte)); \
56} while (0)
57
58#ifdef CONFIG_X86_PAE
59/*
60 * In the PAE case we free the pmds as part of the pgd.
61 */
62#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
63#define pmd_free(x) do { } while (0)
64#define __pmd_free_tlb(tlb,x) do { } while (0)
65#define pud_populate(mm, pmd, pte) BUG()
66#endif
67
68#endif /* _I386_PGALLOC_H */
diff --git a/include/asm-i386/pgtable-2level-defs.h b/include/asm-i386/pgtable-2level-defs.h
deleted file mode 100644
index 0f71c9f13da4..000000000000
--- a/include/asm-i386/pgtable-2level-defs.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
2#define _I386_PGTABLE_2LEVEL_DEFS_H
3
4#define SHARED_KERNEL_PMD 0
5
6/*
7 * traditional i386 two-level paging structure:
8 */
9
10#define PGDIR_SHIFT 22
11#define PTRS_PER_PGD 1024
12
13/*
14 * the i386 is two-level, so we don't really have any
15 * PMD directory physically.
16 */
17
18#define PTRS_PER_PTE 1024
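/*
 * Worked example: with PGDIR_SHIFT == 22, each of the 1024 pgd entries
 * covers 1 << 22 == 4MB; each of the 1024 PTEs below it maps one 4kB
 * page, so 1024 entries * 1024 PTEs * 4kB == 4GB of address space.
 */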
19
20#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
deleted file mode 100644
index 84b03cf56a79..000000000000
--- a/include/asm-i386/pgtable-2level.h
+++ /dev/null
@@ -1,86 +0,0 @@
1#ifndef _I386_PGTABLE_2LEVEL_H
2#define _I386_PGTABLE_2LEVEL_H
3
4#define pte_ERROR(e) \
5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
6#define pgd_ERROR(e) \
7 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
8
9/*
10 * Certain architectures need to do special things when PTEs
11 * within a page table are directly modified. Thus, the following
12 * hook is made available.
13 */
14static inline void native_set_pte(pte_t *ptep, pte_t pte)
15{
16 *ptep = pte;
17}
18static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
19 pte_t *ptep, pte_t pte)
20{
21 native_set_pte(ptep, pte);
22}
23static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
24{
25 *pmdp = pmd;
26}
27#ifndef CONFIG_PARAVIRT
28#define set_pte(pteptr, pteval) native_set_pte(pteptr, pteval)
29#define set_pte_at(mm,addr,ptep,pteval) native_set_pte_at(mm, addr, ptep, pteval)
30#define set_pmd(pmdptr, pmdval) native_set_pmd(pmdptr, pmdval)
31#endif
32
33#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
34#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
35
36#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
37#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
38
39static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
40{
41 *xp = __pte(0);
42}
43
44#ifdef CONFIG_SMP
45static inline pte_t native_ptep_get_and_clear(pte_t *xp)
46{
47 return __pte(xchg(&xp->pte_low, 0));
48}
49#else
50#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
51#endif
52
53#define pte_page(x) pfn_to_page(pte_pfn(x))
54#define pte_none(x) (!(x).pte_low)
55#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
56#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
57#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
58
59/*
60 * All present pages are kernel-executable:
61 */
62static inline int pte_exec_kernel(pte_t pte)
63{
64 return 1;
65}
66
67/*
68 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
69 * into this range:
70 */
71#define PTE_FILE_MAX_BITS 29
72
73#define pte_to_pgoff(pte) \
74 ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))
75
76#define pgoff_to_pte(off) \
77 ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
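/*
 * Worked example (illustrative): pgoff_to_pte(0x1234) puts the low
 * five offset bits (0x14) into pte bits 1-5 and the remaining bits
 * (0x91) from bit 8 up, i.e. pte_low == 0x9128 | _PAGE_FILE;
 * pte_to_pgoff() reverses the transformation.
 */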
78
79/* Encode and de-code a swap entry */
80#define __swp_type(x) (((x).val >> 1) & 0x1f)
81#define __swp_offset(x) ((x).val >> 8)
82#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
83#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
84#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
85
86#endif /* _I386_PGTABLE_2LEVEL_H */
diff --git a/include/asm-i386/pgtable-3level-defs.h b/include/asm-i386/pgtable-3level-defs.h
deleted file mode 100644
index c0df89f66e8b..000000000000
--- a/include/asm-i386/pgtable-3level-defs.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
2#define _I386_PGTABLE_3LEVEL_DEFS_H
3
4#ifdef CONFIG_PARAVIRT
5#define SHARED_KERNEL_PMD (paravirt_ops.shared_kernel_pmd)
6#else
7#define SHARED_KERNEL_PMD 1
8#endif
9
10/*
11 * PGDIR_SHIFT determines what a top-level page table entry can map
12 */
13#define PGDIR_SHIFT 30
14#define PTRS_PER_PGD 4
15
16/*
17 * PMD_SHIFT determines the size of the area a middle-level
18 * page table can map
19 */
20#define PMD_SHIFT 21
21#define PTRS_PER_PMD 512
22
23/*
24 * entries per page directory level
25 */
26#define PTRS_PER_PTE 512
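/*
 * Worked example: PGDIR_SHIFT == 30 gives four pgd entries of 1GB
 * each; PMD_SHIFT == 21 gives 512 pmds of 2MB per gigabyte; 512 PTEs
 * of 4kB fill one 2MB pmd, so 4 * 1GB again covers the 4GB space.
 */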
27
28#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
deleted file mode 100644
index 948a33414118..000000000000
--- a/include/asm-i386/pgtable-3level.h
+++ /dev/null
@@ -1,192 +0,0 @@
1#ifndef _I386_PGTABLE_3LEVEL_H
2#define _I386_PGTABLE_3LEVEL_H
3
4/*
5 * Intel Physical Address Extension (PAE) Mode - three-level page
6 * tables on PPro+ CPUs.
7 *
8 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
9 */
10
11#define pte_ERROR(e) \
12 printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
13#define pmd_ERROR(e) \
14 printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
15#define pgd_ERROR(e) \
16 printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
17
18#define pud_none(pud) 0
19#define pud_bad(pud) 0
20#define pud_present(pud) 1
21
22/*
23 * All present pages with !NX bit are kernel-executable:
24 */
25static inline int pte_exec_kernel(pte_t pte)
26{
27 return !(pte_val(pte) & _PAGE_NX);
28}
29
30/* Rules for using set_pte: the pte being assigned *must* be
31 * either not present or in a state where the hardware will
32 * not attempt to update the pte. In places where this is
33 * not possible, use pte_get_and_clear to obtain the old pte
34 * value and then use set_pte to update it. -ben
35 */
36static inline void native_set_pte(pte_t *ptep, pte_t pte)
37{
38 ptep->pte_high = pte.pte_high;
39 smp_wmb();
40 ptep->pte_low = pte.pte_low;
41}
42static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
43 pte_t *ptep, pte_t pte)
44{
45 native_set_pte(ptep, pte);
46}
47
48/*
49 * Since this is only called on user PTEs, and the page fault handler
50 * must handle the already racy situation of simultaneous page faults,
51 * we are justified in merely clearing the PTE present bit, followed
52 * by a set. The ordering here is important.
53 */
54static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
55 pte_t *ptep, pte_t pte)
56{
57 ptep->pte_low = 0;
58 smp_wmb();
59 ptep->pte_high = pte.pte_high;
60 smp_wmb();
61 ptep->pte_low = pte.pte_low;
62}
63
64static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
65{
66 set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
67}
68static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
69{
70 set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
71}
72static inline void native_set_pud(pud_t *pudp, pud_t pud)
73{
74 *pudp = pud;
75}
76
77/*
78 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
79 * entry, so clear the bottom half first and enforce ordering with a compiler
80 * barrier.
81 */
82static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
83{
84 ptep->pte_low = 0;
85 smp_wmb();
86 ptep->pte_high = 0;
87}
88
89static inline void native_pmd_clear(pmd_t *pmd)
90{
91 u32 *tmp = (u32 *)pmd;
92 *tmp = 0;
93 smp_wmb();
94 *(tmp + 1) = 0;
95}
96
97#ifndef CONFIG_PARAVIRT
98#define set_pte(ptep, pte) native_set_pte(ptep, pte)
99#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
100#define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte)
101#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte)
102#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
103#define set_pud(pudp, pud) native_set_pud(pudp, pud)
104#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
105#define pmd_clear(pmd) native_pmd_clear(pmd)
106#endif
107
108/*
109 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
110 * the TLB via cr3 if the top-level pgd is changed...
111 * We do not let the generic code free and clear pgd entries due to
112 * this erratum.
113 */
114static inline void pud_clear (pud_t * pud) { }
115
116#define pud_page(pud) \
117((struct page *) __va(pud_val(pud) & PAGE_MASK))
118
119#define pud_page_vaddr(pud) \
120((unsigned long) __va(pud_val(pud) & PAGE_MASK))
121
122
123/* Find an entry in the second-level page table.. */
124#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
125 pmd_index(address))
126
127#ifdef CONFIG_SMP
128static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
129{
130 pte_t res;
131
132 /* xchg acts as a barrier before the setting of the high bits */
133 res.pte_low = xchg(&ptep->pte_low, 0);
134 res.pte_high = ptep->pte_high;
135 ptep->pte_high = 0;
136
137 return res;
138}
139#else
140#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
141#endif
142
143#define __HAVE_ARCH_PTE_SAME
144static inline int pte_same(pte_t a, pte_t b)
145{
146 return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
147}
148
149#define pte_page(x) pfn_to_page(pte_pfn(x))
150
151static inline int pte_none(pte_t pte)
152{
153 return !pte.pte_low && !pte.pte_high;
154}
155
156static inline unsigned long pte_pfn(pte_t pte)
157{
158 return pte_val(pte) >> PAGE_SHIFT;
159}
160
161extern unsigned long long __supported_pte_mask;
162
163static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
164{
165 return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
166 pgprot_val(pgprot)) & __supported_pte_mask);
167}
168
169static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
170{
171 return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
172 pgprot_val(pgprot)) & __supported_pte_mask);
173}
174
175/*
176 * Bits 0, 6 and 7 are taken in the low part of the pte,
177 * put the 32 bits of offset into the high part.
178 */
179#define pte_to_pgoff(pte) ((pte).pte_high)
180#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
181#define PTE_FILE_MAX_BITS 32
182
183/* Encode and de-code a swap entry */
184#define __swp_type(x) (((x).val) & 0x1f)
185#define __swp_offset(x) ((x).val >> 5)
186#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
187#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
188#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
189
190#define __pmd_free_tlb(tlb, x) do { } while (0)
191
192#endif /* _I386_PGTABLE_3LEVEL_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
deleted file mode 100644
index c7fefa6b12fd..000000000000
--- a/include/asm-i386/pgtable.h
+++ /dev/null
@@ -1,512 +0,0 @@
1#ifndef _I386_PGTABLE_H
2#define _I386_PGTABLE_H
3
4
5/*
6 * The Linux memory management assumes a three-level page table setup. On
7 * the i386, we use that, but "fold" the mid level into the top-level page
8 * table, so that we physically have the same two-level page table as the
9 * i386 mmu expects.
10 *
11 * This file contains the functions and defines necessary to modify and use
12 * the i386 page table tree.
13 */
14#ifndef __ASSEMBLY__
15#include <asm/processor.h>
16#include <asm/fixmap.h>
17#include <linux/threads.h>
18#include <asm/paravirt.h>
19
20#ifndef _I386_BITOPS_H
21#include <asm/bitops.h>
22#endif
23
24#include <linux/slab.h>
25#include <linux/list.h>
26#include <linux/spinlock.h>
27
28struct mm_struct;
29struct vm_area_struct;
30
31/*
32 * ZERO_PAGE is a global shared page that is always zero: used
33 * for zero-mapped memory areas etc..
34 */
35#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
36extern unsigned long empty_zero_page[1024];
37extern pgd_t swapper_pg_dir[1024];
38extern struct kmem_cache *pmd_cache;
39extern spinlock_t pgd_lock;
40extern struct page *pgd_list;
41void check_pgt_cache(void);
42
43void pmd_ctor(void *, struct kmem_cache *, unsigned long);
44void pgtable_cache_init(void);
45void paging_init(void);
46
47
48/*
49 * The Linux x86 paging architecture is 'compile-time dual-mode', it
50 * implements both the traditional 2-level x86 page tables and the
51 * newer 3-level PAE-mode page tables.
52 */
53#ifdef CONFIG_X86_PAE
54# include <asm/pgtable-3level-defs.h>
55# define PMD_SIZE (1UL << PMD_SHIFT)
56# define PMD_MASK (~(PMD_SIZE-1))
57#else
58# include <asm/pgtable-2level-defs.h>
59#endif
60
61#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
62#define PGDIR_MASK (~(PGDIR_SIZE-1))
63
64#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
65#define FIRST_USER_ADDRESS 0
66
67#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
68#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
69
70#define TWOLEVEL_PGDIR_SHIFT 22
71#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
72#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
73
74/* Just any arbitrary offset to the start of the vmalloc VM area: the
75 * current 8MB value just means that there will be an 8MB "hole" after the
76 * physical memory until the kernel virtual memory starts. That means that
77 * any out-of-bounds memory accesses will hopefully be caught.
78 * The vmalloc() routines leave a hole of 4kB between each vmalloced
79 * area for the same reason. ;)
80 */
81#define VMALLOC_OFFSET (8*1024*1024)
82#define VMALLOC_START (((unsigned long) high_memory + \
83 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
84#ifdef CONFIG_HIGHMEM
85# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
86#else
87# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
88#endif
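/*
 * Worked example (illustrative): with high_memory at 0xc8000000,
 * VMALLOC_START == (0xc8000000 + 2*8MB - 1) & ~(8MB - 1) == 0xc8800000,
 * leaving the intended 8MB guard hole above physical memory.
 */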
89
90/*
91 * _PAGE_PSE set in the page directory entry just means that
92 * the page directory entry points directly to a 4MB-aligned block of
93 * memory.
94 */
95#define _PAGE_BIT_PRESENT 0
96#define _PAGE_BIT_RW 1
97#define _PAGE_BIT_USER 2
98#define _PAGE_BIT_PWT 3
99#define _PAGE_BIT_PCD 4
100#define _PAGE_BIT_ACCESSED 5
101#define _PAGE_BIT_DIRTY 6
102#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
103#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
104#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
105#define _PAGE_BIT_UNUSED2 10
106#define _PAGE_BIT_UNUSED3 11
107#define _PAGE_BIT_NX 63
108
109#define _PAGE_PRESENT 0x001
110#define _PAGE_RW 0x002
111#define _PAGE_USER 0x004
112#define _PAGE_PWT 0x008
113#define _PAGE_PCD 0x010
114#define _PAGE_ACCESSED 0x020
115#define _PAGE_DIRTY 0x040
116#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
117#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
118#define _PAGE_UNUSED1 0x200 /* available for programmer */
119#define _PAGE_UNUSED2 0x400
120#define _PAGE_UNUSED3 0x800
121
122/* If _PAGE_PRESENT is clear, we use these: */
123#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
124#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE;
125 pte_present gives true */
126#ifdef CONFIG_X86_PAE
127#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
128#else
129#define _PAGE_NX 0
130#endif
131
132#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
133#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
134#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
135
136#define PAGE_NONE \
137 __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
138#define PAGE_SHARED \
139 __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
140
141#define PAGE_SHARED_EXEC \
142 __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
143#define PAGE_COPY_NOEXEC \
144 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
145#define PAGE_COPY_EXEC \
146 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
147#define PAGE_COPY \
148 PAGE_COPY_NOEXEC
149#define PAGE_READONLY \
150 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
151#define PAGE_READONLY_EXEC \
152 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
153
154#define _PAGE_KERNEL \
155 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
156#define _PAGE_KERNEL_EXEC \
157 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
158
159extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
160#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
161#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
162#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
163#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
164#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
165
166#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
167#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
168#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
169#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
170#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
171#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
172#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
173
174/*
175 * The i386 can't do page protection for execute, and treats execute
176 * permission the same as read. Also, write permissions imply read permissions.
177 * This is the closest we can get..
178 */
179#define __P000 PAGE_NONE
180#define __P001 PAGE_READONLY
181#define __P010 PAGE_COPY
182#define __P011 PAGE_COPY
183#define __P100 PAGE_READONLY_EXEC
184#define __P101 PAGE_READONLY_EXEC
185#define __P110 PAGE_COPY_EXEC
186#define __P111 PAGE_COPY_EXEC
187
188#define __S000 PAGE_NONE
189#define __S001 PAGE_READONLY
190#define __S010 PAGE_SHARED
191#define __S011 PAGE_SHARED
192#define __S100 PAGE_READONLY_EXEC
193#define __S101 PAGE_READONLY_EXEC
194#define __S110 PAGE_SHARED_EXEC
195#define __S111 PAGE_SHARED_EXEC
196
197/*
198 * Define this if things work differently on an i386 and an i486:
199 * it will (on an i486) warn about kernel memory accesses that are
200 * done without an 'access_ok(VERIFY_WRITE,..)'
201 */
202#undef TEST_ACCESS_OK
203
204/* The boot page tables (all created as a single array) */
205extern unsigned long pg0[];
206
207#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
208
209/* To avoid harmful races, pmd_none(x) should check only the low word when PAE is enabled */
210#define pmd_none(x) (!(unsigned long)pmd_val(x))
211#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
212#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
213
214
215#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
216
217/*
218 * The following only work if pte_present() is true.
219 * Undefined behaviour if not..
220 */
221static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
222static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
223static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
224static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
225
226/*
227 * The following only works if pte_present() is not true.
228 */
229static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
230
231static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
232static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
233static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
234static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
235static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
236static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
237static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
238
239#ifdef CONFIG_X86_PAE
240# include <asm/pgtable-3level.h>
241#else
242# include <asm/pgtable-2level.h>
243#endif
244
245#ifndef CONFIG_PARAVIRT
246/*
247 * Rules for using pte_update - it must be called after any PTE update which
248 * has not been done using the set_pte / clear_pte interfaces. It is used by
249 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
250 * updates should either be sets, clears, or set_pte_atomic for P->P
251 * transitions, which means this hook should only be called for user PTEs.
252 * This hook implies a P->P protection or access change has taken place, which
253 * requires a subsequent TLB flush. The notification can optionally be delayed
254 * until the TLB flush event by using the pte_update_defer form of the
255 * interface, but care must be taken to ensure that the flush happens while
256 * still holding the same page table lock so that the shadow and primary pages
257 * do not become out of sync on SMP.
258 */
259#define pte_update(mm, addr, ptep) do { } while (0)
260#define pte_update_defer(mm, addr, ptep) do { } while (0)
261#endif
262
263/* local pte updates need not use xchg for locking */
264static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
265{
266 pte_t res = *ptep;
267
268 /* Pure native function needs no input for mm, addr */
269 native_pte_clear(NULL, 0, ptep);
270 return res;
271}
272
273/*
274 * We only update the dirty/accessed state if we set
275 * the dirty bit by hand in the kernel, since the hardware
276 * will do the accessed bit for us, and we don't want to
277 * race with other CPUs that might be updating the dirty
278 * bit at the same time.
279 */
280#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
281#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
282({ \
283 int __changed = !pte_same(*(ptep), entry); \
284 if (__changed && dirty) { \
285 (ptep)->pte_low = (entry).pte_low; \
286 pte_update_defer((vma)->vm_mm, (address), (ptep)); \
287 flush_tlb_page(vma, address); \
288 } \
289 __changed; \
290})
291
292#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
293#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
294 int __ret = 0; \
295 if (pte_young(*(ptep))) \
296 __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \
297 &(ptep)->pte_low); \
298 if (__ret) \
299 pte_update((vma)->vm_mm, addr, ptep); \
300 __ret; \
301})
302
303#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
304#define ptep_clear_flush_young(vma, address, ptep) \
305({ \
306 int __young; \
307 __young = ptep_test_and_clear_young((vma), (address), (ptep)); \
308 if (__young) \
309 flush_tlb_page(vma, address); \
310 __young; \
311})
312
313#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
314static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
315{
316 pte_t pte = native_ptep_get_and_clear(ptep);
317 pte_update(mm, addr, ptep);
318 return pte;
319}
320
321#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
322static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
323{
324 pte_t pte;
325 if (full) {
326 /*
327 * Full address destruction in progress; paravirt does not
328 * care about updates and native needs no locking
329 */
330 pte = native_local_ptep_get_and_clear(ptep);
331 } else {
332 pte = ptep_get_and_clear(mm, addr, ptep);
333 }
334 return pte;
335}
336
337#define __HAVE_ARCH_PTEP_SET_WRPROTECT
338static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
339{
340 clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
341 pte_update(mm, addr, ptep);
342}
343
344/*
345 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
346 *
347 * dst - pointer to pgd range anywhere on a pgd page
348 * src - ""
349 * count - the number of pgds to copy.
350 *
351 * dst and src can be on the same page, but the range must not overlap,
352 * and must not cross a page boundary.
353 */
354static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
355{
356 memcpy(dst, src, count * sizeof(pgd_t));
357}
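/*
 * Illustrative use (a sketch of what the pgd constructor does, not
 * code from this header): copying the kernel entries into a new pgd:
 *
 *	clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *			swapper_pg_dir + USER_PTRS_PER_PGD,
 *			KERNEL_PGD_PTRS);
 */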
358
359/*
360 * Macro to mark a page protection value as "uncacheable". On processors which do not support
361 * it, this is a no-op.
362 */
363#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
364 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
365
366/*
367 * Conversion functions: convert a page and protection to a page entry,
368 * and a page entry and page directory to the page they refer to.
369 */
370
371#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
372
373static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
374{
375 pte.pte_low &= _PAGE_CHG_MASK;
376 pte.pte_low |= pgprot_val(newprot);
377#ifdef CONFIG_X86_PAE
378 /*
379 * Chop off the NX bit (if present), and add the NX portion of
380 * the newprot (if present):
381 */
382 pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
383 pte.pte_high |= (pgprot_val(newprot) >> 32) & \
384 (__supported_pte_mask >> 32);
385#endif
386 return pte;
387}
388
389#define pmd_large(pmd) \
390((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
391
392/*
393 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
394 *
395 * this macro returns the index of the entry in the pgd page which would
396 * control the given virtual address
397 */
398#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
399#define pgd_index_k(addr) pgd_index(addr)
400
401/*
402 * pgd_offset() returns a (pgd_t *)
403 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
404 */
405#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
406
407/*
408 * a shortcut which implies the use of the kernel's pgd, instead
409 * of a process's
410 */
411#define pgd_offset_k(address) pgd_offset(&init_mm, address)
412
413/*
414 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
415 *
416 * this macro returns the index of the entry in the pmd page which would
417 * control the given virtual address
418 */
419#define pmd_index(address) \
420 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
421
422/*
423 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
424 *
425 * this macro returns the index of the entry in the pte page which would
426 * control the given virtual address
427 */
428#define pte_index(address) \
429 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
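/*
 * Worked example (2-level case, illustrative): for the address
 * 0xc0101234, pgd_index() == 0xc0101234 >> 22 == 0x300 and
 * pte_index() == (0xc0101234 >> 12) & 0x3ff == 0x101.
 */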
430#define pte_offset_kernel(dir, address) \
431 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
432
433#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
434
435#define pmd_page_vaddr(pmd) \
436 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
437
438/*
439 * Helper function that returns the kernel pagetable entry controlling
440 * the virtual address 'address'. NULL means no pagetable entry present.
441 * NOTE: the return type is pte_t, but if the pmd maps a large (PSE)
442 * page then the pmd entry itself is returned as a pte.
443 */
444extern pte_t *lookup_address(unsigned long address);
445
446/*
447 * Make a given kernel text page executable/non-executable.
448 * Returns the previous executability setting of that page (which
449 * is used to restore the previous state). Used by the SMP bootup code.
450 * NOTE: this is an __init function for security reasons.
451 */
452#ifdef CONFIG_X86_PAE
453 extern int set_kernel_exec(unsigned long vaddr, int enable);
454#else
455 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0; }
456#endif
457
458#if defined(CONFIG_HIGHPTE)
459#define pte_offset_map(dir, address) \
460 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
461#define pte_offset_map_nested(dir, address) \
462 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
463#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
464#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
465#else
466#define pte_offset_map(dir, address) \
467 ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
468#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
469#define pte_unmap(pte) do { } while (0)
470#define pte_unmap_nested(pte) do { } while (0)
471#endif
472
473/* Clear a kernel PTE and flush it from the TLB */
474#define kpte_clear_flush(ptep, vaddr) \
475do { \
476 pte_clear(&init_mm, vaddr, ptep); \
477 __flush_tlb_one(vaddr); \
478} while (0)
479
480/*
481 * The i386 doesn't have any external MMU info: the kernel page
482 * tables contain all the necessary information.
483 */
484#define update_mmu_cache(vma,address,pte) do { } while (0)
485
486void native_pagetable_setup_start(pgd_t *base);
487void native_pagetable_setup_done(pgd_t *base);
488
489#ifndef CONFIG_PARAVIRT
490static inline void paravirt_pagetable_setup_start(pgd_t *base)
491{
492 native_pagetable_setup_start(base);
493}
494
495static inline void paravirt_pagetable_setup_done(pgd_t *base)
496{
497 native_pagetable_setup_done(base);
498}
499#endif /* !CONFIG_PARAVIRT */
500
501#endif /* !__ASSEMBLY__ */
502
503#ifdef CONFIG_FLATMEM
504#define kern_addr_valid(addr) (1)
505#endif /* CONFIG_FLATMEM */
506
507#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
508 remap_pfn_range(vma, vaddr, pfn, size, prot)
509
510#include <asm-generic/pgtable.h>
511
512#endif /* _I386_PGTABLE_H */
diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/include/asm-i386/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h
deleted file mode 100644
index 133e31e7dfde..000000000000
--- a/include/asm-i386/posix_types.h
+++ /dev/null
@@ -1,82 +0,0 @@
1#ifndef __ARCH_I386_POSIX_TYPES_H
2#define __ARCH_I386_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t;
12typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef unsigned short __kernel_ipc_pid_t;
16typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t;
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42#if defined(__KERNEL__) || defined(__USE_ALL)
43 int val[2];
44#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
45 int __val[2];
46#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
47} __kernel_fsid_t;
48
49#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
50
51#undef __FD_SET
52#define __FD_SET(fd,fdsetp) \
53 __asm__ __volatile__("btsl %1,%0": \
54 "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
55
56#undef __FD_CLR
57#define __FD_CLR(fd,fdsetp) \
58 __asm__ __volatile__("btrl %1,%0": \
59 "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
60
61#undef __FD_ISSET
62#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
63 unsigned char __result; \
64 __asm__ __volatile__("btl %1,%2 ; setb %0" \
65 :"=q" (__result) :"r" ((int) (fd)), \
66 "m" (*(__kernel_fd_set *) (fdsetp))); \
67 __result; }))
68
69#undef __FD_ZERO
70#define __FD_ZERO(fdsetp) \
71do { \
72 int __d0, __d1; \
73 __asm__ __volatile__("cld ; rep ; stosl" \
74 :"=m" (*(__kernel_fd_set *) (fdsetp)), \
75 "=&c" (__d0), "=&D" (__d1) \
76 :"a" (0), "1" (__FDSET_LONGS), \
77 "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
78} while (0)
79
80#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
81
82#endif
diff --git a/include/asm-i386/processor-cyrix.h b/include/asm-i386/processor-cyrix.h
deleted file mode 100644
index 97568ada1f97..000000000000
--- a/include/asm-i386/processor-cyrix.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * NSC/Cyrix CPU indexed register access. These must be inline
3 * functions rather than macros to ensure correct access ordering.
4 * The access order is always 0x22 (=offset), 0x23 (=value).
5 *
6 * When using the old macros a line like
7 * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
8 * gets expanded to:
9 * do {
10 * outb((CX86_CCR2), 0x22);
11 * outb((({
12 * outb((CX86_CCR2), 0x22);
13 * inb(0x23);
14 * }) | 0x88), 0x23);
15 * } while (0);
16 *
17 * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
18 */
19
20static inline u8 getCx86(u8 reg)
21{
22 outb(reg, 0x22);
23 return inb(0x23);
24}
25
26static inline void setCx86(u8 reg, u8 data)
27{
28 outb(reg, 0x22);
29 outb(data, 0x23);
30}
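
To make the ordering violation described in the header comment concrete, here is a hypothetical user-space harness in which outb()/inb() are replaced by logging stubs; running it prints the faulty 0x22, 0x22, 0x23, 0x23 sequence the old macro expansion produces. The stub bodies and the getCx86_macro/setCx86_macro names are illustrative assumptions (GNU C statement expressions are used, as in the original macros).

  #include <stdio.h>

  static void outb(unsigned char val, unsigned short port)
  {
      printf("out 0x%02x <- 0x%02x\n", port, val);
  }

  static unsigned char inb(unsigned short port)
  {
      printf("in  0x%02x\n", port);
      return 0; /* the value is irrelevant for the ordering demo */
  }

  /* The old macro formulation: the inner getCx86 expansion re-touches
   * index port 0x22 in the middle of the outer access sequence. */
  #define getCx86_macro(reg)      ({ outb((reg), 0x22); inb(0x23); })
  #define setCx86_macro(reg, val) do { outb((reg), 0x22); outb((val), 0x23); } while (0)

  int main(void)
  {
      /* prints: out 0x22, out 0x22, in 0x23, out 0x23 */
      setCx86_macro(0xc2, getCx86_macro(0xc2) | 0x88);
      return 0;
  }
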
diff --git a/include/asm-i386/processor-flags.h b/include/asm-i386/processor-flags.h
deleted file mode 100644
index 5404e90edd57..000000000000
--- a/include/asm-i386/processor-flags.h
+++ /dev/null
@@ -1,91 +0,0 @@
1#ifndef __ASM_I386_PROCESSOR_FLAGS_H
2#define __ASM_I386_PROCESSOR_FLAGS_H
3/* Various flags defined: can be included from assembler. */
4
5/*
6 * EFLAGS bits
7 */
8#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
9#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
10#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
11#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
12#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
13#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
14#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
15#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
16#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
17#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
18#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
19#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
20#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
21#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
22#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
23#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
24#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
25
26/*
27 * Basic CPU control in CR0
28 */
29#define X86_CR0_PE 0x00000001 /* Protection Enable */
30#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
31#define X86_CR0_EM 0x00000004 /* Emulation */
32#define X86_CR0_TS 0x00000008 /* Task Switched */
33#define X86_CR0_ET 0x00000010 /* Extension Type */
34#define X86_CR0_NE 0x00000020 /* Numeric Error */
35#define X86_CR0_WP 0x00010000 /* Write Protect */
36#define X86_CR0_AM 0x00040000 /* Alignment Mask */
37#define X86_CR0_NW 0x20000000 /* Not Write-through */
38#define X86_CR0_CD 0x40000000 /* Cache Disable */
39#define X86_CR0_PG 0x80000000 /* Paging */
40
41/*
42 * Paging options in CR3
43 */
44#define X86_CR3_PWT 0x00000008 /* Page Write Through */
45#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
46
47/*
48 * Intel CPU features in CR4
49 */
50#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
51#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
52#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
53#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
54#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
55#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
56#define X86_CR4_MCE 0x00000040 /* Machine check enable */
57#define X86_CR4_PGE 0x00000080 /* enable global pages */
58#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
59#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
60#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
61#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
62
63/*
64 * x86-64 Task Priority Register, CR8
65 */
66#define X86_CR8_TPR 0x00000007 /* task priority register */
67
68/*
69 * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
70 */
71
72/*
73 * NSC/Cyrix CPU configuration register indexes
74 */
75#define CX86_PCR0 0x20
76#define CX86_GCR 0xb8
77#define CX86_CCR0 0xc0
78#define CX86_CCR1 0xc1
79#define CX86_CCR2 0xc2
80#define CX86_CCR3 0xc3
81#define CX86_CCR4 0xe8
82#define CX86_CCR5 0xe9
83#define CX86_CCR6 0xea
84#define CX86_CCR7 0xeb
85#define CX86_PCR1 0xf0
86#define CX86_DIR0 0xfe
87#define CX86_DIR1 0xff
88#define CX86_ARR_BASE 0xc4
89#define CX86_RCR_BASE 0xdc
90
91#endif /* __ASM_I386_PROCESSOR_FLAGS_H */
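
As a worked use of one of these constants: X86_EFLAGS_ID exists because CPUID support is detected by checking whether software can toggle the ID bit in EFLAGS. A minimal 32-bit GNU C sketch follows, assuming it is built with -m32 on x86; the has_cpuid name is illustrative.

  #include <stdio.h>

  #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */

  static int has_cpuid(void)
  {
      unsigned long f1, f2;

      __asm__ __volatile__(
          "pushfl\n\t"            /* save the original EFLAGS      */
          "pushfl\n\t"
          "popl %0\n\t"           /* f1 = EFLAGS                   */
          "movl %0, %1\n\t"       /* f2 = f1                       */
          "xorl %2, %0\n\t"       /* flip the ID bit               */
          "pushl %0\n\t"
          "popfl\n\t"             /* try to write it back          */
          "pushfl\n\t"
          "popl %0\n\t"           /* re-read EFLAGS                */
          "popfl"                 /* restore the original EFLAGS   */
          : "=&r" (f1), "=&r" (f2)
          : "ir" (X86_EFLAGS_ID));

      /* if the flip stuck, the CPU implements CPUID */
      return !!((f1 ^ f2) & X86_EFLAGS_ID);
  }

  int main(void)
  {
      printf("CPUID supported: %s\n", has_cpuid() ? "yes" : "no");
      return 0;
  }
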
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
deleted file mode 100644
index 3845fe72383e..000000000000
--- a/include/asm-i386/processor.h
+++ /dev/null
@@ -1,755 +0,0 @@
1/*
2 * include/asm-i386/processor.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 */
6
7#ifndef __ASM_I386_PROCESSOR_H
8#define __ASM_I386_PROCESSOR_H
9
10#include <asm/vm86.h>
11#include <asm/math_emu.h>
12#include <asm/segment.h>
13#include <asm/page.h>
14#include <asm/types.h>
15#include <asm/sigcontext.h>
16#include <asm/cpufeature.h>
17#include <asm/msr.h>
18#include <asm/system.h>
19#include <linux/cache.h>
20#include <linux/threads.h>
21#include <asm/percpu.h>
22#include <linux/cpumask.h>
23#include <linux/init.h>
24#include <asm/processor-flags.h>
25
26/* flag for disabling the tsc */
27extern int tsc_disable;
28
29struct desc_struct {
30 unsigned long a,b;
31};
32
33#define desc_empty(desc) \
34 (!((desc)->a | (desc)->b))
35
36#define desc_equal(desc1, desc2) \
37 (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
38/*
39 * Default implementation of macro that returns current
40 * instruction pointer ("program counter").
41 */
42#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
43
44/*
45 * CPU type and hardware bug flags. Kept separately for each CPU.
46 * Members of this structure are referenced in head.S, so think twice
47 * before touching them. [mj]
48 */
49
50struct cpuinfo_x86 {
51 __u8 x86; /* CPU family */
52 __u8 x86_vendor; /* CPU vendor */
53 __u8 x86_model;
54 __u8 x86_mask;
55 char wp_works_ok; /* It doesn't on 386's */
56 char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
57 char hard_math;
58 char rfu;
59 int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
60 unsigned long x86_capability[NCAPINTS];
61 char x86_vendor_id[16];
62 char x86_model_id[64];
63 int x86_cache_size; /* in KB - valid for CPUs which support this
64 call */
65 int x86_cache_alignment; /* In bytes */
66 char fdiv_bug;
67 char f00f_bug;
68 char coma_bug;
69 char pad0;
70 int x86_power;
71 unsigned long loops_per_jiffy;
72#ifdef CONFIG_SMP
73 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
74#endif
75 unsigned char x86_max_cores; /* cpuid returned max cores value */
76 unsigned char apicid;
77 unsigned short x86_clflush_size;
78#ifdef CONFIG_SMP
79 unsigned char booted_cores; /* number of cores as seen by OS */
80 __u8 phys_proc_id; /* Physical processor id. */
81 __u8 cpu_core_id; /* Core id */
82#endif
83} __attribute__((__aligned__(SMP_CACHE_BYTES)));
84
85#define X86_VENDOR_INTEL 0
86#define X86_VENDOR_CYRIX 1
87#define X86_VENDOR_AMD 2
88#define X86_VENDOR_UMC 3
89#define X86_VENDOR_NEXGEN 4
90#define X86_VENDOR_CENTAUR 5
91#define X86_VENDOR_TRANSMETA 7
92#define X86_VENDOR_NSC 8
93#define X86_VENDOR_NUM 9
94#define X86_VENDOR_UNKNOWN 0xff
95
96/*
97 * capabilities of CPUs
98 */
99
100extern struct cpuinfo_x86 boot_cpu_data;
101extern struct cpuinfo_x86 new_cpu_data;
102extern struct tss_struct doublefault_tss;
103DECLARE_PER_CPU(struct tss_struct, init_tss);
104
105#ifdef CONFIG_SMP
106extern struct cpuinfo_x86 cpu_data[];
107#define current_cpu_data cpu_data[smp_processor_id()]
108#else
109#define cpu_data (&boot_cpu_data)
110#define current_cpu_data boot_cpu_data
111#endif
112
113extern int cpu_llc_id[NR_CPUS];
114extern char ignore_fpu_irq;
115
116void __init cpu_detect(struct cpuinfo_x86 *c);
117
118extern void identify_boot_cpu(void);
119extern void identify_secondary_cpu(struct cpuinfo_x86 *);
120extern void print_cpu_info(struct cpuinfo_x86 *);
121extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
122extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
123extern unsigned short num_cache_leaves;
124
125#ifdef CONFIG_X86_HT
126extern void detect_ht(struct cpuinfo_x86 *c);
127#else
128static inline void detect_ht(struct cpuinfo_x86 *c) {}
129#endif
130
131static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
132 unsigned int *ecx, unsigned int *edx)
133{
134 /* ecx is often an input as well as an output. */
135 __asm__("cpuid"
136 : "=a" (*eax),
137 "=b" (*ebx),
138 "=c" (*ecx),
139 "=d" (*edx)
140 : "0" (*eax), "2" (*ecx));
141}
142
143#define load_cr3(pgdir) write_cr3(__pa(pgdir))
144
145/*
146 * Save the cr4 feature set we're using (i.e.
147 * Pentium 4MB enable and PPro Global page
148 * enable), so that any CPUs that boot up
149 * after us can get the correct flags.
150 */
151extern unsigned long mmu_cr4_features;
152
153static inline void set_in_cr4 (unsigned long mask)
154{
155 unsigned cr4;
156 mmu_cr4_features |= mask;
157 cr4 = read_cr4();
158 cr4 |= mask;
159 write_cr4(cr4);
160}
161
162static inline void clear_in_cr4 (unsigned long mask)
163{
164 unsigned cr4;
165 mmu_cr4_features &= ~mask;
166 cr4 = read_cr4();
167 cr4 &= ~mask;
168 write_cr4(cr4);
169}
170
171/* Stop speculative execution */
172static inline void sync_core(void)
173{
174 int tmp;
175 asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
176}
177
178static inline void __monitor(const void *eax, unsigned long ecx,
179 unsigned long edx)
180{
181 /* "monitor %eax,%ecx,%edx;" */
182 asm volatile(
183 ".byte 0x0f,0x01,0xc8;"
184 : :"a" (eax), "c" (ecx), "d"(edx));
185}
186
187static inline void __mwait(unsigned long eax, unsigned long ecx)
188{
189 /* "mwait %eax,%ecx;" */
190 asm volatile(
191 ".byte 0x0f,0x01,0xc9;"
192 : :"a" (eax), "c" (ecx));
193}
194
195extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
196
197/* from system description table in BIOS. Mostly for MCA use, but
198others may find it useful. */
199extern unsigned int machine_id;
200extern unsigned int machine_submodel_id;
201extern unsigned int BIOS_revision;
202extern unsigned int mca_pentium_flag;
203
204/* Boot loader type from the setup header */
205extern int bootloader_type;
206
207/*
208 * User space process size: 3GB (default).
209 */
210#define TASK_SIZE (PAGE_OFFSET)
211
212/* This decides where the kernel will search for a free chunk of vm
213 * space during mmap's.
214 */
215#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
216
217#define HAVE_ARCH_PICK_MMAP_LAYOUT
218
219extern void hard_disable_TSC(void);
220extern void disable_TSC(void);
221extern void hard_enable_TSC(void);
222
223/*
224 * Size of io_bitmap.
225 */
226#define IO_BITMAP_BITS 65536
227#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
228#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
229#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
230#define INVALID_IO_BITMAP_OFFSET 0x8000
231#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
232
233struct i387_fsave_struct {
234 long cwd;
235 long swd;
236 long twd;
237 long fip;
238 long fcs;
239 long foo;
240 long fos;
241 long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
242 long status; /* software status information */
243};
244
245struct i387_fxsave_struct {
246 unsigned short cwd;
247 unsigned short swd;
248 unsigned short twd;
249 unsigned short fop;
250 long fip;
251 long fcs;
252 long foo;
253 long fos;
254 long mxcsr;
255 long mxcsr_mask;
256 long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
257 long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
258 long padding[56];
259} __attribute__ ((aligned (16)));
260
261struct i387_soft_struct {
262 long cwd;
263 long swd;
264 long twd;
265 long fip;
266 long fcs;
267 long foo;
268 long fos;
269 long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
270 unsigned char ftop, changed, lookahead, no_update, rm, alimit;
271 struct info *info;
272 unsigned long entry_eip;
273};
274
275union i387_union {
276 struct i387_fsave_struct fsave;
277 struct i387_fxsave_struct fxsave;
278 struct i387_soft_struct soft;
279};
280
281typedef struct {
282 unsigned long seg;
283} mm_segment_t;
284
285struct thread_struct;
286
287/* This is the TSS defined by the hardware. */
288struct i386_hw_tss {
289 unsigned short back_link,__blh;
290 unsigned long esp0;
291 unsigned short ss0,__ss0h;
292 unsigned long esp1;
293 unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
294 unsigned long esp2;
295 unsigned short ss2,__ss2h;
296 unsigned long __cr3;
297 unsigned long eip;
298 unsigned long eflags;
299 unsigned long eax,ecx,edx,ebx;
300 unsigned long esp;
301 unsigned long ebp;
302 unsigned long esi;
303 unsigned long edi;
304 unsigned short es, __esh;
305 unsigned short cs, __csh;
306 unsigned short ss, __ssh;
307 unsigned short ds, __dsh;
308 unsigned short fs, __fsh;
309 unsigned short gs, __gsh;
310 unsigned short ldt, __ldth;
311 unsigned short trace, io_bitmap_base;
312} __attribute__((packed));
313
314struct tss_struct {
315 struct i386_hw_tss x86_tss;
316
317 /*
318 * The extra 1 is there because the CPU will access an
319 * additional byte beyond the end of the IO permission
320 * bitmap. The extra byte must be all 1 bits, and must
321 * be within the limit.
322 */
323 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
324 /*
325 * Cache the current maximum and the last task that used the bitmap:
326 */
327 unsigned long io_bitmap_max;
328 struct thread_struct *io_bitmap_owner;
329 /*
330 * pads the TSS to be cacheline-aligned (size is 0x100)
331 */
332 unsigned long __cacheline_filler[35];
333 /*
334 * .. and then another 0x100 bytes for emergency kernel stack
335 */
336 unsigned long stack[64];
337} __attribute__((packed));
338
339#define ARCH_MIN_TASKALIGN 16
340
341struct thread_struct {
342/* cached TLS descriptors. */
343 struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
344 unsigned long esp0;
345 unsigned long sysenter_cs;
346 unsigned long eip;
347 unsigned long esp;
348 unsigned long fs;
349 unsigned long gs;
350/* Hardware debugging registers */
351 unsigned long debugreg[8]; /* %%db0-7 debug registers */
352/* fault info */
353 unsigned long cr2, trap_no, error_code;
354/* floating point info */
355 union i387_union i387;
356/* virtual 86 mode info */
357 struct vm86_struct __user * vm86_info;
358 unsigned long screen_bitmap;
359 unsigned long v86flags, v86mask, saved_esp0;
360 unsigned int saved_fs, saved_gs;
361/* IO permissions */
362 unsigned long *io_bitmap_ptr;
363 unsigned long iopl;
364/* max allowed port in the bitmap, in bytes: */
365 unsigned long io_bitmap_max;
366};
367
368#define INIT_THREAD { \
369 .esp0 = sizeof(init_stack) + (long)&init_stack, \
370 .vm86_info = NULL, \
371 .sysenter_cs = __KERNEL_CS, \
372 .io_bitmap_ptr = NULL, \
373 .fs = __KERNEL_PERCPU, \
374}
375
376/*
377 * Note that the .io_bitmap member must be extra-big. This is because
378 * the CPU will access an additional byte beyond the end of the IO
379 * permission bitmap. The extra byte must be all 1 bits, and must
380 * be within the limit.
381 */
382#define INIT_TSS { \
383 .x86_tss = { \
384 .esp0 = sizeof(init_stack) + (long)&init_stack, \
385 .ss0 = __KERNEL_DS, \
386 .ss1 = __KERNEL_CS, \
387 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
388 }, \
389 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
390}
391
392#define start_thread(regs, new_eip, new_esp) do { \
393 __asm__("movl %0,%%gs": :"r" (0)); \
394 regs->xfs = 0; \
395 set_fs(USER_DS); \
396 regs->xds = __USER_DS; \
397 regs->xes = __USER_DS; \
398 regs->xss = __USER_DS; \
399 regs->xcs = __USER_CS; \
400 regs->eip = new_eip; \
401 regs->esp = new_esp; \
402} while (0)
403
404/* Forward declaration, a strange C thing */
405struct task_struct;
406struct mm_struct;
407
408/* Free all resources held by a thread. */
409extern void release_thread(struct task_struct *);
410
411/* Prepare to copy thread state - unlazy all lazy status */
412extern void prepare_to_copy(struct task_struct *tsk);
413
414/*
415 * create a kernel thread without removing it from tasklists
416 */
417extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
418
419extern unsigned long thread_saved_pc(struct task_struct *tsk);
420void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
421
422unsigned long get_wchan(struct task_struct *p);
423
424#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
425#define KSTK_TOP(info) \
426({ \
427 unsigned long *__ptr = (unsigned long *)(info); \
428 (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
429})
430
431/*
432 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
433 * This is necessary to guarantee that the entire "struct pt_regs"
434 * is accessable even if the CPU haven't stored the SS/ESP registers
435 * on the stack (interrupt gate does not save these registers
436 * when switching to the same priv ring).
437 * Therefore beware: accessing the xss/esp fields of the
438 * "struct pt_regs" is possible, but they may contain the
439 * completely wrong values.
440 */
441#define task_pt_regs(task) \
442({ \
443 struct pt_regs *__regs__; \
444 __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
445 __regs__ - 1; \
446})
447
448#define KSTK_EIP(task) (task_pt_regs(task)->eip)
449#define KSTK_ESP(task) (task_pt_regs(task)->esp)
450
451
452struct microcode_header {
453 unsigned int hdrver;
454 unsigned int rev;
455 unsigned int date;
456 unsigned int sig;
457 unsigned int cksum;
458 unsigned int ldrver;
459 unsigned int pf;
460 unsigned int datasize;
461 unsigned int totalsize;
462 unsigned int reserved[3];
463};
464
465struct microcode {
466 struct microcode_header hdr;
467 unsigned int bits[0];
468};
469
470typedef struct microcode microcode_t;
471typedef struct microcode_header microcode_header_t;
472
473/* microcode format is extended from prescott processors */
474struct extended_signature {
475 unsigned int sig;
476 unsigned int pf;
477 unsigned int cksum;
478};
479
480struct extended_sigtable {
481 unsigned int count;
482 unsigned int cksum;
483 unsigned int reserved[3];
484 struct extended_signature sigs[0];
485};
486
487/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
488static inline void rep_nop(void)
489{
490 __asm__ __volatile__("rep;nop": : :"memory");
491}
492
493#define cpu_relax() rep_nop()
494
495static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
496{
497 tss->x86_tss.esp0 = thread->esp0;
498 /* This can only happen when SEP is enabled, no need to test "SEP"arately */
499 if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
500 tss->x86_tss.ss1 = thread->sysenter_cs;
501 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
502 }
503}
504
505
506static inline unsigned long native_get_debugreg(int regno)
507{
508 unsigned long val = 0; /* Damn you, gcc! */
509
510 switch (regno) {
511 case 0:
512 asm("movl %%db0, %0" :"=r" (val)); break;
513 case 1:
514 asm("movl %%db1, %0" :"=r" (val)); break;
515 case 2:
516 asm("movl %%db2, %0" :"=r" (val)); break;
517 case 3:
518 asm("movl %%db3, %0" :"=r" (val)); break;
519 case 6:
520 asm("movl %%db6, %0" :"=r" (val)); break;
521 case 7:
522 asm("movl %%db7, %0" :"=r" (val)); break;
523 default:
524 BUG();
525 }
526 return val;
527}
528
529static inline void native_set_debugreg(int regno, unsigned long value)
530{
531 switch (regno) {
532 case 0:
533 asm("movl %0,%%db0" : /* no output */ :"r" (value));
534 break;
535 case 1:
536 asm("movl %0,%%db1" : /* no output */ :"r" (value));
537 break;
538 case 2:
539 asm("movl %0,%%db2" : /* no output */ :"r" (value));
540 break;
541 case 3:
542 asm("movl %0,%%db3" : /* no output */ :"r" (value));
543 break;
544 case 6:
545 asm("movl %0,%%db6" : /* no output */ :"r" (value));
546 break;
547 case 7:
548 asm("movl %0,%%db7" : /* no output */ :"r" (value));
549 break;
550 default:
551 BUG();
552 }
553}
554
555/*
556 * Set IOPL bits in EFLAGS from given mask
557 */
558static inline void native_set_iopl_mask(unsigned mask)
559{
560 unsigned int reg;
561 __asm__ __volatile__ ("pushfl;"
562 "popl %0;"
563 "andl %1, %0;"
564 "orl %2, %0;"
565 "pushl %0;"
566 "popfl"
567 : "=&r" (reg)
568 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
569}
570
571#ifdef CONFIG_PARAVIRT
572#include <asm/paravirt.h>
573#else
574#define paravirt_enabled() 0
575#define __cpuid native_cpuid
576
577static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
578{
579 native_load_esp0(tss, thread);
580}
581
582/*
583 * These special macros can be used to get or set a debugging register
584 */
585#define get_debugreg(var, register) \
586 (var) = native_get_debugreg(register)
587#define set_debugreg(value, register) \
588 native_set_debugreg(register, value)
589
590#define set_iopl_mask native_set_iopl_mask
591#endif /* CONFIG_PARAVIRT */
592
593/*
594 * Generic CPUID function
595 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
596 * resulting in stale register contents being returned.
597 */
598static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
599{
600 *eax = op;
601 *ecx = 0;
602 __cpuid(eax, ebx, ecx, edx);
603}
604
605/* Some CPUID calls want 'count' to be placed in ecx */
606static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
607 int *edx)
608{
609 *eax = op;
610 *ecx = count;
611 __cpuid(eax, ebx, ecx, edx);
612}
613
614/*
615 * CPUID functions returning a single datum
616 */
617static inline unsigned int cpuid_eax(unsigned int op)
618{
619 unsigned int eax, ebx, ecx, edx;
620
621 cpuid(op, &eax, &ebx, &ecx, &edx);
622 return eax;
623}
624static inline unsigned int cpuid_ebx(unsigned int op)
625{
626 unsigned int eax, ebx, ecx, edx;
627
628 cpuid(op, &eax, &ebx, &ecx, &edx);
629 return ebx;
630}
631static inline unsigned int cpuid_ecx(unsigned int op)
632{
633 unsigned int eax, ebx, ecx, edx;
634
635 cpuid(op, &eax, &ebx, &ecx, &edx);
636 return ecx;
637}
638static inline unsigned int cpuid_edx(unsigned int op)
639{
640 unsigned int eax, ebx, ecx, edx;
641
642 cpuid(op, &eax, &ebx, &ecx, &edx);
643 return edx;
644}
645
646/* generic versions from gas */
647#define GENERIC_NOP1 ".byte 0x90\n"
648#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
649#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
650#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
651#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
652#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
653#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
654#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
655
656/* Opteron nops */
657#define K8_NOP1 GENERIC_NOP1
658#define K8_NOP2 ".byte 0x66,0x90\n"
659#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
660#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
661#define K8_NOP5 K8_NOP3 K8_NOP2
662#define K8_NOP6 K8_NOP3 K8_NOP3
663#define K8_NOP7 K8_NOP4 K8_NOP3
664#define K8_NOP8 K8_NOP4 K8_NOP4
665
666/* K7 nops */
667/* uses eax dependencies (arbitrary choice) */
668#define K7_NOP1 GENERIC_NOP1
669#define K7_NOP2 ".byte 0x8b,0xc0\n"
670#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
671#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
672#define K7_NOP5 K7_NOP4 ASM_NOP1
673#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
674#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
675#define K7_NOP8 K7_NOP7 ASM_NOP1
676
677#ifdef CONFIG_MK8
678#define ASM_NOP1 K8_NOP1
679#define ASM_NOP2 K8_NOP2
680#define ASM_NOP3 K8_NOP3
681#define ASM_NOP4 K8_NOP4
682#define ASM_NOP5 K8_NOP5
683#define ASM_NOP6 K8_NOP6
684#define ASM_NOP7 K8_NOP7
685#define ASM_NOP8 K8_NOP8
686#elif defined(CONFIG_MK7)
687#define ASM_NOP1 K7_NOP1
688#define ASM_NOP2 K7_NOP2
689#define ASM_NOP3 K7_NOP3
690#define ASM_NOP4 K7_NOP4
691#define ASM_NOP5 K7_NOP5
692#define ASM_NOP6 K7_NOP6
693#define ASM_NOP7 K7_NOP7
694#define ASM_NOP8 K7_NOP8
695#else
696#define ASM_NOP1 GENERIC_NOP1
697#define ASM_NOP2 GENERIC_NOP2
698#define ASM_NOP3 GENERIC_NOP3
699#define ASM_NOP4 GENERIC_NOP4
700#define ASM_NOP5 GENERIC_NOP5
701#define ASM_NOP6 GENERIC_NOP6
702#define ASM_NOP7 GENERIC_NOP7
703#define ASM_NOP8 GENERIC_NOP8
704#endif
705
706#define ASM_NOP_MAX 8
707
708/* Prefetch instructions for Pentium III and AMD Athlon */
709/* It's not worth caring about 3dnow! prefetches for the K6
710 because they are microcoded there and very slow.
711 However, we don't currently do prefetches for pre-XP Athlons;
712 that should be fixed. */
713#define ARCH_HAS_PREFETCH
714static inline void prefetch(const void *x)
715{
716 alternative_input(ASM_NOP4,
717 "prefetchnta (%1)",
718 X86_FEATURE_XMM,
719 "r" (x));
720}
721
722#define ARCH_HAS_PREFETCH
723#define ARCH_HAS_PREFETCHW
724#define ARCH_HAS_SPINLOCK_PREFETCH
725
726/* 3dnow! prefetch to get an exclusive cache line. Useful for
727 spinlocks to avoid one state transition in the cache coherency protocol. */
728static inline void prefetchw(const void *x)
729{
730 alternative_input(ASM_NOP4,
731 "prefetchw (%1)",
732 X86_FEATURE_3DNOW,
733 "r" (x));
734}
735#define spin_lock_prefetch(x) prefetchw(x)
736
737extern void select_idle_routine(const struct cpuinfo_x86 *c);
738
739#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
740
741extern unsigned long boot_option_idle_override;
742extern void enable_sep_cpu(void);
743extern int sysenter_setup(void);
744
745/* Defined in head.S */
746extern struct Xgt_desc_struct early_gdt_descr;
747
748extern void cpu_set_gdt(int);
749extern void switch_to_new_gdt(void);
750extern void cpu_init(void);
751extern void init_gdt(int cpu);
752
753extern int force_mwait;
754
755#endif /* __ASM_I386_PROCESSOR_H */
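
The cpuid()/cpuid_eax() helpers above are thin wrappers around the CPUID instruction; leaf 0 returns the maximum supported basic leaf in EAX and a 12-byte vendor string in EBX, EDX, ECX, in that order. Below is a standalone user-space rendering of the same pattern, assuming a non-PIC build where EBX may be clobbered; it is a sketch, not the kernel's copy.

  #include <stdio.h>
  #include <string.h>

  static void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
                    unsigned int *ecx, unsigned int *edx)
  {
      *eax = op;
      *ecx = 0; /* clear ECX: some CPUs (Cyrix MII) leave it stale */
      __asm__("cpuid"
              : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
              : "0" (*eax), "2" (*ecx));
  }

  int main(void)
  {
      unsigned int eax, ebx, ecx, edx;
      char vendor[13];

      cpuid(0, &eax, &ebx, &ecx, &edx);
      memcpy(vendor + 0, &ebx, 4); /* vendor string order is EBX, EDX, ECX */
      memcpy(vendor + 4, &edx, 4);
      memcpy(vendor + 8, &ecx, 4);
      vendor[12] = '\0';

      printf("max basic leaf %u, vendor \"%s\"\n", eax, vendor);
      return 0;
  }
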
diff --git a/include/asm-i386/ptrace-abi.h b/include/asm-i386/ptrace-abi.h
deleted file mode 100644
index a44901817a26..000000000000
--- a/include/asm-i386/ptrace-abi.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef I386_PTRACE_ABI_H
2#define I386_PTRACE_ABI_H
3
4#define EBX 0
5#define ECX 1
6#define EDX 2
7#define ESI 3
8#define EDI 4
9#define EBP 5
10#define EAX 6
11#define DS 7
12#define ES 8
13#define FS 9
14#define GS 10
15#define ORIG_EAX 11
16#define EIP 12
17#define CS 13
18#define EFL 14
19#define UESP 15
20#define SS 16
21#define FRAME_SIZE 17
22
23/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
24#define PTRACE_GETREGS 12
25#define PTRACE_SETREGS 13
26#define PTRACE_GETFPREGS 14
27#define PTRACE_SETFPREGS 15
28#define PTRACE_GETFPXREGS 18
29#define PTRACE_SETFPXREGS 19
30
31#define PTRACE_OLDSETOPTIONS 21
32
33#define PTRACE_GET_THREAD_AREA 25
34#define PTRACE_SET_THREAD_AREA 26
35
36#define PTRACE_SYSEMU 31
37#define PTRACE_SYSEMU_SINGLESTEP 32
38
39#endif
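
The register indices above are word offsets into the tracee's USER area, so PTRACE_PEEKUSER addresses are formed as index * sizeof(long). A hypothetical tracer sketch for a 32-bit Linux tracee follows; error handling is elided and /bin/true is an arbitrary target.

  #include <stdio.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <unistd.h>

  #define EIP 12 /* word index from the table above */

  int main(void)
  {
      pid_t child = fork();

      if (child == 0) {
          ptrace(PTRACE_TRACEME, 0, NULL, NULL);
          execl("/bin/true", "true", (char *)NULL);
          _exit(1);
      }

      waitpid(child, NULL, 0); /* child stops on the exec SIGTRAP */
      long eip = ptrace(PTRACE_PEEKUSER, child,
                        (void *)(EIP * sizeof(long)), NULL);
      printf("tracee entry eip = %#lx\n", eip);
      ptrace(PTRACE_CONT, child, NULL, NULL);
      waitpid(child, NULL, 0);
      return 0;
  }
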
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
deleted file mode 100644
index 6002597b9e12..000000000000
--- a/include/asm-i386/ptrace.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef _I386_PTRACE_H
2#define _I386_PTRACE_H
3
4#include <asm/ptrace-abi.h>
5
6/* this struct defines the way the registers are stored on the
7 stack during a system call. */
8
9struct pt_regs {
10 long ebx;
11 long ecx;
12 long edx;
13 long esi;
14 long edi;
15 long ebp;
16 long eax;
17 int xds;
18 int xes;
19 int xfs;
20 /* int xgs; */
21 long orig_eax;
22 long eip;
23 int xcs;
24 long eflags;
25 long esp;
26 int xss;
27};
28
29#ifdef __KERNEL__
30
31#include <asm/vm86.h>
32#include <asm/segment.h>
33
34struct task_struct;
35extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
36
37/*
38 * user_mode_vm(regs) determines whether a register set came from user mode.
39 * This is true if V8086 mode was enabled OR if the register set was from
40 * protected mode with RPL-3 CS value. This tricky test checks that with
41 * one comparison. Many places in the kernel can bypass this full check
42 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
43 */
44static inline int user_mode(struct pt_regs *regs)
45{
46 return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
47}
48static inline int user_mode_vm(struct pt_regs *regs)
49{
50 return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
51}
52static inline int v8086_mode(struct pt_regs *regs)
53{
54 return (regs->eflags & VM_MASK);
55}
56
57#define instruction_pointer(regs) ((regs)->eip)
58#define regs_return_value(regs) ((regs)->eax)
59
60extern unsigned long profile_pc(struct pt_regs *regs);
61#endif /* __KERNEL__ */
62
63#endif
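
The one-comparison trick in user_mode_vm() above works because VM_MASK (0x00020000) OR-ed into the 2-bit RPL yields a value >= 3 whenever v8086 mode is on, while with the flag clear the test degenerates to RPL == 3. A small self-contained illustration with synthetic register values; demo_user_mode_vm is a hypothetical name.

  #include <stdio.h>

  #define SEGMENT_RPL_MASK 0x3
  #define USER_RPL         0x3
  #define VM_MASK          0x00020000

  static int demo_user_mode_vm(unsigned long xcs, unsigned long eflags)
  {
      return ((xcs & SEGMENT_RPL_MASK) | (eflags & VM_MASK)) >= USER_RPL;
  }

  int main(void)
  {
      printf("%d\n", demo_user_mode_vm(0x60, 0));       /* kernel CS, RPL 0 -> 0 */
      printf("%d\n", demo_user_mode_vm(0x73, 0));       /* user CS, RPL 3   -> 1 */
      printf("%d\n", demo_user_mode_vm(0x60, VM_MASK)); /* RPL 0 but v8086  -> 1 */
      return 0;
  }
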
diff --git a/include/asm-i386/reboot.h b/include/asm-i386/reboot.h
deleted file mode 100644
index e9e3ffc22c07..000000000000
--- a/include/asm-i386/reboot.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _ASM_REBOOT_H
2#define _ASM_REBOOT_H
3
4struct pt_regs;
5
6struct machine_ops
7{
8 void (*restart)(char *cmd);
9 void (*halt)(void);
10 void (*power_off)(void);
11 void (*shutdown)(void);
12 void (*crash_shutdown)(struct pt_regs *);
13 void (*emergency_restart)(void);
14};
15
16extern struct machine_ops machine_ops;
17
18void machine_real_restart(unsigned char *code, int length);
19
20#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-i386/reboot_fixups.h b/include/asm-i386/reboot_fixups.h
deleted file mode 100644
index 0cb7d87c2b68..000000000000
--- a/include/asm-i386/reboot_fixups.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _LINUX_REBOOT_FIXUPS_H
2#define _LINUX_REBOOT_FIXUPS_H
3
4extern void mach_reboot_fixups(void);
5
6#endif /* _LINUX_REBOOT_FIXUPS_H */
diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h
deleted file mode 100644
index 618feb98f9f5..000000000000
--- a/include/asm-i386/required-features.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1
3
4/* Define the minimum CPUID feature set for the kernel. These bits are
5 checked really early to actually display a visible error message before
6 the kernel dies. Make sure to assign features to the proper mask!
7
8 Some requirements that are not in CPUID yet are also covered by
9 CONFIG_X86_MINIMUM_CPU_FAMILY, which is checked too.
10
11 The real information is in arch/i386/Kconfig.cpu, this just converts
12 the CONFIGs into a bitmask */
13
14#ifndef CONFIG_MATH_EMULATION
15# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
16#else
17# define NEED_FPU 0
18#endif
19
20#ifdef CONFIG_X86_PAE
21# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
22#else
23# define NEED_PAE 0
24#endif
25
26#ifdef CONFIG_X86_CMOV
27# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
28#else
29# define NEED_CMOV 0
30#endif
31
32#ifdef CONFIG_X86_PAE
33# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
34#else
35# define NEED_CX8 0
36#endif
37
38#define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8)
39
40#ifdef CONFIG_X86_USE_3DNOW
41# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
42#else
43# define NEED_3DNOW 0
44#endif
45
46#define REQUIRED_MASK1 (NEED_3DNOW)
47
48#define REQUIRED_MASK2 0
49#define REQUIRED_MASK3 0
50#define REQUIRED_MASK4 0
51#define REQUIRED_MASK5 0
52#define REQUIRED_MASK6 0
53#define REQUIRED_MASK7 0
54
55#endif
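
Each X86_FEATURE_* value encodes (capability word * 32 + bit), so the `& 31` above recovers the bit position inside the 32-bit word its mask applies to. A sketch of that arithmetic, with the two feature values copied from the 2.6-era cpufeature.h:

  #include <stdio.h>

  /* (word * 32 + bit) encoding, values from the 2.6-era cpufeature.h */
  #define X86_FEATURE_PAE (0 * 32 + 6)
  #define X86_FEATURE_CX8 (0 * 32 + 8)

  #define NEED_PAE (1 << (X86_FEATURE_PAE & 31))
  #define NEED_CX8 (1 << (X86_FEATURE_CX8 & 31))

  int main(void)
  {
      printf("NEED_PAE       = 0x%08x\n", NEED_PAE);            /* 0x00000040 */
      printf("NEED_CX8       = 0x%08x\n", NEED_CX8);            /* 0x00000100 */
      printf("REQUIRED_MASK0 = 0x%08x\n", NEED_PAE | NEED_CX8); /* PAE kernel */
      return 0;
  }
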
diff --git a/include/asm-i386/resource.h b/include/asm-i386/resource.h
deleted file mode 100644
index 6c1ea37c7718..000000000000
--- a/include/asm-i386/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_RESOURCE_H
2#define _I386_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif
diff --git a/include/asm-i386/resume-trace.h b/include/asm-i386/resume-trace.h
deleted file mode 100644
index ec9cfd656230..000000000000
--- a/include/asm-i386/resume-trace.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#define TRACE_RESUME(user) do { \
2 if (pm_trace_enabled) { \
3 void *tracedata; \
4 asm volatile("movl $1f,%0\n" \
5 ".section .tracedata,\"a\"\n" \
6 "1:\t.word %c1\n" \
7 "\t.long %c2\n" \
8 ".previous" \
9 :"=r" (tracedata) \
10 : "i" (__LINE__), "i" (__FILE__)); \
11 generate_resume_trace(tracedata, user); \
12 } \
13} while (0)
diff --git a/include/asm-i386/rtc.h b/include/asm-i386/rtc.h
deleted file mode 100644
index ffd02109a0e5..000000000000
--- a/include/asm-i386/rtc.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _I386_RTC_H
2#define _I386_RTC_H
3
4/*
5 * x86 uses the default access methods for the RTC.
6 */
7
8#include <asm-generic/rtc.h>
9
10#endif
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
deleted file mode 100644
index c3e5db32fa48..000000000000
--- a/include/asm-i386/rwlock.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/* include/asm-i386/rwlock.h
2 *
3 * Helpers used by both rw spinlocks and rw semaphores.
4 *
5 * Based in part on code from semaphore.h and
6 * spinlock.h Copyright 1996 Linus Torvalds.
7 *
8 * Copyright 1999 Red Hat, Inc.
9 *
10 * Written by Benjamin LaHaise.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17#ifndef _ASM_I386_RWLOCK_H
18#define _ASM_I386_RWLOCK_H
19
20#define RW_LOCK_BIAS 0x01000000
21#define RW_LOCK_BIAS_STR "0x01000000"
22
23/* Code is in asm-i386/spinlock.h */
24
25#endif
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
deleted file mode 100644
index 041906f3c6df..000000000000
--- a/include/asm-i386/rwsem.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
2 *
3 * Written by David Howells (dhowells@redhat.com).
4 *
5 * Derived from asm-i386/semaphore.h
6 *
7 *
8 * The MSW of the count is the negated number of active writers and waiting
9 * lockers, and the LSW is the total number of active locks
10 *
11 * The lock count is initialized to 0 (no active and no waiting lockers).
12 *
13 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
14 * uncontended lock. This can be determined because XADD returns the old value.
15 * Readers increment by 1 and see a positive value when uncontended, negative
16 * if there are writers (and possibly readers) waiting (in which case it goes to
17 * sleep).
18 *
19 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
20 * be extended to 65534 by manually checking the whole MSW rather than relying
21 * on the S flag.
22 *
23 * The value of ACTIVE_BIAS supports up to 65535 active processes.
24 *
25 * This should be totally fair - if anything is waiting, a process that wants a
26 * lock will go to the back of the queue. When the currently active lock is
27 * released, if there's a writer at the front of the queue, then that and only
28 * that will be woken up; if there's a bunch of consecutive readers at the
29 * front, then they'll all be woken up, but no other readers will be.
30 */
31
32#ifndef _I386_RWSEM_H
33#define _I386_RWSEM_H
34
35#ifndef _LINUX_RWSEM_H
36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
37#endif
38
39#ifdef __KERNEL__
40
41#include <linux/list.h>
42#include <linux/spinlock.h>
43#include <linux/lockdep.h>
44
45struct rwsem_waiter;
46
47extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
48extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
49extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
50extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem));
51
52/*
53 * the semaphore definition
54 */
55struct rw_semaphore {
56 signed long count;
57#define RWSEM_UNLOCKED_VALUE 0x00000000
58#define RWSEM_ACTIVE_BIAS 0x00000001
59#define RWSEM_ACTIVE_MASK 0x0000ffff
60#define RWSEM_WAITING_BIAS (-0x00010000)
61#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
62#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
63 spinlock_t wait_lock;
64 struct list_head wait_list;
65#ifdef CONFIG_DEBUG_LOCK_ALLOC
66 struct lockdep_map dep_map;
67#endif
68};
69
70#ifdef CONFIG_DEBUG_LOCK_ALLOC
71# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
72#else
73# define __RWSEM_DEP_MAP_INIT(lockname)
74#endif
75
76
77#define __RWSEM_INITIALIZER(name) \
78{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
79 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
80
81#define DECLARE_RWSEM(name) \
82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
83
84extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
85 struct lock_class_key *key);
86
87#define init_rwsem(sem) \
88do { \
89 static struct lock_class_key __key; \
90 \
91 __init_rwsem((sem), #sem, &__key); \
92} while (0)
93
94/*
95 * lock for reading
96 */
97static inline void __down_read(struct rw_semaphore *sem)
98{
99 __asm__ __volatile__(
100 "# beginning down_read\n\t"
101LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
102 " jns 1f\n"
103 " call call_rwsem_down_read_failed\n"
104 "1:\n\t"
105 "# ending down_read\n\t"
106 : "+m" (sem->count)
107 : "a" (sem)
108 : "memory", "cc");
109}
110
111/*
112 * trylock for reading -- returns 1 if successful, 0 if contention
113 */
114static inline int __down_read_trylock(struct rw_semaphore *sem)
115{
116 __s32 result, tmp;
117 __asm__ __volatile__(
118 "# beginning __down_read_trylock\n\t"
119 " movl %0,%1\n\t"
120 "1:\n\t"
121 " movl %1,%2\n\t"
122 " addl %3,%2\n\t"
123 " jle 2f\n\t"
124LOCK_PREFIX " cmpxchgl %2,%0\n\t"
125 " jnz 1b\n\t"
126 "2:\n\t"
127 "# ending __down_read_trylock\n\t"
128 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
129 : "i" (RWSEM_ACTIVE_READ_BIAS)
130 : "memory", "cc");
131 return result>=0 ? 1 : 0;
132}
133
134/*
135 * lock for writing
136 */
137static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
138{
139 int tmp;
140
141 tmp = RWSEM_ACTIVE_WRITE_BIAS;
142 __asm__ __volatile__(
143 "# beginning down_write\n\t"
144LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
145 " testl %%edx,%%edx\n\t" /* was the count 0 before? */
146 " jz 1f\n"
147 " call call_rwsem_down_write_failed\n"
148 "1:\n"
149 "# ending down_write"
150 : "+m" (sem->count), "=d" (tmp)
151 : "a" (sem), "1" (tmp)
152 : "memory", "cc");
153}
154
155static inline void __down_write(struct rw_semaphore *sem)
156{
157 __down_write_nested(sem, 0);
158}
159
160/*
161 * trylock for writing -- returns 1 if successful, 0 if contention
162 */
163static inline int __down_write_trylock(struct rw_semaphore *sem)
164{
165 signed long ret = cmpxchg(&sem->count,
166 RWSEM_UNLOCKED_VALUE,
167 RWSEM_ACTIVE_WRITE_BIAS);
168 if (ret == RWSEM_UNLOCKED_VALUE)
169 return 1;
170 return 0;
171}
172
173/*
174 * unlock after reading
175 */
176static inline void __up_read(struct rw_semaphore *sem)
177{
178 __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
179 __asm__ __volatile__(
180 "# beginning __up_read\n\t"
181LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
182 " jns 1f\n\t"
183 " call call_rwsem_wake\n"
184 "1:\n"
185 "# ending __up_read\n"
186 : "+m" (sem->count), "=d" (tmp)
187 : "a" (sem), "1" (tmp)
188 : "memory", "cc");
189}
190
191/*
192 * unlock after writing
193 */
194static inline void __up_write(struct rw_semaphore *sem)
195{
196 __asm__ __volatile__(
197 "# beginning __up_write\n\t"
198 " movl %2,%%edx\n\t"
199LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
200 " jz 1f\n"
201 " call call_rwsem_wake\n"
202 "1:\n\t"
203 "# ending __up_write\n"
204 : "+m" (sem->count)
205 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
206 : "memory", "cc", "edx");
207}
208
209/*
210 * downgrade write lock to read lock
211 */
212static inline void __downgrade_write(struct rw_semaphore *sem)
213{
214 __asm__ __volatile__(
215 "# beginning __downgrade_write\n\t"
216LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
217 " jns 1f\n\t"
218 " call call_rwsem_downgrade_wake\n"
219 "1:\n\t"
220 "# ending __downgrade_write\n"
221 : "+m" (sem->count)
222 : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
223 : "memory", "cc");
224}
225
226/*
227 * implement atomic add functionality
228 */
229static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
230{
231 __asm__ __volatile__(
232LOCK_PREFIX "addl %1,%0"
233 : "+m" (sem->count)
234 : "ir" (delta));
235}
236
237/*
238 * implement exchange and add functionality
239 */
240static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
241{
242 int tmp = delta;
243
244 __asm__ __volatile__(
245LOCK_PREFIX "xadd %0,%1"
246 : "+r" (tmp), "+m" (sem->count)
247 : : "memory");
248
249 return tmp+delta;
250}
251
252static inline int rwsem_is_locked(struct rw_semaphore *sem)
253{
254 return (sem->count != 0);
255}
256
257#endif /* __KERNEL__ */
258#endif /* _I386_RWSEM_H */
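
To make the count encoding documented at the top of this file concrete: readers xadd +1, writers xadd RWSEM_ACTIVE_WRITE_BIAS (0xffff0001 as a 32-bit value), and the sign of the result encodes contention. The sketch below is plain, non-atomic C that only traces the transitions; int stands in for the 32-bit i386 long and no locking is modeled.

  #include <stdio.h>

  #define RWSEM_ACTIVE_BIAS       0x00000001
  #define RWSEM_WAITING_BIAS      (-0x00010000)
  #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

  int main(void)
  {
      int count = 0; /* RWSEM_UNLOCKED_VALUE */

      count += RWSEM_ACTIVE_BIAS;       /* reader xadds +1 */
      printf("one reader:      0x%08x (positive: uncontended)\n", (unsigned)count);

      count -= RWSEM_ACTIVE_BIAS;       /* reader releases */
      count += RWSEM_ACTIVE_WRITE_BIAS; /* writer takes the lock */
      printf("one writer:      0x%08x\n", (unsigned)count); /* 0xffff0001 */

      count += RWSEM_ACTIVE_BIAS;       /* a reader arrives and must sleep */
      printf("writer + waiter: 0x%08x (negative: contended)\n", (unsigned)count);
      return 0;
  }
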
diff --git a/include/asm-i386/scatterlist.h b/include/asm-i386/scatterlist.h
deleted file mode 100644
index d7e45a8f1aae..000000000000
--- a/include/asm-i386/scatterlist.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef _I386_SCATTERLIST_H
2#define _I386_SCATTERLIST_H
3
4#include <asm/types.h>
5
6struct scatterlist {
7 struct page *page;
8 unsigned int offset;
9 dma_addr_t dma_address;
10 unsigned int length;
11};
12
13/* These macros should be used after a pci_map_sg call has been done
14 * to get bus addresses of each of the SG entries and their lengths.
15 * You should only work with the number of sg entries pci_map_sg
16 * returns.
17 */
18#define sg_dma_address(sg) ((sg)->dma_address)
19#define sg_dma_len(sg) ((sg)->length)
20
21#define ISA_DMA_THRESHOLD (0x00ffffff)
22
23#endif /* !(_I386_SCATTERLIST_H) */
diff --git a/include/asm-i386/seccomp.h b/include/asm-i386/seccomp.h
deleted file mode 100644
index 18da19e89bff..000000000000
--- a/include/asm-i386/seccomp.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef _ASM_SECCOMP_H
2
3#include <linux/thread_info.h>
4
5#ifdef TIF_32BIT
6#error "unexpected TIF_32BIT on i386"
7#endif
8
9#include <linux/unistd.h>
10
11#define __NR_seccomp_read __NR_read
12#define __NR_seccomp_write __NR_write
13#define __NR_seccomp_exit __NR_exit
14#define __NR_seccomp_sigreturn __NR_sigreturn
15
16#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-i386/sections.h b/include/asm-i386/sections.h
deleted file mode 100644
index 2dcbb92918b2..000000000000
--- a/include/asm-i386/sections.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _I386_SECTIONS_H
2#define _I386_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7#endif
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
deleted file mode 100644
index 597a47c2515f..000000000000
--- a/include/asm-i386/segment.h
+++ /dev/null
@@ -1,148 +0,0 @@
1#ifndef _ASM_SEGMENT_H
2#define _ASM_SEGMENT_H
3
4/*
5 * The layout of the per-CPU GDT under Linux:
6 *
7 * 0 - null
8 * 1 - reserved
9 * 2 - reserved
10 * 3 - reserved
11 *
12 * 4 - unused <==== new cacheline
13 * 5 - unused
14 *
15 * ------- start of TLS (Thread-Local Storage) segments:
16 *
17 * 6 - TLS segment #1 [ glibc's TLS segment ]
18 * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
19 * 8 - TLS segment #3
20 * 9 - reserved
21 * 10 - reserved
22 * 11 - reserved
23 *
24 * ------- start of kernel segments:
25 *
26 * 12 - kernel code segment <==== new cacheline
27 * 13 - kernel data segment
28 * 14 - default user CS
29 * 15 - default user DS
30 * 16 - TSS
31 * 17 - LDT
32 * 18 - PNPBIOS support (16->32 gate)
33 * 19 - PNPBIOS support
34 * 20 - PNPBIOS support
35 * 21 - PNPBIOS support
36 * 22 - PNPBIOS support
37 * 23 - APM BIOS support
38 * 24 - APM BIOS support
39 * 25 - APM BIOS support
40 *
41 * 26 - ESPFIX small SS
42 * 27 - per-cpu [ offset to per-cpu data area ]
43 * 28 - unused
44 * 29 - unused
45 * 30 - unused
46 * 31 - TSS for double fault handler
47 */
48#define GDT_ENTRY_TLS_ENTRIES 3
49#define GDT_ENTRY_TLS_MIN 6
50#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
51
52#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
53
54#define GDT_ENTRY_DEFAULT_USER_CS 14
55#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
56
57#define GDT_ENTRY_DEFAULT_USER_DS 15
58#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
59
60#define GDT_ENTRY_KERNEL_BASE 12
61
62#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
63#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
64
65#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
66#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
67
68#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
69#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
70
71#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
72#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
73
74#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
75#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
76
77#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
78#ifdef CONFIG_SMP
79#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
80#else
81#define __KERNEL_PERCPU 0
82#endif
83
84#define GDT_ENTRY_DOUBLEFAULT_TSS 31
85
86/*
87 * The GDT has 32 entries
88 */
89#define GDT_ENTRIES 32
90#define GDT_SIZE (GDT_ENTRIES * 8)
91
92/* Simple and small GDT entries for booting only */
93
94#define GDT_ENTRY_BOOT_CS 2
95#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
96
97#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
98#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
99
100/* The PnP BIOS entries in the GDT */
101#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
102#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
103#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
104#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
105#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
106
107/* The PnP BIOS selectors */
108#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
109#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
110#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
111#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
112#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
113
114/*
115 * The interrupt descriptor table has room for 256 entries,
116 * while the size of the global descriptor table depends on
117 * the number of tasks we can have.
118 */
119#define IDT_ENTRIES 256
120
121/* Bottom two bits of selector give the ring privilege level */
122#define SEGMENT_RPL_MASK 0x3
123/* Bit 2 is table indicator (LDT/GDT) */
124#define SEGMENT_TI_MASK 0x4
125
126/* User mode is privilege level 3 */
127#define USER_RPL 0x3
128/* LDT segment has TI set, GDT has it cleared */
129#define SEGMENT_LDT 0x4
130#define SEGMENT_GDT 0x0
131
132#ifndef CONFIG_PARAVIRT
133#define get_kernel_rpl() 0
134#endif
135/*
136 * Matching rules for certain types of segments.
137 */
138
139/* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */
140#define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8)
141
142/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
143#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
144
145/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
146#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
147
148#endif
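
The `* 8 + 3` arithmetic above reflects the x86 selector format: a selector is (descriptor index << 3) | TI | RPL, which is why the user-mode selectors add 3 and why __KERNEL_CS works out to 0x60 while __USER_CS is 0x73. A small sketch recomputing those values; the selector() helper is illustrative.

  #include <stdio.h>

  #define GDT_ENTRY_KERNEL_CS       12
  #define GDT_ENTRY_DEFAULT_USER_CS 14

  static unsigned int selector(unsigned int index, unsigned int ti, unsigned int rpl)
  {
      return (index << 3) | (ti << 2) | rpl; /* index, table indicator, privilege */
  }

  int main(void)
  {
      printf("__KERNEL_CS = 0x%02x\n", selector(GDT_ENTRY_KERNEL_CS, 0, 0));       /* 0x60 */
      printf("__USER_CS   = 0x%02x\n", selector(GDT_ENTRY_DEFAULT_USER_CS, 0, 3)); /* 0x73 */
      return 0;
  }
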
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
deleted file mode 100644
index 4e34a468c383..000000000000
--- a/include/asm-i386/semaphore.h
+++ /dev/null
@@ -1,176 +0,0 @@
1#ifndef _I386_SEMAPHORE_H
2#define _I386_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * (C) Copyright 1996 Linus Torvalds
12 *
13 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
14 * the original code and to make semaphore waits
15 * interruptible so that processes waiting on
16 * semaphores can be killed.
17 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
18 * functions in asm/semaphore-helper.h while fixing a
19 * potential and subtle race discovered by Ulrich Schmid
20 * in down_interruptible(). Since I started to play here I
21 * also implemented the `trylock' semaphore operation.
22 * 1999-07-02 Artur Skawina <skawina@geocities.com>
23 * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
24 * do this). Changed calling sequences from push/jmp to
25 * traditional call/ret.
26 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
27 * Some hacks to ensure compatibility with recent
28 * GCC snapshots, to avoid stack corruption when compiling
29 * with -fomit-frame-pointer. It's not clear whether this
30 * will be fixed in GCC, as our previous implementation was a
31 * bit dubious.
32 *
33 * If you would like to see an analysis of this implementation, please
34 * ftp to gcom.com and download the file
35 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
36 *
37 */
38
39#include <asm/system.h>
40#include <asm/atomic.h>
41#include <linux/wait.h>
42#include <linux/rwsem.h>
43
44struct semaphore {
45 atomic_t count;
46 int sleepers;
47 wait_queue_head_t wait;
48};
49
50
51#define __SEMAPHORE_INITIALIZER(name, n) \
52{ \
53 .count = ATOMIC_INIT(n), \
54 .sleepers = 0, \
55 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
56}
57
58#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
59 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
60
61#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
62#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
63
64static inline void sema_init (struct semaphore *sem, int val)
65{
66/*
67 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
68 *
69 * i'd rather use the more flexible initialization above, but sadly
70 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
71 */
72 atomic_set(&sem->count, val);
73 sem->sleepers = 0;
74 init_waitqueue_head(&sem->wait);
75}
76
77static inline void init_MUTEX (struct semaphore *sem)
78{
79 sema_init(sem, 1);
80}
81
82static inline void init_MUTEX_LOCKED (struct semaphore *sem)
83{
84 sema_init(sem, 0);
85}
86
87fastcall void __down_failed(void /* special register calling convention */);
88fastcall int __down_failed_interruptible(void /* params in registers */);
89fastcall int __down_failed_trylock(void /* params in registers */);
90fastcall void __up_wakeup(void /* special register calling convention */);
91
92/*
93 * This is ugly, but we want the default case to fall through.
94 * "__down_failed" is a special asm handler that calls the C
95 * routine that actually waits. See arch/i386/kernel/semaphore.c
96 */
97static inline void down(struct semaphore * sem)
98{
99 might_sleep();
100 __asm__ __volatile__(
101 "# atomic down operation\n\t"
102 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
103 "jns 2f\n"
104 "\tlea %0,%%eax\n\t"
105 "call __down_failed\n"
106 "2:"
107 :"+m" (sem->count)
108 :
109 :"memory","ax");
110}
111
112/*
113 * Interruptibly try to acquire a semaphore. If we obtain
114 * it, return zero. If we were interrupted, return -EINTR.
115 */
116static inline int down_interruptible(struct semaphore * sem)
117{
118 int result;
119
120 might_sleep();
121 __asm__ __volatile__(
122 "# atomic interruptible down operation\n\t"
123 "xorl %0,%0\n\t"
124 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
125 "jns 2f\n\t"
126 "lea %1,%%eax\n\t"
127 "call __down_failed_interruptible\n"
128 "2:"
129 :"=&a" (result), "+m" (sem->count)
130 :
131 :"memory");
132 return result;
133}
134
135/*
136 * Attempt to down() a semaphore without blocking.
137 * Returns zero if we acquired it.
138 */
139static inline int down_trylock(struct semaphore * sem)
140{
141 int result;
142
143 __asm__ __volatile__(
144 "# atomic interruptible down operation\n\t"
145 "xorl %0,%0\n\t"
146 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
147 "jns 2f\n\t"
148 "lea %1,%%eax\n\t"
149 "call __down_failed_trylock\n\t"
150 "2:\n"
151 :"=&a" (result), "+m" (sem->count)
152 :
153 :"memory");
154 return result;
155}
156
157/*
158 * Note! This is subtle. We jump to wake people up only if
159 * the semaphore was negative (== somebody was waiting on it).
160 */
161static inline void up(struct semaphore * sem)
162{
163 __asm__ __volatile__(
164 "# atomic up operation\n\t"
165 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
166 "jg 1f\n\t"
167 "lea %0,%%eax\n\t"
168 "call __up_wakeup\n"
169 "1:"
170 :"+m" (sem->count)
171 :
172 :"memory","ax");
173}
174
175#endif
176#endif
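
The down()/up() fast paths above hinge on the sign of a LOCK-prefixed decrement or increment: a negative result on down() means the caller must sleep, and a non-positive result on up() means a waiter must be woken. The user-space analogue below models just that encoding with C11 atomics; the demo_* names are assumptions, and the slow paths merely print instead of sleeping or waking.

  #include <stdatomic.h>
  #include <stdio.h>

  struct demo_sem { atomic_int count; };

  static void demo_down(struct demo_sem *s)
  {
      /* LOCK decl; jns -- a negative result means we must wait */
      if (atomic_fetch_sub(&s->count, 1) - 1 < 0)
          puts("down: contended, would call __down_failed");
      else
          puts("down: acquired on the fast path");
  }

  static void demo_up(struct demo_sem *s)
  {
      /* LOCK incl; jg -- a result <= 0 means a waiter must be woken */
      if (atomic_fetch_add(&s->count, 1) + 1 <= 0)
          puts("up: waiter present, would call __up_wakeup");
      else
          puts("up: released on the fast path");
  }

  int main(void)
  {
      struct demo_sem s = { 1 }; /* DECLARE_MUTEX analogue */

      demo_down(&s); /* fast path */
      demo_down(&s); /* contended */
      demo_up(&s);   /* "wakes" the waiter */
      demo_up(&s);   /* plain release */
      return 0;
  }
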
diff --git a/include/asm-i386/sembuf.h b/include/asm-i386/sembuf.h
deleted file mode 100644
index 323835166c14..000000000000
--- a/include/asm-i386/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _I386_SEMBUF_H
2#define _I386_SEMBUF_H
3
4/*
5 * The semid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16 __kernel_time_t sem_otime; /* last semop time */
17 unsigned long __unused1;
18 __kernel_time_t sem_ctime; /* last change time */
19 unsigned long __unused2;
20 unsigned long sem_nsems; /* no. of semaphores in array */
21 unsigned long __unused3;
22 unsigned long __unused4;
23};
24
25#endif /* _I386_SEMBUF_H */
diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h
deleted file mode 100644
index bd67480ca109..000000000000
--- a/include/asm-i386/serial.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * include/asm-i386/serial.h
3 */
4
5
6/*
7 * This assumes you have a 1.8432 MHz clock for your UART.
8 *
9 * It'd be nice if someone built a serial card with a 24.576 MHz
10 * clock, since the 16550A is capable of handling a top speed of 1.5
11 * megabits/second; but this requires the faster clock.
12 */
13#define BASE_BAUD ( 1843200 / 16 )
14
15/* Standard COM flags (except for COM4, because of the 8514 problem) */
16#ifdef CONFIG_SERIAL_DETECT_IRQ
17#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
18#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
19#else
20#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
21#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
22#endif
23
24#define SERIAL_PORT_DFNS \
25 /* UART CLK PORT IRQ FLAGS */ \
26 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
27 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
28 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
29 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
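
The arithmetic behind BASE_BAUD: with the standard 1.8432 MHz UART clock and the 16550's divide-by-16 sampling, 1843200 / 16 = 115200, the familiar top standard rate; the programmed divisor latch divides that further, so for example a divisor of 12 gives 115200 / 12 = 9600 baud.
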
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
deleted file mode 100644
index 7862fe858a9e..000000000000
--- a/include/asm-i386/setup.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * Just a placeholder. We don't want to have to test x86 before
3 * we include stuff.
4 */
5
6#ifndef _i386_SETUP_H
7#define _i386_SETUP_H
8
9#define COMMAND_LINE_SIZE 2048
10
11#ifdef __KERNEL__
12#include <linux/pfn.h>
13
14/*
15 * Reserved space for vmalloc and iomap - defined in asm/page.h
16 */
17#define MAXMEM_PFN PFN_DOWN(MAXMEM)
18#define MAX_NONPAE_PFN (1 << 20)
19
20#define PARAM_SIZE 4096
21
22#define OLD_CL_MAGIC_ADDR 0x90020
23#define OLD_CL_MAGIC 0xA33F
24#define OLD_CL_BASE_ADDR 0x90000
25#define OLD_CL_OFFSET 0x90022
26#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
27
28#ifndef __ASSEMBLY__
29
30#include <asm/bootparam.h>
31
32/*
33 * This is set up by the setup-routine at boot-time
34 */
35extern struct boot_params boot_params;
36
37#define PARAM ((char *)&boot_params)
38#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
39#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
40#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
41#define E820_MAP_NR (*(char*) (PARAM+E820NR))
42#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
43#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
44#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
45#define SYS_DESC_TABLE (*(struct sys_desc_table *)(PARAM+0xa0))
46#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
47#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
48#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
49#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
50#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
51#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
52#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
53#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
54#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
55#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
56#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
57#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
58#define INITRD_START (*(unsigned long *) (PARAM+0x218))
59#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
60#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
61#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
62#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
63#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
64#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
65
66/*
67 * Do NOT EVER look at the BIOS memory size location.
68 * It does not work on many machines.
69 */
70#define LOWMEMSIZE() (0x9f000)
71
72struct e820entry;
73
74char * __init machine_specific_memory_setup(void);
75char *memory_setup(void);
76
77int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
78int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
79void __init add_memory_region(unsigned long long start,
80 unsigned long long size, int type);
81
82extern unsigned long init_pg_tables_end;
83
84#ifndef CONFIG_PARAVIRT
85#define paravirt_post_allocator_init() do {} while (0)
86#endif
87
88#endif /* __ASSEMBLY__ */
89
90#endif /* __KERNEL__ */
91
92#endif /* _i386_SETUP_H */
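
The PARAM macros above all follow one idiom: reinterpret a fixed byte offset
inside the boot-parameter page as a typed lvalue. A self-contained sketch of
that idiom (the buffer and DEMO_* names are illustrative, not kernel API):

#include <stdio.h>

static unsigned char zero_page[4096]; /* stand-in for the real boot_params */

#define DEMO_PARAM ((char *)zero_page)
#define DEMO_LOADER_TYPE  (*(unsigned char *) (DEMO_PARAM + 0x210))
#define DEMO_INITRD_START (*(unsigned long *) (DEMO_PARAM + 0x218))

int main(void)
{
	/* pretend the boot loader filled these slots in */
	DEMO_LOADER_TYPE = 0x72;
	DEMO_INITRD_START = 0x1000000UL;
	printf("loader=0x%x initrd=0x%lx\n",
	       (unsigned int)DEMO_LOADER_TYPE, DEMO_INITRD_START);
	return 0;
}
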
diff --git a/include/asm-i386/shmbuf.h b/include/asm-i386/shmbuf.h
deleted file mode 100644
index d1cdc3cb079b..000000000000
--- a/include/asm-i386/shmbuf.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef _I386_SHMBUF_H
2#define _I386_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve the y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct shmid64_ds {
15 struct ipc64_perm shm_perm; /* operation perms */
16 size_t shm_segsz; /* size of segment (bytes) */
17 __kernel_time_t shm_atime; /* last attach time */
18 unsigned long __unused1;
19 __kernel_time_t shm_dtime; /* last detach time */
20 unsigned long __unused2;
21 __kernel_time_t shm_ctime; /* last change time */
22 unsigned long __unused3;
23 __kernel_pid_t shm_cpid; /* pid of creator */
24 __kernel_pid_t shm_lpid; /* pid of last operator */
25 unsigned long shm_nattch; /* no. of current attaches */
26 unsigned long __unused4;
27 unsigned long __unused5;
28};
29
30struct shminfo64 {
31 unsigned long shmmax;
32 unsigned long shmmin;
33 unsigned long shmmni;
34 unsigned long shmseg;
35 unsigned long shmall;
36 unsigned long __unused1;
37 unsigned long __unused2;
38 unsigned long __unused3;
39 unsigned long __unused4;
40};
41
42#endif /* _I386_SHMBUF_H */
diff --git a/include/asm-i386/shmparam.h b/include/asm-i386/shmparam.h
deleted file mode 100644
index 786243a5b319..000000000000
--- a/include/asm-i386/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASMI386_SHMPARAM_H
2#define _ASMI386_SHMPARAM_H
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* _ASMI386_SHMPARAM_H */
diff --git a/include/asm-i386/sigcontext.h b/include/asm-i386/sigcontext.h
deleted file mode 100644
index aaef089a7787..000000000000
--- a/include/asm-i386/sigcontext.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef _ASMi386_SIGCONTEXT_H
2#define _ASMi386_SIGCONTEXT_H
3
4#include <linux/compiler.h>
5
6/*
7 * As documented in the iBCS2 standard..
8 *
9 * The first part of "struct _fpstate" is just the normal i387
10 * hardware setup; the extra "status" word is used to save the
11 * coprocessor status word before entering the handler.
12 *
13 * Pentium III FXSR, SSE support
14 * Gareth Hughes <gareth@valinux.com>, May 2000
15 *
16 * The FPU state data structure has had to grow to accommodate the
17 * extended FPU state required by the Streaming SIMD Extensions.
18 * There is no documented standard to accomplish this at the moment.
19 */
20struct _fpreg {
21 unsigned short significand[4];
22 unsigned short exponent;
23};
24
25struct _fpxreg {
26 unsigned short significand[4];
27 unsigned short exponent;
28 unsigned short padding[3];
29};
30
31struct _xmmreg {
32 unsigned long element[4];
33};
34
35struct _fpstate {
36 /* Regular FPU environment */
37 unsigned long cw;
38 unsigned long sw;
39 unsigned long tag;
40 unsigned long ipoff;
41 unsigned long cssel;
42 unsigned long dataoff;
43 unsigned long datasel;
44 struct _fpreg _st[8];
45 unsigned short status;
46 unsigned short magic; /* 0xffff = regular FPU data only */
47
48 /* FXSR FPU environment */
49 unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
50 unsigned long mxcsr;
51 unsigned long reserved;
52 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
53 struct _xmmreg _xmm[8];
54 unsigned long padding[56];
55};
56
57#define X86_FXSR_MAGIC 0x0000
58
59struct sigcontext {
60 unsigned short gs, __gsh;
61 unsigned short fs, __fsh;
62 unsigned short es, __esh;
63 unsigned short ds, __dsh;
64 unsigned long edi;
65 unsigned long esi;
66 unsigned long ebp;
67 unsigned long esp;
68 unsigned long ebx;
69 unsigned long edx;
70 unsigned long ecx;
71 unsigned long eax;
72 unsigned long trapno;
73 unsigned long err;
74 unsigned long eip;
75 unsigned short cs, __csh;
76 unsigned long eflags;
77 unsigned long esp_at_signal;
78 unsigned short ss, __ssh;
79 struct _fpstate __user * fpstate;
80 unsigned long oldmask;
81 unsigned long cr2;
82};
83
84
85#endif
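
Userspace normally reaches this register layout through a SA_SIGINFO handler,
whose third argument points at a ucontext_t wrapping an equivalent machine
context. A hedged sketch (REG_EIP is the i386 glibc index and is guarded,
since it does not exist on other targets):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *si, void *uc_void)
{
	ucontext_t *uc = uc_void;

	(void)si;
#ifdef REG_EIP
	/* on i386, gregs[REG_EIP] mirrors the eip field above */
	fprintf(stderr, "signal %d at eip=%#lx\n", sig,
		(unsigned long)uc->uc_mcontext.gregs[REG_EIP]);
#else
	(void)uc;
	fprintf(stderr, "signal %d\n", sig);
#endif
	exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	raise(SIGSEGV);
	return 0;
}
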
diff --git a/include/asm-i386/siginfo.h b/include/asm-i386/siginfo.h
deleted file mode 100644
index fe18f98fccfa..000000000000
--- a/include/asm-i386/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_SIGINFO_H
2#define _I386_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
deleted file mode 100644
index c3e8adec5918..000000000000
--- a/include/asm-i386/signal.h
+++ /dev/null
@@ -1,232 +0,0 @@
1#ifndef _ASMi386_SIGNAL_H
2#define _ASMi386_SIGNAL_H
3
4#include <linux/types.h>
5#include <linux/time.h>
6#include <linux/compiler.h>
7
8/* Avoid too many header ordering problems. */
9struct siginfo;
10
11#ifdef __KERNEL__
12
13#include <linux/linkage.h>
14
15/* Most things should be clean enough to redefine this at will, if care
16 is taken to make libc match. */
17
18#define _NSIG 64
19#define _NSIG_BPW 32
20#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
21
22typedef unsigned long old_sigset_t; /* at least 32 bits */
23
24typedef struct {
25 unsigned long sig[_NSIG_WORDS];
26} sigset_t;
27
28#else
29/* Here we must cater to libcs that poke about in kernel headers. */
30
31#define NSIG 32
32typedef unsigned long sigset_t;
33
34#endif /* __KERNEL__ */
35
36#define SIGHUP 1
37#define SIGINT 2
38#define SIGQUIT 3
39#define SIGILL 4
40#define SIGTRAP 5
41#define SIGABRT 6
42#define SIGIOT 6
43#define SIGBUS 7
44#define SIGFPE 8
45#define SIGKILL 9
46#define SIGUSR1 10
47#define SIGSEGV 11
48#define SIGUSR2 12
49#define SIGPIPE 13
50#define SIGALRM 14
51#define SIGTERM 15
52#define SIGSTKFLT 16
53#define SIGCHLD 17
54#define SIGCONT 18
55#define SIGSTOP 19
56#define SIGTSTP 20
57#define SIGTTIN 21
58#define SIGTTOU 22
59#define SIGURG 23
60#define SIGXCPU 24
61#define SIGXFSZ 25
62#define SIGVTALRM 26
63#define SIGPROF 27
64#define SIGWINCH 28
65#define SIGIO 29
66#define SIGPOLL SIGIO
67/*
68#define SIGLOST 29
69*/
70#define SIGPWR 30
71#define SIGSYS 31
72#define SIGUNUSED 31
73
74/* These should not be considered constants from userland. */
75#define SIGRTMIN 32
76#define SIGRTMAX _NSIG
77
78/*
79 * SA_FLAGS values:
80 *
81 * SA_ONSTACK indicates that a registered stack_t will be used.
82 * SA_RESTART flag to get restarting signals (which were the default long ago)
83 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
84 * SA_RESETHAND clears the handler when the signal is delivered.
85 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
86 * SA_NODEFER prevents the current signal from being masked in the handler.
87 *
88 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
89 * Unix names RESETHAND and NODEFER respectively.
90 */
91#define SA_NOCLDSTOP 0x00000001u
92#define SA_NOCLDWAIT 0x00000002u
93#define SA_SIGINFO 0x00000004u
94#define SA_ONSTACK 0x08000000u
95#define SA_RESTART 0x10000000u
96#define SA_NODEFER 0x40000000u
97#define SA_RESETHAND 0x80000000u
98
99#define SA_NOMASK SA_NODEFER
100#define SA_ONESHOT SA_RESETHAND
101
102#define SA_RESTORER 0x04000000
103
104/*
105 * sigaltstack controls
106 */
107#define SS_ONSTACK 1
108#define SS_DISABLE 2
109
110#define MINSIGSTKSZ 2048
111#define SIGSTKSZ 8192
112
113#include <asm-generic/signal.h>
114
115#ifdef __KERNEL__
116struct old_sigaction {
117 __sighandler_t sa_handler;
118 old_sigset_t sa_mask;
119 unsigned long sa_flags;
120 __sigrestore_t sa_restorer;
121};
122
123struct sigaction {
124 __sighandler_t sa_handler;
125 unsigned long sa_flags;
126 __sigrestore_t sa_restorer;
127 sigset_t sa_mask; /* mask last for extensibility */
128};
129
130struct k_sigaction {
131 struct sigaction sa;
132};
133#else
134/* Here we must cater to libcs that poke about in kernel headers. */
135
136struct sigaction {
137 union {
138 __sighandler_t _sa_handler;
139 void (*_sa_sigaction)(int, struct siginfo *, void *);
140 } _u;
141 sigset_t sa_mask;
142 unsigned long sa_flags;
143 void (*sa_restorer)(void);
144};
145
146#define sa_handler _u._sa_handler
147#define sa_sigaction _u._sa_sigaction
148
149#endif /* __KERNEL__ */
150
151typedef struct sigaltstack {
152 void __user *ss_sp;
153 int ss_flags;
154 size_t ss_size;
155} stack_t;
156
157#ifdef __KERNEL__
158#include <asm/sigcontext.h>
159
160#define __HAVE_ARCH_SIG_BITOPS
161
162#define sigaddset(set,sig) \
163 (__builtin_constant_p(sig) ? \
164 __const_sigaddset((set),(sig)) : \
165 __gen_sigaddset((set),(sig)))
166
167static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
168{
169 __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
170}
171
172static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
173{
174 unsigned long sig = _sig - 1;
175 set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
176}
177
178#define sigdelset(set,sig) \
179 (__builtin_constant_p(sig) ? \
180 __const_sigdelset((set),(sig)) : \
181 __gen_sigdelset((set),(sig)))
182
183
184static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
185{
186 __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
187}
188
189static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
190{
191 unsigned long sig = _sig - 1;
192 set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
193}
194
195static __inline__ int __const_sigismember(sigset_t *set, int _sig)
196{
197 unsigned long sig = _sig - 1;
198 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
199}
200
201static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
202{
203 int ret;
204 __asm__("btl %2,%1\n\tsbbl %0,%0"
205 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
206 return ret;
207}
208
209#define sigismember(set,sig) \
210 (__builtin_constant_p(sig) ? \
211 __const_sigismember((set),(sig)) : \
212 __gen_sigismember((set),(sig)))
213
214static __inline__ int sigfindinword(unsigned long word)
215{
216 __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
217 return word;
218}
219
220struct pt_regs;
221
222#define ptrace_signal_deliver(regs, cookie) \
223 do { \
224 if (current->ptrace & PT_DTRACE) { \
225 current->ptrace &= ~PT_DTRACE; \
226 (regs)->eflags &= ~TF_MASK; \
227 } \
228 } while (0)
229
230#endif /* __KERNEL__ */
231
232#endif
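
The sig bitops above pick between an inline-asm version and a plain-C version
at compile time via __builtin_constant_p. The plain-C arithmetic is worth
seeing on its own; a standalone sketch with local demo names:

#include <stdio.h>

#define DEMO_NSIG       64
#define DEMO_NSIG_BPW   32
#define DEMO_NSIG_WORDS (DEMO_NSIG / DEMO_NSIG_BPW)

struct demo_sigset { unsigned long sig[DEMO_NSIG_WORDS]; };

static void demo_sigaddset(struct demo_sigset *set, int signum)
{
	unsigned long bit = signum - 1; /* signal numbers are 1-based */
	set->sig[bit / DEMO_NSIG_BPW] |= 1UL << (bit % DEMO_NSIG_BPW);
}

static int demo_sigismember(const struct demo_sigset *set, int signum)
{
	unsigned long bit = signum - 1;
	return 1 & (set->sig[bit / DEMO_NSIG_BPW] >> (bit % DEMO_NSIG_BPW));
}

int main(void)
{
	struct demo_sigset set = { { 0 } };

	demo_sigaddset(&set, 17); /* SIGCHLD in the table above */
	printf("SIGCHLD member: %d\n", demo_sigismember(&set, 17));
	printf("SIGINT  member: %d\n", demo_sigismember(&set, 2));
	return 0;
}
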
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
deleted file mode 100644
index 1f73bde165b1..000000000000
--- a/include/asm-i386/smp.h
+++ /dev/null
@@ -1,182 +0,0 @@
1#ifndef __ASM_SMP_H
2#define __ASM_SMP_H
3
4/*
5 * We need the APIC definitions automatically as part of 'smp.h'
6 */
7#ifndef __ASSEMBLY__
8#include <linux/kernel.h>
9#include <linux/threads.h>
10#include <linux/cpumask.h>
11#endif
12
13#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
14#include <asm/bitops.h>
15#include <asm/mpspec.h>
16#include <asm/apic.h>
17#ifdef CONFIG_X86_IO_APIC
18#include <asm/io_apic.h>
19#endif
20#endif
21
22#define BAD_APICID 0xFFu
23#ifdef CONFIG_SMP
24#ifndef __ASSEMBLY__
25
26/*
27 * Private routines/data
28 */
29
30extern void smp_alloc_memory(void);
31extern int pic_mode;
32extern int smp_num_siblings;
33extern cpumask_t cpu_sibling_map[];
34extern cpumask_t cpu_core_map[];
35
36extern void (*mtrr_hook) (void);
37extern void zap_low_mappings (void);
38extern void lock_ipi_call_lock(void);
39extern void unlock_ipi_call_lock(void);
40
41#define MAX_APICID 256
42extern u8 x86_cpu_to_apicid[];
43
44#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
45
46extern void set_cpu_sibling_map(int cpu);
47
48#ifdef CONFIG_HOTPLUG_CPU
49extern void cpu_exit_clear(void);
50extern void cpu_uninit(void);
51extern void remove_siblinginfo(int cpu);
52#endif
53
54struct smp_ops
55{
56 void (*smp_prepare_boot_cpu)(void);
57 void (*smp_prepare_cpus)(unsigned max_cpus);
58 int (*cpu_up)(unsigned cpu);
59 void (*smp_cpus_done)(unsigned max_cpus);
60
61 void (*smp_send_stop)(void);
62 void (*smp_send_reschedule)(int cpu);
63 int (*smp_call_function_mask)(cpumask_t mask,
64 void (*func)(void *info), void *info,
65 int wait);
66};
67
68extern struct smp_ops smp_ops;
69
70static inline void smp_prepare_boot_cpu(void)
71{
72 smp_ops.smp_prepare_boot_cpu();
73}
74static inline void smp_prepare_cpus(unsigned int max_cpus)
75{
76 smp_ops.smp_prepare_cpus(max_cpus);
77}
78static inline int __cpu_up(unsigned int cpu)
79{
80 return smp_ops.cpu_up(cpu);
81}
82static inline void smp_cpus_done(unsigned int max_cpus)
83{
84 smp_ops.smp_cpus_done(max_cpus);
85}
86
87static inline void smp_send_stop(void)
88{
89 smp_ops.smp_send_stop();
90}
91static inline void smp_send_reschedule(int cpu)
92{
93 smp_ops.smp_send_reschedule(cpu);
94}
95static inline int smp_call_function_mask(cpumask_t mask,
96 void (*func) (void *info), void *info,
97 int wait)
98{
99 return smp_ops.smp_call_function_mask(mask, func, info, wait);
100}
101
102void native_smp_prepare_boot_cpu(void);
103void native_smp_prepare_cpus(unsigned int max_cpus);
104int native_cpu_up(unsigned int cpunum);
105void native_smp_cpus_done(unsigned int max_cpus);
106
107#ifndef CONFIG_PARAVIRT
108#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
109do { } while (0)
110#endif
111
112/*
113 * This function is needed by all SMP systems. It must _always_ be valid
114 * from the initial startup. We map APIC_BASE very early in page_setup(),
115 * so this is correct in the x86 case.
116 */
117DECLARE_PER_CPU(int, cpu_number);
118#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
119
120extern cpumask_t cpu_callout_map;
121extern cpumask_t cpu_callin_map;
122extern cpumask_t cpu_possible_map;
123
124/* We don't mark CPUs online until __cpu_up(), so we need another measure */
125static inline int num_booting_cpus(void)
126{
127 return cpus_weight(cpu_callout_map);
128}
129
130extern int safe_smp_processor_id(void);
131extern int __cpu_disable(void);
132extern void __cpu_die(unsigned int cpu);
133extern unsigned int num_processors;
134
135void __cpuinit smp_store_cpu_info(int id);
136
137#endif /* !__ASSEMBLY__ */
138
139#else /* CONFIG_SMP */
140
141#define safe_smp_processor_id() 0
142#define cpu_physical_id(cpu) boot_cpu_physical_apicid
143
144#define NO_PROC_ID 0xFF /* No processor magic marker */
145
146#endif /* CONFIG_SMP */
147
148#ifndef __ASSEMBLY__
149
150#ifdef CONFIG_X86_LOCAL_APIC
151
152#ifdef APIC_DEFINITION
153extern int hard_smp_processor_id(void);
154#else
155#include <mach_apicdef.h>
156static inline int hard_smp_processor_id(void)
157{
158 /* we don't want to mark this access volatile - bad code generation */
159 return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
160}
161#endif /* APIC_DEFINITION */
162
163#else /* CONFIG_X86_LOCAL_APIC */
164
165#ifndef CONFIG_SMP
166#define hard_smp_processor_id() 0
167#endif
168
169#endif /* CONFIG_X86_LOCAL_APIC */
170
171extern u8 apicid_2_node[];
172
173#ifdef CONFIG_X86_LOCAL_APIC
174static __inline int logical_smp_processor_id(void)
175{
176 /* we don't want to mark this access volatile - bad code generation */
177 return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
178}
179#endif
180#endif
181
182#endif
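
The smp_ops table above exists so paravirtualized kernels can replace native
SMP bring-up with hypervisor calls: the inline wrappers always dispatch
through the structure. A minimal model of that indirection (all names here
are illustrative stand-ins):

#include <stdio.h>

struct demo_smp_ops {
	int  (*cpu_up)(unsigned int cpu);
	void (*send_reschedule)(int cpu);
};

static int demo_native_cpu_up(unsigned int cpu)
{
	printf("native: booting cpu %u\n", cpu);
	return 0;
}

static void demo_native_send_reschedule(int cpu)
{
	printf("native: reschedule IPI to cpu %d\n", cpu);
}

static struct demo_smp_ops demo_smp_ops = {
	.cpu_up          = demo_native_cpu_up,
	.send_reschedule = demo_native_send_reschedule,
};

static inline int demo_cpu_up(unsigned int cpu)
{
	return demo_smp_ops.cpu_up(cpu); /* a backend may have swapped this */
}

int main(void)
{
	demo_cpu_up(1);
	demo_smp_ops.send_reschedule(1);
	return 0;
}
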
diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h
deleted file mode 100644
index 99ca648b94c5..000000000000
--- a/include/asm-i386/socket.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_SOCKET_H
2#define _ASM_SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
19#define SO_KEEPALIVE 9
20#define SO_OOBINLINE 10
21#define SO_NO_CHECK 11
22#define SO_PRIORITY 12
23#define SO_LINGER 13
24#define SO_BSDCOMPAT 14
25/* To add: #define SO_REUSEPORT 15 */
26#define SO_PASSCRED 16
27#define SO_PEERCRED 17
28#define SO_RCVLOWAT 18
29#define SO_SNDLOWAT 19
30#define SO_RCVTIMEO 20
31#define SO_SNDTIMEO 21
32
33/* Security levels - as per NRL IPv6 - don't actually do anything */
34#define SO_SECURITY_AUTHENTICATION 22
35#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
36#define SO_SECURITY_ENCRYPTION_NETWORK 24
37
38#define SO_BINDTODEVICE 25
39
40/* Socket filtering */
41#define SO_ATTACH_FILTER 26
42#define SO_DETACH_FILTER 27
43
44#define SO_PEERNAME 28
45#define SO_TIMESTAMP 29
46#define SCM_TIMESTAMP SO_TIMESTAMP
47
48#define SO_ACCEPTCONN 30
49
50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
52#define SO_TIMESTAMPNS 35
53#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
54
55#endif /* _ASM_SOCKET_H */
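
The SO_* values above are the option names userspace passes to setsockopt(2)
at level SOL_SOCKET. A minimal use, assuming an ordinary POSIX environment:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;

	if (fd < 0)
		return 1;
	/* SO_REUSEADDR is value 2 in the table above */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
		perror("setsockopt");
	close(fd);
	return 0;
}
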
diff --git a/include/asm-i386/sockios.h b/include/asm-i386/sockios.h
deleted file mode 100644
index ff528c7d255c..000000000000
--- a/include/asm-i386/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ARCH_I386_SOCKIOS__
2#define __ARCH_I386_SOCKIOS__
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif
diff --git a/include/asm-i386/sparsemem.h b/include/asm-i386/sparsemem.h
deleted file mode 100644
index cfeed990585f..000000000000
--- a/include/asm-i386/sparsemem.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _I386_SPARSEMEM_H
2#define _I386_SPARSEMEM_H
3#ifdef CONFIG_SPARSEMEM
4
5/*
6 * generic non-linear memory support:
7 *
8 * 1) we will not split memory into more chunks than will fit into the
9 * flags field of the struct page
10 */
11
12/*
13 * SECTION_SIZE_BITS 2^N: how big each section will be
14 * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
15 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
16 */
17#ifdef CONFIG_X86_PAE
18#define SECTION_SIZE_BITS 30
19#define MAX_PHYSADDR_BITS 36
20#define MAX_PHYSMEM_BITS 36
21#else
22#define SECTION_SIZE_BITS 26
23#define MAX_PHYSADDR_BITS 32
24#define MAX_PHYSMEM_BITS 32
25#endif
26
27/* XXX: FIXME -- wli */
28#define kern_addr_valid(kaddr) (0)
29
30#endif /* CONFIG_SPARSEMEM */
31#endif /* _I386_SPARSEMEM_H */
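
To make the PAE numbers above concrete: sections are 2^30 bytes (1 GiB) and
the 2^36-byte physical space holds at most 2^(36-30) = 64 of them. The same
arithmetic as a tiny check (DEMO_* names are local):

#include <stdio.h>

#define DEMO_SECTION_SIZE_BITS 30
#define DEMO_MAX_PHYSMEM_BITS  36

int main(void)
{
	unsigned long long section_bytes = 1ULL << DEMO_SECTION_SIZE_BITS;
	unsigned long nr_sections =
		1UL << (DEMO_MAX_PHYSMEM_BITS - DEMO_SECTION_SIZE_BITS);

	printf("section size: %llu MiB\n", section_bytes >> 20);
	printf("max sections: %lu\n", nr_sections);
	return 0;
}
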
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
deleted file mode 100644
index d3bcebed60ca..000000000000
--- a/include/asm-i386/spinlock.h
+++ /dev/null
@@ -1,221 +0,0 @@
1#ifndef __ASM_SPINLOCK_H
2#define __ASM_SPINLOCK_H
3
4#include <asm/atomic.h>
5#include <asm/rwlock.h>
6#include <asm/page.h>
7#include <asm/processor.h>
8#include <linux/compiler.h>
9
10#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h>
12#else
13#define CLI_STRING "cli"
14#define STI_STRING "sti"
15#define CLI_STI_CLOBBERS
16#define CLI_STI_INPUT_ARGS
17#endif /* CONFIG_PARAVIRT */
18
19/*
20 * Your basic SMP spinlocks, allowing only a single CPU anywhere
21 *
22 * Simple spin lock operations. There are two variants, one clears IRQ's
23 * on the local processor, one does not.
24 *
25 * We make no fairness assumptions. They have a cost.
26 *
27 * (the type definitions are in asm/spinlock_types.h)
28 */
29
30static inline int __raw_spin_is_locked(raw_spinlock_t *x)
31{
32 return *(volatile signed char *)(&(x)->slock) <= 0;
33}
34
35static inline void __raw_spin_lock(raw_spinlock_t *lock)
36{
37 asm volatile("\n1:\t"
38 LOCK_PREFIX " ; decb %0\n\t"
39 "jns 3f\n"
40 "2:\t"
41 "rep;nop\n\t"
42 "cmpb $0,%0\n\t"
43 "jle 2b\n\t"
44 "jmp 1b\n"
45 "3:\n\t"
46 : "+m" (lock->slock) : : "memory");
47}
48
49/*
50 * It is easier for the lock validator if interrupts are not re-enabled
51 * in the middle of a lock-acquire. This is a performance feature anyway
52 * so we turn it off:
53 *
54 * NOTE: there's an irqs-on section here, which normally would have to be
55 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
56 */
57#ifndef CONFIG_PROVE_LOCKING
58static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
59{
60 asm volatile(
61 "\n1:\t"
62 LOCK_PREFIX " ; decb %[slock]\n\t"
63 "jns 5f\n"
64 "2:\t"
65 "testl $0x200, %[flags]\n\t"
66 "jz 4f\n\t"
67 STI_STRING "\n"
68 "3:\t"
69 "rep;nop\n\t"
70 "cmpb $0, %[slock]\n\t"
71 "jle 3b\n\t"
72 CLI_STRING "\n\t"
73 "jmp 1b\n"
74 "4:\t"
75 "rep;nop\n\t"
76 "cmpb $0, %[slock]\n\t"
77 "jg 1b\n\t"
78 "jmp 4b\n"
79 "5:\n\t"
80 : [slock] "+m" (lock->slock)
81 : [flags] "r" (flags)
82 CLI_STI_INPUT_ARGS
83 : "memory" CLI_STI_CLOBBERS);
84}
85#endif
86
87static inline int __raw_spin_trylock(raw_spinlock_t *lock)
88{
89 char oldval;
90 asm volatile(
91 "xchgb %b0,%1"
92 :"=q" (oldval), "+m" (lock->slock)
93 :"0" (0) : "memory");
94 return oldval > 0;
95}
96
97/*
98 * __raw_spin_unlock based on writing $1 to the low byte.
99 * This method works despite all the confusion.
100 * (except on PPro SMP or if we are using OOSTORE, where we use xchgb instead)
101 * (PPro errata 66, 92)
102 */
103
104#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
105
106static inline void __raw_spin_unlock(raw_spinlock_t *lock)
107{
108 asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
109}
110
111#else
112
113static inline void __raw_spin_unlock(raw_spinlock_t *lock)
114{
115 char oldval = 1;
116
117 asm volatile("xchgb %b0, %1"
118 : "=q" (oldval), "+m" (lock->slock)
119 : "0" (oldval) : "memory");
120}
121
122#endif
123
124static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
125{
126 while (__raw_spin_is_locked(lock))
127 cpu_relax();
128}
129
130/*
131 * Read-write spinlocks, allowing multiple readers
132 * but only one writer.
133 *
134 * NOTE! it is quite common to have readers in interrupts
135 * but no interrupt writers. For those circumstances we
136 * can "mix" irq-safe locks - any writer needs to get a
137 * irq-safe write-lock, but readers can get non-irqsafe
138 * read-locks.
139 *
140 * On x86, we implement read-write locks as a 32-bit counter
141 * with the high bit (sign) being the "contended" bit.
142 *
143 * The inline assembly is non-obvious. Think about it.
144 *
145 * Changed to use the same technique as rw semaphores. See
146 * semaphore.h for details. -ben
147 *
148 * the helpers are in arch/i386/kernel/semaphore.c
149 */
150
151/**
152 * read_can_lock - would read_trylock() succeed?
153 * @lock: the rwlock in question.
154 */
155static inline int __raw_read_can_lock(raw_rwlock_t *x)
156{
157 return (int)(x)->lock > 0;
158}
159
160/**
161 * write_can_lock - would write_trylock() succeed?
162 * @lock: the rwlock in question.
163 */
164static inline int __raw_write_can_lock(raw_rwlock_t *x)
165{
166 return (x)->lock == RW_LOCK_BIAS;
167}
168
169static inline void __raw_read_lock(raw_rwlock_t *rw)
170{
171 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
172 "jns 1f\n"
173 "call __read_lock_failed\n\t"
174 "1:\n"
175 ::"a" (rw) : "memory");
176}
177
178static inline void __raw_write_lock(raw_rwlock_t *rw)
179{
180 asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
181 "jz 1f\n"
182 "call __write_lock_failed\n\t"
183 "1:\n"
184 ::"a" (rw) : "memory");
185}
186
187static inline int __raw_read_trylock(raw_rwlock_t *lock)
188{
189 atomic_t *count = (atomic_t *)lock;
190 atomic_dec(count);
191 if (atomic_read(count) >= 0)
192 return 1;
193 atomic_inc(count);
194 return 0;
195}
196
197static inline int __raw_write_trylock(raw_rwlock_t *lock)
198{
199 atomic_t *count = (atomic_t *)lock;
200 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
201 return 1;
202 atomic_add(RW_LOCK_BIAS, count);
203 return 0;
204}
205
206static inline void __raw_read_unlock(raw_rwlock_t *rw)
207{
208 asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
209}
210
211static inline void __raw_write_unlock(raw_rwlock_t *rw)
212{
213 asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
214 : "+m" (rw->lock) : : "memory");
215}
216
217#define _raw_spin_relax(lock) cpu_relax()
218#define _raw_read_relax(lock) cpu_relax()
219#define _raw_write_relax(lock) cpu_relax()
220
221#endif /* __ASM_SPINLOCK_H */
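
The byte-decrement lock above is easier to reason about as plain C. A hedged
model using GCC __atomic builtins (illustrative only; the kernel relies on
the inline asm, and this model keeps the same protocol: 1 = free, the winner
takes the byte to 0, unlock stores 1):

#include <stdio.h>

typedef struct { volatile signed char slock; } demo_spinlock_t;

#define DEMO_SPIN_LOCK_UNLOCKED { 1 }

static void demo_spin_lock(demo_spinlock_t *lock)
{
	for (;;) {
		/* "lock; decb": old value 1 means we took the lock to 0 */
		if (__atomic_fetch_sub(&lock->slock, 1, __ATOMIC_ACQUIRE) > 0)
			return; /* jns 3f */
		/* "rep;nop; cmpb; jle": spin until the byte looks free */
		while (__atomic_load_n(&lock->slock, __ATOMIC_RELAXED) <= 0)
			;
	}
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
	/* "movb $1,%0": a plain store releases the lock */
	__atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	demo_spinlock_t lock = DEMO_SPIN_LOCK_UNLOCKED;

	demo_spin_lock(&lock);
	/* critical section */
	demo_spin_unlock(&lock);
	printf("slock restored to %d\n", lock.slock);
	return 0;
}
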
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
deleted file mode 100644
index 4da9345c1500..000000000000
--- a/include/asm-i386/spinlock_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef __ASM_SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H
3
4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly"
6#endif
7
8typedef struct {
9 unsigned int slock;
10} raw_spinlock_t;
11
12#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
13
14typedef struct {
15 unsigned int lock;
16} raw_rwlock_t;
17
18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19
20#endif
diff --git a/include/asm-i386/srat.h b/include/asm-i386/srat.h
deleted file mode 100644
index 165ab4bdc02b..000000000000
--- a/include/asm-i386/srat.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Some of the code in this file has been gleaned from the 64 bit
3 * discontigmem support code base.
4 *
5 * Copyright (C) 2002, IBM Corp.
6 *
7 * All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
17 * NON INFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Send feedback to Pat Gaughen <gone@us.ibm.com>
25 */
26
27#ifndef _ASM_SRAT_H_
28#define _ASM_SRAT_H_
29
30#ifndef CONFIG_ACPI_SRAT
31#error CONFIG_ACPI_SRAT not defined, and srat.h header has been included
32#endif
33
34extern int get_memcfg_from_srat(void);
35extern unsigned long *get_zholes_size(int);
36
37#endif /* _ASM_SRAT_H_ */
diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h
deleted file mode 100644
index 7d1f6a5cbfca..000000000000
--- a/include/asm-i386/stacktrace.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-x86_64/stacktrace.h>
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h
deleted file mode 100644
index 67eae78323ba..000000000000
--- a/include/asm-i386/stat.h
+++ /dev/null
@@ -1,77 +0,0 @@
1#ifndef _I386_STAT_H
2#define _I386_STAT_H
3
4struct __old_kernel_stat {
5 unsigned short st_dev;
6 unsigned short st_ino;
7 unsigned short st_mode;
8 unsigned short st_nlink;
9 unsigned short st_uid;
10 unsigned short st_gid;
11 unsigned short st_rdev;
12 unsigned long st_size;
13 unsigned long st_atime;
14 unsigned long st_mtime;
15 unsigned long st_ctime;
16};
17
18struct stat {
19 unsigned long st_dev;
20 unsigned long st_ino;
21 unsigned short st_mode;
22 unsigned short st_nlink;
23 unsigned short st_uid;
24 unsigned short st_gid;
25 unsigned long st_rdev;
26 unsigned long st_size;
27 unsigned long st_blksize;
28 unsigned long st_blocks;
29 unsigned long st_atime;
30 unsigned long st_atime_nsec;
31 unsigned long st_mtime;
32 unsigned long st_mtime_nsec;
33 unsigned long st_ctime;
34 unsigned long st_ctime_nsec;
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39/* This matches struct stat64 in glibc2.1, hence the absolutely
40 * insane amounts of padding around dev_t's.
41 */
42struct stat64 {
43 unsigned long long st_dev;
44 unsigned char __pad0[4];
45
46#define STAT64_HAS_BROKEN_ST_INO 1
47 unsigned long __st_ino;
48
49 unsigned int st_mode;
50 unsigned int st_nlink;
51
52 unsigned long st_uid;
53 unsigned long st_gid;
54
55 unsigned long long st_rdev;
56 unsigned char __pad3[4];
57
58 long long st_size;
59 unsigned long st_blksize;
60
61	unsigned long long	st_blocks;	/* Number of 512-byte blocks allocated. */
62
63 unsigned long st_atime;
64 unsigned long st_atime_nsec;
65
66 unsigned long st_mtime;
67 unsigned int st_mtime_nsec;
68
69 unsigned long st_ctime;
70 unsigned long st_ctime_nsec;
71
72 unsigned long long st_ino;
73};
74
75#define STAT_HAVE_NSEC 1
76
77#endif
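
Userspace reaches these layouts through the stat family of system calls; with
glibc, building with _FILE_OFFSET_BITS=64 maps plain stat() onto the 64-bit
layout. A routine example:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/etc/hostname", &st) < 0) {
		perror("stat");
		return 1;
	}
	printf("size=%lld blocks=%lld mode=%o\n",
	       (long long)st.st_size, (long long)st.st_blocks,
	       (unsigned int)st.st_mode);
	return 0;
}
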
diff --git a/include/asm-i386/statfs.h b/include/asm-i386/statfs.h
deleted file mode 100644
index 24972c175132..000000000000
--- a/include/asm-i386/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_STATFS_H
2#define _I386_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
deleted file mode 100644
index a9b64453bdf5..000000000000
--- a/include/asm-i386/string.h
+++ /dev/null
@@ -1,276 +0,0 @@
1#ifndef _I386_STRING_H_
2#define _I386_STRING_H_
3
4#ifdef __KERNEL__
5
6/* Let gcc decide whether to inline or use the out-of-line functions */
7
8#define __HAVE_ARCH_STRCPY
9extern char *strcpy(char *dest, const char *src);
10
11#define __HAVE_ARCH_STRNCPY
12extern char *strncpy(char *dest, const char *src, size_t count);
13
14#define __HAVE_ARCH_STRCAT
15extern char *strcat(char *dest, const char *src);
16
17#define __HAVE_ARCH_STRNCAT
18extern char *strncat(char *dest, const char *src, size_t count);
19
20#define __HAVE_ARCH_STRCMP
21extern int strcmp(const char *cs, const char *ct);
22
23#define __HAVE_ARCH_STRNCMP
24extern int strncmp(const char *cs, const char *ct, size_t count);
25
26#define __HAVE_ARCH_STRCHR
27extern char *strchr(const char *s, int c);
28
29#define __HAVE_ARCH_STRRCHR
30extern char *strrchr(const char *s, int c);
31
32#define __HAVE_ARCH_STRLEN
33extern size_t strlen(const char *s);
34
35static __always_inline void * __memcpy(void * to, const void * from, size_t n)
36{
37int d0, d1, d2;
38__asm__ __volatile__(
39 "rep ; movsl\n\t"
40 "movl %4,%%ecx\n\t"
41 "andl $3,%%ecx\n\t"
42 "jz 1f\n\t"
43 "rep ; movsb\n\t"
44 "1:"
45 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
46 : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from)
47 : "memory");
48return (to);
49}
50
51/*
52 * This looks ugly, but the compiler can optimize it totally,
53 * as the count is constant.
54 */
55static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
56{
57 long esi, edi;
58 if (!n) return to;
59#if 1 /* want to do small copies with non-string ops? */
60 switch (n) {
61 case 1: *(char*)to = *(char*)from; return to;
62 case 2: *(short*)to = *(short*)from; return to;
63 case 4: *(int*)to = *(int*)from; return to;
64#if 1 /* including those doable with two moves? */
65 case 3: *(short*)to = *(short*)from;
66 *((char*)to+2) = *((char*)from+2); return to;
67 case 5: *(int*)to = *(int*)from;
68 *((char*)to+4) = *((char*)from+4); return to;
69 case 6: *(int*)to = *(int*)from;
70 *((short*)to+2) = *((short*)from+2); return to;
71 case 8: *(int*)to = *(int*)from;
72 *((int*)to+1) = *((int*)from+1); return to;
73#endif
74 }
75#endif
76 esi = (long) from;
77 edi = (long) to;
78 if (n >= 5*4) {
79 /* large block: use rep prefix */
80 int ecx;
81 __asm__ __volatile__(
82 "rep ; movsl"
83 : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
84 : "0" (n/4), "1" (edi),"2" (esi)
85 : "memory"
86 );
87 } else {
88 /* small block: don't clobber ecx + smaller code */
89 if (n >= 4*4) __asm__ __volatile__("movsl"
90 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
91 if (n >= 3*4) __asm__ __volatile__("movsl"
92 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
93 if (n >= 2*4) __asm__ __volatile__("movsl"
94 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
95 if (n >= 1*4) __asm__ __volatile__("movsl"
96 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
97 }
98 switch (n % 4) {
99 /* tail */
100 case 0: return to;
101 case 1: __asm__ __volatile__("movsb"
102 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
103 return to;
104 case 2: __asm__ __volatile__("movsw"
105 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
106 return to;
107 default: __asm__ __volatile__("movsw\n\tmovsb"
108 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
109 return to;
110 }
111}
112
113#define __HAVE_ARCH_MEMCPY
114
115#ifdef CONFIG_X86_USE_3DNOW
116
117#include <asm/mmx.h>
118
119/*
121 * This CPU favours 3DNow strongly (e.g. AMD Athlon)
121 */
122
123static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
124{
125 if (len < 512)
126 return __constant_memcpy(to, from, len);
127 return _mmx_memcpy(to, from, len);
128}
129
130static __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
131{
132 if (len < 512)
133 return __memcpy(to, from, len);
134 return _mmx_memcpy(to, from, len);
135}
136
137#define memcpy(t, f, n) \
138(__builtin_constant_p(n) ? \
139 __constant_memcpy3d((t),(f),(n)) : \
140 __memcpy3d((t),(f),(n)))
141
142#else
143
144/*
145 * No 3D Now!
146 */
147
148#define memcpy(t, f, n) \
149(__builtin_constant_p(n) ? \
150 __constant_memcpy((t),(f),(n)) : \
151 __memcpy((t),(f),(n)))
152
153#endif
154
155#define __HAVE_ARCH_MEMMOVE
156void *memmove(void * dest,const void * src, size_t n);
157
158#define memcmp __builtin_memcmp
159
160#define __HAVE_ARCH_MEMCHR
161extern void *memchr(const void * cs,int c,size_t count);
162
163static inline void * __memset_generic(void * s, char c,size_t count)
164{
165int d0, d1;
166__asm__ __volatile__(
167 "rep\n\t"
168 "stosb"
169 : "=&c" (d0), "=&D" (d1)
170 :"a" (c),"1" (s),"0" (count)
171 :"memory");
172return s;
173}
174
175/* we might want to write optimized versions of these later */
176#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
177
178/*
179 * memset(x,0,y) is a reasonably common thing to do, so we want to fill
180 * things 32 bits at a time even when we don't know the size of the
181 * area at compile-time..
182 */
183static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
184{
185int d0, d1;
186__asm__ __volatile__(
187 "rep ; stosl\n\t"
188 "testb $2,%b3\n\t"
189 "je 1f\n\t"
190 "stosw\n"
191 "1:\ttestb $1,%b3\n\t"
192 "je 2f\n\t"
193 "stosb\n"
194 "2:"
195 :"=&c" (d0), "=&D" (d1)
196 :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
197 :"memory");
198return (s);
199}
200
201/* Added by Gertjan van Wingerde to make the minix and sysv modules work */
202#define __HAVE_ARCH_STRNLEN
203extern size_t strnlen(const char * s, size_t count);
204/* end of additional stuff */
205
206#define __HAVE_ARCH_STRSTR
207extern char *strstr(const char *cs, const char *ct);
208
209/*
210 * This looks horribly ugly, but the compiler can optimize it totally,
211 * as by now we know that both pattern and count are constant..
212 */
213static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
214{
215 switch (count) {
216 case 0:
217 return s;
218 case 1:
219 *(unsigned char *)s = pattern;
220 return s;
221 case 2:
222 *(unsigned short *)s = pattern;
223 return s;
224 case 3:
225 *(unsigned short *)s = pattern;
226 *(2+(unsigned char *)s) = pattern;
227 return s;
228 case 4:
229 *(unsigned long *)s = pattern;
230 return s;
231 }
232#define COMMON(x) \
233__asm__ __volatile__( \
234 "rep ; stosl" \
235 x \
236 : "=&c" (d0), "=&D" (d1) \
237 : "a" (pattern),"0" (count/4),"1" ((long) s) \
238 : "memory")
239{
240 int d0, d1;
241 switch (count % 4) {
242 case 0: COMMON(""); return s;
243 case 1: COMMON("\n\tstosb"); return s;
244 case 2: COMMON("\n\tstosw"); return s;
245 default: COMMON("\n\tstosw\n\tstosb"); return s;
246 }
247}
248
249#undef COMMON
250}
251
252#define __constant_c_x_memset(s, c, count) \
253(__builtin_constant_p(count) ? \
254 __constant_c_and_count_memset((s),(c),(count)) : \
255 __constant_c_memset((s),(c),(count)))
256
257#define __memset(s, c, count) \
258(__builtin_constant_p(count) ? \
259 __constant_count_memset((s),(c),(count)) : \
260 __memset_generic((s),(c),(count)))
261
262#define __HAVE_ARCH_MEMSET
263#define memset(s, c, count) \
264(__builtin_constant_p(c) ? \
265 __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
266 __memset((s),(c),(count)))
267
268/*
269 * find the first occurrence of byte 'c', or 1 past the area if none
270 */
271#define __HAVE_ARCH_MEMSCAN
272extern void *memscan(void * addr, int c, size_t size);
273
274#endif /* __KERNEL__ */
275
276#endif
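
All of the wrappers above share one dispatch idiom: __builtin_constant_p(n)
routes compile-time-constant sizes to a variant the compiler can fully
unroll, and everything else to the generic rep-string version. A stripped
illustration of the idiom itself (GCC/Clang extension; the demo_* names are
local, not kernel API):

#include <stdio.h>
#include <string.h>

static void *demo_constant_copy(void *to, const void *from, size_t n)
{
	return memcpy(to, from, n); /* stand-in for __constant_memcpy */
}

static void *demo_variable_copy(void *to, const void *from, size_t n)
{
	return memcpy(to, from, n); /* stand-in for __memcpy */
}

#define demo_memcpy(t, f, n)                  \
	(__builtin_constant_p(n) ?            \
	 demo_constant_copy((t), (f), (n)) :  \
	 demo_variable_copy((t), (f), (n)))

int main(void)
{
	char dst[8];
	size_t n = 4;

	demo_memcpy(dst, "abcdefg", 8); /* constant-size path */
	demo_memcpy(dst, "ABCD", n);    /* runtime-size path */
	printf("%s\n", dst);
	return 0;
}
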
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
deleted file mode 100644
index a2520732ffd6..000000000000
--- a/include/asm-i386/suspend.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Copyright 2001-2002 Pavel Machek <pavel@suse.cz>
3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */
6#include <asm/desc.h>
7#include <asm/i387.h>
8
9static inline int arch_prepare_suspend(void) { return 0; }
10
11/* image of the saved processor state */
12struct saved_context {
13 u16 es, fs, gs, ss;
14 unsigned long cr0, cr2, cr3, cr4;
15 struct Xgt_desc_struct gdt;
16 struct Xgt_desc_struct idt;
17 u16 ldt;
18 u16 tss;
19 unsigned long tr;
20 unsigned long safety;
21 unsigned long return_address;
22} __attribute__((packed));
23
24#ifdef CONFIG_ACPI
25extern unsigned long saved_eip;
26extern unsigned long saved_esp;
27extern unsigned long saved_ebp;
28extern unsigned long saved_ebx;
29extern unsigned long saved_esi;
30extern unsigned long saved_edi;
31
32static inline void acpi_save_register_state(unsigned long return_point)
33{
34 saved_eip = return_point;
35 asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
36 asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
37 asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
38 asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
39 asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
40}
41
42#define acpi_restore_register_state() do {} while (0)
43
44/* routines for saving/restoring kernel state */
45extern int acpi_save_state_mem(void);
46#endif
diff --git a/include/asm-i386/sync_bitops.h b/include/asm-i386/sync_bitops.h
deleted file mode 100644
index cbce08a2d135..000000000000
--- a/include/asm-i386/sync_bitops.h
+++ /dev/null
@@ -1,156 +0,0 @@
1#ifndef _I386_SYNC_BITOPS_H
2#define _I386_SYNC_BITOPS_H
3
4/*
5 * Copyright 1992, Linus Torvalds.
6 */
7
8/*
9 * These have to be done with inline assembly: that way the bit-setting
10 * is guaranteed to be atomic. All bit operations return 0 if the bit
11 * was cleared before the operation and != 0 if it was not.
12 *
13 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
14 */
15
16#define ADDR (*(volatile long *) addr)
17
18/**
19 * sync_set_bit - Atomically set a bit in memory
20 * @nr: the bit to set
21 * @addr: the address to start counting from
22 *
23 * This function is atomic and may not be reordered. See __set_bit()
24 * if you do not require the atomic guarantees.
25 *
26 * Note: there are no guarantees that this function will not be reordered
27 * on non-x86 architectures, so if you are writing portable code,
28 * make sure not to rely on its ordering guarantees.
29 *
30 * Note that @nr may be almost arbitrarily large; this function is not
31 * restricted to acting on a single-word quantity.
32 */
33static inline void sync_set_bit(int nr, volatile unsigned long * addr)
34{
35 __asm__ __volatile__("lock; btsl %1,%0"
36 :"+m" (ADDR)
37 :"Ir" (nr)
38 : "memory");
39}
40
41/**
42 * sync_clear_bit - Clears a bit in memory
43 * @nr: Bit to clear
44 * @addr: Address to start counting from
45 *
46 * sync_clear_bit() is atomic and may not be reordered. However, it does
47 * not contain a memory barrier, so if it is used for locking purposes,
48 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
49 * in order to ensure changes are visible on other processors.
50 */
51static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
52{
53 __asm__ __volatile__("lock; btrl %1,%0"
54 :"+m" (ADDR)
55 :"Ir" (nr)
56 : "memory");
57}
58
59/**
60 * sync_change_bit - Toggle a bit in memory
61 * @nr: Bit to change
62 * @addr: Address to start counting from
63 *
64 * change_bit() is atomic and may not be reordered. It may be
65 * reordered on architectures other than x86.
66 * Note that @nr may be almost arbitrarily large; this function is not
67 * restricted to acting on a single-word quantity.
68 */
69static inline void sync_change_bit(int nr, volatile unsigned long * addr)
70{
71 __asm__ __volatile__("lock; btcl %1,%0"
72 :"+m" (ADDR)
73 :"Ir" (nr)
74 : "memory");
75}
76
77/**
78 * sync_test_and_set_bit - Set a bit and return its old value
79 * @nr: Bit to set
80 * @addr: Address to count from
81 *
82 * This operation is atomic and cannot be reordered.
83 * It may be reordered on architectures other than x86.
84 * It also implies a memory barrier.
85 */
86static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
87{
88 int oldbit;
89
90 __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
91 :"=r" (oldbit),"+m" (ADDR)
92 :"Ir" (nr) : "memory");
93 return oldbit;
94}
95
96/**
97 * sync_test_and_clear_bit - Clear a bit and return its old value
98 * @nr: Bit to clear
99 * @addr: Address to count from
100 *
101 * This operation is atomic and cannot be reordered.
102 * It can be reordered on architectures other than x86.
103 * It also implies a memory barrier.
104 */
105static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
106{
107 int oldbit;
108
109 __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
110 :"=r" (oldbit),"+m" (ADDR)
111 :"Ir" (nr) : "memory");
112 return oldbit;
113}
114
115/**
116 * sync_test_and_change_bit - Change a bit and return its old value
117 * @nr: Bit to change
118 * @addr: Address to count from
119 *
120 * This operation is atomic and cannot be reordered.
121 * It also implies a memory barrier.
122 */
123static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
124{
125 int oldbit;
126
127 __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
128 :"=r" (oldbit),"+m" (ADDR)
129 :"Ir" (nr) : "memory");
130 return oldbit;
131}
132
133static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr)
134{
135 return ((1UL << (nr & 31)) &
136 (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
137}
138
139static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr)
140{
141 int oldbit;
142
143 __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
144 :"=r" (oldbit)
145 :"m" (ADDR),"Ir" (nr));
146 return oldbit;
147}
148
149#define sync_test_bit(nr,addr) \
150 (__builtin_constant_p(nr) ? \
151 sync_constant_test_bit((nr),(addr)) : \
152 sync_var_test_bit((nr),(addr)))
153
154#undef ADDR
155
156#endif /* _I386_SYNC_BITOPS_H */
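
What the lock;btsl/sbbl pair in sync_test_and_set_bit() computes is "set the
bit, return its previous value". The same semantics with a GCC atomic
builtin, as a model rather than a drop-in (the asm returns 0 or -1 via sbbl;
this returns 0 or 1):

#include <stdio.h>

#define DEMO_BPL (8 * sizeof(unsigned long)) /* bits per long */

static int demo_test_and_set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + nr / DEMO_BPL;
	unsigned long mask = 1UL << (nr % DEMO_BPL);
	unsigned long old = __atomic_fetch_or(word, mask, __ATOMIC_SEQ_CST);

	return (old & mask) != 0;
}

int main(void)
{
	unsigned long bits[2] = { 0, 0 };

	printf("first set:  %d\n", demo_test_and_set_bit(40, bits)); /* 0 */
	printf("second set: %d\n", demo_test_and_set_bit(40, bits)); /* 1 */
	return 0;
}
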
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
deleted file mode 100644
index d69ba937e092..000000000000
--- a/include/asm-i386/system.h
+++ /dev/null
@@ -1,313 +0,0 @@
1#ifndef __ASM_SYSTEM_H
2#define __ASM_SYSTEM_H
3
4#include <linux/kernel.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8
9#ifdef __KERNEL__
10
11struct task_struct; /* one of the stranger aspects of C forward declarations.. */
12extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
13
14/*
15 * Saving eflags is important. It not only switches IOPL between tasks,
16 * it also protects other tasks from NT leaking through sysenter etc.
17 */
18#define switch_to(prev,next,last) do { \
19 unsigned long esi,edi; \
20 asm volatile("pushfl\n\t" /* Save flags */ \
21 "pushl %%ebp\n\t" \
22 "movl %%esp,%0\n\t" /* save ESP */ \
23 "movl %5,%%esp\n\t" /* restore ESP */ \
24 "movl $1f,%1\n\t" /* save EIP */ \
25 "pushl %6\n\t" /* restore EIP */ \
26 "jmp __switch_to\n" \
27 "1:\t" \
28 "popl %%ebp\n\t" \
29 "popfl" \
30 :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
31 "=a" (last),"=S" (esi),"=D" (edi) \
32 :"m" (next->thread.esp),"m" (next->thread.eip), \
33 "2" (prev), "d" (next)); \
34} while (0)
35
36#define _set_base(addr,base) do { unsigned long __pr; \
37__asm__ __volatile__ ("movw %%dx,%1\n\t" \
38 "rorl $16,%%edx\n\t" \
39 "movb %%dl,%2\n\t" \
40 "movb %%dh,%3" \
41 :"=&d" (__pr) \
42 :"m" (*((addr)+2)), \
43 "m" (*((addr)+4)), \
44 "m" (*((addr)+7)), \
45 "0" (base) \
46 ); } while(0)
47
48#define _set_limit(addr,limit) do { unsigned long __lr; \
49__asm__ __volatile__ ("movw %%dx,%1\n\t" \
50 "rorl $16,%%edx\n\t" \
51 "movb %2,%%dh\n\t" \
52 "andb $0xf0,%%dh\n\t" \
53 "orb %%dh,%%dl\n\t" \
54 "movb %%dl,%2" \
55 :"=&d" (__lr) \
56 :"m" (*(addr)), \
57 "m" (*((addr)+6)), \
58 "0" (limit) \
59 ); } while(0)
60
61#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
62#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
63
64/*
65 * Load a segment. Fall back on loading the zero
66 * segment if something goes wrong..
67 */
68#define loadsegment(seg,value) \
69 asm volatile("\n" \
70 "1:\t" \
71 "mov %0,%%" #seg "\n" \
72 "2:\n" \
73 ".section .fixup,\"ax\"\n" \
74 "3:\t" \
75 "pushl $0\n\t" \
76 "popl %%" #seg "\n\t" \
77 "jmp 2b\n" \
78 ".previous\n" \
79 ".section __ex_table,\"a\"\n\t" \
80 ".align 4\n\t" \
81 ".long 1b,3b\n" \
82 ".previous" \
83 : :"rm" (value))
84
85/*
86 * Save a segment register away
87 */
88#define savesegment(seg, value) \
89 asm volatile("mov %%" #seg ",%0":"=rm" (value))
90
91
92static inline void native_clts(void)
93{
94 asm volatile ("clts");
95}
96
97static inline unsigned long native_read_cr0(void)
98{
99 unsigned long val;
100 asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
101 return val;
102}
103
104static inline void native_write_cr0(unsigned long val)
105{
106 asm volatile("movl %0,%%cr0": :"r" (val));
107}
108
109static inline unsigned long native_read_cr2(void)
110{
111 unsigned long val;
112 asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
113 return val;
114}
115
116static inline void native_write_cr2(unsigned long val)
117{
118 asm volatile("movl %0,%%cr2": :"r" (val));
119}
120
121static inline unsigned long native_read_cr3(void)
122{
123 unsigned long val;
124 asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
125 return val;
126}
127
128static inline void native_write_cr3(unsigned long val)
129{
130 asm volatile("movl %0,%%cr3": :"r" (val));
131}
132
133static inline unsigned long native_read_cr4(void)
134{
135 unsigned long val;
136 asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
137 return val;
138}
139
140static inline unsigned long native_read_cr4_safe(void)
141{
142 unsigned long val;
143 /* This could fault if %cr4 does not exist */
144 asm("1: movl %%cr4, %0 \n"
145 "2: \n"
146 ".section __ex_table,\"a\" \n"
147 ".long 1b,2b \n"
148 ".previous \n"
149 : "=r" (val): "0" (0));
150 return val;
151}
152
153static inline void native_write_cr4(unsigned long val)
154{
155 asm volatile("movl %0,%%cr4": :"r" (val));
156}
157
158static inline void native_wbinvd(void)
159{
160 asm volatile("wbinvd": : :"memory");
161}
162
163
164#ifdef CONFIG_PARAVIRT
165#include <asm/paravirt.h>
166#else
167#define read_cr0() (native_read_cr0())
168#define write_cr0(x) (native_write_cr0(x))
169#define read_cr2() (native_read_cr2())
170#define write_cr2(x) (native_write_cr2(x))
171#define read_cr3() (native_read_cr3())
172#define write_cr3(x) (native_write_cr3(x))
173#define read_cr4() (native_read_cr4())
174#define read_cr4_safe() (native_read_cr4_safe())
175#define write_cr4(x) (native_write_cr4(x))
176#define wbinvd() (native_wbinvd())
177
178/* Clear the 'TS' bit */
179#define clts() (native_clts())
180
181#endif/* CONFIG_PARAVIRT */
182
183/* Set the 'TS' bit */
184#define stts() write_cr0(8 | read_cr0())
185
186#endif /* __KERNEL__ */
187
188static inline unsigned long get_limit(unsigned long segment)
189{
190 unsigned long __limit;
191 __asm__("lsll %1,%0"
192 :"=r" (__limit):"r" (segment));
193 return __limit+1;
194}
195
196#define nop() __asm__ __volatile__ ("nop")
197
198/*
199 * Force strict CPU ordering.
200 * And yes, this is required on UP too when we're talking
201 * to devices.
202 *
203 * For now, "wmb()" doesn't actually do anything, as all
204 * Intel CPUs follow what Intel calls a *Processor Order*,
205 * in which all writes are seen in the program order even
206 * outside the CPU.
207 *
208 * I expect future Intel CPUs to have a weaker ordering,
209 * but I'd also expect them to finally get their act together
210 * and add some real memory barriers if so.
211 *
212 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
213 * nop for these.
214 */
215
216
217#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
218#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
219
220/**
221 * read_barrier_depends - Flush all pending reads that subsequent reads
222 * depend on.
223 *
224 * No data-dependent reads from memory-like regions are ever reordered
225 * over this barrier. All reads preceding this primitive are guaranteed
226 * to access memory (but not necessarily other CPUs' caches) before any
227 * reads following this primitive that depend on the data returned by
228 * any of the preceding reads. This primitive is much lighter weight than
229 * rmb() on most CPUs, and is never heavier weight than is
230 * rmb().
231 *
232 * These ordering constraints are respected by both the local CPU
233 * and the compiler.
234 *
235 * Ordering is not guaranteed by anything other than these primitives,
236 * not even by data dependencies. See the documentation for
237 * memory_barrier() for examples and URLs to more information.
238 *
239 * For example, the following code would force ordering (the initial
240 * value of "a" is zero, "b" is one, and "p" is "&a"):
241 *
242 * <programlisting>
243 * CPU 0 CPU 1
244 *
245 * b = 2;
246 * memory_barrier();
247 * p = &b; q = p;
248 * read_barrier_depends();
249 * d = *q;
250 * </programlisting>
251 *
252 * because the read of "*q" depends on the read of "p" and these
253 * two reads are separated by a read_barrier_depends(). However,
254 * the following code, with the same initial values for "a" and "b":
255 *
256 * <programlisting>
257 * CPU 0 CPU 1
258 *
259 * a = 2;
260 * memory_barrier();
261 * b = 3; y = b;
262 * read_barrier_depends();
263 * x = a;
264 * </programlisting>
265 *
266 * does not enforce ordering, since there is no data dependency between
267 * the read of "a" and the read of "b". Therefore, on some CPUs, such
268 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
269 * in cases like this where there are no data dependencies.
270 **/
271
272#define read_barrier_depends() do { } while(0)
273
274#ifdef CONFIG_X86_OOSTORE
275/* Actually there are no OOO-store-capable CPUs for now that do SSE,
276   but allow for the possibility already. */
277#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
278#else
279#define wmb() __asm__ __volatile__ ("": : :"memory")
280#endif
281
282#ifdef CONFIG_SMP
283#define smp_mb() mb()
284#define smp_rmb() rmb()
285#define smp_wmb() wmb()
286#define smp_read_barrier_depends() read_barrier_depends()
287#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
288#else
289#define smp_mb() barrier()
290#define smp_rmb() barrier()
291#define smp_wmb() barrier()
292#define smp_read_barrier_depends() do { } while(0)
293#define set_mb(var, value) do { var = value; barrier(); } while (0)
294#endif
295
296#include <linux/irqflags.h>
297
298/*
299 * disable hlt during certain critical i/o operations
300 */
301#define HAVE_DISABLE_HLT
302void disable_hlt(void);
303void enable_hlt(void);
304
305extern int es7000_plat;
306void cpu_idle_wait(void);
307
308extern unsigned long arch_align_stack(unsigned long sp);
309extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
310
311void default_idle(void);
312
313#endif
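
The smp_*() mapping near the end of the file is the key pattern: real fences
on CONFIG_SMP, compiler-only barriers on UP. A loose C11 analogy of that
split (an analogy only, not the kernel's implementation):

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_CONFIG_SMP 1

#if DEMO_CONFIG_SMP
# define demo_smp_wmb() atomic_thread_fence(memory_order_release)
# define demo_smp_rmb() atomic_thread_fence(memory_order_acquire)
#else /* UP: only stop the compiler from reordering, like barrier() */
# define demo_smp_wmb() atomic_signal_fence(memory_order_seq_cst)
# define demo_smp_rmb() atomic_signal_fence(memory_order_seq_cst)
#endif

static int data, flag;

int main(void)
{
	data = 42;
	demo_smp_wmb(); /* writer: publish data before flag */
	flag = 1;
	demo_smp_rmb(); /* a reader would pair this after loading flag */
	printf("data=%d flag=%d\n", data, flag);
	return 0;
}
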
diff --git a/include/asm-i386/termbits.h b/include/asm-i386/termbits.h
deleted file mode 100644
index a21700352e7b..000000000000
--- a/include/asm-i386/termbits.h
+++ /dev/null
@@ -1,198 +0,0 @@
1#ifndef __ARCH_I386_TERMBITS_H__
2#define __ARCH_I386_TERMBITS_H__
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif
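
For readers decoding the c_cflag layout above: the baud rate lives in the low CBAUD bits and the character size in the CSIZE bits. Below is a minimal userspace sketch; the octal constants are copied from this header, and csize_name() is an illustrative helper, not a kernel interface.

#include <stdio.h>

#define CBAUD  0010017
#define CSIZE  0000060
#define CS8    0000060
#define CREAD  0000200
#define B38400 0000017

static const char *csize_name(unsigned long cflag)
{
        return (cflag & CSIZE) == CS8 ? "8 data bits" : "fewer than 8";
}

int main(void)
{
        unsigned long cflag = B38400 | CS8 | CREAD;

        printf("baud code %#lo, %s\n", cflag & CBAUD, csize_name(cflag));
        return 0;
}

Note that CBAUDEX (0010000) lies inside the CBAUD mask, so the extended B57600..B4000000 codes are extracted by the same `cflag & CBAUD` expression.
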
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h
deleted file mode 100644
index 6fdb2c841b73..000000000000
--- a/include/asm-i386/termios.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _I386_TERMIOS_H
2#define _I386_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42#ifdef __KERNEL__
43
44/* intr=^C quit=^\ erase=del kill=^U
45 eof=^D vtime=\0 vmin=\1 sxtc=\0
46 start=^Q stop=^S susp=^Z eol=\0
47 reprint=^R discard=^O werase=^W lnext=^V

48 eol2=\0
49*/
50#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
51
52/*
53 * Translate a "termio" structure into a "termios". Ugh.
54 */
55#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
56 unsigned short __tmp; \
57 get_user(__tmp,&(termio)->x); \
58 *(unsigned short *) &(termios)->x = __tmp; \
59}
60
61#define user_termio_to_kernel_termios(termios, termio) \
62({ \
63 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
64 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
65 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
66 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
67 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
68})
69
70/*
71 * Translate a "termios" structure into a "termio". Ugh.
72 */
73#define kernel_termios_to_user_termio(termio, termios) \
74({ \
75 put_user((termios)->c_iflag, &(termio)->c_iflag); \
76 put_user((termios)->c_oflag, &(termio)->c_oflag); \
77 put_user((termios)->c_cflag, &(termio)->c_cflag); \
78 put_user((termios)->c_lflag, &(termio)->c_lflag); \
79 put_user((termios)->c_line, &(termio)->c_line); \
80 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
81})
82
83#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
84#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
85#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
86#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
87
88#endif /* __KERNEL__ */
89
90#endif /* _I386_TERMIOS_H */
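
The INIT_C_CC string above packs the default control characters in c_cc index order (VINTR=0, VQUIT=1, VKILL=3, and so on, per termbits.h). A small standalone sketch, not kernel code, that decodes a few of them back to caret notation:

#include <stdio.h>

#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"

int main(void)
{
        const unsigned char *cc = (const unsigned char *)INIT_C_CC;

        /* Adding '@' (0x40) turns a control code into its caret form,
         * so 003 prints as C, i.e. ^C. */
        printf("intr=^%c quit=^%c kill=^%c\n",
               cc[0] + '@', cc[1] + '@', cc[3] + '@');
        return 0;
}
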
diff --git a/include/asm-i386/therm_throt.h b/include/asm-i386/therm_throt.h
deleted file mode 100644
index 399bf6026b16..000000000000
--- a/include/asm-i386/therm_throt.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_I386_THERM_THROT_H__
2#define __ASM_I386_THERM_THROT_H__ 1
3
4#include <asm/atomic.h>
5
6extern atomic_t therm_throt_en;
7int therm_throt_process(int curr);
8
9#endif /* __ASM_I386_THERM_THROT_H__ */
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
deleted file mode 100644
index 22a8cbcd35e2..000000000000
--- a/include/asm-i386/thread_info.h
+++ /dev/null
@@ -1,180 +0,0 @@
1/* thread_info.h: i386 low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H
9
10#ifdef __KERNEL__
11
12#include <linux/compiler.h>
13#include <asm/page.h>
14
15#ifndef __ASSEMBLY__
16#include <asm/processor.h>
17#endif
18
19/*
20 * low level task data that entry.S needs immediate access to
21 * - this struct should fit entirely inside of one cache line
22 * - this struct shares the supervisor stack pages
23 * - if the contents of this structure are changed, the assembly constants must also be changed
24 */
25#ifndef __ASSEMBLY__
26
27struct thread_info {
28 struct task_struct *task; /* main task structure */
29 struct exec_domain *exec_domain; /* execution domain */
30 unsigned long flags; /* low level flags */
31 unsigned long status; /* thread-synchronous flags */
32 __u32 cpu; /* current CPU */
33 int preempt_count; /* 0 => preemptable, <0 => BUG */
34
35
36 mm_segment_t addr_limit; /* thread address space:
37 0-0xBFFFFFFF for user-thread
38 0-0xFFFFFFFF for kernel-thread
39 */
40 void *sysenter_return;
41 struct restart_block restart_block;
42
43 unsigned long previous_esp; /* ESP of the previous stack in case
44 of nested (IRQ) stacks
45 */
46 __u8 supervisor_stack[0];
47};
48
49#else /* !__ASSEMBLY__ */
50
51#include <asm/asm-offsets.h>
52
53#endif
54
55#define PREEMPT_ACTIVE 0x10000000
56#ifdef CONFIG_4KSTACKS
57#define THREAD_SIZE (4096)
58#else
59#define THREAD_SIZE (8192)
60#endif
61
62#define STACK_WARN (THREAD_SIZE/8)
63/*
64 * macros/functions for gaining access to the thread information structure
65 *
66 * preempt_count needs to be 1 initially, until the scheduler is functional.
67 */
68#ifndef __ASSEMBLY__
69
70#define INIT_THREAD_INFO(tsk) \
71{ \
72 .task = &tsk, \
73 .exec_domain = &default_exec_domain, \
74 .flags = 0, \
75 .cpu = 0, \
76 .preempt_count = 1, \
77 .addr_limit = KERNEL_DS, \
78 .restart_block = { \
79 .fn = do_no_restart_syscall, \
80 }, \
81}
82
83#define init_thread_info (init_thread_union.thread_info)
84#define init_stack (init_thread_union.stack)
85
86
87/* how to get the current stack pointer from C */
88register unsigned long current_stack_pointer asm("esp") __attribute_used__;
89
90/* how to get the thread information struct from C */
91static inline struct thread_info *current_thread_info(void)
92{
93 return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
94}
95
96/* thread information allocation */
97#ifdef CONFIG_DEBUG_STACK_USAGE
98#define alloc_thread_info(tsk) ((struct thread_info *) \
99 __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE)))
100#else
101#define alloc_thread_info(tsk) ((struct thread_info *) \
102 __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
103#endif
104
105#define free_thread_info(info) free_pages((unsigned long)(info), get_order(THREAD_SIZE))
106
107#else /* !__ASSEMBLY__ */
108
109/* how to get the thread information struct from ASM */
110#define GET_THREAD_INFO(reg) \
111 movl $-THREAD_SIZE, reg; \
112 andl %esp, reg
113
114/* use this one if reg already contains %esp */
115#define GET_THREAD_INFO_WITH_ESP(reg) \
116 andl $-THREAD_SIZE, reg
117
118#endif
119
120/*
121 * thread information flags
122 * - these are process state flags that various assembly files may need to access
123 * - pending work-to-be-done flags are in LSW
124 * - other flags in MSW
125 */
126#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
127#define TIF_SIGPENDING 1 /* signal pending */
128#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
129#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
130#define TIF_IRET 4 /* return with iret */
131#define TIF_SYSCALL_EMU 5 /* syscall emulation active */
132#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
133#define TIF_SECCOMP 7 /* secure computing */
134#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */
135#define TIF_MEMDIE 16
136#define TIF_DEBUG 17 /* uses debug registers */
137#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
138#define TIF_FREEZE 19 /* is freezing for suspend */
139#define TIF_NOTSC 20 /* TSC is not accessible in userland */
140
141#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
142#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
143#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
144#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
145#define _TIF_IRET (1<<TIF_IRET)
146#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
147#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
148#define _TIF_SECCOMP (1<<TIF_SECCOMP)
149#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
150#define _TIF_DEBUG (1<<TIF_DEBUG)
151#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
152#define _TIF_FREEZE (1<<TIF_FREEZE)
153#define _TIF_NOTSC (1<<TIF_NOTSC)
154
155/* work to do on interrupt/exception return */
156#define _TIF_WORK_MASK \
157 (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
158 _TIF_SECCOMP | _TIF_SYSCALL_EMU))
159/* work to do on any return to u-space */
160#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
161
162/* flags to check in __switch_to() */
163#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG)
164#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC)
165
166/*
167 * Thread-synchronous status.
168 *
169 * This is different from the flags in that nobody else
170 * ever touches our thread-synchronous status, so we don't
171 * have to worry about atomic accesses.
172 */
173#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
174#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
175
176#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
177
178#endif /* __KERNEL__ */
179
180#endif /* _ASM_THREAD_INFO_H */
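
The current_thread_info() trick above relies on the stack being a naturally aligned THREAD_SIZE block, so masking the low bits off %esp lands on the thread_info at the base of the allocation. A standalone sketch of the same arithmetic; the stack pointer value is made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192   /* the !CONFIG_4KSTACKS value above */

int main(void)
{
        uintptr_t esp = 0xc2345f80u;   /* made-up kernel stack pointer */
        uintptr_t ti  = esp & ~(uintptr_t)(THREAD_SIZE - 1);

        /* thread_info sits at the base of the two-page stack block */
        printf("thread_info at %#lx\n", (unsigned long)ti);  /* 0xc2344000 */
        return 0;
}
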
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
deleted file mode 100644
index eac011366dc2..000000000000
--- a/include/asm-i386/time.h
+++ /dev/null
@@ -1,44 +0,0 @@
1#ifndef _ASMi386_TIME_H
2#define _ASMi386_TIME_H
3
4#include <linux/efi.h>
5#include "mach_time.h"
6
7static inline unsigned long native_get_wallclock(void)
8{
9 unsigned long retval;
10
11 if (efi_enabled)
12 retval = efi_get_time();
13 else
14 retval = mach_get_cmos_time();
15
16 return retval;
17}
18
19static inline int native_set_wallclock(unsigned long nowtime)
20{
21 int retval;
22
23 if (efi_enabled)
24 retval = efi_set_rtc_mmss(nowtime);
25 else
26 retval = mach_set_rtc_mmss(nowtime);
27
28 return retval;
29}
30
31extern void (*late_time_init)(void);
32extern void hpet_time_init(void);
33
34#ifdef CONFIG_PARAVIRT
35#include <asm/paravirt.h>
36#else /* !CONFIG_PARAVIRT */
37
38#define get_wallclock() native_get_wallclock()
39#define set_wallclock(x) native_set_wallclock(x)
40#define choose_time_init() hpet_time_init
41
42#endif /* CONFIG_PARAVIRT */
43
44#endif
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
deleted file mode 100644
index 0db7e994fb8b..000000000000
--- a/include/asm-i386/timer.h
+++ /dev/null
@@ -1,50 +0,0 @@
1#ifndef _ASMi386_TIMER_H
2#define _ASMi386_TIMER_H
3#include <linux/init.h>
4#include <linux/pm.h>
5
6#define TICK_SIZE (tick_nsec / 1000)
7
8unsigned long long native_sched_clock(void);
9unsigned long native_calculate_cpu_khz(void);
10
11extern int timer_ack;
12extern int no_timer_check;
13extern int recalibrate_cpu_khz(void);
14
15#ifndef CONFIG_PARAVIRT
16#define calculate_cpu_khz() native_calculate_cpu_khz()
17#endif
18
19/* Accelerators for sched_clock()
20 * convert from cycles(64bits) => nanoseconds (64bits)
21 * basic equation:
22 * ns = cycles / (freq / ns_per_sec)
23 * ns = cycles * (ns_per_sec / freq)
24 * ns = cycles * (10^9 / (cpu_khz * 10^3))
25 * ns = cycles * (10^6 / cpu_khz)
26 *
27 * Then we use scaling math (suggested by george@mvista.com) to get:
28 * ns = cycles * (10^6 * SC / cpu_khz) / SC
29 * ns = cycles * cyc2ns_scale / SC
30 *
31 * And since SC is a constant power of two, we can convert the div
32 * into a shift.
33 *
34 * We can use a khz divisor instead of a mhz one to keep better precision, since
35 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
36 * (mathieu.desnoyers@polymtl.ca)
37 *
38 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
39 */
40extern unsigned long cyc2ns_scale __read_mostly;
41
42#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
43
44static inline unsigned long long cycles_2_ns(unsigned long long cyc)
45{
46 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
47}
48
49
50#endif
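
Working the scaling math above through with concrete numbers: for an assumed 2 GHz CPU, cyc2ns_scale = (10^6 << 10) / 2,000,000 = 512, so two million cycles convert to exactly one million nanoseconds. A standalone sketch:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10   /* 2^10, as in the header */

int main(void)
{
        unsigned long cpu_khz = 2000000UL;   /* assumed 2 GHz CPU */
        unsigned long cyc2ns_scale =
                (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;   /* = 512 */
        unsigned long long cyc = 2000000ULL; /* one millisecond of cycles */

        printf("ns = %llu\n", (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);
        /* prints ns = 1000000 */
        return 0;
}
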
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
deleted file mode 100644
index 3666044409f0..000000000000
--- a/include/asm-i386/timex.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * linux/include/asm-i386/timex.h
3 *
4 * i386 architecture timex specifications
5 */
6#ifndef _ASMi386_TIMEX_H
7#define _ASMi386_TIMEX_H
8
9#include <asm/processor.h>
10#include <asm/tsc.h>
11
12#ifdef CONFIG_X86_ELAN
13# define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
14#else
15# define CLOCK_TICK_RATE 1193182 /* Underlying HZ */
16#endif
17
18
19extern int read_current_timer(unsigned long *timer_value);
20#define ARCH_HAS_READ_CURRENT_TIMER 1
21
22#endif
diff --git a/include/asm-i386/tlb.h b/include/asm-i386/tlb.h
deleted file mode 100644
index c006c5c92bea..000000000000
--- a/include/asm-i386/tlb.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _I386_TLB_H
2#define _I386_TLB_H
3
4/*
5 * x86 doesn't need any special per-pte or
6 * per-vma handling..
7 */
8#define tlb_start_vma(tlb, vma) do { } while (0)
9#define tlb_end_vma(tlb, vma) do { } while (0)
10#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
11
12/*
13 * .. because we flush the whole mm when it
14 * fills up.
15 */
16#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
17
18#include <asm-generic/tlb.h>
19
20#endif
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
deleted file mode 100644
index a50fa6741486..000000000000
--- a/include/asm-i386/tlbflush.h
+++ /dev/null
@@ -1,175 +0,0 @@
1#ifndef _I386_TLBFLUSH_H
2#define _I386_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <asm/processor.h>
6
7#ifdef CONFIG_PARAVIRT
8#include <asm/paravirt.h>
9#else
10#define __flush_tlb() __native_flush_tlb()
11#define __flush_tlb_global() __native_flush_tlb_global()
12#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
13#endif
14
15#define __native_flush_tlb() \
16 do { \
17 unsigned int tmpreg; \
18 \
19 __asm__ __volatile__( \
20 "movl %%cr3, %0; \n" \
21 "movl %0, %%cr3; # flush TLB \n" \
22 : "=r" (tmpreg) \
23 :: "memory"); \
24 } while (0)
25
26/*
27 * Global pages have to be flushed a bit differently. Not a real
28 * performance problem because this does not happen often.
29 */
30#define __native_flush_tlb_global() \
31 do { \
32 unsigned int tmpreg, cr4, cr4_orig; \
33 \
34 __asm__ __volatile__( \
35 "movl %%cr4, %2; # turn off PGE \n" \
36 "movl %2, %1; \n" \
37 "andl %3, %1; \n" \
38 "movl %1, %%cr4; \n" \
39 "movl %%cr3, %0; \n" \
40 "movl %0, %%cr3; # flush TLB \n" \
41 "movl %2, %%cr4; # turn PGE back on \n" \
42 : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
43 : "i" (~X86_CR4_PGE) \
44 : "memory"); \
45 } while (0)
46
47#define __native_flush_tlb_single(addr) \
48 __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
49
50# define __flush_tlb_all() \
51 do { \
52 if (cpu_has_pge) \
53 __flush_tlb_global(); \
54 else \
55 __flush_tlb(); \
56 } while (0)
57
58#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
59
60#ifdef CONFIG_X86_INVLPG
61# define __flush_tlb_one(addr) __flush_tlb_single(addr)
62#else
63# define __flush_tlb_one(addr) \
64 do { \
65 if (cpu_has_invlpg) \
66 __flush_tlb_single(addr); \
67 else \
68 __flush_tlb(); \
69 } while (0)
70#endif
71
72/*
73 * TLB flushing:
74 *
75 * - flush_tlb() flushes the current mm struct TLBs
76 * - flush_tlb_all() flushes all processes' TLBs
77 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
78 * - flush_tlb_page(vma, vmaddr) flushes one page
79 * - flush_tlb_range(vma, start, end) flushes a range of pages
80 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
81 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
82 * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other CPUs
83 *
84 * ..but the i386 has somewhat limited tlb flushing capabilities,
85 * and page-granular flushes are available only on i486 and up.
86 */
87
88#define TLB_FLUSH_ALL 0xffffffff
89
90
91#ifndef CONFIG_SMP
92
93#include <linux/sched.h>
94
95#define flush_tlb() __flush_tlb()
96#define flush_tlb_all() __flush_tlb_all()
97#define local_flush_tlb() __flush_tlb()
98
99static inline void flush_tlb_mm(struct mm_struct *mm)
100{
101 if (mm == current->active_mm)
102 __flush_tlb();
103}
104
105static inline void flush_tlb_page(struct vm_area_struct *vma,
106 unsigned long addr)
107{
108 if (vma->vm_mm == current->active_mm)
109 __flush_tlb_one(addr);
110}
111
112static inline void flush_tlb_range(struct vm_area_struct *vma,
113 unsigned long start, unsigned long end)
114{
115 if (vma->vm_mm == current->active_mm)
116 __flush_tlb();
117}
118
119static inline void native_flush_tlb_others(const cpumask_t *cpumask,
120 struct mm_struct *mm, unsigned long va)
121{
122}
123
124#else /* SMP */
125
126#include <asm/smp.h>
127
128#define local_flush_tlb() \
129 __flush_tlb()
130
131extern void flush_tlb_all(void);
132extern void flush_tlb_current_task(void);
133extern void flush_tlb_mm(struct mm_struct *);
134extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
135
136#define flush_tlb() flush_tlb_current_task()
137
138static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
139{
140 flush_tlb_mm(vma->vm_mm);
141}
142
143void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
144 unsigned long va);
145
146#define TLBSTATE_OK 1
147#define TLBSTATE_LAZY 2
148
149struct tlb_state
150{
151 struct mm_struct *active_mm;
152 int state;
153 char __cacheline_padding[L1_CACHE_BYTES-8];
154};
155DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
156#endif /* SMP */
157
158#ifndef CONFIG_PARAVIRT
159#define flush_tlb_others(mask, mm, va) \
160 native_flush_tlb_others(&mask, mm, va)
161#endif
162
163static inline void flush_tlb_kernel_range(unsigned long start,
164 unsigned long end)
165{
166 flush_tlb_all();
167}
168
169static inline void flush_tlb_pgtables(struct mm_struct *mm,
170 unsigned long start, unsigned long end)
171{
172 /* i386 does not keep any page table caches in TLB */
173}
174
175#endif /* _I386_TLBFLUSH_H */
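
The __flush_tlb_one() fallback above deserves spelling out: page-granular INVLPG exists only on i486 and later, so pre-486 parts get a full CR3 reload instead. A toy userspace model of just that decision; the printouts stand in for the privileged instructions, and cpu_family is an assumed value:

#include <stdio.h>

static int cpu_family = 6;                 /* assumption: a P6-class CPU */
#define cpu_has_invlpg (cpu_family > 3)

static void flush_one(unsigned long addr)
{
        if (cpu_has_invlpg)
                printf("invlpg %#lx\n", addr);       /* flush one entry */
        else
                printf("reload cr3: full TLB flush\n");
}

int main(void)
{
        flush_one(0xc0100000UL);
        return 0;
}
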
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
deleted file mode 100644
index 19b2dafd0c81..000000000000
--- a/include/asm-i386/topology.h
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * linux/include/asm-i386/topology.h
3 *
4 * Written by: Matthew Dobson, IBM Corporation
5 *
6 * Copyright (C) 2002, IBM Corp.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Send feedback to <colpatch@us.ibm.com>
26 */
27#ifndef _ASM_I386_TOPOLOGY_H
28#define _ASM_I386_TOPOLOGY_H
29
30#ifdef CONFIG_X86_HT
31#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
32#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
33#define topology_core_siblings(cpu) (cpu_core_map[cpu])
34#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
35#endif
36
37#ifdef CONFIG_NUMA
38
39#include <asm/mpspec.h>
40
41#include <linux/cpumask.h>
42
43/* Mappings between logical cpu number and node number */
44extern cpumask_t node_2_cpu_mask[];
45extern int cpu_2_node[];
46
47/* Returns the number of the node containing CPU 'cpu' */
48static inline int cpu_to_node(int cpu)
49{
50 return cpu_2_node[cpu];
51}
52
53/* Returns the number of the node containing Node 'node'. This architecture is flat,
54 so it is a pretty simple function! */
55#define parent_node(node) (node)
56
57/* Returns a bitmask of CPUs on Node 'node'. */
58static inline cpumask_t node_to_cpumask(int node)
59{
60 return node_2_cpu_mask[node];
61}
62
63/* Returns the number of the first CPU on Node 'node'. */
64static inline int node_to_first_cpu(int node)
65{
66 cpumask_t mask = node_to_cpumask(node);
67 return first_cpu(mask);
68}
69
70#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node
71#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus))
72
73/* sched_domains SD_NODE_INIT for NUMAQ machines */
74#define SD_NODE_INIT (struct sched_domain) { \
75 .span = CPU_MASK_NONE, \
76 .parent = NULL, \
77 .child = NULL, \
78 .groups = NULL, \
79 .min_interval = 8, \
80 .max_interval = 32, \
81 .busy_factor = 32, \
82 .imbalance_pct = 125, \
83 .cache_nice_tries = 1, \
84 .busy_idx = 3, \
85 .idle_idx = 1, \
86 .newidle_idx = 2, \
87 .wake_idx = 1, \
88 .flags = SD_LOAD_BALANCE \
89 | SD_BALANCE_EXEC \
90 | SD_BALANCE_FORK \
91 | SD_SERIALIZE \
92 | SD_WAKE_BALANCE, \
93 .last_balance = jiffies, \
94 .balance_interval = 1, \
95 .nr_balance_failed = 0, \
96}
97
98extern unsigned long node_start_pfn[];
99extern unsigned long node_end_pfn[];
100extern unsigned long node_remap_size[];
101
102#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])
103
104#else /* !CONFIG_NUMA */
105/*
106 * Other i386 platforms should define their own version of the
107 * above macros here.
108 */
109
110#include <asm-generic/topology.h>
111
112#endif /* CONFIG_NUMA */
113
114extern cpumask_t cpu_coregroup_map(int cpu);
115
116#ifdef CONFIG_SMP
117#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
118#define smt_capable() (smp_num_siblings > 1)
119#endif
120
121#endif /* _ASM_I386_TOPOLOGY_H */
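
The NUMA mapping above is a pair of flat lookup tables, which is why cpu_to_node() is a single array read. A toy standalone model; the table contents are made up for a hypothetical two-node box:

#include <stdio.h>

static int cpu_2_node[4] = { 0, 0, 1, 1 };   /* cpus 0-1 on node 0, 2-3 on node 1 */

static int cpu_to_node(int cpu)
{
        return cpu_2_node[cpu];
}

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu %d -> node %d\n", cpu, cpu_to_node(cpu));
        return 0;
}
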
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
deleted file mode 100644
index a4d806610b7f..000000000000
--- a/include/asm-i386/tsc.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * linux/include/asm-i386/tsc.h
3 *
4 * i386 TSC related functions
5 */
6#ifndef _ASM_i386_TSC_H
7#define _ASM_i386_TSC_H
8
9#include <asm/processor.h>
10
11/*
12 * Standard way to access the cycle counter.
13 */
14typedef unsigned long long cycles_t;
15
16extern unsigned int cpu_khz;
17extern unsigned int tsc_khz;
18
19static inline cycles_t get_cycles(void)
20{
21 unsigned long long ret = 0;
22
23#ifndef CONFIG_X86_TSC
24 if (!cpu_has_tsc)
25 return 0;
26#endif
27
28#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
29 rdtscll(ret);
30#endif
31 return ret;
32}
33
34/* Like get_cycles, but make sure the CPU is synchronized. */
35static __always_inline cycles_t get_cycles_sync(void)
36{
37 unsigned long long ret;
38 unsigned eax, edx;
39
40 /*
41 * Use RDTSCP if possible; it is guaranteed to be synchronous
42 * and doesn't cause a VMEXIT on hypervisors
43 */
44 alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
45 ASM_OUTPUT2("=a" (eax), "=d" (edx)),
46 "a" (0U), "d" (0U) : "ecx", "memory");
47 ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
48 if (ret)
49 return ret;
50
51 /*
52 * Don't do an additional sync on CPUs where we know
53 * RDTSC is already synchronous:
54 */
55 alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
56 "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
57 rdtscll(ret);
58
59 return ret;
60}
61
62extern void tsc_init(void);
63extern void mark_tsc_unstable(char *reason);
64extern int unsynchronized_tsc(void);
65extern void init_tsc_clocksource(void);
66int check_tsc_unstable(void);
67
68/*
69 * Boot-time check whether the TSCs are synchronized across
70 * all CPUs/cores:
71 */
72extern void check_tsc_sync_source(int cpu);
73extern void check_tsc_sync_target(void);
74
75#endif
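
get_cycles() above boils down to RDTSC. A minimal userspace equivalent for x86, offered as a sketch: unlike get_cycles_sync() it is not serialized, so back-to-back reads may observe out-of-order execution.

#include <stdio.h>

/* x86 only: read the time stamp counter, as get_cycles() does. */
static unsigned long long rdtsc_cycles(void)
{
        unsigned int eax, edx;

        __asm__ __volatile__("rdtsc" : "=a" (eax), "=d" (edx));
        return ((unsigned long long)edx << 32) | eax;
}

int main(void)
{
        unsigned long long t0 = rdtsc_cycles();
        unsigned long long t1 = rdtsc_cycles();

        printf("delta = %llu cycles\n", t1 - t0);
        return 0;
}
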
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h
deleted file mode 100644
index ad0a55bd782f..000000000000
--- a/include/asm-i386/types.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _I386_TYPES_H
2#define _I386_TYPES_H
3
4#ifndef __ASSEMBLY__
5
6typedef unsigned short umode_t;
7
8/*
9 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
10 * header files exported to user space
11 */
12
13typedef __signed__ char __s8;
14typedef unsigned char __u8;
15
16typedef __signed__ short __s16;
17typedef unsigned short __u16;
18
19typedef __signed__ int __s32;
20typedef unsigned int __u32;
21
22#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
23typedef __signed__ long long __s64;
24typedef unsigned long long __u64;
25#endif
26
27#endif /* __ASSEMBLY__ */
28
29/*
30 * These aren't exported outside the kernel to avoid name space clashes
31 */
32#ifdef __KERNEL__
33
34#define BITS_PER_LONG 32
35
36#ifndef __ASSEMBLY__
37
38
39typedef signed char s8;
40typedef unsigned char u8;
41
42typedef signed short s16;
43typedef unsigned short u16;
44
45typedef signed int s32;
46typedef unsigned int u32;
47
48typedef signed long long s64;
49typedef unsigned long long u64;
50
51/* DMA addresses come in generic and 64-bit flavours. */
52
53#ifdef CONFIG_HIGHMEM64G
54typedef u64 dma_addr_t;
55#else
56typedef u32 dma_addr_t;
57#endif
58typedef u64 dma64_addr_t;
59
60#endif /* __ASSEMBLY__ */
61
62#endif /* __KERNEL__ */
63
64#endif
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
deleted file mode 100644
index d2a4f7be9c2c..000000000000
--- a/include/asm-i386/uaccess.h
+++ /dev/null
@@ -1,590 +0,0 @@
1#ifndef __i386_UACCESS_H
2#define __i386_UACCESS_H
3
4/*
5 * User space memory access functions
6 */
7#include <linux/errno.h>
8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h>
11#include <asm/page.h>
12
13#define VERIFY_READ 0
14#define VERIFY_WRITE 1
15
16/*
17 * The fs value determines whether argument validity checking should be
18 * performed or not. If get_fs() == USER_DS, checking is performed, with
19 * get_fs() == KERNEL_DS, checking is bypassed.
20 *
21 * For historical reasons, these macros are grossly misnamed.
22 */
23
24#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
25
26
27#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
28#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
29
30#define get_ds() (KERNEL_DS)
31#define get_fs() (current_thread_info()->addr_limit)
32#define set_fs(x) (current_thread_info()->addr_limit = (x))
33
34#define segment_eq(a,b) ((a).seg == (b).seg)
35
36/*
37 * movsl can be slow when source and dest are not both 8-byte aligned
38 */
39#ifdef CONFIG_X86_INTEL_USERCOPY
40extern struct movsl_mask {
41 int mask;
42} ____cacheline_aligned_in_smp movsl_mask;
43#endif
44
45#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg))
46
47/*
48 * Test whether a block of memory is a valid user space address.
49 * Returns 0 if the range is valid, nonzero otherwise.
50 *
51 * This is equivalent to the following test:
52 * (u33)addr + (u33)size > (u33)current->addr_limit.seg
53 *
54 * This needs 33-bit arithmetic. We have a carry...
55 */
56#define __range_ok(addr,size) ({ \
57 unsigned long flag,roksum; \
58 __chk_user_ptr(addr); \
59 asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
60 :"=&r" (flag), "=r" (roksum) \
61 :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
62 flag; })
63
64/**
65 * access_ok: - Checks if a user space pointer is valid
66 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
67 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
68 * to write to a block, it is always safe to read from it.
69 * @addr: User space pointer to start of block to check
70 * @size: Size of block to check
71 *
72 * Context: User context only. This function may sleep.
73 *
74 * Checks if a pointer to a block of memory in user space is valid.
75 *
76 * Returns true (nonzero) if the memory block may be valid, false (zero)
77 * if it is definitely invalid.
78 *
79 * Note that, depending on architecture, this function probably just
80 * checks that the pointer is in the user space range - after calling
81 * this function, memory access functions may still return -EFAULT.
82 */
83#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
84
85/*
86 * The exception table consists of pairs of addresses: the first is the
87 * address of an instruction that is allowed to fault, and the second is
88 * the address at which the program should continue. No registers are
89 * modified, so it is entirely up to the continuation code to figure out
90 * what to do.
91 *
92 * All the routines below use bits of fixup code that are out of line
93 * with the main instruction path. This means when everything is well,
94 * we don't even have to jump over them. Further, they do not intrude
95 * on our cache or tlb entries.
96 */
97
98struct exception_table_entry
99{
100 unsigned long insn, fixup;
101};
102
103extern int fixup_exception(struct pt_regs *regs);
104
105/*
106 * These are the main single-value transfer routines. They automatically
107 * use the right size if we just have the right pointer type.
108 *
109 * This gets kind of ugly. We want to return _two_ values in "get_user()"
110 * and yet we don't want to do any pointers, because that is too much
111 * of a performance impact. Thus we have a few rather ugly macros here,
112 * and hide all the ugliness from the user.
113 *
114 * The "__xxx" versions of the user access functions are versions that
115 * do not verify the address space, that must have been done previously
116 * with a separate "access_ok()" call (this is used when we do multiple
117 * accesses to the same area of user memory).
118 */
119
120extern void __get_user_1(void);
121extern void __get_user_2(void);
122extern void __get_user_4(void);
123
124#define __get_user_x(size,ret,x,ptr) \
125 __asm__ __volatile__("call __get_user_" #size \
126 :"=a" (ret),"=d" (x) \
127 :"0" (ptr))
128
129
130/* Careful: we have to cast the result to the type of the pointer for sign reasons */
131/**
132 * get_user: - Get a simple variable from user space.
133 * @x: Variable to store result.
134 * @ptr: Source address, in user space.
135 *
136 * Context: User context only. This function may sleep.
137 *
138 * This macro copies a single simple variable from user space to kernel
139 * space. It supports simple types like char and int, but not larger
140 * data types like structures or arrays.
141 *
142 * @ptr must have pointer-to-simple-variable type, and the result of
143 * dereferencing @ptr must be assignable to @x without a cast.
144 *
145 * Returns zero on success, or -EFAULT on error.
146 * On error, the variable @x is set to zero.
147 */
148#define get_user(x,ptr) \
149({ int __ret_gu; \
150 unsigned long __val_gu; \
151 __chk_user_ptr(ptr); \
152 switch(sizeof (*(ptr))) { \
153 case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
154 case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
155 case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
156 default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \
157 } \
158 (x) = (__typeof__(*(ptr)))__val_gu; \
159 __ret_gu; \
160})
161
162extern void __put_user_bad(void);
163
164/*
165 * Strange magic calling convention: pointer in %ecx,
166 * value in %eax(:%edx), return value in %eax, no clobbers.
167 */
168extern void __put_user_1(void);
169extern void __put_user_2(void);
170extern void __put_user_4(void);
171extern void __put_user_8(void);
172
173#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
174#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
175#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
176#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr))
177#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr))
178
179/**
180 * put_user: - Write a simple value into user space.
181 * @x: Value to copy to user space.
182 * @ptr: Destination address, in user space.
183 *
184 * Context: User context only. This function may sleep.
185 *
186 * This macro copies a single simple value from kernel space to user
187 * space. It supports simple types like char and int, but not larger
188 * data types like structures or arrays.
189 *
190 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
191 * to the result of dereferencing @ptr.
192 *
193 * Returns zero on success, or -EFAULT on error.
194 */
195#ifdef CONFIG_X86_WP_WORKS_OK
196
197#define put_user(x,ptr) \
198({ int __ret_pu; \
199 __typeof__(*(ptr)) __pu_val; \
200 __chk_user_ptr(ptr); \
201 __pu_val = x; \
202 switch(sizeof(*(ptr))) { \
203 case 1: __put_user_1(__pu_val, ptr); break; \
204 case 2: __put_user_2(__pu_val, ptr); break; \
205 case 4: __put_user_4(__pu_val, ptr); break; \
206 case 8: __put_user_8(__pu_val, ptr); break; \
207 default:__put_user_X(__pu_val, ptr); break; \
208 } \
209 __ret_pu; \
210})
211
212#else
213#define put_user(x,ptr) \
214({ \
215 int __ret_pu; \
216 __typeof__(*(ptr)) __pus_tmp = x; \
217 __ret_pu=0; \
218 if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
219 sizeof(*(ptr))) != 0)) \
220 __ret_pu=-EFAULT; \
221 __ret_pu; \
222 })
223
224
225#endif
226
227/**
228 * __get_user: - Get a simple variable from user space, with less checking.
229 * @x: Variable to store result.
230 * @ptr: Source address, in user space.
231 *
232 * Context: User context only. This function may sleep.
233 *
234 * This macro copies a single simple variable from user space to kernel
235 * space. It supports simple types like char and int, but not larger
236 * data types like structures or arrays.
237 *
238 * @ptr must have pointer-to-simple-variable type, and the result of
239 * dereferencing @ptr must be assignable to @x without a cast.
240 *
241 * Caller must check the pointer with access_ok() before calling this
242 * function.
243 *
244 * Returns zero on success, or -EFAULT on error.
245 * On error, the variable @x is set to zero.
246 */
247#define __get_user(x,ptr) \
248 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
249
250
251/**
252 * __put_user: - Write a simple value into user space, with less checking.
253 * @x: Value to copy to user space.
254 * @ptr: Destination address, in user space.
255 *
256 * Context: User context only. This function may sleep.
257 *
258 * This macro copies a single simple value from kernel space to user
259 * space. It supports simple types like char and int, but not larger
260 * data types like structures or arrays.
261 *
262 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
263 * to the result of dereferencing @ptr.
264 *
265 * Caller must check the pointer with access_ok() before calling this
266 * function.
267 *
268 * Returns zero on success, or -EFAULT on error.
269 */
270#define __put_user(x,ptr) \
271 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
272
273#define __put_user_nocheck(x,ptr,size) \
274({ \
275 long __pu_err; \
276 __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
277 __pu_err; \
278})
279
280
281#define __put_user_u64(x, addr, err) \
282 __asm__ __volatile__( \
283 "1: movl %%eax,0(%2)\n" \
284 "2: movl %%edx,4(%2)\n" \
285 "3:\n" \
286 ".section .fixup,\"ax\"\n" \
287 "4: movl %3,%0\n" \
288 " jmp 3b\n" \
289 ".previous\n" \
290 ".section __ex_table,\"a\"\n" \
291 " .align 4\n" \
292 " .long 1b,4b\n" \
293 " .long 2b,4b\n" \
294 ".previous" \
295 : "=r"(err) \
296 : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
297
298#ifdef CONFIG_X86_WP_WORKS_OK
299
300#define __put_user_size(x,ptr,size,retval,errret) \
301do { \
302 retval = 0; \
303 __chk_user_ptr(ptr); \
304 switch (size) { \
305 case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \
306 case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
307 case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \
308 case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
309 default: __put_user_bad(); \
310 } \
311} while (0)
312
313#else
314
315#define __put_user_size(x,ptr,size,retval,errret) \
316do { \
317 __typeof__(*(ptr)) __pus_tmp = x; \
318 retval = 0; \
319 \
320 if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
321 retval = errret; \
322} while (0)
323
324#endif
325struct __large_struct { unsigned long buf[100]; };
326#define __m(x) (*(struct __large_struct __user *)(x))
327
328/*
329 * Tell gcc we read from memory instead of writing: this is because
330 * we do not write to any memory gcc knows about, so there are no
331 * aliasing issues.
332 */
333#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
334 __asm__ __volatile__( \
335 "1: mov"itype" %"rtype"1,%2\n" \
336 "2:\n" \
337 ".section .fixup,\"ax\"\n" \
338 "3: movl %3,%0\n" \
339 " jmp 2b\n" \
340 ".previous\n" \
341 ".section __ex_table,\"a\"\n" \
342 " .align 4\n" \
343 " .long 1b,3b\n" \
344 ".previous" \
345 : "=r"(err) \
346 : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
347
348
349#define __get_user_nocheck(x,ptr,size) \
350({ \
351 long __gu_err; \
352 unsigned long __gu_val; \
353 __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
354 (x) = (__typeof__(*(ptr)))__gu_val; \
355 __gu_err; \
356})
357
358extern long __get_user_bad(void);
359
360#define __get_user_size(x,ptr,size,retval,errret) \
361do { \
362 retval = 0; \
363 __chk_user_ptr(ptr); \
364 switch (size) { \
365 case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \
366 case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \
367 case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \
368 default: (x) = __get_user_bad(); \
369 } \
370} while (0)
371
372#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
373 __asm__ __volatile__( \
374 "1: mov"itype" %2,%"rtype"1\n" \
375 "2:\n" \
376 ".section .fixup,\"ax\"\n" \
377 "3: movl %3,%0\n" \
378 " xor"itype" %"rtype"1,%"rtype"1\n" \
379 " jmp 2b\n" \
380 ".previous\n" \
381 ".section __ex_table,\"a\"\n" \
382 " .align 4\n" \
383 " .long 1b,3b\n" \
384 ".previous" \
385 : "=r"(err), ltype (x) \
386 : "m"(__m(addr)), "i"(errret), "0"(err))
387
388
389unsigned long __must_check __copy_to_user_ll(void __user *to,
390 const void *from, unsigned long n);
391unsigned long __must_check __copy_from_user_ll(void *to,
392 const void __user *from, unsigned long n);
393unsigned long __must_check __copy_from_user_ll_nozero(void *to,
394 const void __user *from, unsigned long n);
395unsigned long __must_check __copy_from_user_ll_nocache(void *to,
396 const void __user *from, unsigned long n);
397unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
398 const void __user *from, unsigned long n);
399
400/**
401 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
402 * @to: Destination address, in user space.
403 * @from: Source address, in kernel space.
404 * @n: Number of bytes to copy.
405 *
406 * Context: User context only.
407 *
408 * Copy data from kernel space to user space. Caller must check
409 * the specified block with access_ok() before calling this function.
410 * The caller should also make sure the user space pages are pinned
411 * so that we don't take a page fault and sleep.
412 *
413 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
414 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
415 * If a store crosses a page boundary and gets a fault, the x86 will not write
416 * anything, so this is accurate.
417 */
418
419static __always_inline unsigned long __must_check
420__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
421{
422 if (__builtin_constant_p(n)) {
423 unsigned long ret;
424
425 switch (n) {
426 case 1:
427 __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
428 return ret;
429 case 2:
430 __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
431 return ret;
432 case 4:
433 __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
434 return ret;
435 }
436 }
437 return __copy_to_user_ll(to, from, n);
438}
439
440/**
441 * __copy_to_user: - Copy a block of data into user space, with less checking.
442 * @to: Destination address, in user space.
443 * @from: Source address, in kernel space.
444 * @n: Number of bytes to copy.
445 *
446 * Context: User context only. This function may sleep.
447 *
448 * Copy data from kernel space to user space. Caller must check
449 * the specified block with access_ok() before calling this function.
450 *
451 * Returns number of bytes that could not be copied.
452 * On success, this will be zero.
453 */
454static __always_inline unsigned long __must_check
455__copy_to_user(void __user *to, const void *from, unsigned long n)
456{
457 might_sleep();
458 return __copy_to_user_inatomic(to, from, n);
459}
460
461static __always_inline unsigned long
462__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
463{
464 /* Avoid zeroing the tail if the copy fails..
465 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
466 * but as the zeroing behaviour is only significant when n is not
467 * constant, that shouldn't be a problem.
468 */
469 if (__builtin_constant_p(n)) {
470 unsigned long ret;
471
472 switch (n) {
473 case 1:
474 __get_user_size(*(u8 *)to, from, 1, ret, 1);
475 return ret;
476 case 2:
477 __get_user_size(*(u16 *)to, from, 2, ret, 2);
478 return ret;
479 case 4:
480 __get_user_size(*(u32 *)to, from, 4, ret, 4);
481 return ret;
482 }
483 }
484 return __copy_from_user_ll_nozero(to, from, n);
485}
486
487/**
488 * __copy_from_user: - Copy a block of data from user space, with less checking.
489 * @to: Destination address, in kernel space.
490 * @from: Source address, in user space.
491 * @n: Number of bytes to copy.
492 *
493 * Context: User context only. This function may sleep.
494 *
495 * Copy data from user space to kernel space. Caller must check
496 * the specified block with access_ok() before calling this function.
497 *
498 * Returns number of bytes that could not be copied.
499 * On success, this will be zero.
500 *
501 * If some data could not be copied, this function will pad the copied
502 * data to the requested size using zero bytes.
503 *
504 * An alternate version - __copy_from_user_inatomic() - may be called from
505 * atomic context and will fail rather than sleep. In this case the
506 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
507 * for explanation of why this is needed.
508 */
509static __always_inline unsigned long
510__copy_from_user(void *to, const void __user *from, unsigned long n)
511{
512 might_sleep();
513 if (__builtin_constant_p(n)) {
514 unsigned long ret;
515
516 switch (n) {
517 case 1:
518 __get_user_size(*(u8 *)to, from, 1, ret, 1);
519 return ret;
520 case 2:
521 __get_user_size(*(u16 *)to, from, 2, ret, 2);
522 return ret;
523 case 4:
524 __get_user_size(*(u32 *)to, from, 4, ret, 4);
525 return ret;
526 }
527 }
528 return __copy_from_user_ll(to, from, n);
529}
530
531#define ARCH_HAS_NOCACHE_UACCESS
532
533static __always_inline unsigned long __copy_from_user_nocache(void *to,
534 const void __user *from, unsigned long n)
535{
536 might_sleep();
537 if (__builtin_constant_p(n)) {
538 unsigned long ret;
539
540 switch (n) {
541 case 1:
542 __get_user_size(*(u8 *)to, from, 1, ret, 1);
543 return ret;
544 case 2:
545 __get_user_size(*(u16 *)to, from, 2, ret, 2);
546 return ret;
547 case 4:
548 __get_user_size(*(u32 *)to, from, 4, ret, 4);
549 return ret;
550 }
551 }
552 return __copy_from_user_ll_nocache(to, from, n);
553}
554
555static __always_inline unsigned long
556__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
557{
558 return __copy_from_user_ll_nocache_nozero(to, from, n);
559}
560
561unsigned long __must_check copy_to_user(void __user *to,
562 const void *from, unsigned long n);
563unsigned long __must_check copy_from_user(void *to,
564 const void __user *from, unsigned long n);
565long __must_check strncpy_from_user(char *dst, const char __user *src,
566 long count);
567long __must_check __strncpy_from_user(char *dst,
568 const char __user *src, long count);
569
570/**
571 * strlen_user: - Get the size of a string in user space.
572 * @str: The string to measure.
573 *
574 * Context: User context only. This function may sleep.
575 *
576 * Get the size of a NUL-terminated string in user space.
577 *
578 * Returns the size of the string INCLUDING the terminating NUL.
579 * On exception, returns 0.
580 *
581 * If there is a limit on the length of a valid string, you may wish to
582 * consider using strnlen_user() instead.
583 */
584#define strlen_user(str) strnlen_user(str, LONG_MAX)
585
586long strnlen_user(const char __user *str, long n);
587unsigned long __must_check clear_user(void __user *mem, unsigned long len);
588unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
589
590#endif /* __i386_UACCESS_H */
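
The __range_ok() asm above implements "addr + size must lie at or below the limit" with an explicit carry so the 32-bit sum cannot silently wrap. The same check written portably with 64-bit arithmetic, as a model rather than the kernel macro; like the asm, it returns nonzero when the block is NOT ok, and access_ok() tests for zero:

#include <stdio.h>

static unsigned long range_bad(unsigned long addr, unsigned long size,
                               unsigned long limit)
{
        unsigned long long end = (unsigned long long)addr + size;

        return end > limit;   /* the half-open range [addr, addr+size) */
}

int main(void)
{
        unsigned long limit = 0xC0000000UL;   /* USER_DS: PAGE_OFFSET */

        printf("%lu\n", range_bad(0xBFFFF000UL, 0x1000, limit)); /* 0: ok  */
        printf("%lu\n", range_bad(0xBFFFF000UL, 0x2000, limit)); /* 1: bad */
        return 0;
}
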
diff --git a/include/asm-i386/ucontext.h b/include/asm-i386/ucontext.h
deleted file mode 100644
index b0db36925f55..000000000000
--- a/include/asm-i386/ucontext.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASMi386_UCONTEXT_H
2#define _ASMi386_UCONTEXT_H
3
4struct ucontext {
5 unsigned long uc_flags;
6 struct ucontext *uc_link;
7 stack_t uc_stack;
8 struct sigcontext uc_mcontext;
9 sigset_t uc_sigmask; /* mask last for extensibility */
10};
11
12#endif /* !_ASMi386_UCONTEXT_H */
diff --git a/include/asm-i386/unaligned.h b/include/asm-i386/unaligned.h
deleted file mode 100644
index 7acd7957621e..000000000000
--- a/include/asm-i386/unaligned.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef __I386_UNALIGNED_H
2#define __I386_UNALIGNED_H
3
4/*
5 * The i386 can do unaligned accesses itself.
6 *
7 * The strange macros are there to make sure these can't
8 * be misused in a way that makes them not work on other
9 * architectures where unaligned accesses aren't as simple.
10 */
11
12/**
13 * get_unaligned - get value from possibly mis-aligned location
14 * @ptr: pointer to value
15 *
16 * This macro should be used for accessing values larger in size than
17 * single bytes at locations that are expected to be improperly aligned,
18 * e.g. retrieving a u16 value from a location not u16-aligned.
19 *
20 * Note that unaligned accesses can be very expensive on some architectures.
21 */
22#define get_unaligned(ptr) (*(ptr))
23
24/**
25 * put_unaligned - put value to a possibly mis-aligned location
26 * @val: value to place
27 * @ptr: pointer to location
28 *
29 * This macro should be used for placing values larger in size than
30 * single bytes at locations that are expected to be improperly aligned,
31 * e.g. writing a u16 value to a location not u16-aligned.
32 *
33 * Note that unaligned accesses can be very expensive on some architectures.
34 */
35#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
36
37#endif
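
On i386 get_unaligned() can be a plain dereference because the CPU tolerates unaligned loads. On strict-alignment architectures the usual portable idiom is a byte-wise copy; a sketch of that equivalent (not the kernel macro):

#include <stdio.h>
#include <string.h>

static unsigned short get_unaligned_u16(const void *p)
{
        unsigned short v;

        memcpy(&v, p, sizeof(v));   /* safe at any alignment */
        return v;
}

int main(void)
{
        unsigned char buf[3] = { 0x00, 0x34, 0x12 };

        /* reads 0x1234 on a little-endian machine */
        printf("%#x\n", get_unaligned_u16(buf + 1));
        return 0;
}
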
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
deleted file mode 100644
index 9b15545eb9b5..000000000000
--- a/include/asm-i386/unistd.h
+++ /dev/null
@@ -1,373 +0,0 @@
1#ifndef _ASM_I386_UNISTD_H_
2#define _ASM_I386_UNISTD_H_
3
4/*
5 * This file contains the system call numbers.
6 */
7
8#define __NR_restart_syscall 0
9#define __NR_exit 1
10#define __NR_fork 2
11#define __NR_read 3
12#define __NR_write 4
13#define __NR_open 5
14#define __NR_close 6
15#define __NR_waitpid 7
16#define __NR_creat 8
17#define __NR_link 9
18#define __NR_unlink 10
19#define __NR_execve 11
20#define __NR_chdir 12
21#define __NR_time 13
22#define __NR_mknod 14
23#define __NR_chmod 15
24#define __NR_lchown 16
25#define __NR_break 17
26#define __NR_oldstat 18
27#define __NR_lseek 19
28#define __NR_getpid 20
29#define __NR_mount 21
30#define __NR_umount 22
31#define __NR_setuid 23
32#define __NR_getuid 24
33#define __NR_stime 25
34#define __NR_ptrace 26
35#define __NR_alarm 27
36#define __NR_oldfstat 28
37#define __NR_pause 29
38#define __NR_utime 30
39#define __NR_stty 31
40#define __NR_gtty 32
41#define __NR_access 33
42#define __NR_nice 34
43#define __NR_ftime 35
44#define __NR_sync 36
45#define __NR_kill 37
46#define __NR_rename 38
47#define __NR_mkdir 39
48#define __NR_rmdir 40
49#define __NR_dup 41
50#define __NR_pipe 42
51#define __NR_times 43
52#define __NR_prof 44
53#define __NR_brk 45
54#define __NR_setgid 46
55#define __NR_getgid 47
56#define __NR_signal 48
57#define __NR_geteuid 49
58#define __NR_getegid 50
59#define __NR_acct 51
60#define __NR_umount2 52
61#define __NR_lock 53
62#define __NR_ioctl 54
63#define __NR_fcntl 55
64#define __NR_mpx 56
65#define __NR_setpgid 57
66#define __NR_ulimit 58
67#define __NR_oldolduname 59
68#define __NR_umask 60
69#define __NR_chroot 61
70#define __NR_ustat 62
71#define __NR_dup2 63
72#define __NR_getppid 64
73#define __NR_getpgrp 65
74#define __NR_setsid 66
75#define __NR_sigaction 67
76#define __NR_sgetmask 68
77#define __NR_ssetmask 69
78#define __NR_setreuid 70
79#define __NR_setregid 71
80#define __NR_sigsuspend 72
81#define __NR_sigpending 73
82#define __NR_sethostname 74
83#define __NR_setrlimit 75
84#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
85#define __NR_getrusage 77
86#define __NR_gettimeofday 78
87#define __NR_settimeofday 79
88#define __NR_getgroups 80
89#define __NR_setgroups 81
90#define __NR_select 82
91#define __NR_symlink 83
92#define __NR_oldlstat 84
93#define __NR_readlink 85
94#define __NR_uselib 86
95#define __NR_swapon 87
96#define __NR_reboot 88
97#define __NR_readdir 89
98#define __NR_mmap 90
99#define __NR_munmap 91
100#define __NR_truncate 92
101#define __NR_ftruncate 93
102#define __NR_fchmod 94
103#define __NR_fchown 95
104#define __NR_getpriority 96
105#define __NR_setpriority 97
106#define __NR_profil 98
107#define __NR_statfs 99
108#define __NR_fstatfs 100
109#define __NR_ioperm 101
110#define __NR_socketcall 102
111#define __NR_syslog 103
112#define __NR_setitimer 104
113#define __NR_getitimer 105
114#define __NR_stat 106
115#define __NR_lstat 107
116#define __NR_fstat 108
117#define __NR_olduname 109
118#define __NR_iopl 110
119#define __NR_vhangup 111
120#define __NR_idle 112
121#define __NR_vm86old 113
122#define __NR_wait4 114
123#define __NR_swapoff 115
124#define __NR_sysinfo 116
125#define __NR_ipc 117
126#define __NR_fsync 118
127#define __NR_sigreturn 119
128#define __NR_clone 120
129#define __NR_setdomainname 121
130#define __NR_uname 122
131#define __NR_modify_ldt 123
132#define __NR_adjtimex 124
133#define __NR_mprotect 125
134#define __NR_sigprocmask 126
135#define __NR_create_module 127
136#define __NR_init_module 128
137#define __NR_delete_module 129
138#define __NR_get_kernel_syms 130
139#define __NR_quotactl 131
140#define __NR_getpgid 132
141#define __NR_fchdir 133
142#define __NR_bdflush 134
143#define __NR_sysfs 135
144#define __NR_personality 136
145#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
146#define __NR_setfsuid 138
147#define __NR_setfsgid 139
148#define __NR__llseek 140
149#define __NR_getdents 141
150#define __NR__newselect 142
151#define __NR_flock 143
152#define __NR_msync 144
153#define __NR_readv 145
154#define __NR_writev 146
155#define __NR_getsid 147
156#define __NR_fdatasync 148
157#define __NR__sysctl 149
158#define __NR_mlock 150
159#define __NR_munlock 151
160#define __NR_mlockall 152
161#define __NR_munlockall 153
162#define __NR_sched_setparam 154
163#define __NR_sched_getparam 155
164#define __NR_sched_setscheduler 156
165#define __NR_sched_getscheduler 157
166#define __NR_sched_yield 158
167#define __NR_sched_get_priority_max 159
168#define __NR_sched_get_priority_min 160
169#define __NR_sched_rr_get_interval 161
170#define __NR_nanosleep 162
171#define __NR_mremap 163
172#define __NR_setresuid 164
173#define __NR_getresuid 165
174#define __NR_vm86 166
175#define __NR_query_module 167
176#define __NR_poll 168
177#define __NR_nfsservctl 169
178#define __NR_setresgid 170
179#define __NR_getresgid 171
180#define __NR_prctl 172
181#define __NR_rt_sigreturn 173
182#define __NR_rt_sigaction 174
183#define __NR_rt_sigprocmask 175
184#define __NR_rt_sigpending 176
185#define __NR_rt_sigtimedwait 177
186#define __NR_rt_sigqueueinfo 178
187#define __NR_rt_sigsuspend 179
188#define __NR_pread64 180
189#define __NR_pwrite64 181
190#define __NR_chown 182
191#define __NR_getcwd 183
192#define __NR_capget 184
193#define __NR_capset 185
194#define __NR_sigaltstack 186
195#define __NR_sendfile 187
196#define __NR_getpmsg 188 /* some people actually want streams */
197#define __NR_putpmsg 189 /* some people actually want streams */
198#define __NR_vfork 190
199#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
200#define __NR_mmap2 192
201#define __NR_truncate64 193
202#define __NR_ftruncate64 194
203#define __NR_stat64 195
204#define __NR_lstat64 196
205#define __NR_fstat64 197
206#define __NR_lchown32 198
207#define __NR_getuid32 199
208#define __NR_getgid32 200
209#define __NR_geteuid32 201
210#define __NR_getegid32 202
211#define __NR_setreuid32 203
212#define __NR_setregid32 204
213#define __NR_getgroups32 205
214#define __NR_setgroups32 206
215#define __NR_fchown32 207
216#define __NR_setresuid32 208
217#define __NR_getresuid32 209
218#define __NR_setresgid32 210
219#define __NR_getresgid32 211
220#define __NR_chown32 212
221#define __NR_setuid32 213
222#define __NR_setgid32 214
223#define __NR_setfsuid32 215
224#define __NR_setfsgid32 216
225#define __NR_pivot_root 217
226#define __NR_mincore 218
227#define __NR_madvise 219
228#define __NR_madvise1 219 /* delete when C lib stub is removed */
229#define __NR_getdents64 220
230#define __NR_fcntl64 221
231/* 223 is unused */
232#define __NR_gettid 224
233#define __NR_readahead 225
234#define __NR_setxattr 226
235#define __NR_lsetxattr 227
236#define __NR_fsetxattr 228
237#define __NR_getxattr 229
238#define __NR_lgetxattr 230
239#define __NR_fgetxattr 231
240#define __NR_listxattr 232
241#define __NR_llistxattr 233
242#define __NR_flistxattr 234
243#define __NR_removexattr 235
244#define __NR_lremovexattr 236
245#define __NR_fremovexattr 237
246#define __NR_tkill 238
247#define __NR_sendfile64 239
248#define __NR_futex 240
249#define __NR_sched_setaffinity 241
250#define __NR_sched_getaffinity 242
251#define __NR_set_thread_area 243
252#define __NR_get_thread_area 244
253#define __NR_io_setup 245
254#define __NR_io_destroy 246
255#define __NR_io_getevents 247
256#define __NR_io_submit 248
257#define __NR_io_cancel 249
258#define __NR_fadvise64 250
259/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
260#define __NR_exit_group 252
261#define __NR_lookup_dcookie 253
262#define __NR_epoll_create 254
263#define __NR_epoll_ctl 255
264#define __NR_epoll_wait 256
265#define __NR_remap_file_pages 257
266#define __NR_set_tid_address 258
267#define __NR_timer_create 259
268#define __NR_timer_settime (__NR_timer_create+1)
269#define __NR_timer_gettime (__NR_timer_create+2)
270#define __NR_timer_getoverrun (__NR_timer_create+3)
271#define __NR_timer_delete (__NR_timer_create+4)
272#define __NR_clock_settime (__NR_timer_create+5)
273#define __NR_clock_gettime (__NR_timer_create+6)
274#define __NR_clock_getres (__NR_timer_create+7)
275#define __NR_clock_nanosleep (__NR_timer_create+8)
276#define __NR_statfs64 268
277#define __NR_fstatfs64 269
278#define __NR_tgkill 270
279#define __NR_utimes 271
280#define __NR_fadvise64_64 272
281#define __NR_vserver 273
282#define __NR_mbind 274
283#define __NR_get_mempolicy 275
284#define __NR_set_mempolicy 276
285#define __NR_mq_open 277
286#define __NR_mq_unlink (__NR_mq_open+1)
287#define __NR_mq_timedsend (__NR_mq_open+2)
288#define __NR_mq_timedreceive (__NR_mq_open+3)
289#define __NR_mq_notify (__NR_mq_open+4)
290#define __NR_mq_getsetattr (__NR_mq_open+5)
291#define __NR_kexec_load 283
292#define __NR_waitid 284
293/* #define __NR_sys_setaltroot 285 */
294#define __NR_add_key 286
295#define __NR_request_key 287
296#define __NR_keyctl 288
297#define __NR_ioprio_set 289
298#define __NR_ioprio_get 290
299#define __NR_inotify_init 291
300#define __NR_inotify_add_watch 292
301#define __NR_inotify_rm_watch 293
302#define __NR_migrate_pages 294
303#define __NR_openat 295
304#define __NR_mkdirat 296
305#define __NR_mknodat 297
306#define __NR_fchownat 298
307#define __NR_futimesat 299
308#define __NR_fstatat64 300
309#define __NR_unlinkat 301
310#define __NR_renameat 302
311#define __NR_linkat 303
312#define __NR_symlinkat 304
313#define __NR_readlinkat 305
314#define __NR_fchmodat 306
315#define __NR_faccessat 307
316#define __NR_pselect6 308
317#define __NR_ppoll 309
318#define __NR_unshare 310
319#define __NR_set_robust_list 311
320#define __NR_get_robust_list 312
321#define __NR_splice 313
322#define __NR_sync_file_range 314
323#define __NR_tee 315
324#define __NR_vmsplice 316
325#define __NR_move_pages 317
326#define __NR_getcpu 318
327#define __NR_epoll_pwait 319
328#define __NR_utimensat 320
329#define __NR_signalfd 321
330#define __NR_timerfd 322
331#define __NR_eventfd 323
332#define __NR_fallocate 324
333
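Each __NR_* value above is the eax value the kernel's int 0x80 entry expects, so a program can invoke a call its libc does not yet wrap. A minimal user-space sketch, assuming glibc's generic syscall(2) wrapper (not part of this header):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		/* 224 is __NR_gettid above; early libcs shipped no wrapper */
		long tid = syscall(224);
		printf("tid=%ld\n", tid);
		return 0;
	}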
334#ifdef __KERNEL__
335
336#define NR_syscalls 325
337
338#define __ARCH_WANT_IPC_PARSE_VERSION
339#define __ARCH_WANT_OLD_READDIR
340#define __ARCH_WANT_OLD_STAT
341#define __ARCH_WANT_STAT64
342#define __ARCH_WANT_SYS_ALARM
343#define __ARCH_WANT_SYS_GETHOSTNAME
344#define __ARCH_WANT_SYS_PAUSE
345#define __ARCH_WANT_SYS_SGETMASK
346#define __ARCH_WANT_SYS_SIGNAL
347#define __ARCH_WANT_SYS_TIME
348#define __ARCH_WANT_SYS_UTIME
349#define __ARCH_WANT_SYS_WAITPID
350#define __ARCH_WANT_SYS_SOCKETCALL
351#define __ARCH_WANT_SYS_FADVISE64
352#define __ARCH_WANT_SYS_GETPGRP
353#define __ARCH_WANT_SYS_LLSEEK
354#define __ARCH_WANT_SYS_NICE
355#define __ARCH_WANT_SYS_OLD_GETRLIMIT
356#define __ARCH_WANT_SYS_OLDUMOUNT
357#define __ARCH_WANT_SYS_SIGPENDING
358#define __ARCH_WANT_SYS_SIGPROCMASK
359#define __ARCH_WANT_SYS_RT_SIGACTION
360#define __ARCH_WANT_SYS_RT_SIGSUSPEND
361
362/*
363 * "Conditional" syscalls
364 *
365 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
366 * but it doesn't work on all toolchains, so we just do it by hand
367 */
368#ifndef cond_syscall
369#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
370#endif
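The weak alias means a kernel configured without some syscall still links: the symbol silently resolves to sys_ni_syscall, which returns -ENOSYS. A sketch of typical use (kernel/sys_ni.c is full of entries of this shape; sys_vm86 is shown only as a plausible example):

	/* if CONFIG_VM86 is off, sys_vm86 becomes sys_ni_syscall */
	cond_syscall(sys_vm86);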
371
372#endif /* __KERNEL__ */
373#endif /* _ASM_I386_UNISTD_H_ */
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
deleted file mode 100644
index 43c70c3de2f9..000000000000
--- a/include/asm-i386/unwind.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef _ASM_I386_UNWIND_H
2#define _ASM_I386_UNWIND_H
3
4#define UNW_PC(frame) ((void)(frame), 0)
5#define UNW_SP(frame) ((void)(frame), 0)
6#define UNW_FP(frame) ((void)(frame), 0)
7
8static inline int arch_unw_user_mode(const void *info)
9{
10 return 0;
11}
12
13#endif /* _ASM_I386_UNWIND_H */
diff --git a/include/asm-i386/user.h b/include/asm-i386/user.h
deleted file mode 100644
index 0e85d2a5e33a..000000000000
--- a/include/asm-i386/user.h
+++ /dev/null
@@ -1,121 +0,0 @@
1#ifndef _I386_USER_H
2#define _I386_USER_H
3
4#include <asm/page.h>
5/* Core file format: The core file is written in such a way that gdb
6 can understand it and provide useful information to the user (under
7 linux we use the 'trad-core' bfd). There are quite a number of
8 obstacles to being able to view the contents of the floating point
9 registers, and until these are solved you will not be able to view the
10 contents of them. Actually, you can read in the core file and look at
11 the contents of the user struct to find out what the floating point
12 registers contain.
13 The actual file contents are as follows:
14 UPAGE: 1 page consisting of a user struct that tells gdb what is present
15 in the file. Directly after this is a copy of the task_struct, which
16 is currently not used by gdb, but it may come in useful at some point.
17 All of the registers are stored as part of the upage. The upage should
18 always be only one page.
19 DATA: The data area is stored. We use current->end_text to
20 current->brk to pick up all of the user variables, plus any memory
21 that may have been malloced. No attempt is made to determine if a page
22 is demand-zero or if a page is totally unused, we just cover the entire
23 range. All of the addresses are rounded in such a way that an integral
24 number of pages is written.
25 STACK: We need the stack information in order to get a meaningful
26 backtrace. We need to write the data from (esp) to
27 current->start_stack, so we round each of these off in order to be able
28 to write an integer number of pages.
29 The minimum core file size is 3 pages, or 12288 bytes.
30*/
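(That minimum follows from the three sections just listed: one UPAGE plus at least one page each of data and stack, 3 * 4096 = 12288 bytes.)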
31
32/*
33 * Pentium III FXSR, SSE support
34 * Gareth Hughes <gareth@valinux.com>, May 2000
35 *
36 * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
37 * interacting with the FXSR-format floating point environment. Floating
38 * point data can be accessed in the regular format in the usual manner,
39 * and both the standard and SIMD floating point data can be accessed via
40 * the new ptrace requests. In either case, changes to the FPU environment
41 * will be reflected in the task's state as expected.
42 */
43
44struct user_i387_struct {
45 long cwd;
46 long swd;
47 long twd;
48 long fip;
49 long fcs;
50 long foo;
51 long fos;
52 long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
53};
54
55struct user_fxsr_struct {
56 unsigned short cwd;
57 unsigned short swd;
58 unsigned short twd;
59 unsigned short fop;
60 long fip;
61 long fcs;
62 long foo;
63 long fos;
64 long mxcsr;
65 long reserved;
66 long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
67 long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
68 long padding[56];
69};
70
71/*
72 * This is the old layout of "struct pt_regs", and
73 * is still the layout used by user mode (the new
74 * pt_regs doesn't have all registers as the kernel
75 * doesn't use the extra segment registers)
76 */
77struct user_regs_struct {
78 long ebx, ecx, edx, esi, edi, ebp, eax;
79 unsigned short ds, __ds, es, __es;
80 unsigned short fs, __fs, gs, __gs;
81 long orig_eax, eip;
82 unsigned short cs, __cs;
83 long eflags, esp;
84 unsigned short ss, __ss;
85};
86
87/* When the kernel dumps core, it starts by dumping the user struct -
88 this will be used by gdb to figure out where the data and stack segments
89 are within the file, and what virtual addresses to use. */
90struct user{
91/* We start with the registers, to mimic the way that "memory" is returned
92 from the ptrace(3,...) function. */
93 struct user_regs_struct regs; /* Where the registers are actually stored */
94/* ptrace does not yet supply these. Someday.... */
95 int u_fpvalid; /* True if math co-processor is being used. */
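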
96 /* for this mess. Not yet used. */
97 struct user_i387_struct i387; /* Math Co-processor registers. */
98/* The rest of this junk is to help gdb figure out what goes where */
99 unsigned long int u_tsize; /* Text segment size (pages). */
100 unsigned long int u_dsize; /* Data segment size (pages). */
101 unsigned long int u_ssize; /* Stack segment size (pages). */
102 unsigned long start_code; /* Starting virtual address of text. */
103 unsigned long start_stack; /* Starting virtual address of stack area.
104 This is actually the bottom of the stack,
105 the top of the stack is always found in the
106 esp register. */
107 long int signal; /* Signal that caused the core dump. */
108 int reserved; /* No longer used */
109 struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
110 /* the registers. */
111 struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
112 unsigned long magic; /* To uniquely identify a core file */
113 char u_comm[32]; /* User command that was responsible */
114 int u_debugreg[8];
115};
116#define NBPG PAGE_SIZE
117#define UPAGES 1
118#define HOST_TEXT_START_ADDR (u.start_code)
119#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
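The four macros above exist for bfd-style core readers. A hedged reader-side sketch of locating the stack region, assuming 'u' has been read from the first page of a trad-core file (the helper name is hypothetical):

	static unsigned long stack_end(const struct user *u)
	{
		/* mirrors HOST_STACK_END_ADDR: the stack grows down from here */
		return u->start_stack + u->u_ssize * 4096 /* NBPG */;
	}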
120
121#endif /* _I386_USER_H */
diff --git a/include/asm-i386/vga.h b/include/asm-i386/vga.h
deleted file mode 100644
index 0ecf68ac03aa..000000000000
--- a/include/asm-i386/vga.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6
7#ifndef _LINUX_ASM_VGA_H_
8#define _LINUX_ASM_VGA_H_
9
10/*
11 * On the PC, we can just recalculate addresses and then
12 * access the videoram directly without any black magic.
13 */
14
15#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
16
17#define vga_readb(x) (*(x))
18#define vga_writeb(x,y) (*(y) = (x))
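Because VGA_MAP_MEM is just phys_to_virt() on the PC, text-mode memory can be poked directly once mapped. A hedged in-kernel sketch (0xb8000 is the standard colour text buffer; not part of this header):

	volatile char *vram = (volatile char *)VGA_MAP_MEM(0xb8000, 0x8000);
	vga_writeb('A', vram);	/* character cell (0,0) */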
19
20#endif
diff --git a/include/asm-i386/vic.h b/include/asm-i386/vic.h
deleted file mode 100644
index 53100f353612..000000000000
--- a/include/asm-i386/vic.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager Interrupt Controller */
6
7/* The eight CPI vectors. To activate a CPI, you write a bit mask
8 * corresponding to the processor set to be interrupted into the
9 * relevant register. That set of CPUs will then be interrupted with
10 * the CPI */
11static const int VIC_CPI_Registers[] =
12 {0xFC00, 0xFC01, 0xFC08, 0xFC09,
13 0xFC10, 0xFC11, 0xFC18, 0xFC19 };
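So raising CPI n on a set of processors is a single port write of the CPU mask. A hedged sketch using outb() from <asm/io.h> (CPU numbering assumed):

	/* interrupt processors 0 and 2 with CPI 1 */
	outb((1 << 0) | (1 << 2), VIC_CPI_Registers[1]);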
14
15#define VIC_PROC_WHO_AM_I 0xfc29
16# define QUAD_IDENTIFIER 0xC0
17# define EIGHT_SLOT_IDENTIFIER 0xE0
18#define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72
19#define VIC_CPI_BASE_REGISTER 0xFC41
20#define VIC_PROCESSOR_ID 0xFC21
21# define VIC_CPU_MASQUERADE_ENABLE 0x8
22
23#define VIC_CLAIM_REGISTER_0 0xFC38
24#define VIC_CLAIM_REGISTER_1 0xFC39
25#define VIC_REDIRECT_REGISTER_0 0xFC60
26#define VIC_REDIRECT_REGISTER_1 0xFC61
27#define VIC_PRIORITY_REGISTER 0xFC20
28
29#define VIC_PRIMARY_MC_BASE 0xFC48
30#define VIC_SECONDARY_MC_BASE 0xFC49
31
32#define QIC_PROCESSOR_ID 0xFC71
33# define QIC_CPUID_ENABLE 0x08
34
35#define QIC_VIC_CPI_BASE_REGISTER 0xFC79
36#define QIC_CPI_BASE_REGISTER 0xFC7A
37
38#define QIC_MASK_REGISTER0 0xFC80
39/* NOTE: these are masked high, enabled low */
40# define QIC_PERF_TIMER 0x01
41# define QIC_LPE 0x02
42# define QIC_SYS_INT 0x04
43# define QIC_CMN_INT 0x08
44/* at the moment, just enable CMN_INT, disable SYS_INT */
45# define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */))
46#define QIC_MASK_REGISTER1 0xFC81
47# define QIC_BOOT_CPI_MASK 0xFE
48/* Enable CPI's 1-6 inclusive */
49# define QIC_CPI_ENABLE 0x81
50
51#define QIC_INTERRUPT_CLEAR0 0xFC8A
52#define QIC_INTERRUPT_CLEAR1 0xFC8B
53
54/* this is where we place the CPI vectors */
55#define VIC_DEFAULT_CPI_BASE 0xC0
56/* this is where we place the QIC CPI vectors */
57#define QIC_DEFAULT_CPI_BASE 0xD0
58
59#define VIC_BOOT_INTERRUPT_MASK 0xfe
60
61extern void smp_vic_timer_interrupt(void);
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
deleted file mode 100644
index a5edf517b992..000000000000
--- a/include/asm-i386/vm86.h
+++ /dev/null
@@ -1,215 +0,0 @@
1#ifndef _LINUX_VM86_H
2#define _LINUX_VM86_H
3
4/*
5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how
6 * the Pentium uses them. Linux will return from vm86 mode when both
7 * VIF and VIP are set.
8 *
9 * On a Pentium, we could probably optimize the virtual flags directly
10 * in the eflags register instead of doing it "by hand" in vflags...
11 *
12 * Linus
13 */
14
15#define TF_MASK 0x00000100
16#define IF_MASK 0x00000200
17#define IOPL_MASK 0x00003000
18#define NT_MASK 0x00004000
19#ifdef CONFIG_VM86
20#define VM_MASK 0x00020000
21#else
22#define VM_MASK 0 /* ignored */
23#endif
24#define AC_MASK 0x00040000
25#define VIF_MASK 0x00080000 /* virtual interrupt flag */
26#define VIP_MASK 0x00100000 /* virtual interrupt pending */
27#define ID_MASK 0x00200000
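The VIF/VIP rule in the comment above turns into a simple mask test on the emulated flags. A hedged sketch (the helper name is hypothetical; the real exit path lives in arch/i386/kernel/vm86.c):

	if ((eflags & (VIF_MASK | VIP_MASK)) == (VIF_MASK | VIP_MASK))
		leave_vm86(regs);	/* hypothetical exit helper */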
28
29#define BIOSSEG 0x0f000
30
31#define CPU_086 0
32#define CPU_186 1
33#define CPU_286 2
34#define CPU_386 3
35#define CPU_486 4
36#define CPU_586 5
37
38/*
39 * Return values for the 'vm86()' system call
40 */
41#define VM86_TYPE(retval) ((retval) & 0xff)
42#define VM86_ARG(retval) ((retval) >> 8)
43
44#define VM86_SIGNAL 0 /* return due to signal */
45#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */
46#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
47#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */
48
49/*
50 * Additional return values when invoking new vm86()
51 */
52#define VM86_PICRETURN 4 /* return due to pending PIC request */
53#define VM86_TRAP 6 /* return due to DOS-debugger request */
54
55/*
56 * function codes when invoking new vm86()
57 */
58#define VM86_PLUS_INSTALL_CHECK 0
59#define VM86_ENTER 1
60#define VM86_ENTER_NO_BYPASS 2
61#define VM86_REQUEST_IRQ 3
62#define VM86_FREE_IRQ 4
63#define VM86_GET_IRQ_BITS 5
64#define VM86_GET_AND_RESET_IRQ 6
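Callers decode the packed return value with the VM86_TYPE/VM86_ARG macros above. A hedged user-space sketch of a DOS-emulator style dispatch (handler name hypothetical):

	struct vm86plus_struct v86;	/* regs set up by the caller */
	int ret = vm86(VM86_ENTER, &v86);
	switch (VM86_TYPE(ret)) {
	case VM86_INTx:
		emulate_bios_int(VM86_ARG(ret));	/* hypothetical */
		break;
	case VM86_SIGNAL:
	case VM86_STI:
		break;
	}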
65
66/*
67 * This is the stack-layout seen by the user space program when we have
68 * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
69 * is 'kernel_vm86_regs' (see below).
70 */
71
72struct vm86_regs {
73/*
74 * normal regs, with special meaning for the segment descriptors..
75 */
76 long ebx;
77 long ecx;
78 long edx;
79 long esi;
80 long edi;
81 long ebp;
82 long eax;
83 long __null_ds;
84 long __null_es;
85 long __null_fs;
86 long __null_gs;
87 long orig_eax;
88 long eip;
89 unsigned short cs, __csh;
90 long eflags;
91 long esp;
92 unsigned short ss, __ssh;
93/*
94 * these are specific to v86 mode:
95 */
96 unsigned short es, __esh;
97 unsigned short ds, __dsh;
98 unsigned short fs, __fsh;
99 unsigned short gs, __gsh;
100};
101
102struct revectored_struct {
103 unsigned long __map[8]; /* 256 bits */
104};
105
106struct vm86_struct {
107 struct vm86_regs regs;
108 unsigned long flags;
109 unsigned long screen_bitmap;
110 unsigned long cpu_type;
111 struct revectored_struct int_revectored;
112 struct revectored_struct int21_revectored;
113};
114
115/*
116 * flags masks
117 */
118#define VM86_SCREEN_BITMAP 0x0001
119
120struct vm86plus_info_struct {
121 unsigned long force_return_for_pic:1;
122 unsigned long vm86dbg_active:1; /* for debugger */
123 unsigned long vm86dbg_TFpendig:1; /* for debugger */
124 unsigned long unused:28;
125 unsigned long is_vm86pus:1; /* for vm86 internal use */
126 unsigned char vm86dbg_intxxtab[32]; /* for debugger */
127};
128
129struct vm86plus_struct {
130 struct vm86_regs regs;
131 unsigned long flags;
132 unsigned long screen_bitmap;
133 unsigned long cpu_type;
134 struct revectored_struct int_revectored;
135 struct revectored_struct int21_revectored;
136 struct vm86plus_info_struct vm86plus;
137};
138
139#ifdef __KERNEL__
140/*
141 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
142 * mode - the main change is that the old segment descriptors aren't
143 * useful any more and are forced to be zero by the kernel (and the
144 * hardware when a trap occurs), and the real segment descriptors are
145 * at the end of the structure. Look at ptrace.h to see the "normal"
146 * setup. For user space layout see 'struct vm86_regs' above.
147 */
148#include <asm/ptrace.h>
149
150struct kernel_vm86_regs {
151/*
152 * normal regs, with special meaning for the segment descriptors..
153 */
154 struct pt_regs pt;
155/*
156 * these are specific to v86 mode:
157 */
158 unsigned short es, __esh;
159 unsigned short ds, __dsh;
160 unsigned short fs, __fsh;
161 unsigned short gs, __gsh;
162};
163
164struct kernel_vm86_struct {
165 struct kernel_vm86_regs regs;
166/*
167 * the below part remains on the kernel stack while we are in VM86 mode.
168 * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
169 * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
170 * 'struct kernel_vm86_regs' with the then actual values.
171 * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
172 * in kernelspace, hence we need not reget the data from userspace.
173 */
174#define VM86_TSS_ESP0 flags
175 unsigned long flags;
176 unsigned long screen_bitmap;
177 unsigned long cpu_type;
178 struct revectored_struct int_revectored;
179 struct revectored_struct int21_revectored;
180 struct vm86plus_info_struct vm86plus;
181 struct pt_regs *regs32; /* here we save the pointer to the old regs */
182/*
183 * The below is not part of the structure, but the stack layout continues
184 * this way. In front of 'return-eip' may be some data, depending on
185 * compilation, so we don't rely on this and save the pointer to 'oldregs'
186 * in 'regs32' above.
187 * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
188
189 long return-eip; from call to vm86()
190 struct pt_regs oldregs; user space registers as saved by syscall
191 */
192};
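Per the comment, 'tss.esp0' is pointed at the flags member so that the next ring-0 entry lands just above the saved vm86 registers. A hedged sketch of that computation (helper name hypothetical):

	static inline unsigned long vm86_esp0(struct kernel_vm86_struct *info)
	{
		return (unsigned long)&info->VM86_TSS_ESP0;	/* == &info->flags */
	}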
193
194#ifdef CONFIG_VM86
195
196void handle_vm86_fault(struct kernel_vm86_regs *, long);
197int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
198
199struct task_struct;
200void release_vm86_irqs(struct task_struct *);
201
202#else
203
204#define handle_vm86_fault(a, b)
205#define release_vm86_irqs(a)
206
207static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) {
208 return 0;
209}
210
211#endif /* CONFIG_VM86 */
212
213#endif /* __KERNEL__ */
214
215#endif
diff --git a/include/asm-i386/vmi.h b/include/asm-i386/vmi.h
deleted file mode 100644
index eb8bd892c01e..000000000000
--- a/include/asm-i386/vmi.h
+++ /dev/null
@@ -1,263 +0,0 @@
1/*
2 * VMI interface definition
3 *
4 * Copyright (C) 2005, VMware, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Maintained by: Zachary Amsden zach@vmware.com
22 *
23 */
24#include <linux/types.h>
25
26/*
27 *---------------------------------------------------------------------
28 *
29 * VMI Option ROM API
30 *
31 *---------------------------------------------------------------------
32 */
33#define VMI_SIGNATURE 0x696d5663 /* "cVmi" */
34
35#define PCI_VENDOR_ID_VMWARE 0x15AD
36#define PCI_DEVICE_ID_VMWARE_VMI 0x0801
37
38/*
39 * We use two version numbers for compatibility, with the major
40 * number signifying interface breakages, and the minor number
41 * interface extensions.
42 */
43#define VMI_API_REV_MAJOR 3
44#define VMI_API_REV_MINOR 0
45
46#define VMI_CALL_CPUID 0
47#define VMI_CALL_WRMSR 1
48#define VMI_CALL_RDMSR 2
49#define VMI_CALL_SetGDT 3
50#define VMI_CALL_SetLDT 4
51#define VMI_CALL_SetIDT 5
52#define VMI_CALL_SetTR 6
53#define VMI_CALL_GetGDT 7
54#define VMI_CALL_GetLDT 8
55#define VMI_CALL_GetIDT 9
56#define VMI_CALL_GetTR 10
57#define VMI_CALL_WriteGDTEntry 11
58#define VMI_CALL_WriteLDTEntry 12
59#define VMI_CALL_WriteIDTEntry 13
60#define VMI_CALL_UpdateKernelStack 14
61#define VMI_CALL_SetCR0 15
62#define VMI_CALL_SetCR2 16
63#define VMI_CALL_SetCR3 17
64#define VMI_CALL_SetCR4 18
65#define VMI_CALL_GetCR0 19
66#define VMI_CALL_GetCR2 20
67#define VMI_CALL_GetCR3 21
68#define VMI_CALL_GetCR4 22
69#define VMI_CALL_WBINVD 23
70#define VMI_CALL_SetDR 24
71#define VMI_CALL_GetDR 25
72#define VMI_CALL_RDPMC 26
73#define VMI_CALL_RDTSC 27
74#define VMI_CALL_CLTS 28
75#define VMI_CALL_EnableInterrupts 29
76#define VMI_CALL_DisableInterrupts 30
77#define VMI_CALL_GetInterruptMask 31
78#define VMI_CALL_SetInterruptMask 32
79#define VMI_CALL_IRET 33
80#define VMI_CALL_SYSEXIT 34
81#define VMI_CALL_Halt 35
82#define VMI_CALL_Reboot 36
83#define VMI_CALL_Shutdown 37
84#define VMI_CALL_SetPxE 38
85#define VMI_CALL_SetPxELong 39
86#define VMI_CALL_UpdatePxE 40
87#define VMI_CALL_UpdatePxELong 41
88#define VMI_CALL_MachineToPhysical 42
89#define VMI_CALL_PhysicalToMachine 43
90#define VMI_CALL_AllocatePage 44
91#define VMI_CALL_ReleasePage 45
92#define VMI_CALL_InvalPage 46
93#define VMI_CALL_FlushTLB 47
94#define VMI_CALL_SetLinearMapping 48
95
96#define VMI_CALL_SetIOPLMask 61
97#define VMI_CALL_SetInitialAPState 62
98#define VMI_CALL_APICWrite 63
99#define VMI_CALL_APICRead 64
100#define VMI_CALL_IODelay 65
101#define VMI_CALL_SetLazyMode 73
102
103/*
104 *---------------------------------------------------------------------
105 *
106 * MMU operation flags
107 *
108 *---------------------------------------------------------------------
109 */
110
111/* Flags used by VMI_{Allocate|Release}Page call */
112#define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */
113#define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */
114#define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */
115
116
117/* Flags shared by Allocate|Release Page and PTE updates */
118#define VMI_PAGE_PT 0x01
119#define VMI_PAGE_PD 0x02
120#define VMI_PAGE_PDP 0x04
121#define VMI_PAGE_PML4 0x08
122
123#define VMI_PAGE_NORMAL 0x00 /* for debugging */
124
125/* Flags used by PTE updates */
126#define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */
127#define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */
128#define VMI_PAGE_VA_MASK 0xfffff000
129
130#ifdef CONFIG_X86_PAE
131#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
132#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
133#else
134#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED)
135#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED)
136#endif
137
138/* Flags used by VMI_FlushTLB call */
139#define VMI_FLUSH_TLB 0x01
140#define VMI_FLUSH_GLOBAL 0x02
141
142/*
143 *---------------------------------------------------------------------
144 *
145 * VMI relocation definitions for ROM call get_reloc
146 *
147 *---------------------------------------------------------------------
148 */
149
150/* VMI Relocation types */
151#define VMI_RELOCATION_NONE 0
152#define VMI_RELOCATION_CALL_REL 1
153#define VMI_RELOCATION_JUMP_REL 2
154#define VMI_RELOCATION_NOP 3
155
156#ifndef __ASSEMBLY__
157struct vmi_relocation_info {
158 unsigned char *eip;
159 unsigned char type;
160 unsigned char reserved[3];
161};
162#endif
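A guest consumes these by asking the ROM's get_reloc entry how each VMI_CALL_* site should be rewritten. A heavily hedged sketch (both helpers are hypothetical; the real patching lives in arch/i386/kernel/vmi.c):

	struct vmi_relocation_info *rel = vmi_get_relocation(VMI_CALL_CPUID);
	switch (rel->type) {
	case VMI_RELOCATION_CALL_REL:
		patch_site_call(site, rel->eip);	/* hypothetical */
		break;
	case VMI_RELOCATION_NOP:
		patch_site_nop(site);			/* hypothetical */
		break;
	case VMI_RELOCATION_NONE:
		break;	/* leave the native instruction alone */
	}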
163
164
165/*
166 *---------------------------------------------------------------------
167 *
168 * Generic ROM structures and definitions
169 *
170 *---------------------------------------------------------------------
171 */
172
173#ifndef __ASSEMBLY__
174
175struct vrom_header {
176 u16 rom_signature; // option ROM signature
177 u8 rom_length; // ROM length in 512 byte chunks
178 u8 rom_entry[4]; // 16-bit code entry point
179 u8 rom_pad0; // 4-byte align pad
180 u32 vrom_signature; // VROM identification signature
181 u8 api_version_min;// Minor version of API
182 u8 api_version_maj;// Major version of API
183 u8 jump_slots; // Number of jump slots
184 u8 reserved1; // Reserved for expansion
185 u32 virtual_top; // Hypervisor virtual address start
186 u16 reserved2; // Reserved for expansion
187 u16 license_offs; // Offset to License string
188 u16 pci_header_offs;// Offset to PCI OPROM header
189 u16 pnp_header_offs;// Offset to PnP OPROM header
190 u32 rom_pad3; // PnP reserved / VMI reserved
191 u8 reserved[96]; // Reserved for headers
192 char vmi_init[8]; // VMI_Init jump point
193 char get_reloc[8]; // VMI_GetRelocationInfo jump point
194} __attribute__((packed));
195
196struct pnp_header {
197 char sig[4];
198 char rev;
199 char size;
200 short next;
201 short res;
202 long devID;
203 unsigned short manufacturer_offset;
204 unsigned short product_offset;
205} __attribute__((packed));
206
207struct pci_header {
208 char sig[4];
209 short vendorID;
210 short deviceID;
211 short vpdData;
212 short size;
213 char rev;
214 char class;
215 char subclass;
216 char interface;
217 short chunks;
218 char rom_version_min;
219 char rom_version_maj;
220 char codetype;
221 char lastRom;
222 short reserved;
223} __attribute__((packed));
224
225/* Function prototypes for bootstrapping */
226extern void vmi_init(void);
227extern void vmi_bringup(void);
228extern void vmi_apply_boot_page_allocations(void);
229
230/* State needed to start an application processor in an SMP system. */
231struct vmi_ap_state {
232 u32 cr0;
233 u32 cr2;
234 u32 cr3;
235 u32 cr4;
236
237 u64 efer;
238
239 u32 eip;
240 u32 eflags;
241 u32 eax;
242 u32 ebx;
243 u32 ecx;
244 u32 edx;
245 u32 esp;
246 u32 ebp;
247 u32 esi;
248 u32 edi;
249 u16 cs;
250 u16 ss;
251 u16 ds;
252 u16 es;
253 u16 fs;
254 u16 gs;
255 u16 ldtr;
256
257 u16 gdtr_limit;
258 u32 gdtr_base;
259 u32 idtr_base;
260 u16 idtr_limit;
261};
262
263#endif
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
deleted file mode 100644
index 478188130328..000000000000
--- a/include/asm-i386/vmi_time.h
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * VMI Time wrappers
3 *
4 * Copyright (C) 2006, VMware, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Send feedback to dhecht@vmware.com
22 *
23 */
24
25#ifndef __VMI_TIME_H
26#define __VMI_TIME_H
27
28/*
29 * Raw VMI call indices for timer functions
30 */
31#define VMI_CALL_GetCycleFrequency 66
32#define VMI_CALL_GetCycleCounter 67
33#define VMI_CALL_SetAlarm 68
34#define VMI_CALL_CancelAlarm 69
35#define VMI_CALL_GetWallclockTime 70
36#define VMI_CALL_WallclockUpdated 71
37
38/* Cached VMI timer operations */
39extern struct vmi_timer_ops {
40 u64 (*get_cycle_frequency)(void);
41 u64 (*get_cycle_counter)(int);
42 u64 (*get_wallclock)(void);
43 int (*wallclock_updated)(void);
44 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
45 void (*cancel_alarm)(u32 flags);
46} vmi_timer_ops;
47
48/* Prototypes */
49extern void __init vmi_time_init(void);
50extern unsigned long vmi_get_wallclock(void);
51extern int vmi_set_wallclock(unsigned long now);
52extern unsigned long long vmi_sched_clock(void);
53extern unsigned long vmi_cpu_khz(void);
54
55#ifdef CONFIG_X86_LOCAL_APIC
56extern void __devinit vmi_time_bsp_init(void);
57extern void __devinit vmi_time_ap_init(void);
58#endif
59
60/*
61 * When run under a hypervisor, a vcpu is always in one of three states:
62 * running, halted, or ready. The vcpu is in the 'running' state if it
63 * is executing. When the vcpu executes the halt interface, the vcpu
64 * enters the 'halted' state and remains halted until there is some work
65 * pending for the vcpu (e.g. an alarm expires, host I/O completes on
66 * behalf of virtual I/O). At this point, the vcpu enters the 'ready'
67 * state (waiting for the hypervisor to reschedule it). Finally, at any
68 * time when the vcpu is not in the 'running' state nor the 'halted'
69 * state, it is in the 'ready' state.
70 *
71 * Real time advances while the vcpu is 'running', 'ready', or
72 * 'halted'. Stolen time is the time in which the vcpu is in the
73 * 'ready' state. Available time is the remaining time -- the vcpu is
74 * either 'running' or 'halted'.
75 *
76 * All three views of time are accessible through the VMI cycle
77 * counters.
78 */
79
80/* The cycle counters. */
81#define VMI_CYCLES_REAL 0
82#define VMI_CYCLES_AVAILABLE 1
83#define VMI_CYCLES_STOLEN 2
84
85/* The alarm interface 'flags' bits */
86#define VMI_ALARM_COUNTERS 2
87
88#define VMI_ALARM_COUNTER_MASK 0x000000ff
89
90#define VMI_ALARM_WIRED_IRQ0 0x00000000
91#define VMI_ALARM_WIRED_LVTT 0x00010000
92
93#define VMI_ALARM_IS_ONESHOT 0x00000000
94#define VMI_ALARM_IS_PERIODIC 0x00000100
95
96#define CONFIG_VMI_ALARM_HZ 100
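Tying the pieces together: the counter selector (low bits), the wiring, and the one-shot/periodic bit are OR-ed into 'flags' for set_alarm. A hedged sketch of a periodic CONFIG_VMI_ALARM_HZ timer plus a stolen-time read, using the vmi_timer_ops declared above:

	static void vmi_alarm_sketch(void)
	{
		u64 period = vmi_timer_ops.get_cycle_frequency() / CONFIG_VMI_ALARM_HZ;
		u64 now = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
		u64 stolen;

		vmi_timer_ops.set_alarm(VMI_CYCLES_REAL | VMI_ALARM_WIRED_IRQ0 |
					VMI_ALARM_IS_PERIODIC, now + period, period);
		stolen = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_STOLEN);
		(void)stolen;	/* real time ~= available + stolen */
	}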
97
98#endif
diff --git a/include/asm-i386/voyager.h b/include/asm-i386/voyager.h
deleted file mode 100644
index 91a9932937ab..000000000000
--- a/include/asm-i386/voyager.h
+++ /dev/null
@@ -1,517 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager system */
6
7#undef VOYAGER_DEBUG
8#undef VOYAGER_CAT_DEBUG
9
10#ifdef VOYAGER_DEBUG
11#define VDEBUG(x) printk x
12#else
13#define VDEBUG(x)
14#endif
15
16/* There are three levels of voyager machine: 3, 4 and 5. The rule is
17 * if it's less than 3435 it's a Level 3 except for a 3360 which is
18 * a level 4. A 3435 or above is a Level 5 */
19#define VOYAGER_LEVEL5_AND_ABOVE 0x3435
20#define VOYAGER_LEVEL4 0x3360
21
22/* The L4 DINO ASIC */
23#define VOYAGER_DINO 0x43
24
25/* voyager ports in standard I/O space */
26#define VOYAGER_MC_SETUP 0x96
27
28
29#define VOYAGER_CAT_CONFIG_PORT 0x97
30# define VOYAGER_CAT_DESELECT 0xff
31#define VOYAGER_SSPB_RELOCATION_PORT 0x98
32
33/* Valid CAT controller commands */
34/* start instruction register cycle */
35#define VOYAGER_CAT_IRCYC 0x01
36/* start data register cycle */
37#define VOYAGER_CAT_DRCYC 0x02
38/* move to execute state */
39#define VOYAGER_CAT_RUN 0x0F
40/* end operation */
41#define VOYAGER_CAT_END 0x80
42/* hold in idle state */
43#define VOYAGER_CAT_HOLD 0x90
44/* single step an "intest" vector */
45#define VOYAGER_CAT_STEP 0xE0
46/* return cat controller to CLEMSON mode */
47#define VOYAGER_CAT_CLEMSON 0xFF
48
49/* the default cat command header */
50#define VOYAGER_CAT_HEADER 0x7F
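The command set above implies a write sequence on the config port: deselect, select a module, then run instruction/data register cycles. A loosely hedged sketch (the exact ordering is the CAT driver's business; see voyager_cat.c):

	outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
	outb(module_id, VOYAGER_CAT_CONFIG_PORT);	/* 0x10..0x1f */
	outb(VOYAGER_CAT_IRCYC, VOYAGER_CAT_CONFIG_PORT);
	outb(VOYAGER_CAT_HEADER, VOYAGER_CAT_CONFIG_PORT);
	/* ... shift instruction bits, then data cycles ... */
	outb(VOYAGER_CAT_END, VOYAGER_CAT_CONFIG_PORT);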
51
52/* the range of possible CAT module ids in the system */
53#define VOYAGER_MIN_MODULE 0x10
54#define VOYAGER_MAX_MODULE 0x1f
55
56/* The voyager registers per asic */
57#define VOYAGER_ASIC_ID_REG 0x00
58#define VOYAGER_ASIC_TYPE_REG 0x01
59/* the sub address registers can be made auto incrementing on reads */
60#define VOYAGER_AUTO_INC_REG 0x02
61# define VOYAGER_AUTO_INC 0x04
62# define VOYAGER_NO_AUTO_INC 0xfb
63#define VOYAGER_SUBADDRDATA 0x03
64#define VOYAGER_SCANPATH 0x05
65# define VOYAGER_CONNECT_ASIC 0x01
66# define VOYAGER_DISCONNECT_ASIC 0xfe
67#define VOYAGER_SUBADDRLO 0x06
68#define VOYAGER_SUBADDRHI 0x07
69#define VOYAGER_SUBMODSELECT 0x08
70#define VOYAGER_SUBMODPRESENT 0x09
71
72#define VOYAGER_SUBADDR_LO 0xff
73#define VOYAGER_SUBADDR_HI 0xffff
74
75/* the maximum size of a scan path -- used to form instructions */
76#define VOYAGER_MAX_SCAN_PATH 0x100
77/* the biggest possible register size (in bytes) */
78#define VOYAGER_MAX_REG_SIZE 4
79
80/* Total number of possible modules (including submodules) */
81#define VOYAGER_MAX_MODULES 16
82/* Largest number of asics per module */
83#define VOYAGER_MAX_ASICS_PER_MODULE 7
84
85/* the CAT asic of each module is always the first one */
86#define VOYAGER_CAT_ID 0
87#define VOYAGER_PSI 0x1a
88
89/* voyager instruction operations and registers */
90#define VOYAGER_READ_CONFIG 0x1
91#define VOYAGER_WRITE_CONFIG 0x2
92#define VOYAGER_BYPASS 0xff
93
94typedef struct voyager_asic
95{
96 __u8 asic_addr; /* ASIC address; Level 4 */
97 __u8 asic_type; /* ASIC type */
98 __u8 asic_id; /* ASIC id */
99 __u8 jtag_id[4]; /* JTAG id */
100 __u8 asic_location; /* Location within scan path; start w/ 0 */
101 __u8 bit_location; /* Location within bit stream; start w/ 0 */
102 __u8 ireg_length; /* Instruction register length */
103 __u16 subaddr; /* Amount of sub address space */
104 struct voyager_asic *next; /* Next asic in linked list */
105} voyager_asic_t;
106
107typedef struct voyager_module {
108 __u8 module_addr; /* Module address */
109 __u8 scan_path_connected; /* Scan path connected */
110 __u16 ee_size; /* Size of the EEPROM */
111 __u16 num_asics; /* Number of Asics */
112 __u16 inst_bits; /* Instruction bits in the scan path */
113 __u16 largest_reg; /* Largest register in the scan path */
114 __u16 smallest_reg; /* Smallest register in the scan path */
115 voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */
116 struct voyager_module *submodule; /* Submodule pointer */
117 struct voyager_module *next; /* Next module in linked list */
118} voyager_module_t;
119
120typedef struct voyager_eeprom_hdr {
121 __u8 module_id[4];
122 __u8 version_id;
123 __u8 config_id;
124 __u16 boundry_id; /* boundary scan id */
125 __u16 ee_size; /* size of EEPROM */
126 __u8 assembly[11]; /* assembly # */
127 __u8 assembly_rev; /* assembly rev */
128 __u8 tracer[4]; /* tracer number */
129 __u16 assembly_cksum; /* asm checksum */
130 __u16 power_consump; /* pwr requirements */
131 __u16 num_asics; /* number of asics */
132 __u16 bist_time; /* min. bist time */
133 __u16 err_log_offset; /* error log offset */
134 __u16 scan_path_offset;/* scan path offset */
135 __u16 cct_offset;
136 __u16 log_length; /* length of err log */
137 __u16 xsum_end; /* offset to end of
138 checksum */
139 __u8 reserved[4];
140 __u8 sflag; /* starting sentinel */
141 __u8 part_number[13]; /* prom part number */
142 __u8 version[10]; /* version number */
143 __u8 signature[8];
144 __u16 eeprom_chksum;
145 __u32 data_stamp_offset;
146 __u8 eflag; /* ending sentinel */
147} __attribute__((packed)) voyager_eprom_hdr_t;
148
149
150
151#define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
152#define VOYAGER_XSUM_END_OFFSET 0x2a
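The null-pointer cast above is the classic hand-rolled offsetof(); with the standard macro the same constant could be written as (a sketch, not what the header does):

	#include <stddef.h>
	#define VOYAGER_EPROM_SIZE_OFFSET ((__u16)offsetof(voyager_eprom_hdr_t, ee_size))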
153
154/* the following three definitions are for internal table layouts
155 * in the module EPROMs. We really only care about the IDs and
156 * offsets */
157typedef struct voyager_sp_table {
158 __u8 asic_id;
159 __u8 bypass_flag;
160 __u16 asic_data_offset;
161 __u16 config_data_offset;
162} __attribute__((packed)) voyager_sp_table_t;
163
164typedef struct voyager_jtag_table {
165 __u8 icode[4];
166 __u8 runbist[4];
167 __u8 intest[4];
168 __u8 samp_preld[4];
169 __u8 ireg_len;
170} __attribute__((packed)) voyager_jtt_t;
171
172typedef struct voyager_asic_data_table {
173 __u8 jtag_id[4];
174 __u16 length_bsr;
175 __u16 length_bist_reg;
176 __u32 bist_clk;
177 __u16 subaddr_bits;
178 __u16 seed_bits;
179 __u16 sig_bits;
180 __u16 jtag_offset;
181} __attribute__((packed)) voyager_at_t;
182
183/* Voyager Interrupt Controller (VIC) registers */
184
185/* Base to add to Cross Processor Interrupts (CPIs) when triggering
186 * the CPU IRQ line */
187/* register defines for the WCBICs (one per processor) */
188#define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */
189#define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */
190#define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */
191#define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */
192#define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */
193#define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */
194#define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */
195#define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */
196
197
198/* top of memory registers */
199#define VOYAGER_WCBIC_TOM_L 0x4
200#define VOYAGER_WCBIC_TOM_H 0x5
201
202/* register defines for Voyager Memory Control (VMC)
203 * these are present on L4 machines only */
204#define VOYAGER_VMC1 0x81
205#define VOYAGER_VMC2 0x91
206#define VOYAGER_VMC3 0xa1
207#define VOYAGER_VMC4 0xb1
208
209/* VMC Ports */
210#define VOYAGER_VMC_MEMORY_SETUP 0x9
211# define VMC_Interleaving 0x01
212# define VMC_4Way 0x02
213# define VMC_EvenCacheLines 0x04
214# define VMC_HighLine 0x08
215# define VMC_Start0_Enable 0x20
216# define VMC_Start1_Enable 0x40
217# define VMC_Vremap 0x80
218#define VOYAGER_VMC_BANK_DENSITY 0xa
219# define VMC_BANK_EMPTY 0
220# define VMC_BANK_4MB 1
221# define VMC_BANK_16MB 2
222# define VMC_BANK_64MB 3
223# define VMC_BANK0_MASK 0x03
224# define VMC_BANK1_MASK 0x0C
225# define VMC_BANK2_MASK 0x30
226# define VMC_BANK3_MASK 0xC0
227
228/* Magellan Memory Controller (MMC) defines - present on L5 */
229#define VOYAGER_MMC_ASIC_ID 1
230/* the two memory modules corresponding to memory cards in the system */
231#define VOYAGER_MMC_MEMORY0_MODULE 0x14
232#define VOYAGER_MMC_MEMORY1_MODULE 0x15
233/* the Magellan Memory Address (MMA) defines */
234#define VOYAGER_MMA_ASIC_ID 2
235
236/* Submodule number for the Quad Baseboard */
237#define VOYAGER_QUAD_BASEBOARD 1
238
239/* ASIC defines for the Quad Baseboard */
240#define VOYAGER_QUAD_QDATA0 1
241#define VOYAGER_QUAD_QDATA1 2
242#define VOYAGER_QUAD_QABC 3
243
244/* Useful areas in extended CMOS */
245#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a
246#define VOYAGER_MEMORY_CLICKMAP 0xa23
247#define VOYAGER_DUMP_LOCATION 0xb1a
248
249/* SUS In Control bit - used to tell SUS that we don't need to be
250 * babysat anymore */
251#define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff
252# define VOYAGER_IN_CONTROL_FLAG 0x80
253
254/* Voyager PSI defines */
255#define VOYAGER_PSI_STATUS_REG 0x08
256# define PSI_DC_FAIL 0x01
257# define PSI_MON 0x02
258# define PSI_FAULT 0x04
259# define PSI_ALARM 0x08
260# define PSI_CURRENT 0x10
261# define PSI_DVM 0x20
262# define PSI_PSCFAULT 0x40
263# define PSI_STAT_CHG 0x80
264
265#define VOYAGER_PSI_SUPPLY_REG 0x8000
266 /* read */
267# define PSI_FAIL_DC 0x01
268# define PSI_FAIL_AC 0x02
269# define PSI_MON_INT 0x04
270# define PSI_SWITCH_OFF 0x08
271# define PSI_HX_OFF 0x10
272# define PSI_SECURITY 0x20
273# define PSI_CMOS_BATT_LOW 0x40
274# define PSI_CMOS_BATT_FAIL 0x80
275 /* write */
276# define PSI_CLR_SWITCH_OFF 0x13
277# define PSI_CLR_HX_OFF 0x14
278# define PSI_CLR_CMOS_BATT_FAIL 0x17
279
280#define VOYAGER_PSI_MASK 0x8001
281# define PSI_MASK_MASK 0x10
282
283#define VOYAGER_PSI_AC_FAIL_REG 0x8004
284#define AC_FAIL_STAT_CHANGE 0x80
285
286#define VOYAGER_PSI_GENERAL_REG 0x8007
287 /* read */
288# define PSI_SWITCH_ON 0x01
289# define PSI_SWITCH_ENABLED 0x02
290# define PSI_ALARM_ENABLED 0x08
291# define PSI_SECURE_ENABLED 0x10
292# define PSI_COLD_RESET 0x20
293# define PSI_COLD_START 0x80
294 /* write */
295# define PSI_POWER_DOWN 0x10
296# define PSI_SWITCH_DISABLE 0x01
297# define PSI_SWITCH_ENABLE 0x11
298# define PSI_CLEAR 0x12
299# define PSI_ALARM_DISABLE 0x03
300# define PSI_ALARM_ENABLE 0x13
301# define PSI_CLEAR_COLD_RESET 0x05
302# define PSI_SET_COLD_RESET 0x15
303# define PSI_CLEAR_COLD_START 0x07
304# define PSI_SET_COLD_START 0x17
305
306
307
308struct voyager_bios_info {
309 __u8 len;
310 __u8 major;
311 __u8 minor;
312 __u8 debug;
313 __u8 num_classes;
314 __u8 class_1;
315 __u8 class_2;
316};
317
318/* The following structures and definitions are for the Kernel/SUS
319 * interface; these are needed to find out how SUS initialised any Quad
320 * boards in the system */
321
322#define NUMBER_OF_MC_BUSSES 2
323#define SLOTS_PER_MC_BUS 8
324#define MAX_CPUS 16 /* 16 way CPU system */
325#define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */
326#define MAX_CACHE_LEVELS 4 /* # of cache levels supported */
327#define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */
328#define NUMBER_OF_POS_REGS 8
329
330typedef struct {
331 __u8 MC_Slot;
332 __u8 POS_Values[NUMBER_OF_POS_REGS];
333} __attribute__((packed)) MC_SlotInformation_t;
334
335struct QuadDescription {
336 __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields
337 * will be zero except for slot */
338 __u8 StructureVersion;
339 __u32 CPI_BaseAddress;
340 __u32 LARC_BankSize;
341 __u32 LocalMemoryStateBits;
342 __u8 Slot; /* Processor slots 1 - 4 */
343} __attribute__((packed));
344
345struct ProcBoardInfo {
346 __u8 Type;
347 __u8 StructureVersion;
348 __u8 NumberOfBoards;
349 struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS];
350} __attribute__((packed));
351
352struct CacheDescription {
353 __u8 Level;
354 __u32 TotalSize;
355 __u16 LineSize;
356 __u8 Associativity;
357 __u8 CacheType;
358 __u8 WriteType;
359 __u8 Number_CPUs_SharedBy;
360 __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS];
361
362} __attribute__((packed));
363
364struct CPU_Description {
365 __u8 CPU_HardwareId;
366 char *FRU_String;
367 __u8 NumberOfCacheLevels;
368 struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS];
369} __attribute__((packed));
370
371struct CPU_Info {
372 __u8 Type;
373 __u8 StructureVersion;
374 __u8 NumberOf_CPUs;
375 struct CPU_Description CPU_Data[MAX_CPUS];
376} __attribute__((packed));
377
378
379/*
380 * This structure will be used by SUS and the OS.
381 * The assumption about this structure is that no blank space is
382 * packed in it by our friend the compiler.
383 */
384typedef struct {
385 __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */
386 __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */
387 __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */
388 __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */
389 __u32 OS_Flags; /* Flags set by the OS as info for SUS */
390 __u32 SUS_Flags; /* Flags set by SUS as info for the OS */
391 __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */
392 __u32 WatchDogCount; /* Updated by the OS on every tick. */
393 __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */
394 MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */
395 /* All new SECOND_PASS_INTERFACE fields added from this point */
396 struct ProcBoardInfo *BoardData;
397 struct CPU_Info *CPU_Data;
398 /* All new fields must be added from this point */
399} Voyager_KernelSUS_Mbox_t;
400
401/* structure for finding the right memory address to send a QIC CPI to */
402struct voyager_qic_cpi {
403 /* Each cache line (32 bytes) can trigger a cpi. The cpi
404 * read/write may occur anywhere in the cache line---pick the
405 * middle to be safe */
406 struct {
407 __u32 pad1[3];
408 __u32 cpi;
409 __u32 pad2[4];
410 } qic_cpi[8];
411};
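The padding maths works out as described: pad1 (12 bytes) + cpi (4) + pad2 (16) = one 32-byte cache line, with the cpi word mid-line. Triggering a CPI is then a single access, e.g. (a sketch; voyager_quad_cpi_addr is declared further down in this header):

	/* raise 'cpi' on 'cpu' with one read of the middle of its line */
	(void)voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;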
412
413struct voyager_status {
414 __u32 power_fail:1;
415 __u32 switch_off:1;
416 __u32 request_from_kernel:1;
417};
418
419struct voyager_psi_regs {
420 __u8 cat_id;
421 __u8 cat_dev;
422 __u8 cat_control;
423 __u8 subaddr;
424 __u8 dummy4;
425 __u8 checkbit;
426 __u8 subaddr_low;
427 __u8 subaddr_high;
428 __u8 intstatus;
429 __u8 stat1;
430 __u8 stat3;
431 __u8 fault;
432 __u8 tms;
433 __u8 gen;
434 __u8 sysconf;
435 __u8 dummy15;
436};
437
438struct voyager_psi_subregs {
439 __u8 supply;
440 __u8 mask;
441 __u8 present;
442 __u8 DCfail;
443 __u8 ACfail;
444 __u8 fail;
445 __u8 UPSfail;
446 __u8 genstatus;
447};
448
449struct voyager_psi {
450 struct voyager_psi_regs regs;
451 struct voyager_psi_subregs subregs;
452};
453
454struct voyager_SUS {
455#define VOYAGER_DUMP_BUTTON_NMI 0x1
456#define VOYAGER_SUS_VALID 0x2
457#define VOYAGER_SYSINT_COMPLETE 0x3
458 __u8 SUS_mbox;
459#define VOYAGER_NO_COMMAND 0x0
460#define VOYAGER_IGNORE_DUMP 0x1
461#define VOYAGER_DO_DUMP 0x2
462#define VOYAGER_SYSINT_HANDSHAKE 0x3
463#define VOYAGER_DO_MEM_DUMP 0x4
464#define VOYAGER_SYSINT_WAS_RECOVERED 0x5
465 __u8 kernel_mbox;
466#define VOYAGER_MAILBOX_VERSION 0x10
467 __u8 SUS_version;
468 __u8 kernel_version;
469#define VOYAGER_OS_HAS_SYSINT 0x1
470#define VOYAGER_OS_IN_PROGRESS 0x2
471#define VOYAGER_UPDATING_WDPERIOD 0x4
472 __u32 kernel_flags;
473#define VOYAGER_SUS_BOOTING 0x1
474#define VOYAGER_SUS_IN_PROGRESS 0x2
475 __u32 SUS_flags;
476 __u32 watchdog_period;
477 __u32 watchdog_count;
478 __u32 SUS_errorlog;
479 /* lots of system configuration stuff under here */
480};
481
482/* Variables exported by voyager_smp */
483extern __u32 voyager_extended_vic_processors;
484extern __u32 voyager_allowed_boot_processors;
485extern __u32 voyager_quad_processors;
486extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
487extern struct voyager_SUS *voyager_SUS;
488
489/* variables exported always */
490extern struct task_struct *voyager_thread;
491extern int voyager_level;
492extern struct voyager_status voyager_status;
493
494/* functions exported by the voyager and voyager_smp modules */
495extern int voyager_cat_readb(__u8 module, __u8 asic, int reg);
496extern void voyager_cat_init(void);
497extern void voyager_detect(struct voyager_bios_info *);
498extern void voyager_trap_init(void);
499extern void voyager_setup_irqs(void);
500extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length);
501extern void voyager_smp_intr_init(void);
502extern __u8 voyager_extended_cmos_read(__u16 cmos_address);
503extern void voyager_smp_dump(void);
504extern void voyager_timer_interrupt(void);
505extern void smp_local_timer_interrupt(void);
506extern void voyager_power_off(void);
507extern void smp_voyager_power_off(void *dummy);
508extern void voyager_restart(void);
509extern void voyager_cat_power_off(void);
510extern void voyager_cat_do_common_interrupt(void);
511extern void voyager_handle_nmi(void);
512/* Commands for the following voyager_cat_psi() call */
513#define VOYAGER_PSI_READ 0
514#define VOYAGER_PSI_WRITE 1
515#define VOYAGER_PSI_SUBREAD 2
516#define VOYAGER_PSI_SUBWRITE 3
517extern void voyager_cat_psi(__u8, __u16, __u8 *);
diff --git a/include/asm-i386/xen/hypercall.h b/include/asm-i386/xen/hypercall.h
deleted file mode 100644
index bc0ee7d961ca..000000000000
--- a/include/asm-i386/xen/hypercall.h
+++ /dev/null
@@ -1,413 +0,0 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef __HYPERCALL_H__
34#define __HYPERCALL_H__
35
36#include <linux/errno.h>
37#include <linux/string.h>
38
39#include <xen/interface/xen.h>
40#include <xen/interface/sched.h>
41#include <xen/interface/physdev.h>
42
43extern struct { char _entry[32]; } hypercall_page[];
44
45#define _hypercall0(type, name) \
46({ \
47 long __res; \
48 asm volatile ( \
49 "call %[call]" \
50 : "=a" (__res) \
51 : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
52 : "memory" ); \
53 (type)__res; \
54})
55
56#define _hypercall1(type, name, a1) \
57({ \
58 long __res, __ign1; \
59 asm volatile ( \
60 "call %[call]" \
61 : "=a" (__res), "=b" (__ign1) \
62 : "1" ((long)(a1)), \
63 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
64 : "memory" ); \
65 (type)__res; \
66})
67
68#define _hypercall2(type, name, a1, a2) \
69({ \
70 long __res, __ign1, __ign2; \
71 asm volatile ( \
72 "call %[call]" \
73 : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
74 : "1" ((long)(a1)), "2" ((long)(a2)), \
75 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
76 : "memory" ); \
77 (type)__res; \
78})
79
80#define _hypercall3(type, name, a1, a2, a3) \
81({ \
82 long __res, __ign1, __ign2, __ign3; \
83 asm volatile ( \
84 "call %[call]" \
85 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
86 "=d" (__ign3) \
87 : "1" ((long)(a1)), "2" ((long)(a2)), \
88 "3" ((long)(a3)), \
89 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
90 : "memory" ); \
91 (type)__res; \
92})
93
94#define _hypercall4(type, name, a1, a2, a3, a4) \
95({ \
96 long __res, __ign1, __ign2, __ign3, __ign4; \
97 asm volatile ( \
98 "call %[call]" \
99 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
100 "=d" (__ign3), "=S" (__ign4) \
101 : "1" ((long)(a1)), "2" ((long)(a2)), \
102 "3" ((long)(a3)), "4" ((long)(a4)), \
103 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
104 : "memory" ); \
105 (type)__res; \
106})
107
108#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
109({ \
110 long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
111 asm volatile ( \
112 "call %[call]" \
113 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
114 "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
115 : "1" ((long)(a1)), "2" ((long)(a2)), \
116 "3" ((long)(a3)), "4" ((long)(a4)), \
117 "5" ((long)(a5)), \
118 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
119 : "memory" ); \
120 (type)__res; \
121})
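Each variant pins the result to %eax and arguments to %ebx, %ecx, %edx, %esi, %edi in order, calling indirectly through the per-call slot in hypercall_page. Conceptually, a two-argument wrapper expands to something like this (a sketch of the expansion, not literal preprocessor output):

	static inline int HYPERVISOR_sched_op_sketch(int cmd, unsigned long arg)
	{
		long res, ign1, ign2;
		asm volatile("call %[call]"
			     : "=a" (res), "=b" (ign1), "=c" (ign2)
			     : "1" ((long)cmd), "2" ((long)arg),
			       [call] "m" (hypercall_page[__HYPERVISOR_sched_op])
			     : "memory");
		return (int)res;
	}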
122
123static inline int
124HYPERVISOR_set_trap_table(struct trap_info *table)
125{
126 return _hypercall1(int, set_trap_table, table);
127}
128
129static inline int
130HYPERVISOR_mmu_update(struct mmu_update *req, int count,
131 int *success_count, domid_t domid)
132{
133 return _hypercall4(int, mmu_update, req, count, success_count, domid);
134}
135
136static inline int
137HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
138 int *success_count, domid_t domid)
139{
140 return _hypercall4(int, mmuext_op, op, count, success_count, domid);
141}
142
143static inline int
144HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
145{
146 return _hypercall2(int, set_gdt, frame_list, entries);
147}
148
149static inline int
150HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
151{
152 return _hypercall2(int, stack_switch, ss, esp);
153}
154
155static inline int
156HYPERVISOR_set_callbacks(unsigned long event_selector,
157 unsigned long event_address,
158 unsigned long failsafe_selector,
159 unsigned long failsafe_address)
160{
161 return _hypercall4(int, set_callbacks,
162 event_selector, event_address,
163 failsafe_selector, failsafe_address);
164}
165
166static inline int
167HYPERVISOR_fpu_taskswitch(int set)
168{
169 return _hypercall1(int, fpu_taskswitch, set);
170}
171
172static inline int
173HYPERVISOR_sched_op(int cmd, unsigned long arg)
174{
175 return _hypercall2(int, sched_op, cmd, arg);
176}
177
178static inline long
179HYPERVISOR_set_timer_op(u64 timeout)
180{
181 unsigned long timeout_hi = (unsigned long)(timeout>>32);
182 unsigned long timeout_lo = (unsigned long)timeout;
183 return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
184}
185
186static inline int
187HYPERVISOR_set_debugreg(int reg, unsigned long value)
188{
189 return _hypercall2(int, set_debugreg, reg, value);
190}
191
192static inline unsigned long
193HYPERVISOR_get_debugreg(int reg)
194{
195 return _hypercall1(unsigned long, get_debugreg, reg);
196}
197
198static inline int
199HYPERVISOR_update_descriptor(u64 ma, u64 desc)
200{
201 return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
202}
203
204static inline int
205HYPERVISOR_memory_op(unsigned int cmd, void *arg)
206{
207 return _hypercall2(int, memory_op, cmd, arg);
208}
209
210static inline int
211HYPERVISOR_multicall(void *call_list, int nr_calls)
212{
213 return _hypercall2(int, multicall, call_list, nr_calls);
214}
215
216static inline int
217HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
218 unsigned long flags)
219{
220 unsigned long pte_hi = 0;
221#ifdef CONFIG_X86_PAE
222 pte_hi = new_val.pte_high;
223#endif
224 return _hypercall4(int, update_va_mapping, va,
225 new_val.pte_low, pte_hi, flags);
226}
227
228static inline int
229HYPERVISOR_event_channel_op(int cmd, void *arg)
230{
231 int rc = _hypercall2(int, event_channel_op, cmd, arg);
232 if (unlikely(rc == -ENOSYS)) {
233 struct evtchn_op op;
234 op.cmd = cmd;
235 memcpy(&op.u, arg, sizeof(op.u));
236 rc = _hypercall1(int, event_channel_op_compat, &op);
237 memcpy(arg, &op.u, sizeof(op.u));
238 }
239 return rc;
240}
241
242static inline int
243HYPERVISOR_xen_version(int cmd, void *arg)
244{
245 return _hypercall2(int, xen_version, cmd, arg);
246}
247
248static inline int
249HYPERVISOR_console_io(int cmd, int count, char *str)
250{
251 return _hypercall3(int, console_io, cmd, count, str);
252}
253
254static inline int
255HYPERVISOR_physdev_op(int cmd, void *arg)
256{
257 int rc = _hypercall2(int, physdev_op, cmd, arg);
258 if (unlikely(rc == -ENOSYS)) {
259 struct physdev_op op;
260 op.cmd = cmd;
261 memcpy(&op.u, arg, sizeof(op.u));
262 rc = _hypercall1(int, physdev_op_compat, &op);
263 memcpy(arg, &op.u, sizeof(op.u));
264 }
265 return rc;
266}
267
268static inline int
269HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
270{
271 return _hypercall3(int, grant_table_op, cmd, uop, count);
272}
273
274static inline int
275HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
276 unsigned long flags, domid_t domid)
277{
278 unsigned long pte_hi = 0;
279#ifdef CONFIG_X86_PAE
280 pte_hi = new_val.pte_high;
281#endif
282 return _hypercall5(int, update_va_mapping_otherdomain, va,
283 new_val.pte_low, pte_hi, flags, domid);
284}
285
286static inline int
287HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
288{
289 return _hypercall2(int, vm_assist, cmd, type);
290}
291
292static inline int
293HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
294{
295 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
296}
297
298static inline int
299HYPERVISOR_suspend(unsigned long srec)
300{
301 return _hypercall3(int, sched_op, SCHEDOP_shutdown,
302 SHUTDOWN_suspend, srec);
303}
304
305static inline int
306HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
307{
308 return _hypercall2(int, nmi_op, op, arg);
309}
310
311static inline void
312MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
313 pte_t new_val, unsigned long flags)
314{
315 mcl->op = __HYPERVISOR_update_va_mapping;
316 mcl->args[0] = va;
317#ifdef CONFIG_X86_PAE
318 mcl->args[1] = new_val.pte_low;
319 mcl->args[2] = new_val.pte_high;
320#else
321 mcl->args[1] = new_val.pte_low;
322 mcl->args[2] = 0;
323#endif
324 mcl->args[3] = flags;
325}
326
327static inline void
328MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
329 void *uop, unsigned int count)
330{
331 mcl->op = __HYPERVISOR_grant_table_op;
332 mcl->args[0] = cmd;
333 mcl->args[1] = (unsigned long)uop;
334 mcl->args[2] = count;
335}
336
337static inline void
338MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
339 pte_t new_val, unsigned long flags,
340 domid_t domid)
341{
342 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
343 mcl->args[0] = va;
344#ifdef CONFIG_X86_PAE
345 mcl->args[1] = new_val.pte_low;
346 mcl->args[2] = new_val.pte_high;
347#else
348 mcl->args[1] = new_val.pte_low;
349 mcl->args[2] = 0;
350#endif
351 mcl->args[3] = flags;
352 mcl->args[4] = domid;
353}
354
355static inline void
356MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
357 struct desc_struct desc)
358{
359 mcl->op = __HYPERVISOR_update_descriptor;
360 mcl->args[0] = maddr;
361 mcl->args[1] = maddr >> 32;
362 mcl->args[2] = desc.a;
363 mcl->args[3] = desc.b;
364}
365
366static inline void
367MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
368{
369 mcl->op = __HYPERVISOR_memory_op;
370 mcl->args[0] = cmd;
371 mcl->args[1] = (unsigned long)arg;
372}
373
374static inline void
375MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
376 int count, int *success_count, domid_t domid)
377{
378 mcl->op = __HYPERVISOR_mmu_update;
379 mcl->args[0] = (unsigned long)req;
380 mcl->args[1] = count;
381 mcl->args[2] = (unsigned long)success_count;
382 mcl->args[3] = domid;
383}
384
385static inline void
386MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
387 int *success_count, domid_t domid)
388{
389 mcl->op = __HYPERVISOR_mmuext_op;
390 mcl->args[0] = (unsigned long)op;
391 mcl->args[1] = count;
392 mcl->args[2] = (unsigned long)success_count;
393 mcl->args[3] = domid;
394}
395
396static inline void
397MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
398{
399 mcl->op = __HYPERVISOR_set_gdt;
400 mcl->args[0] = (unsigned long)frames;
401 mcl->args[1] = entries;
402}
403
404static inline void
405MULTI_stack_switch(struct multicall_entry *mcl,
406 unsigned long ss, unsigned long esp)
407{
408 mcl->op = __HYPERVISOR_stack_switch;
409 mcl->args[0] = ss;
410 mcl->args[1] = esp;
411}
412
413#endif /* __HYPERCALL_H__ */
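
The MULTI_* helpers above only populate a struct multicall_entry; nothing reaches Xen until the filled batch is submitted. As a minimal sketch of the intended pattern — assuming the HYPERVISOR_multicall wrapper defined earlier in this header, and treating the virtual addresses and PTE values as hypothetical caller state:

        /* Illustrative only: batch two PTE updates so they cost a single
         * trap into the hypervisor instead of two.  UVMF_INVLPG is the
         * usual "flush the TLB entry for this VA" flag from the Xen
         * interface headers. */
        static int example_update_two_mappings(unsigned long va1, pte_t pte1,
                                               unsigned long va2, pte_t pte2)
        {
                struct multicall_entry mcl[2];

                MULTI_update_va_mapping(&mcl[0], va1, pte1, UVMF_INVLPG);
                MULTI_update_va_mapping(&mcl[1], va2, pte2, UVMF_INVLPG);

                return HYPERVISOR_multicall(mcl, 2);
        }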
diff --git a/include/asm-i386/xen/hypervisor.h b/include/asm-i386/xen/hypervisor.h
deleted file mode 100644
index 8e15dd28c91f..000000000000
--- a/include/asm-i386/xen/hypervisor.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/******************************************************************************
2 * hypervisor.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef __HYPERVISOR_H__
34#define __HYPERVISOR_H__
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/version.h>
39
40#include <xen/interface/xen.h>
41#include <xen/interface/version.h>
42
43#include <asm/ptrace.h>
44#include <asm/page.h>
45#include <asm/desc.h>
46#if defined(__i386__)
47# ifdef CONFIG_X86_PAE
48# include <asm-generic/pgtable-nopud.h>
49# else
50# include <asm-generic/pgtable-nopmd.h>
51# endif
52#endif
53#include <asm/xen/hypercall.h>
54
55/* arch/i386/kernel/setup.c */
56extern struct shared_info *HYPERVISOR_shared_info;
57extern struct start_info *xen_start_info;
58#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
59
60/* arch/i386/mach-xen/evtchn.c */
61/* Force a proper event-channel callback from Xen. */
62extern void force_evtchn_callback(void);
63
64/* Turn jiffies into Xen system time. */
65u64 jiffies_to_st(unsigned long jiffies);
66
67
68#define MULTI_UVMFLAGS_INDEX 3
69#define MULTI_UVMDOMID_INDEX 4
70
71#define is_running_on_xen() (xen_start_info ? 1 : 0)
72
73#endif /* __HYPERVISOR_H__ */
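
The is_running_on_xen() and is_initial_xendomain() tests above are the usual guards in front of any use of this interface: xen_start_info is only set when booting as a Xen guest. A hedged sketch of that pattern (the function name and messages are illustrative, not part of this header):

        /* Illustrative only: refuse to touch Xen state on bare metal,
         * then report whether we are dom0 or an unprivileged guest. */
        static int __init example_xen_probe(void)
        {
                if (!is_running_on_xen())
                        return -ENODEV; /* no start_info, no hypercalls */

                if (is_initial_xendomain())
                        printk(KERN_INFO "Xen: running as dom0\n");
                else
                        printk(KERN_INFO "Xen: running as a domU guest\n");
                return 0;
        }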
diff --git a/include/asm-i386/xen/interface.h b/include/asm-i386/xen/interface.h
deleted file mode 100644
index 165c3968e138..000000000000
--- a/include/asm-i386/xen/interface.h
+++ /dev/null
@@ -1,188 +0,0 @@
1/******************************************************************************
2 * arch-x86_32.h
3 *
4 * Guest OS interface to x86 32-bit Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
10#define __XEN_PUBLIC_ARCH_X86_32_H__
11
12#ifdef __XEN__
13#define __DEFINE_GUEST_HANDLE(name, type) \
14 typedef struct { type *p; } __guest_handle_ ## name
15#else
16#define __DEFINE_GUEST_HANDLE(name, type) \
17 typedef type * __guest_handle_ ## name
18#endif
19
20#define DEFINE_GUEST_HANDLE_STRUCT(name) \
21 __DEFINE_GUEST_HANDLE(name, struct name)
22#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
23#define GUEST_HANDLE(name) __guest_handle_ ## name
24
25#ifndef __ASSEMBLY__
26/* Guest handles for primitive C types. */
27__DEFINE_GUEST_HANDLE(uchar, unsigned char);
28__DEFINE_GUEST_HANDLE(uint, unsigned int);
29__DEFINE_GUEST_HANDLE(ulong, unsigned long);
30DEFINE_GUEST_HANDLE(char);
31DEFINE_GUEST_HANDLE(int);
32DEFINE_GUEST_HANDLE(long);
33DEFINE_GUEST_HANDLE(void);
34#endif
35
36/*
37 * SEGMENT DESCRIPTOR TABLES
38 */
39/*
40 * A number of GDT entries are reserved by Xen. These are not situated at the
41 * start of the GDT because some stupid OSes export hard-coded selector values
42 * in their ABI. These hard-coded values are always near the start of the GDT,
43 * so Xen places itself out of the way, at the far end of the GDT.
44 */
45#define FIRST_RESERVED_GDT_PAGE 14
46#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
47#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
48
49/*
50 * These flat segments are in the Xen-private section of every GDT. Since these
51 * are also present in the initial GDT, many OSes will be able to avoid
52 * installing their own GDT.
53 */
54#define FLAT_RING1_CS 0xe019 /* GDT index 7171 (FIRST_RESERVED_GDT_ENTRY + 3) */
55#define FLAT_RING1_DS 0xe021 /* GDT index 7172 */
56#define FLAT_RING1_SS 0xe021 /* GDT index 7172 */
57#define FLAT_RING3_CS 0xe02b /* GDT index 7173 */
58#define FLAT_RING3_DS 0xe033 /* GDT index 7174 */
59#define FLAT_RING3_SS 0xe033 /* GDT index 7174 */
60
61#define FLAT_KERNEL_CS FLAT_RING1_CS
62#define FLAT_KERNEL_DS FLAT_RING1_DS
63#define FLAT_KERNEL_SS FLAT_RING1_SS
64#define FLAT_USER_CS FLAT_RING3_CS
65#define FLAT_USER_DS FLAT_RING3_DS
66#define FLAT_USER_SS FLAT_RING3_SS
67
68/* And the trap vector is... */
69#define TRAP_INSTR "int $0x82"
70
71/*
72 * Virtual addresses beyond this are not modifiable by guest OSes. The
73 * machine->physical mapping table starts at this address, read-only.
74 */
75#ifdef CONFIG_X86_PAE
76#define __HYPERVISOR_VIRT_START 0xF5800000
77#else
78#define __HYPERVISOR_VIRT_START 0xFC000000
79#endif
80
81#ifndef HYPERVISOR_VIRT_START
82#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
83#endif
84
85#ifndef machine_to_phys_mapping
86#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
87#endif
88
89/* Maximum number of virtual CPUs in multi-processor guests. */
90#define MAX_VIRT_CPUS 32
91
92#ifndef __ASSEMBLY__
93
94/*
95 * Send an array of these to HYPERVISOR_set_trap_table()
96 */
97#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
98#define TI_GET_IF(_ti) ((_ti)->flags & 4)
99#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
100#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
101
102struct trap_info {
103 uint8_t vector; /* exception vector */
104 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
105 uint16_t cs; /* code selector */
106 unsigned long address; /* code offset */
107};
108DEFINE_GUEST_HANDLE_STRUCT(trap_info);
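
Per the TI_* accessors above, bits 0-1 of trap_info.flags hold the DPL that may invoke the vector, and bit 2 requests that event delivery be disabled on entry (the virtual analogue of a cleared EFLAGS.IF). A sketch of filling one entry for HYPERVISOR_set_trap_table(); the vector number and handler symbol are placeholders:

        /* Illustrative only: one virtual-IDT entry, callable from ring 3,
         * with events masked while the handler runs. */
        extern void example_int80_entry(void);  /* hypothetical stub */

        static void example_fill_trap(struct trap_info *ti)
        {
                ti->vector  = 0x80;
                ti->flags   = 0;
                ti->cs      = FLAT_KERNEL_CS;
                ti->address = (unsigned long)example_int80_entry;
                TI_SET_DPL(ti, 3);      /* user space may raise it */
                TI_SET_IF(ti, 1);       /* clear event delivery on entry */
        }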
109
110struct cpu_user_regs {
111 uint32_t ebx;
112 uint32_t ecx;
113 uint32_t edx;
114 uint32_t esi;
115 uint32_t edi;
116 uint32_t ebp;
117 uint32_t eax;
118 uint16_t error_code; /* private */
119 uint16_t entry_vector; /* private */
120 uint32_t eip;
121 uint16_t cs;
122 uint8_t saved_upcall_mask;
123 uint8_t _pad0;
124 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
125 uint32_t esp;
126 uint16_t ss, _pad1;
127 uint16_t es, _pad2;
128 uint16_t ds, _pad3;
129 uint16_t fs, _pad4;
130 uint16_t gs, _pad5;
131};
132DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
133
134typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
135
136/*
137 * The following is all CPU context. Note that the fpu_ctxt block is filled
138 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
139 */
140struct vcpu_guest_context {
141 /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
142 struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
143#define VGCF_I387_VALID (1<<0)
144#define VGCF_HVM_GUEST (1<<1)
145#define VGCF_IN_KERNEL (1<<2)
146 unsigned long flags; /* VGCF_* flags */
147 struct cpu_user_regs user_regs; /* User-level CPU registers */
148 struct trap_info trap_ctxt[256]; /* Virtual IDT */
149 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
150 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
151 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
152 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
153 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
154 unsigned long event_callback_cs; /* CS:EIP of event callback */
155 unsigned long event_callback_eip;
156 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
157 unsigned long failsafe_callback_eip;
158 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
159};
160DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
161
162struct arch_shared_info {
163 unsigned long max_pfn; /* max pfn that appears in table */
164 /* Frame containing list of mfns containing list of mfns containing p2m. */
165 unsigned long pfn_to_mfn_frame_list_list;
166 unsigned long nmi_reason;
167};
168
169struct arch_vcpu_info {
170 unsigned long cr2;
171 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
172};
173
174#endif /* !__ASSEMBLY__ */
175
176/*
177 * Prefix forces emulation of some non-trapping instructions.
178 * Currently only CPUID.
179 */
180#ifdef __ASSEMBLY__
181#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
182#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
183#else
184#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
185#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
186#endif
187
188#endif
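
XEN_EMULATE_PREFIX marks the following instruction for forced emulation by Xen, which is what makes XEN_CPUID trap reliably even where a native CPUID would not. A hedged sketch of the conventional use, probing the 0x40000000 hypervisor leaf for Xen's "XenVMMXenVMM" signature (the leaf number and signature are the usual convention, not something this header defines):

        /* Illustrative only: run the forced-emulation CPUID and check
         * for the Xen signature in ebx/ecx/edx. */
        static int example_xen_cpuid_probe(void)
        {
                unsigned int eax, ebx, ecx, edx;

                asm volatile(XEN_CPUID
                             : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                             : "0" (0x40000000));

                return ebx == 0x566e6558 &&     /* "XenV" */
                       ecx == 0x65584d4d &&     /* "MMXe" */
                       edx == 0x4d4d566e;       /* "nVMM" */
        }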
diff --git a/include/asm-i386/xor.h b/include/asm-i386/xor.h
deleted file mode 100644
index 23c86cef3b25..000000000000
--- a/include/asm-i386/xor.h
+++ /dev/null
@@ -1,883 +0,0 @@
1/*
2 * include/asm-i386/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for MMX and SSE.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16/*
17 * High-speed RAID5 checksumming functions utilizing MMX instructions.
18 * Copyright (C) 1998 Ingo Molnar.
19 */
20
21#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
22#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
23#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
24#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
25#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
26#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
27
28#include <asm/i387.h>
29
30static void
31xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
32{
33 unsigned long lines = bytes >> 7;
34
35 kernel_fpu_begin();
36
37 __asm__ __volatile__ (
38#undef BLOCK
39#define BLOCK(i) \
40 LD(i,0) \
41 LD(i+1,1) \
42 LD(i+2,2) \
43 LD(i+3,3) \
44 XO1(i,0) \
45 ST(i,0) \
46 XO1(i+1,1) \
47 ST(i+1,1) \
48 XO1(i+2,2) \
49 ST(i+2,2) \
50 XO1(i+3,3) \
51 ST(i+3,3)
52
53 " .align 32 ;\n"
54 " 1: ;\n"
55
56 BLOCK(0)
57 BLOCK(4)
58 BLOCK(8)
59 BLOCK(12)
60
61 " addl $128, %1 ;\n"
62 " addl $128, %2 ;\n"
63 " decl %0 ;\n"
64 " jnz 1b ;\n"
65 : "+r" (lines),
66 "+r" (p1), "+r" (p2)
67 :
68 : "memory");
69
70 kernel_fpu_end();
71}
72
73static void
74xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
75 unsigned long *p3)
76{
77 unsigned long lines = bytes >> 7;
78
79 kernel_fpu_begin();
80
81 __asm__ __volatile__ (
82#undef BLOCK
83#define BLOCK(i) \
84 LD(i,0) \
85 LD(i+1,1) \
86 LD(i+2,2) \
87 LD(i+3,3) \
88 XO1(i,0) \
89 XO1(i+1,1) \
90 XO1(i+2,2) \
91 XO1(i+3,3) \
92 XO2(i,0) \
93 ST(i,0) \
94 XO2(i+1,1) \
95 ST(i+1,1) \
96 XO2(i+2,2) \
97 ST(i+2,2) \
98 XO2(i+3,3) \
99 ST(i+3,3)
100
101 " .align 32 ;\n"
102 " 1: ;\n"
103
104 BLOCK(0)
105 BLOCK(4)
106 BLOCK(8)
107 BLOCK(12)
108
109 " addl $128, %1 ;\n"
110 " addl $128, %2 ;\n"
111 " addl $128, %3 ;\n"
112 " decl %0 ;\n"
113 " jnz 1b ;\n"
114 : "+r" (lines),
115 "+r" (p1), "+r" (p2), "+r" (p3)
116 :
117 : "memory");
118
119 kernel_fpu_end();
120}
121
122static void
123xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
124 unsigned long *p3, unsigned long *p4)
125{
126 unsigned long lines = bytes >> 7;
127
128 kernel_fpu_begin();
129
130 __asm__ __volatile__ (
131#undef BLOCK
132#define BLOCK(i) \
133 LD(i,0) \
134 LD(i+1,1) \
135 LD(i+2,2) \
136 LD(i+3,3) \
137 XO1(i,0) \
138 XO1(i+1,1) \
139 XO1(i+2,2) \
140 XO1(i+3,3) \
141 XO2(i,0) \
142 XO2(i+1,1) \
143 XO2(i+2,2) \
144 XO2(i+3,3) \
145 XO3(i,0) \
146 ST(i,0) \
147 XO3(i+1,1) \
148 ST(i+1,1) \
149 XO3(i+2,2) \
150 ST(i+2,2) \
151 XO3(i+3,3) \
152 ST(i+3,3)
153
154 " .align 32 ;\n"
155 " 1: ;\n"
156
157 BLOCK(0)
158 BLOCK(4)
159 BLOCK(8)
160 BLOCK(12)
161
162 " addl $128, %1 ;\n"
163 " addl $128, %2 ;\n"
164 " addl $128, %3 ;\n"
165 " addl $128, %4 ;\n"
166 " decl %0 ;\n"
167 " jnz 1b ;\n"
168 : "+r" (lines),
169 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
170 :
171 : "memory");
172
173 kernel_fpu_end();
174}
175
176
177static void
178xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
179 unsigned long *p3, unsigned long *p4, unsigned long *p5)
180{
181 unsigned long lines = bytes >> 7;
182
183 kernel_fpu_begin();
184
185 /* Make sure GCC forgets anything it knows about p4 or p5,
186 such that it won't pass to the asm volatile below a
187 register that is shared with any other variable. That's
188 because we modify p4 and p5 there, but we can't mark them
189 as read/write, otherwise we'd overflow the 10-asm-operands
190 limit of GCC < 3.1. */
191 __asm__ ("" : "+r" (p4), "+r" (p5));
192
193 __asm__ __volatile__ (
194#undef BLOCK
195#define BLOCK(i) \
196 LD(i,0) \
197 LD(i+1,1) \
198 LD(i+2,2) \
199 LD(i+3,3) \
200 XO1(i,0) \
201 XO1(i+1,1) \
202 XO1(i+2,2) \
203 XO1(i+3,3) \
204 XO2(i,0) \
205 XO2(i+1,1) \
206 XO2(i+2,2) \
207 XO2(i+3,3) \
208 XO3(i,0) \
209 XO3(i+1,1) \
210 XO3(i+2,2) \
211 XO3(i+3,3) \
212 XO4(i,0) \
213 ST(i,0) \
214 XO4(i+1,1) \
215 ST(i+1,1) \
216 XO4(i+2,2) \
217 ST(i+2,2) \
218 XO4(i+3,3) \
219 ST(i+3,3)
220
221 " .align 32 ;\n"
222 " 1: ;\n"
223
224 BLOCK(0)
225 BLOCK(4)
226 BLOCK(8)
227 BLOCK(12)
228
229 " addl $128, %1 ;\n"
230 " addl $128, %2 ;\n"
231 " addl $128, %3 ;\n"
232 " addl $128, %4 ;\n"
233 " addl $128, %5 ;\n"
234 " decl %0 ;\n"
235 " jnz 1b ;\n"
236 : "+r" (lines),
237 "+r" (p1), "+r" (p2), "+r" (p3)
238 : "r" (p4), "r" (p5)
239 : "memory");
240
241 /* p4 and p5 were modified, and now the variables are dead.
242 Clobber them just to be sure nobody does something stupid
243 like assuming they have some legal value. */
244 __asm__ ("" : "=r" (p4), "=r" (p5));
245
246 kernel_fpu_end();
247}
248
249#undef LD
250#undef XO1
251#undef XO2
252#undef XO3
253#undef XO4
254#undef ST
255#undef BLOCK
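
Stripped of the MMX staging through mm0-mm3, each BLOCK(i) above is an unrolled dst ^= src over four 8-byte quadwords, and the surrounding loop walks 128-byte lines. A plain-C reference of what xor_pII_mmx_2 computes, handy as a mental model or a correctness check (a sketch, not part of the header):

        /* Illustrative only: the same result as xor_pII_mmx_2, one word
         * at a time and without touching FPU/MMX state. */
        static void example_xor_ref_2(unsigned long bytes, unsigned long *p1,
                                      unsigned long *p2)
        {
                unsigned long i, n = bytes / sizeof(unsigned long);

                for (i = 0; i < n; i++)
                        p1[i] ^= p2[i];         /* p1 is the destination */
        }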
256
257static void
258xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
259{
260 unsigned long lines = bytes >> 6;
261
262 kernel_fpu_begin();
263
264 __asm__ __volatile__ (
265 " .align 32 ;\n"
266 " 1: ;\n"
267 " movq (%1), %%mm0 ;\n"
268 " movq 8(%1), %%mm1 ;\n"
269 " pxor (%2), %%mm0 ;\n"
270 " movq 16(%1), %%mm2 ;\n"
271 " movq %%mm0, (%1) ;\n"
272 " pxor 8(%2), %%mm1 ;\n"
273 " movq 24(%1), %%mm3 ;\n"
274 " movq %%mm1, 8(%1) ;\n"
275 " pxor 16(%2), %%mm2 ;\n"
276 " movq 32(%1), %%mm4 ;\n"
277 " movq %%mm2, 16(%1) ;\n"
278 " pxor 24(%2), %%mm3 ;\n"
279 " movq 40(%1), %%mm5 ;\n"
280 " movq %%mm3, 24(%1) ;\n"
281 " pxor 32(%2), %%mm4 ;\n"
282 " movq 48(%1), %%mm6 ;\n"
283 " movq %%mm4, 32(%1) ;\n"
284 " pxor 40(%2), %%mm5 ;\n"
285 " movq 56(%1), %%mm7 ;\n"
286 " movq %%mm5, 40(%1) ;\n"
287 " pxor 48(%2), %%mm6 ;\n"
288 " pxor 56(%2), %%mm7 ;\n"
289 " movq %%mm6, 48(%1) ;\n"
290 " movq %%mm7, 56(%1) ;\n"
291
292 " addl $64, %1 ;\n"
293 " addl $64, %2 ;\n"
294 " decl %0 ;\n"
295 " jnz 1b ;\n"
296 : "+r" (lines),
297 "+r" (p1), "+r" (p2)
298 :
299 : "memory");
300
301 kernel_fpu_end();
302}
303
304static void
305xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
306 unsigned long *p3)
307{
308 unsigned long lines = bytes >> 6;
309
310 kernel_fpu_begin();
311
312 __asm__ __volatile__ (
313 " .align 32,0x90 ;\n"
314 " 1: ;\n"
315 " movq (%1), %%mm0 ;\n"
316 " movq 8(%1), %%mm1 ;\n"
317 " pxor (%2), %%mm0 ;\n"
318 " movq 16(%1), %%mm2 ;\n"
319 " pxor 8(%2), %%mm1 ;\n"
320 " pxor (%3), %%mm0 ;\n"
321 " pxor 16(%2), %%mm2 ;\n"
322 " movq %%mm0, (%1) ;\n"
323 " pxor 8(%3), %%mm1 ;\n"
324 " pxor 16(%3), %%mm2 ;\n"
325 " movq 24(%1), %%mm3 ;\n"
326 " movq %%mm1, 8(%1) ;\n"
327 " movq 32(%1), %%mm4 ;\n"
328 " movq 40(%1), %%mm5 ;\n"
329 " pxor 24(%2), %%mm3 ;\n"
330 " movq %%mm2, 16(%1) ;\n"
331 " pxor 32(%2), %%mm4 ;\n"
332 " pxor 24(%3), %%mm3 ;\n"
333 " pxor 40(%2), %%mm5 ;\n"
334 " movq %%mm3, 24(%1) ;\n"
335 " pxor 32(%3), %%mm4 ;\n"
336 " pxor 40(%3), %%mm5 ;\n"
337 " movq 48(%1), %%mm6 ;\n"
338 " movq %%mm4, 32(%1) ;\n"
339 " movq 56(%1), %%mm7 ;\n"
340 " pxor 48(%2), %%mm6 ;\n"
341 " movq %%mm5, 40(%1) ;\n"
342 " pxor 56(%2), %%mm7 ;\n"
343 " pxor 48(%3), %%mm6 ;\n"
344 " pxor 56(%3), %%mm7 ;\n"
345 " movq %%mm6, 48(%1) ;\n"
346 " movq %%mm7, 56(%1) ;\n"
347
348 " addl $64, %1 ;\n"
349 " addl $64, %2 ;\n"
350 " addl $64, %3 ;\n"
351 " decl %0 ;\n"
352 " jnz 1b ;\n"
353 : "+r" (lines),
354 "+r" (p1), "+r" (p2), "+r" (p3)
355 :
356 : "memory" );
357
358 kernel_fpu_end();
359}
360
361static void
362xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
363 unsigned long *p3, unsigned long *p4)
364{
365 unsigned long lines = bytes >> 6;
366
367 kernel_fpu_begin();
368
369 __asm__ __volatile__ (
370 " .align 32,0x90 ;\n"
371 " 1: ;\n"
372 " movq (%1), %%mm0 ;\n"
373 " movq 8(%1), %%mm1 ;\n"
374 " pxor (%2), %%mm0 ;\n"
375 " movq 16(%1), %%mm2 ;\n"
376 " pxor 8(%2), %%mm1 ;\n"
377 " pxor (%3), %%mm0 ;\n"
378 " pxor 16(%2), %%mm2 ;\n"
379 " pxor 8(%3), %%mm1 ;\n"
380 " pxor (%4), %%mm0 ;\n"
381 " movq 24(%1), %%mm3 ;\n"
382 " pxor 16(%3), %%mm2 ;\n"
383 " pxor 8(%4), %%mm1 ;\n"
384 " movq %%mm0, (%1) ;\n"
385 " movq 32(%1), %%mm4 ;\n"
386 " pxor 24(%2), %%mm3 ;\n"
387 " pxor 16(%4), %%mm2 ;\n"
388 " movq %%mm1, 8(%1) ;\n"
389 " movq 40(%1), %%mm5 ;\n"
390 " pxor 32(%2), %%mm4 ;\n"
391 " pxor 24(%3), %%mm3 ;\n"
392 " movq %%mm2, 16(%1) ;\n"
393 " pxor 40(%2), %%mm5 ;\n"
394 " pxor 32(%3), %%mm4 ;\n"
395 " pxor 24(%4), %%mm3 ;\n"
396 " movq %%mm3, 24(%1) ;\n"
397 " movq 56(%1), %%mm7 ;\n"
398 " movq 48(%1), %%mm6 ;\n"
399 " pxor 40(%3), %%mm5 ;\n"
400 " pxor 32(%4), %%mm4 ;\n"
401 " pxor 48(%2), %%mm6 ;\n"
402 " movq %%mm4, 32(%1) ;\n"
403 " pxor 56(%2), %%mm7 ;\n"
404 " pxor 40(%4), %%mm5 ;\n"
405 " pxor 48(%3), %%mm6 ;\n"
406 " pxor 56(%3), %%mm7 ;\n"
407 " movq %%mm5, 40(%1) ;\n"
408 " pxor 48(%4), %%mm6 ;\n"
409 " pxor 56(%4), %%mm7 ;\n"
410 " movq %%mm6, 48(%1) ;\n"
411 " movq %%mm7, 56(%1) ;\n"
412
413 " addl $64, %1 ;\n"
414 " addl $64, %2 ;\n"
415 " addl $64, %3 ;\n"
416 " addl $64, %4 ;\n"
417 " decl %0 ;\n"
418 " jnz 1b ;\n"
419 : "+r" (lines),
420 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
421 :
422 : "memory");
423
424 kernel_fpu_end();
425}
426
427static void
428xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
429 unsigned long *p3, unsigned long *p4, unsigned long *p5)
430{
431 unsigned long lines = bytes >> 6;
432
433 kernel_fpu_begin();
434
435 /* Make sure GCC forgets anything it knows about p4 or p5,
436 such that it won't pass to the asm volatile below a
437 register that is shared with any other variable. That's
438 because we modify p4 and p5 there, but we can't mark them
439 as read/write, otherwise we'd overflow the 10-asm-operands
440 limit of GCC < 3.1. */
441 __asm__ ("" : "+r" (p4), "+r" (p5));
442
443 __asm__ __volatile__ (
444 " .align 32,0x90 ;\n"
445 " 1: ;\n"
446 " movq (%1), %%mm0 ;\n"
447 " movq 8(%1), %%mm1 ;\n"
448 " pxor (%2), %%mm0 ;\n"
449 " pxor 8(%2), %%mm1 ;\n"
450 " movq 16(%1), %%mm2 ;\n"
451 " pxor (%3), %%mm0 ;\n"
452 " pxor 8(%3), %%mm1 ;\n"
453 " pxor 16(%2), %%mm2 ;\n"
454 " pxor (%4), %%mm0 ;\n"
455 " pxor 8(%4), %%mm1 ;\n"
456 " pxor 16(%3), %%mm2 ;\n"
457 " movq 24(%1), %%mm3 ;\n"
458 " pxor (%5), %%mm0 ;\n"
459 " pxor 8(%5), %%mm1 ;\n"
460 " movq %%mm0, (%1) ;\n"
461 " pxor 16(%4), %%mm2 ;\n"
462 " pxor 24(%2), %%mm3 ;\n"
463 " movq %%mm1, 8(%1) ;\n"
464 " pxor 16(%5), %%mm2 ;\n"
465 " pxor 24(%3), %%mm3 ;\n"
466 " movq 32(%1), %%mm4 ;\n"
467 " movq %%mm2, 16(%1) ;\n"
468 " pxor 24(%4), %%mm3 ;\n"
469 " pxor 32(%2), %%mm4 ;\n"
470 " movq 40(%1), %%mm5 ;\n"
471 " pxor 24(%5), %%mm3 ;\n"
472 " pxor 32(%3), %%mm4 ;\n"
473 " pxor 40(%2), %%mm5 ;\n"
474 " movq %%mm3, 24(%1) ;\n"
475 " pxor 32(%4), %%mm4 ;\n"
476 " pxor 40(%3), %%mm5 ;\n"
477 " movq 48(%1), %%mm6 ;\n"
478 " movq 56(%1), %%mm7 ;\n"
479 " pxor 32(%5), %%mm4 ;\n"
480 " pxor 40(%4), %%mm5 ;\n"
481 " pxor 48(%2), %%mm6 ;\n"
482 " pxor 56(%2), %%mm7 ;\n"
483 " movq %%mm4, 32(%1) ;\n"
484 " pxor 48(%3), %%mm6 ;\n"
485 " pxor 56(%3), %%mm7 ;\n"
486 " pxor 40(%5), %%mm5 ;\n"
487 " pxor 48(%4), %%mm6 ;\n"
488 " pxor 56(%4), %%mm7 ;\n"
489 " movq %%mm5, 40(%1) ;\n"
490 " pxor 48(%5), %%mm6 ;\n"
491 " pxor 56(%5), %%mm7 ;\n"
492 " movq %%mm6, 48(%1) ;\n"
493 " movq %%mm7, 56(%1) ;\n"
494
495 " addl $64, %1 ;\n"
496 " addl $64, %2 ;\n"
497 " addl $64, %3 ;\n"
498 " addl $64, %4 ;\n"
499 " addl $64, %5 ;\n"
500 " decl %0 ;\n"
501 " jnz 1b ;\n"
502 : "+r" (lines),
503 "+r" (p1), "+r" (p2), "+r" (p3)
504 : "r" (p4), "r" (p5)
505 : "memory");
506
507 /* p4 and p5 were modified, and now the variables are dead.
508 Clobber them just to be sure nobody does something stupid
509 like assuming they have some legal value. */
510 __asm__ ("" : "=r" (p4), "=r" (p5));
511
512 kernel_fpu_end();
513}
514
515static struct xor_block_template xor_block_pII_mmx = {
516 .name = "pII_mmx",
517 .do_2 = xor_pII_mmx_2,
518 .do_3 = xor_pII_mmx_3,
519 .do_4 = xor_pII_mmx_4,
520 .do_5 = xor_pII_mmx_5,
521};
522
523static struct xor_block_template xor_block_p5_mmx = {
524 .name = "p5_mmx",
525 .do_2 = xor_p5_mmx_2,
526 .do_3 = xor_p5_mmx_3,
527 .do_4 = xor_p5_mmx_4,
528 .do_5 = xor_p5_mmx_5,
529};
530
531/*
532 * Cache avoiding checksumming functions utilizing KNI instructions
533 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
534 */
535
536#define XMMS_SAVE do { \
537 preempt_disable(); \
538 cr0 = read_cr0(); \
539 clts(); \
540 __asm__ __volatile__ ( \
541 "movups %%xmm0,(%0) ;\n\t" \
542 "movups %%xmm1,0x10(%0) ;\n\t" \
543 "movups %%xmm2,0x20(%0) ;\n\t" \
544 "movups %%xmm3,0x30(%0) ;\n\t" \
545 : \
546 : "r" (xmm_save) \
547 : "memory"); \
548} while(0)
549
550#define XMMS_RESTORE do { \
551 __asm__ __volatile__ ( \
552 "sfence ;\n\t" \
553 "movups (%0),%%xmm0 ;\n\t" \
554 "movups 0x10(%0),%%xmm1 ;\n\t" \
555 "movups 0x20(%0),%%xmm2 ;\n\t" \
556 "movups 0x30(%0),%%xmm3 ;\n\t" \
557 : \
558 : "r" (xmm_save) \
559 : "memory"); \
560 write_cr0(cr0); \
561 preempt_enable(); \
562} while(0)
563
564#define ALIGN16 __attribute__((aligned(16)))
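
Unlike the MMX routines, which bracket their bodies with kernel_fpu_begin()/kernel_fpu_end(), the SSE routines save and restore only the four XMM registers they use. Both macros expect an aligned xmm_save buffer and a cr0 variable in scope, a contract every xor_sse_* function below honors; as a sketch:

        /* Illustrative only: the usage contract for XMMS_SAVE/XMMS_RESTORE.
         * The macros reference 'xmm_save' and 'cr0' by name. */
        static void example_sse_section(void)
        {
                char xmm_save[16*4] ALIGN16;
                int cr0;

                XMMS_SAVE;
                /* ... xmm0-xmm3 may be used freely here ... */
                XMMS_RESTORE;
        }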
565
566#define OFFS(x) "16*("#x")"
567#define PF_OFFS(x) "256+16*("#x")"
568#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
569#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
570#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
571#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
572#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
573#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
574#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
575#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
576#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
577#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
578#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
579#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
580#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
581
582
583static void
584xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
585{
586 unsigned long lines = bytes >> 8;
587 char xmm_save[16*4] ALIGN16;
588 int cr0;
589
590 XMMS_SAVE;
591
592 __asm__ __volatile__ (
593#undef BLOCK
594#define BLOCK(i) \
595 LD(i,0) \
596 LD(i+1,1) \
597 PF1(i) \
598 PF1(i+2) \
599 LD(i+2,2) \
600 LD(i+3,3) \
601 PF0(i+4) \
602 PF0(i+6) \
603 XO1(i,0) \
604 XO1(i+1,1) \
605 XO1(i+2,2) \
606 XO1(i+3,3) \
607 ST(i,0) \
608 ST(i+1,1) \
609 ST(i+2,2) \
610 ST(i+3,3) \
611
612
613 PF0(0)
614 PF0(2)
615
616 " .align 32 ;\n"
617 " 1: ;\n"
618
619 BLOCK(0)
620 BLOCK(4)
621 BLOCK(8)
622 BLOCK(12)
623
624 " addl $256, %1 ;\n"
625 " addl $256, %2 ;\n"
626 " decl %0 ;\n"
627 " jnz 1b ;\n"
628 : "+r" (lines),
629 "+r" (p1), "+r" (p2)
630 :
631 : "memory");
632
633 XMMS_RESTORE;
634}
635
636static void
637xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
638 unsigned long *p3)
639{
640 unsigned long lines = bytes >> 8;
641 char xmm_save[16*4] ALIGN16;
642 int cr0;
643
644 XMMS_SAVE;
645
646 __asm__ __volatile__ (
647#undef BLOCK
648#define BLOCK(i) \
649 PF1(i) \
650 PF1(i+2) \
651 LD(i,0) \
652 LD(i+1,1) \
653 LD(i+2,2) \
654 LD(i+3,3) \
655 PF2(i) \
656 PF2(i+2) \
657 PF0(i+4) \
658 PF0(i+6) \
659 XO1(i,0) \
660 XO1(i+1,1) \
661 XO1(i+2,2) \
662 XO1(i+3,3) \
663 XO2(i,0) \
664 XO2(i+1,1) \
665 XO2(i+2,2) \
666 XO2(i+3,3) \
667 ST(i,0) \
668 ST(i+1,1) \
669 ST(i+2,2) \
670 ST(i+3,3) \
671
672
673 PF0(0)
674 PF0(2)
675
676 " .align 32 ;\n"
677 " 1: ;\n"
678
679 BLOCK(0)
680 BLOCK(4)
681 BLOCK(8)
682 BLOCK(12)
683
684 " addl $256, %1 ;\n"
685 " addl $256, %2 ;\n"
686 " addl $256, %3 ;\n"
687 " decl %0 ;\n"
688 " jnz 1b ;\n"
689 : "+r" (lines),
690 "+r" (p1), "+r"(p2), "+r"(p3)
691 :
692 : "memory" );
693
694 XMMS_RESTORE;
695}
696
697static void
698xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
699 unsigned long *p3, unsigned long *p4)
700{
701 unsigned long lines = bytes >> 8;
702 char xmm_save[16*4] ALIGN16;
703 int cr0;
704
705 XMMS_SAVE;
706
707 __asm__ __volatile__ (
708#undef BLOCK
709#define BLOCK(i) \
710 PF1(i) \
711 PF1(i+2) \
712 LD(i,0) \
713 LD(i+1,1) \
714 LD(i+2,2) \
715 LD(i+3,3) \
716 PF2(i) \
717 PF2(i+2) \
718 XO1(i,0) \
719 XO1(i+1,1) \
720 XO1(i+2,2) \
721 XO1(i+3,3) \
722 PF3(i) \
723 PF3(i+2) \
724 PF0(i+4) \
725 PF0(i+6) \
726 XO2(i,0) \
727 XO2(i+1,1) \
728 XO2(i+2,2) \
729 XO2(i+3,3) \
730 XO3(i,0) \
731 XO3(i+1,1) \
732 XO3(i+2,2) \
733 XO3(i+3,3) \
734 ST(i,0) \
735 ST(i+1,1) \
736 ST(i+2,2) \
737 ST(i+3,3) \
738
739
740 PF0(0)
741 PF0(2)
742
743 " .align 32 ;\n"
744 " 1: ;\n"
745
746 BLOCK(0)
747 BLOCK(4)
748 BLOCK(8)
749 BLOCK(12)
750
751 " addl $256, %1 ;\n"
752 " addl $256, %2 ;\n"
753 " addl $256, %3 ;\n"
754 " addl $256, %4 ;\n"
755 " decl %0 ;\n"
756 " jnz 1b ;\n"
757 : "+r" (lines),
758 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
759 :
760 : "memory" );
761
762 XMMS_RESTORE;
763}
764
765static void
766xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
767 unsigned long *p3, unsigned long *p4, unsigned long *p5)
768{
769 unsigned long lines = bytes >> 8;
770 char xmm_save[16*4] ALIGN16;
771 int cr0;
772
773 XMMS_SAVE;
774
775 /* Make sure GCC forgets anything it knows about p4 or p5,
776 such that it won't pass to the asm volatile below a
777 register that is shared with any other variable. That's
778 because we modify p4 and p5 there, but we can't mark them
779 as read/write, otherwise we'd overflow the 10-asm-operands
780 limit of GCC < 3.1. */
781 __asm__ ("" : "+r" (p4), "+r" (p5));
782
783 __asm__ __volatile__ (
784#undef BLOCK
785#define BLOCK(i) \
786 PF1(i) \
787 PF1(i+2) \
788 LD(i,0) \
789 LD(i+1,1) \
790 LD(i+2,2) \
791 LD(i+3,3) \
792 PF2(i) \
793 PF2(i+2) \
794 XO1(i,0) \
795 XO1(i+1,1) \
796 XO1(i+2,2) \
797 XO1(i+3,3) \
798 PF3(i) \
799 PF3(i+2) \
800 XO2(i,0) \
801 XO2(i+1,1) \
802 XO2(i+2,2) \
803 XO2(i+3,3) \
804 PF4(i) \
805 PF4(i+2) \
806 PF0(i+4) \
807 PF0(i+6) \
808 XO3(i,0) \
809 XO3(i+1,1) \
810 XO3(i+2,2) \
811 XO3(i+3,3) \
812 XO4(i,0) \
813 XO4(i+1,1) \
814 XO4(i+2,2) \
815 XO4(i+3,3) \
816 ST(i,0) \
817 ST(i+1,1) \
818 ST(i+2,2) \
819 ST(i+3,3) \
820
821
822 PF0(0)
823 PF0(2)
824
825 " .align 32 ;\n"
826 " 1: ;\n"
827
828 BLOCK(0)
829 BLOCK(4)
830 BLOCK(8)
831 BLOCK(12)
832
833 " addl $256, %1 ;\n"
834 " addl $256, %2 ;\n"
835 " addl $256, %3 ;\n"
836 " addl $256, %4 ;\n"
837 " addl $256, %5 ;\n"
838 " decl %0 ;\n"
839 " jnz 1b ;\n"
840 : "+r" (lines),
841 "+r" (p1), "+r" (p2), "+r" (p3)
842 : "r" (p4), "r" (p5)
843 : "memory");
844
845 /* p4 and p5 were modified, and now the variables are dead.
846 Clobber them just to be sure nobody does something stupid
847 like assuming they have some legal value. */
848 __asm__ ("" : "=r" (p4), "=r" (p5));
849
850 XMMS_RESTORE;
851}
852
853static struct xor_block_template xor_block_pIII_sse = {
854 .name = "pIII_sse",
855 .do_2 = xor_sse_2,
856 .do_3 = xor_sse_3,
857 .do_4 = xor_sse_4,
858 .do_5 = xor_sse_5,
859};
860
861/* Also try the generic routines. */
862#include <asm-generic/xor.h>
863
864#undef XOR_TRY_TEMPLATES
865#define XOR_TRY_TEMPLATES \
866 do { \
867 xor_speed(&xor_block_8regs); \
868 xor_speed(&xor_block_8regs_p); \
869 xor_speed(&xor_block_32regs); \
870 xor_speed(&xor_block_32regs_p); \
871 if (cpu_has_xmm) \
872 xor_speed(&xor_block_pIII_sse); \
873 if (cpu_has_mmx) { \
874 xor_speed(&xor_block_pII_mmx); \
875 xor_speed(&xor_block_p5_mmx); \
876 } \
877 } while (0)
878
879/* We force the use of the SSE xor block because it can write around the
880   L2 cache.  We may also be able to load into the L1 cache only, depending
881   on how the CPU deals with a load to a line that is being prefetched. */
882#define XOR_SELECT_TEMPLATE(FASTEST) \
883 (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)