Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/8253pit.h | 10
-rw-r--r--  include/asm-x86_64/Kbuild | 21
-rw-r--r--  include/asm-x86_64/a.out.h | 28
-rw-r--r--  include/asm-x86_64/acpi.h | 153
-rw-r--r--  include/asm-x86_64/agp.h | 34
-rw-r--r--  include/asm-x86_64/alternative-asm.i | 12
-rw-r--r--  include/asm-x86_64/alternative.h | 159
-rw-r--r--  include/asm-x86_64/apic.h | 107
-rw-r--r--  include/asm-x86_64/apicdef.h | 392
-rw-r--r--  include/asm-x86_64/atomic.h | 466
-rw-r--r--  include/asm-x86_64/auxvec.h | 6
-rw-r--r--  include/asm-x86_64/bitops.h | 427
-rw-r--r--  include/asm-x86_64/boot.h | 1
-rw-r--r--  include/asm-x86_64/bootparam.h | 1
-rw-r--r--  include/asm-x86_64/bootsetup.h | 40
-rw-r--r--  include/asm-x86_64/bug.h | 34
-rw-r--r--  include/asm-x86_64/bugs.h | 6
-rw-r--r--  include/asm-x86_64/byteorder.h | 33
-rw-r--r--  include/asm-x86_64/cache.h | 26
-rw-r--r--  include/asm-x86_64/cacheflush.h | 35
-rw-r--r--  include/asm-x86_64/calgary.h | 72
-rw-r--r--  include/asm-x86_64/calling.h | 162
-rw-r--r--  include/asm-x86_64/checksum.h | 195
-rw-r--r--  include/asm-x86_64/cmpxchg.h | 134
-rw-r--r--  include/asm-x86_64/compat.h | 212
-rw-r--r--  include/asm-x86_64/cpu.h | 1
-rw-r--r--  include/asm-x86_64/cpufeature.h | 30
-rw-r--r--  include/asm-x86_64/cputime.h | 6
-rw-r--r--  include/asm-x86_64/current.h | 27
-rw-r--r--  include/asm-x86_64/debugreg.h | 65
-rw-r--r--  include/asm-x86_64/delay.h | 30
-rw-r--r--  include/asm-x86_64/desc.h | 174
-rw-r--r--  include/asm-x86_64/desc_defs.h | 69
-rw-r--r--  include/asm-x86_64/device.h | 15
-rw-r--r--  include/asm-x86_64/div64.h | 1
-rw-r--r--  include/asm-x86_64/dma-mapping.h | 203
-rw-r--r--  include/asm-x86_64/dma.h | 304
-rw-r--r--  include/asm-x86_64/dmi.h | 24
-rw-r--r--  include/asm-x86_64/dwarf2.h | 57
-rw-r--r--  include/asm-x86_64/e820.h | 61
-rw-r--r--  include/asm-x86_64/edac.h | 18
-rw-r--r--  include/asm-x86_64/elf.h | 180
-rw-r--r--  include/asm-x86_64/emergency-restart.h | 6
-rw-r--r--  include/asm-x86_64/errno.h | 6
-rw-r--r--  include/asm-x86_64/fb.h | 19
-rw-r--r--  include/asm-x86_64/fcntl.h | 1
-rw-r--r--  include/asm-x86_64/fixmap.h | 92
-rw-r--r--  include/asm-x86_64/floppy.h | 283
-rw-r--r--  include/asm-x86_64/fpu32.h | 10
-rw-r--r--  include/asm-x86_64/futex.h | 125
-rw-r--r--  include/asm-x86_64/genapic.h | 37
-rw-r--r--  include/asm-x86_64/hardirq.h | 23
-rw-r--r--  include/asm-x86_64/hpet.h | 18
-rw-r--r--  include/asm-x86_64/hw_irq.h | 175
-rw-r--r--  include/asm-x86_64/hypertransport.h | 1
-rw-r--r--  include/asm-x86_64/i387.h | 209
-rw-r--r--  include/asm-x86_64/i8253.h | 6
-rw-r--r--  include/asm-x86_64/ia32.h | 178
-rw-r--r--  include/asm-x86_64/ia32_unistd.h | 18
-rw-r--r--  include/asm-x86_64/ide.h | 1
-rw-r--r--  include/asm-x86_64/idle.h | 14
-rw-r--r--  include/asm-x86_64/intel_arch_perfmon.h | 31
-rw-r--r--  include/asm-x86_64/io.h | 276
-rw-r--r--  include/asm-x86_64/io_apic.h | 136
-rw-r--r--  include/asm-x86_64/ioctl.h | 1
-rw-r--r--  include/asm-x86_64/ioctls.h | 86
-rw-r--r--  include/asm-x86_64/iommu.h | 29
-rw-r--r--  include/asm-x86_64/ipcbuf.h | 29
-rw-r--r--  include/asm-x86_64/ipi.h | 128
-rw-r--r--  include/asm-x86_64/irq.h | 51
-rw-r--r--  include/asm-x86_64/irq_regs.h | 1
-rw-r--r--  include/asm-x86_64/irqflags.h | 142
-rw-r--r--  include/asm-x86_64/ist.h | 1
-rw-r--r--  include/asm-x86_64/k8.h | 14
-rw-r--r--  include/asm-x86_64/kdebug.h | 36
-rw-r--r--  include/asm-x86_64/kexec.h | 94
-rw-r--r--  include/asm-x86_64/kmap_types.h | 19
-rw-r--r--  include/asm-x86_64/kprobes.h | 90
-rw-r--r--  include/asm-x86_64/ldt.h | 36
-rw-r--r--  include/asm-x86_64/linkage.h | 6
-rw-r--r--  include/asm-x86_64/local.h | 222
-rw-r--r--  include/asm-x86_64/mach_apic.h | 29
-rw-r--r--  include/asm-x86_64/mc146818rtc.h | 29
-rw-r--r--  include/asm-x86_64/mce.h | 115
-rw-r--r--  include/asm-x86_64/mman.h | 19
-rw-r--r--  include/asm-x86_64/mmsegment.h | 8
-rw-r--r--  include/asm-x86_64/mmu.h | 21
-rw-r--r--  include/asm-x86_64/mmu_context.h | 74
-rw-r--r--  include/asm-x86_64/mmzone.h | 56
-rw-r--r--  include/asm-x86_64/module.h | 10
-rw-r--r--  include/asm-x86_64/mpspec.h | 233
-rw-r--r--  include/asm-x86_64/msgbuf.h | 27
-rw-r--r--  include/asm-x86_64/msidef.h | 1
-rw-r--r--  include/asm-x86_64/msr-index.h | 1
-rw-r--r--  include/asm-x86_64/msr.h | 187
-rw-r--r--  include/asm-x86_64/mtrr.h | 152
-rw-r--r--  include/asm-x86_64/mutex.h | 105
-rw-r--r--  include/asm-x86_64/namei.h | 11
-rw-r--r--  include/asm-x86_64/nmi.h | 95
-rw-r--r--  include/asm-x86_64/numa.h | 38
-rw-r--r--  include/asm-x86_64/page.h | 143
-rw-r--r--  include/asm-x86_64/param.h | 22
-rw-r--r--  include/asm-x86_64/parport.h | 18
-rw-r--r--  include/asm-x86_64/pci-direct.h | 17
-rw-r--r--  include/asm-x86_64/pci.h | 126
-rw-r--r--  include/asm-x86_64/pda.h | 125
-rw-r--r--  include/asm-x86_64/percpu.h | 68
-rw-r--r--  include/asm-x86_64/pgalloc.h | 119
-rw-r--r--  include/asm-x86_64/pgtable.h | 432
-rw-r--r--  include/asm-x86_64/poll.h | 1
-rw-r--r--  include/asm-x86_64/posix_types.h | 119
-rw-r--r--  include/asm-x86_64/prctl.h | 10
-rw-r--r--  include/asm-x86_64/processor-flags.h | 1
-rw-r--r--  include/asm-x86_64/processor.h | 439
-rw-r--r--  include/asm-x86_64/proto.h | 104
-rw-r--r--  include/asm-x86_64/ptrace-abi.h | 51
-rw-r--r--  include/asm-x86_64/ptrace.h | 78
-rw-r--r--  include/asm-x86_64/required-features.h | 46
-rw-r--r--  include/asm-x86_64/resource.h | 6
-rw-r--r--  include/asm-x86_64/resume-trace.h | 13
-rw-r--r--  include/asm-x86_64/rio.h | 74
-rw-r--r--  include/asm-x86_64/rtc.h | 10
-rw-r--r--  include/asm-x86_64/rwlock.h | 26
-rw-r--r--  include/asm-x86_64/scatterlist.h | 24
-rw-r--r--  include/asm-x86_64/seccomp.h | 24
-rw-r--r--  include/asm-x86_64/sections.h | 7
-rw-r--r--  include/asm-x86_64/segment.h | 53
-rw-r--r--  include/asm-x86_64/semaphore.h | 181
-rw-r--r--  include/asm-x86_64/sembuf.h | 25
-rw-r--r--  include/asm-x86_64/serial.h | 29
-rw-r--r--  include/asm-x86_64/setup.h | 6
-rw-r--r--  include/asm-x86_64/shmbuf.h | 38
-rw-r--r--  include/asm-x86_64/shmparam.h | 6
-rw-r--r--  include/asm-x86_64/sigcontext.h | 55
-rw-r--r--  include/asm-x86_64/sigcontext32.h | 71
-rw-r--r--  include/asm-x86_64/siginfo.h | 8
-rw-r--r--  include/asm-x86_64/signal.h | 181
-rw-r--r--  include/asm-x86_64/smp.h | 117
-rw-r--r--  include/asm-x86_64/socket.h | 55
-rw-r--r--  include/asm-x86_64/sockios.h | 13
-rw-r--r--  include/asm-x86_64/sparsemem.h | 26
-rw-r--r--  include/asm-x86_64/spinlock.h | 167
-rw-r--r--  include/asm-x86_64/spinlock_types.h | 20
-rw-r--r--  include/asm-x86_64/stacktrace.h | 20
-rw-r--r--  include/asm-x86_64/stat.h | 44
-rw-r--r--  include/asm-x86_64/statfs.h | 58
-rw-r--r--  include/asm-x86_64/string.h | 60
-rw-r--r--  include/asm-x86_64/suspend.h | 55
-rw-r--r--  include/asm-x86_64/swiotlb.h | 56
-rw-r--r--  include/asm-x86_64/system.h | 180
-rw-r--r--  include/asm-x86_64/tce.h | 48
-rw-r--r--  include/asm-x86_64/termbits.h | 198
-rw-r--r--  include/asm-x86_64/termios.h | 90
-rw-r--r--  include/asm-x86_64/therm_throt.h | 1
-rw-r--r--  include/asm-x86_64/thread_info.h | 169
-rw-r--r--  include/asm-x86_64/timex.h | 31
-rw-r--r--  include/asm-x86_64/tlb.h | 13
-rw-r--r--  include/asm-x86_64/tlbflush.h | 109
-rw-r--r--  include/asm-x86_64/topology.h | 71
-rw-r--r--  include/asm-x86_64/tsc.h | 1
-rw-r--r--  include/asm-x86_64/types.h | 55
-rw-r--r--  include/asm-x86_64/uaccess.h | 384
-rw-r--r--  include/asm-x86_64/ucontext.h | 12
-rw-r--r--  include/asm-x86_64/unaligned.h | 37
-rw-r--r--  include/asm-x86_64/unistd.h | 687
-rw-r--r--  include/asm-x86_64/unwind.h | 12
-rw-r--r--  include/asm-x86_64/user.h | 114
-rw-r--r--  include/asm-x86_64/user32.h | 69
-rw-r--r--  include/asm-x86_64/vga.h | 20
-rw-r--r--  include/asm-x86_64/vgtod.h | 29
-rw-r--r--  include/asm-x86_64/vsyscall.h | 44
-rw-r--r--  include/asm-x86_64/vsyscall32.h | 20
-rw-r--r--  include/asm-x86_64/xor.h | 354
173 files changed, 0 insertions, 14087 deletions
diff --git a/include/asm-x86_64/8253pit.h b/include/asm-x86_64/8253pit.h
deleted file mode 100644
index 285f78488ccb..000000000000
--- a/include/asm-x86_64/8253pit.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * 8253/8254 Programmable Interval Timer
- */
-
-#ifndef _8253PIT_H
-#define _8253PIT_H
-
-#define PIT_TICK_RATE 1193182UL
-
-#endif
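As an aside, a minimal userspace sketch (not kernel code; the helper name and the 1000 Hz value are illustrative) of how a constant like PIT_TICK_RATE gets used: deriving the 16-bit divisor the PIT must be programmed with for a desired tick frequency, with round-to-nearest as in the kernel's LATCH macro.

	#include <stdio.h>

	#define PIT_TICK_RATE 1193182UL	/* PIT input clock in Hz, as in 8253pit.h */

	/* Round-to-nearest divisor for a desired interrupt rate. */
	static unsigned int pit_divisor(unsigned long hz)
	{
		return (unsigned int)((PIT_TICK_RATE + hz / 2) / hz);
	}

	int main(void)
	{
		printf("divisor for 1000 Hz: %u\n", pit_divisor(1000));	/* 1193 */
		return 0;
	}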
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
deleted file mode 100644
index 75a2deffca68..000000000000
--- a/include/asm-x86_64/Kbuild
+++ /dev/null
@@ -1,21 +0,0 @@
-include include/asm-generic/Kbuild.asm
-
-ALTARCH := i386
-ARCHDEF := defined __x86_64__
-ALTARCHDEF := defined __i386__
-
-header-y += boot.h
-header-y += bootsetup.h
-header-y += debugreg.h
-header-y += ldt.h
-header-y += msr-index.h
-header-y += prctl.h
-header-y += ptrace-abi.h
-header-y += sigcontext32.h
-header-y += ucontext.h
-header-y += vsyscall32.h
-
-unifdef-y += mce.h
-unifdef-y += msr.h
-unifdef-y += mtrr.h
-unifdef-y += vsyscall.h
diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h
deleted file mode 100644
index e789300e41a5..000000000000
--- a/include/asm-x86_64/a.out.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __X8664_A_OUT_H__
-#define __X8664_A_OUT_H__
-
-/* 32bit a.out */
-
-struct exec
-{
-	unsigned int a_info;	/* Use macros N_MAGIC, etc for access */
-	unsigned a_text;	/* length of text, in bytes */
-	unsigned a_data;	/* length of data, in bytes */
-	unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
-	unsigned a_syms;	/* length of symbol table data in file, in bytes */
-	unsigned a_entry;	/* start address */
-	unsigned a_trsize;	/* length of relocation info for text, in bytes */
-	unsigned a_drsize;	/* length of relocation info for data, in bytes */
-};
-
-#define N_TRSIZE(a)	((a).a_trsize)
-#define N_DRSIZE(a)	((a).a_drsize)
-#define N_SYMSIZE(a)	((a).a_syms)
-
-#ifdef __KERNEL__
-#include <linux/thread_info.h>
-#define STACK_TOP	TASK_SIZE
-#define STACK_TOP_MAX	TASK_SIZE64
-#endif
-
-#endif /* __X8664_A_OUT_H__ */
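For orientation, a small self-contained sketch (userspace C, with the struct and accessors mirrored from the header above; the sizes are made up) showing how the N_*SIZE macros are used to compute the on-disk layout of a 32-bit a.out image:

	#include <stdio.h>

	struct exec {	/* mirrors struct exec from a.out.h */
		unsigned int a_info;
		unsigned a_text, a_data, a_bss, a_syms,
			 a_entry, a_trsize, a_drsize;
	};

	#define N_TRSIZE(a)	((a).a_trsize)
	#define N_DRSIZE(a)	((a).a_drsize)
	#define N_SYMSIZE(a)	((a).a_syms)

	int main(void)
	{
		struct exec ex = { .a_text = 0x1000, .a_data = 0x200,
				   .a_syms = 0x80, .a_trsize = 0x40, .a_drsize = 0x10 };

		/* a_bss occupies no file space; the other sections follow the header. */
		unsigned long body = ex.a_text + ex.a_data
				   + N_TRSIZE(ex) + N_DRSIZE(ex) + N_SYMSIZE(ex);
		printf("image body: %lu bytes\n", body);
		return 0;
	}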
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
deleted file mode 100644
index 98173357dd89..000000000000
--- a/include/asm-x86_64/acpi.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * asm-x86_64/acpi.h
- *
- * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#ifndef _ASM_ACPI_H
-#define _ASM_ACPI_H
-
-#ifdef __KERNEL__
-
-#include <acpi/pdc_intel.h>
-#include <asm/numa.h>
-
-#define COMPILER_DEPENDENT_INT64	long long
-#define COMPILER_DEPENDENT_UINT64	unsigned long long
-
-/*
- * Calling conventions:
- *
- * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
- * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
- * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
- * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
- */
-#define ACPI_SYSTEM_XFACE
-#define ACPI_EXTERNAL_XFACE
-#define ACPI_INTERNAL_XFACE
-#define ACPI_INTERNAL_VAR_XFACE
-
-/* Asm macros */
-
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS()  local_irq_enable()
-#define ACPI_FLUSH_CPU_CACHE()	wbinvd()
-
-int __acpi_acquire_global_lock(unsigned int *lock);
-int __acpi_release_global_lock(unsigned int *lock);
-
-#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
-	((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
-
-#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
-	((Acq) = __acpi_release_global_lock(&facs->global_lock))
-
-/*
- * Math helper asm macros
- */
-#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
-	asm("divl %2;" \
-	    :"=a"(q32), "=d"(r32) \
-	    :"r"(d32), \
-	    "0"(n_lo), "1"(n_hi))
-
-
-#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
-	asm("shrl $1,%2;" \
-	    "rcrl $1,%3;" \
-	    :"=r"(n_hi), "=r"(n_lo) \
-	    :"0"(n_hi), "1"(n_lo))
-
-#ifdef CONFIG_ACPI
-extern int acpi_lapic;
-extern int acpi_ioapic;
-extern int acpi_noirq;
-extern int acpi_strict;
-extern int acpi_disabled;
-extern int acpi_pci_disabled;
-extern int acpi_ht;
-static inline void disable_acpi(void)
-{
-	acpi_disabled = 1;
-	acpi_ht = 0;
-	acpi_pci_disabled = 1;
-	acpi_noirq = 1;
-}
-
-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
-extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
-static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
-static inline void acpi_disable_pci(void)
-{
-	acpi_pci_disabled = 1;
-	acpi_noirq_set();
-}
-extern int acpi_irq_balance_set(char *str);
-
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
-
-extern unsigned long acpi_wakeup_address;
-
-/* early initialization routine */
-extern void acpi_reserve_bootmem(void);
-
-#else	/* !CONFIG_ACPI */
-
-#define acpi_lapic 0
-#define acpi_ioapic 0
-static inline void acpi_noirq_set(void) { }
-static inline void acpi_disable_pci(void) { }
-
-#endif	/* !CONFIG_ACPI */
-
-extern int acpi_numa;
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
-extern int acpi_disabled;
-extern int acpi_pci_disabled;
-
-#define ARCH_HAS_POWER_INIT 1
-
-extern int acpi_skip_timer_override;
-extern int acpi_use_timer_override;
-
-#ifdef CONFIG_ACPI_NUMA
-extern void __init acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes)
-{
-}
-#endif
-
-#endif /*__KERNEL__*/
-
-#endif /*_ASM_ACPI_H*/
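To make the math helper above concrete, here is a plain-C equivalent of ACPI_DIV_64_BY_32 (a sketch, not the kernel macro: the asm version issues a single divl, which faults if the quotient overflows 32 bits, so ACPICA's callers keep n_hi below d32):

	#include <stdint.h>
	#include <stdio.h>

	/* Divide the 64-bit value (n_hi:n_lo) by d32 into a 32-bit
	 * quotient and remainder, as the divl instruction does. */
	static void div_64_by_32(uint32_t n_hi, uint32_t n_lo, uint32_t d32,
				 uint32_t *q32, uint32_t *r32)
	{
		uint64_t n = ((uint64_t)n_hi << 32) | n_lo;

		*q32 = (uint32_t)(n / d32);
		*r32 = (uint32_t)(n % d32);
	}

	int main(void)
	{
		uint32_t q, r;

		div_64_by_32(0, 1193182, 1000, &q, &r);
		printf("q=%u r=%u\n", q, r);	/* q=1193 r=182 */
		return 0;
	}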
diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h
deleted file mode 100644
index de338666f3f9..000000000000
--- a/include/asm-x86_64/agp.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef AGP_H
-#define AGP_H 1
-
-#include <asm/cacheflush.h>
-
-/*
- * Functions to keep the agpgart mappings coherent.
- * The GART gives the CPU a physical alias of memory. The alias is
- * mapped uncacheable. Make sure there are no conflicting mappings
- * with different cacheability attributes for the same page.
- */
-
-/* It is the caller's responsibility to call global_flush_tlb(),
- * for performance reasons */
-#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
-#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
-#define flush_agp_mappings() global_flush_tlb()
-
-/* Could use CLFLUSH here if the cpu supports it. But then it would
-   need to be called for each cacheline of the whole page, so it may
-   not be worth it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
-
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order)		\
-	((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order)	\
-	free_pages((unsigned long)(table), (order))
-
-#endif
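Since the comment above makes the TLB flush the caller's job, a hedged kernel-style sketch of the intended batching pattern (the function and its arguments are hypothetical):

	/* Remap a batch of pages uncacheable for the GART, paying the
	 * cost of the global TLB flush only once for the whole batch. */
	static void map_batch_into_gart(struct page **pages, int nr)
	{
		int i;

		for (i = 0; i < nr; i++)
			map_page_into_agp(pages[i]);

		flush_agp_mappings();	/* expands to global_flush_tlb() */
	}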
diff --git a/include/asm-x86_64/alternative-asm.i b/include/asm-x86_64/alternative-asm.i
deleted file mode 100644
index 0b3f1a2bb2cb..000000000000
--- a/include/asm-x86_64/alternative-asm.i
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifdef CONFIG_SMP
-	.macro LOCK_PREFIX
-1:	lock
-	.section .smp_locks,"a"
-	.align 8
-	.quad 1b
-	.previous
-	.endm
-#else
-	.macro LOCK_PREFIX
-	.endm
-#endif
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
deleted file mode 100644
index ab161e810151..000000000000
--- a/include/asm-x86_64/alternative.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef _X86_64_ALTERNATIVE_H
-#define _X86_64_ALTERNATIVE_H
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/stddef.h>
-
-/*
- * Alternative inline assembly for SMP.
- *
- * The LOCK_PREFIX macro defined here replaces the LOCK and
- * LOCK_PREFIX macros used everywhere in the source tree.
- *
- * SMP alternatives use the same data structures as the other
- * alternatives and the X86_FEATURE_UP flag to indicate the case of a
- * UP system running a SMP kernel. The existing apply_alternatives()
- * works fine for patching a SMP kernel for UP.
- *
- * The SMP alternative tables can be kept after boot and contain both
- * UP and SMP versions of the instructions to allow switching back to
- * SMP at runtime, when hotplugging in a new CPU, which is especially
- * useful in virtualized environments.
- *
- * The very common lock prefix is handled as special case in a
- * separate table which is a pure address list without replacement ptr
- * and size information. That keeps the table sizes small.
- */
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX \
-		".section .smp_locks,\"a\"\n"	\
-		"  .align 8\n"			\
-		"  .quad 661f\n" /* address */	\
-		".previous\n"			\
-		"661:\n\tlock; "
-
-#else /* ! CONFIG_SMP */
-#define LOCK_PREFIX ""
-#endif
-
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
-struct alt_instr {
-	u8 *instr;		/* original instruction */
-	u8 *replacement;
-	u8  cpuid;		/* cpuid bit set for replacement */
-	u8  instrlen;		/* length of original instruction */
-	u8  replacementlen;	/* length of new instruction, <= instrlen */
-	u8  pad[5];
-};
-
-extern void alternative_instructions(void);
-extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
-
-struct module;
-
-#ifdef CONFIG_SMP
-extern void alternatives_smp_module_add(struct module *mod, char *name,
-					void *locks, void *locks_end,
-					void *text, void *text_end);
-extern void alternatives_smp_module_del(struct module *mod);
-extern void alternatives_smp_switch(int smp);
-#else
-static inline void alternatives_smp_module_add(struct module *mod, char *name,
-					       void *locks, void *locks_end,
-					       void *text, void *text_end) {}
-static inline void alternatives_smp_module_del(struct module *mod) {}
-static inline void alternatives_smp_switch(int smp) {}
-#endif
-
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows optimized instructions to be used even on generic binary
- * kernels.
- *
- * The length of oldinstr must be greater than or equal to the length
- * of newinstr; oldinstr can be padded with nops as needed.
- *
- * For non-barrier-like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature)			\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
-		      ".section .altinstructions,\"a\"\n"		\
-		      "  .align 8\n"					\
-		      "  .quad 661b\n"            /* label */		\
-		      "  .quad 663f\n"            /* new instruction */	\
-		      "  .byte %c0\n"             /* feature bit */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"		\
-		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
-		      ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Peculiarities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r").
- * If you use variable-sized constraints like "m" or "g" in the
- * replacement, make sure to pad to the worst-case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...)	\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
-		      ".section .altinstructions,\"a\"\n"		\
-		      "  .align 8\n"					\
-		      "  .quad 661b\n"            /* label */		\
-		      "  .quad 663f\n"            /* new instruction */	\
-		      "  .byte %c0\n"             /* feature bit */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"		\
-		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
-		      ".previous" :: "i" (feature), ##input)
-
-/* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
-		      ".section .altinstructions,\"a\"\n"		\
-		      "  .align 8\n"					\
-		      "  .quad 661b\n"            /* label */		\
-		      "  .quad 663f\n"            /* new instruction */	\
-		      "  .byte %c[feat]\n"        /* feature bit */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"		\
-		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
-		      ".previous" : output : [feat] "i" (feature), ##input)
-
-/*
- * Use this macro if you need more than one output parameter
- * in alternative_io.
- */
-#define ASM_OUTPUT2(a, b) a, b
-
-struct paravirt_patch;
-#ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
-#else
-static inline void
-apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
-{}
-#define __parainstructions NULL
-#define __parainstructions_end NULL
-#endif
-
-extern void text_poke(void *addr, unsigned char *opcode, int len);
-
-#endif /* _X86_64_ALTERNATIVE_H */
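For context, a hedged sketch of how these macros are typically invoked, patterned on the prefetch helpers elsewhere in this tree (treat the feature bit as illustrative). Note that, as the comment says, input argument numbering starts at %1 because %0 is the feature constant:

	/* Use prefetchw when the CPU advertises 3DNow!, otherwise fall
	 * back to the architecturally safe prefetcht0. */
	static inline void prefetchw_sketch(void *x)
	{
		alternative_input("prefetcht0 (%1)",
				  "prefetchw (%1)",
				  X86_FEATURE_3DNOW,
				  "r" (x));
	}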
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
deleted file mode 100644
index 85125ef3c414..000000000000
--- a/include/asm-x86_64/apic.h
+++ /dev/null
@@ -1,107 +0,0 @@
-#ifndef __ASM_APIC_H
-#define __ASM_APIC_H
-
-#include <linux/pm.h>
-#include <linux/delay.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <asm/system.h>
-
-#define Dprintk(x...)
-
-/*
- * Debugging macros
- */
-#define APIC_QUIET   0
-#define APIC_VERBOSE 1
-#define APIC_DEBUG   2
-
-extern int apic_verbosity;
-extern int apic_runs_main_timer;
-extern int ioapic_force;
-extern int apic_mapped;
-
-/*
- * Define the default level of output to be very little.
- * This can be turned up by using apic=verbose for more
- * information and apic=debug for _lots_ of information.
- * apic_verbosity is defined in apic.c
- */
-#define apic_printk(v, s, a...) do {       \
-		if ((v) <= apic_verbosity) \
-			printk(s, ##a);    \
-	} while (0)
-
-struct pt_regs;
-
-/*
- * Basic functions accessing APICs.
- */
-
-static __inline void apic_write(unsigned long reg, unsigned int v)
-{
-	*((volatile unsigned int *)(APIC_BASE+reg)) = v;
-}
-
-static __inline unsigned int apic_read(unsigned long reg)
-{
-	return *((volatile unsigned int *)(APIC_BASE+reg));
-}
-
-extern void apic_wait_icr_idle(void);
-extern unsigned int safe_apic_wait_icr_idle(void);
-
-static inline void ack_APIC_irq(void)
-{
-	/*
-	 * ack_APIC_irq() actually gets compiled as a single instruction:
-	 * - a single rmw on Pentium/82489DX
-	 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
-	 * ... yummie.
-	 */
-
-	/* Docs say use 0 for future compatibility */
-	apic_write(APIC_EOI, 0);
-}
-
-extern int get_maxlvt (void);
-extern void clear_local_APIC (void);
-extern void connect_bsp_APIC (void);
-extern void disconnect_bsp_APIC (int virt_wire_setup);
-extern void disable_local_APIC (void);
-extern int verify_local_APIC (void);
-extern void cache_APIC_registers (void);
-extern void sync_Arb_IDs (void);
-extern void init_bsp_APIC (void);
-extern void setup_local_APIC (void);
-extern void init_apic_mappings (void);
-extern void smp_local_timer_interrupt (void);
-extern void setup_boot_APIC_clock (void);
-extern void setup_secondary_APIC_clock (void);
-extern int APIC_init_uniprocessor (void);
-extern void disable_APIC_timer(void);
-extern void enable_APIC_timer(void);
-extern void setup_apic_routing(void);
-
-extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
-				    unsigned char msg_type, unsigned char mask);
-
-extern int apic_is_clustered_box(void);
-
-#define K8_APIC_EXT_LVT_BASE		0x500
-#define K8_APIC_EXT_INT_MSG_FIX		0x0
-#define K8_APIC_EXT_INT_MSG_SMI		0x2
-#define K8_APIC_EXT_INT_MSG_NMI		0x4
-#define K8_APIC_EXT_INT_MSG_EXT		0x7
-#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD	0
-
-void smp_send_timer_broadcast_ipi(void);
-void switch_APIC_timer_to_ipi(void *cpumask);
-void switch_ipi_to_APIC_timer(void *cpumask);
-
-#define ARCH_APICTIMER_STOPS_ON_C3	1
-
-extern unsigned boot_cpu_id;
-extern int local_apic_timer_c2_ok;
-
-#endif /* __ASM_APIC_H */
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
deleted file mode 100644
index 1dd40067c67c..000000000000
--- a/include/asm-x86_64/apicdef.h
+++ /dev/null
@@ -1,392 +0,0 @@
-#ifndef __ASM_APICDEF_H
-#define __ASM_APICDEF_H
-
-/*
- * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
- *
- * Alan Cox <Alan.Cox@linux.org>, 1995.
- * Ingo Molnar <mingo@redhat.com>, 1999, 2000
- */
-
-#define	APIC_DEFAULT_PHYS_BASE		0xfee00000
-
-#define	APIC_ID				0x20
-#define	APIC_ID_MASK			(0xFFu<<24)
-#define	GET_APIC_ID(x)			(((x)>>24)&0xFFu)
-#define	SET_APIC_ID(x)			(((x)<<24))
-#define	APIC_LVR			0x30
-#define	APIC_LVR_MASK			0xFF00FF
-#define	GET_APIC_VERSION(x)		((x)&0xFFu)
-#define	GET_APIC_MAXLVT(x)		(((x)>>16)&0xFFu)
-#define	APIC_INTEGRATED(x)		((x)&0xF0u)
-#define	APIC_TASKPRI			0x80
-#define	APIC_TPRI_MASK			0xFFu
-#define	APIC_ARBPRI			0x90
-#define	APIC_ARBPRI_MASK		0xFFu
-#define	APIC_PROCPRI			0xA0
-#define	APIC_EOI			0xB0
-#define	APIC_EIO_ACK			0x0	/* Write this to the EOI register */
-#define	APIC_RRR			0xC0
-#define	APIC_LDR			0xD0
-#define	APIC_LDR_MASK			(0xFFu<<24)
-#define	GET_APIC_LOGICAL_ID(x)		(((x)>>24)&0xFFu)
-#define	SET_APIC_LOGICAL_ID(x)		(((x)<<24))
-#define	APIC_ALL_CPUS			0xFFu
-#define	APIC_DFR			0xE0
-#define	APIC_DFR_CLUSTER		0x0FFFFFFFul
-#define	APIC_DFR_FLAT			0xFFFFFFFFul
-#define	APIC_SPIV			0xF0
-#define	APIC_SPIV_FOCUS_DISABLED	(1<<9)
-#define	APIC_SPIV_APIC_ENABLED		(1<<8)
-#define	APIC_ISR			0x100
-#define	APIC_ISR_NR			0x8	/* Number of 32 bit ISR registers. */
-#define	APIC_TMR			0x180
-#define	APIC_IRR			0x200
-#define	APIC_ESR			0x280
-#define	APIC_ESR_SEND_CS		0x00001
-#define	APIC_ESR_RECV_CS		0x00002
-#define	APIC_ESR_SEND_ACC		0x00004
-#define	APIC_ESR_RECV_ACC		0x00008
-#define	APIC_ESR_SENDILL		0x00020
-#define	APIC_ESR_RECVILL		0x00040
-#define	APIC_ESR_ILLREGA		0x00080
-#define	APIC_ICR			0x300
-#define	APIC_DEST_SELF			0x40000
-#define	APIC_DEST_ALLINC		0x80000
-#define	APIC_DEST_ALLBUT		0xC0000
-#define	APIC_ICR_RR_MASK		0x30000
-#define	APIC_ICR_RR_INVALID		0x00000
-#define	APIC_ICR_RR_INPROG		0x10000
-#define	APIC_ICR_RR_VALID		0x20000
-#define	APIC_INT_LEVELTRIG		0x08000
-#define	APIC_INT_ASSERT			0x04000
-#define	APIC_ICR_BUSY			0x01000
-#define	APIC_DEST_LOGICAL		0x00800
-#define	APIC_DEST_PHYSICAL		0x00000
-#define	APIC_DM_FIXED			0x00000
-#define	APIC_DM_LOWEST			0x00100
-#define	APIC_DM_SMI			0x00200
-#define	APIC_DM_REMRD			0x00300
-#define	APIC_DM_NMI			0x00400
-#define	APIC_DM_INIT			0x00500
-#define	APIC_DM_STARTUP			0x00600
-#define	APIC_DM_EXTINT			0x00700
-#define	APIC_VECTOR_MASK		0x000FF
-#define	APIC_ICR2			0x310
-#define	GET_APIC_DEST_FIELD(x)		(((x)>>24)&0xFF)
-#define	SET_APIC_DEST_FIELD(x)		((x)<<24)
-#define	APIC_LVTT			0x320
-#define	APIC_LVTTHMR			0x330
-#define	APIC_LVTPC			0x340
-#define	APIC_LVT0			0x350
-#define	APIC_LVT_TIMER_BASE_MASK	(0x3<<18)
-#define	GET_APIC_TIMER_BASE(x)		(((x)>>18)&0x3)
-#define	SET_APIC_TIMER_BASE(x)		(((x)<<18))
-#define	APIC_TIMER_BASE_CLKIN		0x0
-#define	APIC_TIMER_BASE_TMBASE		0x1
-#define	APIC_TIMER_BASE_DIV		0x2
-#define	APIC_LVT_TIMER_PERIODIC		(1<<17)
-#define	APIC_LVT_MASKED			(1<<16)
-#define	APIC_LVT_LEVEL_TRIGGER		(1<<15)
-#define	APIC_LVT_REMOTE_IRR		(1<<14)
-#define	APIC_INPUT_POLARITY		(1<<13)
-#define	APIC_SEND_PENDING		(1<<12)
-#define	APIC_MODE_MASK			0x700
-#define	GET_APIC_DELIVERY_MODE(x)	(((x)>>8)&0x7)
-#define	SET_APIC_DELIVERY_MODE(x,y)	(((x)&~0x700)|((y)<<8))
-#define	APIC_MODE_FIXED			0x0
-#define	APIC_MODE_NMI			0x4
-#define	APIC_MODE_EXTINT		0x7
-#define	APIC_LVT1			0x360
-#define	APIC_LVTERR			0x370
-#define	APIC_TMICT			0x380
-#define	APIC_TMCCT			0x390
-#define	APIC_TDCR			0x3E0
-#define	APIC_TDR_DIV_TMBASE		(1<<2)
-#define	APIC_TDR_DIV_1			0xB
-#define	APIC_TDR_DIV_2			0x0
-#define	APIC_TDR_DIV_4			0x1
-#define	APIC_TDR_DIV_8			0x2
-#define	APIC_TDR_DIV_16			0x3
-#define	APIC_TDR_DIV_32			0x8
-#define	APIC_TDR_DIV_64			0x9
-#define	APIC_TDR_DIV_128		0xA
-
-#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
-
-#define MAX_IO_APICS 128
-#define MAX_LOCAL_APIC 256
-
-/*
- * All x86-64 systems are xAPIC compatible.
- * In the following, "apicid" is a physical APIC ID.
- */
-#define XAPIC_DEST_CPUS_SHIFT	4
-#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-#define APIC_CLUSTER(apicid)	((apicid) & XAPIC_DEST_CLUSTER_MASK)
-#define APIC_CLUSTERID(apicid)	(APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
-#define APIC_CPUID(apicid)	((apicid) & XAPIC_DEST_CPUS_MASK)
-#define NUM_APIC_CLUSTERS	((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
-
-/*
- * The local APIC register structure, memory mapped. Not terribly well
- * tested, but we might eventually use it in the future - the reason we
- * cannot use it right now is the P5 APIC: it has an erratum whereby it
- * cannot take 8-bit reads and writes, only 32-bit ones ...
- */
-#define u32 unsigned int
-
-struct local_apic {
-
-/*000*/	struct { u32 __reserved[4]; } __reserved_01;
-
-/*010*/	struct { u32 __reserved[4]; } __reserved_02;
-
-/*020*/	struct { /* APIC ID Register */
-		u32	__reserved_1	: 24,
-			phys_apic_id	:  4,
-			__reserved_2	:  4;
-		u32 __reserved[3];
-	} id;
-
-/*030*/	const
-	struct { /* APIC Version Register */
-		u32	version		:  8,
-			__reserved_1	:  8,
-			max_lvt		:  8,
-			__reserved_2	:  8;
-		u32 __reserved[3];
-	} version;
-
-/*040*/	struct { u32 __reserved[4]; } __reserved_03;
-
-/*050*/	struct { u32 __reserved[4]; } __reserved_04;
-
-/*060*/	struct { u32 __reserved[4]; } __reserved_05;
-
-/*070*/	struct { u32 __reserved[4]; } __reserved_06;
-
-/*080*/	struct { /* Task Priority Register */
-		u32	priority	:  8,
-			__reserved_1	: 24;
-		u32 __reserved_2[3];
-	} tpr;
-
-/*090*/	const
-	struct { /* Arbitration Priority Register */
-		u32	priority	:  8,
-			__reserved_1	: 24;
-		u32 __reserved_2[3];
-	} apr;
-
-/*0A0*/	const
-	struct { /* Processor Priority Register */
-		u32	priority	:  8,
-			__reserved_1	: 24;
-		u32 __reserved_2[3];
-	} ppr;
-
-/*0B0*/	struct { /* End Of Interrupt Register */
-		u32	eoi;
-		u32 __reserved[3];
-	} eoi;
-
-/*0C0*/	struct { u32 __reserved[4]; } __reserved_07;
-
-/*0D0*/	struct { /* Logical Destination Register */
-		u32	__reserved_1	: 24,
-			logical_dest	:  8;
-		u32 __reserved_2[3];
-	} ldr;
-
-/*0E0*/	struct { /* Destination Format Register */
-		u32	__reserved_1	: 28,
-			model		:  4;
-		u32 __reserved_2[3];
-	} dfr;
-
-/*0F0*/	struct { /* Spurious Interrupt Vector Register */
-		u32	spurious_vector	:  8,
-			apic_enabled	:  1,
-			focus_cpu	:  1,
-			__reserved_2	: 22;
-		u32 __reserved_3[3];
-	} svr;
-
-/*100*/	struct { /* In Service Register */
-/*170*/		u32 bitfield;
-		u32 __reserved[3];
-	} isr [8];
-
-/*180*/	struct { /* Trigger Mode Register */
-/*1F0*/		u32 bitfield;
-		u32 __reserved[3];
-	} tmr [8];
-
-/*200*/	struct { /* Interrupt Request Register */
-/*270*/		u32 bitfield;
-		u32 __reserved[3];
-	} irr [8];
-
-/*280*/	union { /* Error Status Register */
-		struct {
-			u32	send_cs_error			:  1,
-				receive_cs_error		:  1,
-				send_accept_error		:  1,
-				receive_accept_error		:  1,
-				__reserved_1			:  1,
-				send_illegal_vector		:  1,
-				receive_illegal_vector		:  1,
-				illegal_register_address	:  1,
-				__reserved_2			: 24;
-			u32 __reserved_3[3];
-		} error_bits;
-		struct {
-			u32 errors;
-			u32 __reserved_3[3];
-		} all_errors;
-	} esr;
-
-/*290*/	struct { u32 __reserved[4]; } __reserved_08;
-
-/*2A0*/	struct { u32 __reserved[4]; } __reserved_09;
-
-/*2B0*/	struct { u32 __reserved[4]; } __reserved_10;
-
-/*2C0*/	struct { u32 __reserved[4]; } __reserved_11;
-
-/*2D0*/	struct { u32 __reserved[4]; } __reserved_12;
-
-/*2E0*/	struct { u32 __reserved[4]; } __reserved_13;
-
-/*2F0*/	struct { u32 __reserved[4]; } __reserved_14;
-
-/*300*/	struct { /* Interrupt Command Register 1 */
-		u32	vector			:  8,
-			delivery_mode		:  3,
-			destination_mode	:  1,
-			delivery_status		:  1,
-			__reserved_1		:  1,
-			level			:  1,
-			trigger			:  1,
-			__reserved_2		:  2,
-			shorthand		:  2,
-			__reserved_3		: 12;
-		u32 __reserved_4[3];
-	} icr1;
-
-/*310*/	struct { /* Interrupt Command Register 2 */
-		union {
-			u32	__reserved_1	: 24,
-				phys_dest	:  4,
-				__reserved_2	:  4;
-			u32	__reserved_3	: 24,
-				logical_dest	:  8;
-		} dest;
-		u32 __reserved_4[3];
-	} icr2;
-
-/*320*/	struct { /* LVT - Timer */
-		u32	vector		:  8,
-			__reserved_1	:  4,
-			delivery_status	:  1,
-			__reserved_2	:  3,
-			mask		:  1,
-			timer_mode	:  1,
-			__reserved_3	: 14;
-		u32 __reserved_4[3];
-	} lvt_timer;
-
-/*330*/	struct { /* LVT - Thermal Sensor */
-		u32	vector		:  8,
-			delivery_mode	:  3,
-			__reserved_1	:  1,
-			delivery_status	:  1,
-			__reserved_2	:  3,
-			mask		:  1,
-			__reserved_3	: 15;
-		u32 __reserved_4[3];
-	} lvt_thermal;
-
-/*340*/	struct { /* LVT - Performance Counter */
-		u32	vector		:  8,
-			delivery_mode	:  3,
-			__reserved_1	:  1,
-			delivery_status	:  1,
-			__reserved_2	:  3,
-			mask		:  1,
-			__reserved_3	: 15;
-		u32 __reserved_4[3];
-	} lvt_pc;
-
-/*350*/	struct { /* LVT - LINT0 */
-		u32	vector		:  8,
-			delivery_mode	:  3,
-			__reserved_1	:  1,
-			delivery_status	:  1,
-			polarity	:  1,
-			remote_irr	:  1,
-			trigger		:  1,
-			mask		:  1,
-			__reserved_2	: 15;
-		u32 __reserved_3[3];
-	} lvt_lint0;
-
-/*360*/	struct { /* LVT - LINT1 */
-		u32	vector		:  8,
-			delivery_mode	:  3,
-			__reserved_1	:  1,
-			delivery_status	:  1,
-			polarity	:  1,
-			remote_irr	:  1,
-			trigger		:  1,
-			mask		:  1,
-			__reserved_2	: 15;
-		u32 __reserved_3[3];
-	} lvt_lint1;
-
-/*370*/	struct { /* LVT - Error */
-		u32	vector		:  8,
-			__reserved_1	:  4,
-			delivery_status	:  1,
-			__reserved_2	:  3,
-			mask		:  1,
-			__reserved_3	: 15;
-		u32 __reserved_4[3];
-	} lvt_error;
-
-/*380*/	struct { /* Timer Initial Count Register */
-		u32	initial_count;
-		u32 __reserved_2[3];
-	} timer_icr;
-
-/*390*/	const
-	struct { /* Timer Current Count Register */
-		u32	curr_count;
-		u32 __reserved_2[3];
-	} timer_ccr;
-
-/*3A0*/	struct { u32 __reserved[4]; } __reserved_16;
-
-/*3B0*/	struct { u32 __reserved[4]; } __reserved_17;
-
-/*3C0*/	struct { u32 __reserved[4]; } __reserved_18;
-
-/*3D0*/	struct { u32 __reserved[4]; } __reserved_19;
-
-/*3E0*/	struct { /* Timer Divide Configuration Register */
-		u32	divisor		:  4,
-			__reserved_1	: 28;
-		u32 __reserved_2[3];
-	} timer_dcr;
-
-/*3F0*/	struct { u32 __reserved[4]; } __reserved_20;
-
-} __attribute__ ((packed));
-
-#undef u32
-
-#define BAD_APICID 0xFFu
-
-#endif
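Putting apic.h and these definitions together, a hedged sketch (modeled loosely on this tree's ipi.h, simplified and hypothetical) of sending a fixed-vector IPI to a physical APIC ID: the destination goes into ICR2 first, then the command word into ICR triggers the send.

	static void send_fixed_ipi_sketch(unsigned int apicid, int vector)
	{
		apic_wait_icr_idle();	/* from apic.h: ICR must be idle first */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write(APIC_ICR, APIC_DEST_PHYSICAL | APIC_DM_FIXED | vector);
	}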
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
deleted file mode 100644
index f2e64634fa48..000000000000
--- a/include/asm-x86_64/atomic.h
+++ /dev/null
@@ -1,466 +0,0 @@
-#ifndef __ARCH_X86_64_ATOMIC__
-#define __ARCH_X86_64_ATOMIC__
-
-#include <asm/alternative.h>
-#include <asm/cmpxchg.h>
-
-/* atomic_t should be 32 bit signed type */
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { int counter; } atomic_t;
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v)		((v)->counter)
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i)		(((v)->counter) = (i))
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
-	return c;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
-	return c != 0;
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
-	return c != 0;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
-	return c;
-}
-
-/**
- * atomic_add_return - add and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
-	int __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
-	return i + __i;
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
-	return atomic_add_return(-i,v);
-}
-
-#define atomic_inc_return(v)  (atomic_add_return(1,v))
-#define atomic_dec_return(v)  (atomic_sub_return(1,v))
-
-/* A 64-bit atomic type */
-
-typedef struct { volatile long counter; } atomic64_t;
-
-#define ATOMIC64_INIT(i)	{ (i) }
-
-/**
- * atomic64_read - read atomic64 variable
- * @v: pointer of type atomic64_t
- *
- * Atomically reads the value of @v.
- * Doesn't imply a read memory barrier.
- */
-#define atomic64_read(v)		((v)->counter)
-
-/**
- * atomic64_set - set atomic64 variable
- * @v: pointer to type atomic64_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic64_set(v,i)		(((v)->counter) = (i))
-
-/**
- * atomic64_add - add integer to atomic64 variable
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic64_sub - subtract the atomic64 variable
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
-	return c;
-}
-
-/**
- * atomic64_inc - increment atomic64 variable
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic64_inc(atomic64_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
-}
-
-/**
- * atomic64_dec - decrement atomic64 variable
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic64_dec(atomic64_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
-}
-
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic64_dec_and_test(atomic64_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
-	return c != 0;
-}
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic64_inc_and_test(atomic64_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
-	return c != 0;
-}
-
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
-	return c;
-}
-
-/**
- * atomic64_add_return - add and return
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t *v)
-{
-	long __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddq %0, %1;"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
-	return i + __i;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
-{
-	return atomic64_add_return(-i,v);
-}
-
-#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
-#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
-
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
-
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
-#include <asm-generic/atomic.h>
-#endif
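As a usage note, a hedged kernel-style sketch of the reference-count pattern these primitives exist for (struct obj and obj_free() are assumptions for illustration): atomic_inc_not_zero() refuses to revive an object whose count already hit zero, and atomic_dec_and_test() elects exactly one caller to free it.

	struct obj {
		atomic_t refcount;	/* ATOMIC_INIT(1) at creation */
		/* ... payload ... */
	};

	static int obj_get(struct obj *o)
	{
		/* Returns 0 if the count was already zero: we lost the race. */
		return atomic_inc_not_zero(&o->refcount);
	}

	static void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refcount))
			obj_free(o);	/* assumed helper */
	}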
diff --git a/include/asm-x86_64/auxvec.h b/include/asm-x86_64/auxvec.h
deleted file mode 100644
index 1d5ab0d03950..000000000000
--- a/include/asm-x86_64/auxvec.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_X86_64_AUXVEC_H
-#define __ASM_X86_64_AUXVEC_H
-
-#define AT_SYSINFO_EHDR 33
-
-#endif
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
deleted file mode 100644
index d4dbbe5f7bd9..000000000000
--- a/include/asm-x86_64/bitops.h
+++ /dev/null
@@ -1,427 +0,0 @@
-#ifndef _X86_64_BITOPS_H
-#define _X86_64_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <asm/alternative.h>
-
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
-/* Technically wrong, but this avoids compilation errors on some gcc
-   versions. */
-#define ADDR "=m" (*(volatile long *) addr)
-#else
-#define ADDR "+m" (*(volatile long *) addr)
-#endif
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %1,%0"
-		:ADDR
-		:"dIr" (nr) : "memory");
-}
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __set_bit(int nr, volatile void * addr)
-{
-	__asm__ volatile(
-		"btsl %1,%0"
-		:ADDR
-		:"dIr" (nr) : "memory");
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-static __inline__ void __clear_bit(int nr, volatile void * addr)
-{
-	__asm__ __volatile__(
-		"btrl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
-	__asm__ __volatile__(
-		"btcl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void change_bit(int nr, volatile void * addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr));
-	return oldbit;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr));
-	return oldbit;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static int test_bit(int nr, const volatile void * addr);
-#endif
-
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
-{
-	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int variable_test_bit(int nr, volatile const void * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit)
-		:"m" (*(volatile long *)addr),"dIr" (nr));
-	return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-#undef ADDR
-
-extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
-extern long find_first_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_bit(const unsigned long * addr, long size, long offset);
-/* return index of first bit set in val or max when no bit is set */
263static inline unsigned long __scanbit(unsigned long val, unsigned long max)
264{
265 asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
266 return val;
267}
268
269#define find_first_bit(addr,size) \
270((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
271 (__scanbit(*(unsigned long *)addr,(size))) : \
272 find_first_bit(addr,size)))
273
274#define find_next_bit(addr,size,off) \
275((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
276 ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
277 find_next_bit(addr,size,off)))
278
279#define find_first_zero_bit(addr,size) \
280((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
281 (__scanbit(~*(unsigned long *)addr,(size))) : \
282 find_first_zero_bit(addr,size)))
283
284#define find_next_zero_bit(addr,size,off) \
285((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
286 ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
287 find_next_zero_bit(addr,size,off)))
288
289/*
290 * Find string of zero bits in a bitmap. -1 when not found.
291 */
292extern unsigned long
293find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
294
295static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
296 int len)
297{
298 unsigned long end = i + len;
299 while (i < end) {
300 __set_bit(i, bitmap);
301 i++;
302 }
303}
304
305static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
306 int len)
307{
308 unsigned long end = i + len;
309 while (i < end) {
310 __clear_bit(i, bitmap);
311 i++;
312 }
313}
314
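Together with find_next_zero_string() above, these helpers form a simple range allocator over a bitmap, which is how the IOMMU code uses them; a sketch (map and nbits are hypothetical):

	long pos = find_next_zero_string(map, 0, nbits, 4);
	if (pos >= 0)
		set_bit_string(map, pos, 4);	/* claim 4 consecutive bits */
	/* ... later ... */
	__clear_bit_string(map, pos, 4);	/* release them again */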
315/**
316 * ffz - find first zero in word.
317 * @word: The word to search
318 *
319 * Undefined if no zero exists, so code should check against ~0UL first.
320 */
321static __inline__ unsigned long ffz(unsigned long word)
322{
323 __asm__("bsfq %1,%0"
324 :"=r" (word)
325 :"r" (~word));
326 return word;
327}
328
329/**
330 * __ffs - find first set bit in word.
331 * @word: The word to search
332 *
333 * Undefined if no bit is set, so code should check against 0 first.
334 */
335static __inline__ unsigned long __ffs(unsigned long word)
336{
337 __asm__("bsfq %1,%0"
338 :"=r" (word)
339 :"rm" (word));
340 return word;
341}
342
343/**
344 * __fls - find last set bit in word.
345 * @word: The word to search
346 *
347 * Undefined if no bit is set, so code should check against 0 first.
348 */
349static __inline__ unsigned long __fls(unsigned long word)
350{
351 __asm__("bsrq %1,%0"
352 :"=r" (word)
353 :"rm" (word));
354 return word;
355}
356
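A worked example of the three raw scans on one sample word:

	unsigned long w = 0x28;	/* binary 101000 */

	__ffs(w);	/* == 3, lowest set bit   */
	__fls(w);	/* == 5, highest set bit  */
	ffz(w);		/* == 0, lowest clear bit */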
357#ifdef __KERNEL__
358
359#include <asm-generic/bitops/sched.h>
360
361/**
362 * ffs - find first bit set
363 * @x: the word to search
364 *
365 * This is defined the same way as
366 * the libc and compiler builtin ffs routines, therefore
367 * differs in spirit from the above ffz (man ffs).
368 */
369static __inline__ int ffs(int x)
370{
371 int r;
372
373 __asm__("bsfl %1,%0\n\t"
374 "cmovzl %2,%0"
375 : "=r" (r) : "rm" (x), "r" (-1));
376 return r+1;
377}
378
379/**
380 * fls64 - find last bit set in 64 bit word
381 * @x: the word to search
382 *
383 * This is defined the same way as fls.
384 */
385static __inline__ int fls64(__u64 x)
386{
387 if (x == 0)
388 return 0;
389 return __fls(x) + 1;
390}
391
392/**
393 * fls - find last bit set
394 * @x: the word to search
395 *
396 * This is defined the same way as ffs.
397 */
398static __inline__ int fls(int x)
399{
400 int r;
401
402 __asm__("bsrl %1,%0\n\t"
403 "cmovzl %2,%0"
404 : "=&r" (r) : "rm" (x), "rm" (-1));
405 return r+1;
406}
407
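Unlike the double-underscore scans above, ffs/fls/fls64 are 1-based and well defined for a zero argument; for example:

	ffs(0);			/* == 0               */
	ffs(8);			/* == 4, bit 3 is set */
	fls(8);			/* == 4               */
	fls64(1ULL << 40);	/* == 41              */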
408#define ARCH_HAS_FAST_MULTIPLIER 1
409
410#include <asm-generic/bitops/hweight.h>
411
412#endif /* __KERNEL__ */
413
414#ifdef __KERNEL__
415
416#include <asm-generic/bitops/ext2-non-atomic.h>
417
418#define ext2_set_bit_atomic(lock,nr,addr) \
419 test_and_set_bit((nr),(unsigned long*)addr)
420#define ext2_clear_bit_atomic(lock,nr,addr) \
421 test_and_clear_bit((nr),(unsigned long*)addr)
422
423#include <asm-generic/bitops/minix.h>
424
425#endif /* __KERNEL__ */
426
427#endif /* _X86_64_BITOPS_H */
diff --git a/include/asm-x86_64/boot.h b/include/asm-x86_64/boot.h
deleted file mode 100644
index 3c46cea8db7f..000000000000
--- a/include/asm-x86_64/boot.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/boot.h>
diff --git a/include/asm-x86_64/bootparam.h b/include/asm-x86_64/bootparam.h
deleted file mode 100644
index aa82e5238d82..000000000000
--- a/include/asm-x86_64/bootparam.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/bootparam.h>
diff --git a/include/asm-x86_64/bootsetup.h b/include/asm-x86_64/bootsetup.h
deleted file mode 100644
index 7b1c3ad155fd..000000000000
--- a/include/asm-x86_64/bootsetup.h
+++ /dev/null
@@ -1,40 +0,0 @@
1
2#ifndef _X86_64_BOOTSETUP_H
3#define _X86_64_BOOTSETUP_H 1
4
5#define BOOT_PARAM_SIZE 4096
6extern char x86_boot_params[BOOT_PARAM_SIZE];
7
8/*
9 * This is set up by the setup-routine at boot-time
10 */
11#define PARAM ((unsigned char *)x86_boot_params)
12#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
13#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
14#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
15#define E820_MAP_NR (*(char*) (PARAM+E820NR))
16#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
17#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
18#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
19#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
20#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
21#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
22#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
23#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
24#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
25#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
26#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
27#define INITRD_START (*(unsigned int *) (PARAM+0x218))
28#define INITRD_SIZE (*(unsigned int *) (PARAM+0x21c))
29#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
30#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
31#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
32#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
33#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
34#define COMMAND_LINE boot_command_line
35
36#define RAMDISK_IMAGE_START_MASK 0x07FF
37#define RAMDISK_PROMPT_FLAG 0x8000
38#define RAMDISK_LOAD_FLAG 0x4000
39
40#endif
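The accessors above are fixed offsets into the boot parameter page; a minimal sketch of how setup code consumes them (reserve_initrd is a hypothetical helper, not part of this header):

	if (LOADER_TYPE && INITRD_START)
		reserve_initrd(INITRD_START, INITRD_SIZE);

	printk("boot video mode: %04x\n", SAVED_VIDEO_MODE);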
diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h
deleted file mode 100644
index 682606414913..000000000000
--- a/include/asm-x86_64/bug.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef __ASM_X8664_BUG_H
2#define __ASM_X8664_BUG_H 1
3
4#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG
6
7#ifdef CONFIG_DEBUG_BUGVERBOSE
8#define BUG() \
9 do { \
10 asm volatile("1:\tud2\n" \
11 ".pushsection __bug_table,\"a\"\n" \
12 "2:\t.quad 1b, %c0\n" \
13 "\t.word %c1, 0\n" \
14 "\t.org 2b+%c2\n" \
15 ".popsection" \
16 : : "i" (__FILE__), "i" (__LINE__), \
17 "i" (sizeof(struct bug_entry))); \
18 for(;;) ; \
19 } while(0)
20#else
21#define BUG() \
22 do { \
23 asm volatile("ud2"); \
24 for(;;) ; \
25 } while(0)
26#endif
27
28void out_of_line_bug(void);
29#else
30static inline void out_of_line_bug(void) { }
31#endif
32
33#include <asm-generic/bug.h>
34#endif
diff --git a/include/asm-x86_64/bugs.h b/include/asm-x86_64/bugs.h
deleted file mode 100644
index b33dc04d8f42..000000000000
--- a/include/asm-x86_64/bugs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_X86_64_BUGS_H
2#define _ASM_X86_64_BUGS_H
3
4void check_bugs(void);
5
6#endif /* _ASM_X86_64_BUGS_H */
diff --git a/include/asm-x86_64/byteorder.h b/include/asm-x86_64/byteorder.h
deleted file mode 100644
index 5e86c868c75e..000000000000
--- a/include/asm-x86_64/byteorder.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef _X86_64_BYTEORDER_H
2#define _X86_64_BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8
9static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
10{
11 __asm__("bswapq %0" : "=r" (x) : "0" (x));
12 return x;
13}
14
15static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
16{
17 __asm__("bswapl %0" : "=r" (x) : "0" (x));
18 return x;
19}
20
21/* Do not define swab16. Gcc is smart enough to recognize the plain C
22 version and convert it into a rotation or exchange. */
23
24#define __arch__swab32(x) ___arch__swab32(x)
25#define __arch__swab64(x) ___arch__swab64(x)
26
27#endif /* __GNUC__ */
28
29#define __BYTEORDER_HAS_U64__
30
31#include <linux/byteorder/little_endian.h>
32
33#endif /* _X86_64_BYTEORDER_H */
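For reference, the plain C 16-bit swap that the comment above alludes to would look like this; gcc compiles it to a single rotate (a sketch, not part of the header):

	static __inline__ __attribute_const__ __u16 swab16_c(__u16 x)
	{
		return (x << 8) | (x >> 8);	/* becomes rolw $8 */
	}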
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
deleted file mode 100644
index 052df758ae61..000000000000
--- a/include/asm-x86_64/cache.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * include/asm-x86_64/cache.h
3 */
4#ifndef __ARCH_X8664_CACHE_H
5#define __ARCH_X8664_CACHE_H
6
7
8/* L1 cache line size */
9#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11
12#ifdef CONFIG_X86_VSMP
13
14/* vSMP Internode cacheline shift */
15#define INTERNODE_CACHE_SHIFT (12)
16#ifdef CONFIG_SMP
17#define __cacheline_aligned_in_smp \
18 __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
19 __attribute__((__section__(".data.page_aligned")))
20#endif
21
22#endif
23
24#define __read_mostly __attribute__((__section__(".data.read_mostly")))
25
26#endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
deleted file mode 100644
index ab1cb5c7dc92..000000000000
--- a/include/asm-x86_64/cacheflush.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _X8664_CACHEFLUSH_H
2#define _X8664_CACHEFLUSH_H
3
4/* Keep includes the same across arches. */
5#include <linux/mm.h>
6
7/* Caches aren't brain-dead on the intel. */
8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
11#define flush_cache_range(vma, start, end) do { } while (0)
12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
13#define flush_dcache_page(page) do { } while (0)
14#define flush_dcache_mmap_lock(mapping) do { } while (0)
15#define flush_dcache_mmap_unlock(mapping) do { } while (0)
16#define flush_icache_range(start, end) do { } while (0)
17#define flush_icache_page(vma,pg) do { } while (0)
18#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
19#define flush_cache_vmap(start, end) do { } while (0)
20#define flush_cache_vunmap(start, end) do { } while (0)
21
22#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 memcpy(dst, src, len)
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy(dst, src, len)
26
27void global_flush_tlb(void);
28int change_page_attr(struct page *page, int numpages, pgprot_t prot);
29int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
30
31#ifdef CONFIG_DEBUG_RODATA
32void mark_rodata_ro(void);
33#endif
34
35#endif /* _X8664_CACHEFLUSH_H */
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
deleted file mode 100644
index 67f60406e2d8..000000000000
--- a/include/asm-x86_64/calgary.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Derived from include/asm-powerpc/iommu.h
3 *
4 * Copyright IBM Corporation, 2006-2007
5 *
6 * Author: Jon Mason <jdmason@us.ibm.com>
7 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef _ASM_X86_64_CALGARY_H
25#define _ASM_X86_64_CALGARY_H
26
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/dma-mapping.h>
30#include <linux/timer.h>
31#include <asm/types.h>
32
33struct iommu_table {
34 struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
35 unsigned long it_base; /* mapped address of tce table */
36 unsigned long it_hint; /* Hint for next alloc */
37 unsigned long *it_map; /* A simple allocation bitmap for now */
38 void __iomem *bbar; /* Bridge BAR */
39 u64 tar_val; /* Table Address Register */
40 struct timer_list watchdog_timer;
41 spinlock_t it_lock; /* Protects it_map */
42 unsigned int it_size; /* Size of iommu table in entries */
43 unsigned char it_busno; /* Bus number this table belongs to */
44};
45
46struct cal_chipset_ops {
47 void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
48 void (*tce_cache_blast)(struct iommu_table *tbl);
49 void (*dump_error_regs)(struct iommu_table *tbl);
50};
51
52#define TCE_TABLE_SIZE_UNSPECIFIED ~0
53#define TCE_TABLE_SIZE_64K 0
54#define TCE_TABLE_SIZE_128K 1
55#define TCE_TABLE_SIZE_256K 2
56#define TCE_TABLE_SIZE_512K 3
57#define TCE_TABLE_SIZE_1M 4
58#define TCE_TABLE_SIZE_2M 5
59#define TCE_TABLE_SIZE_4M 6
60#define TCE_TABLE_SIZE_8M 7
61
62extern int use_calgary;
63
64#ifdef CONFIG_CALGARY_IOMMU
65extern int calgary_iommu_init(void);
66extern void detect_calgary(void);
67#else
68static inline int calgary_iommu_init(void) { return 1; }
69static inline void detect_calgary(void) { return; }
70#endif
71
72#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/calling.h b/include/asm-x86_64/calling.h
deleted file mode 100644
index 6f4f63af96e1..000000000000
--- a/include/asm-x86_64/calling.h
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * Some macros to handle stack frames in assembly.
3 */
4
5
6#define R15 0
7#define R14 8
8#define R13 16
9#define R12 24
10#define RBP 32
11#define RBX 40
12/* arguments: interrupts/non-tracing syscalls only save up to here */
13#define R11 48
14#define R10 56
15#define R9 64
16#define R8 72
17#define RAX 80
18#define RCX 88
19#define RDX 96
20#define RSI 104
21#define RDI 112
22#define ORIG_RAX 120 /* + error_code */
23/* end of arguments */
24/* cpu exception frame or undefined in case of fast syscall. */
25#define RIP 128
26#define CS 136
27#define EFLAGS 144
28#define RSP 152
29#define SS 160
30#define ARGOFFSET R11
31#define SWFRAME ORIG_RAX
32
33 .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0
34 subq $9*8+\addskip,%rsp
35 CFI_ADJUST_CFA_OFFSET 9*8+\addskip
36 movq %rdi,8*8(%rsp)
37 CFI_REL_OFFSET rdi,8*8
38 movq %rsi,7*8(%rsp)
39 CFI_REL_OFFSET rsi,7*8
40 movq %rdx,6*8(%rsp)
41 CFI_REL_OFFSET rdx,6*8
42 .if \norcx
43 .else
44 movq %rcx,5*8(%rsp)
45 CFI_REL_OFFSET rcx,5*8
46 .endif
47 movq %rax,4*8(%rsp)
48 CFI_REL_OFFSET rax,4*8
49 .if \nor891011
50 .else
51 movq %r8,3*8(%rsp)
52 CFI_REL_OFFSET r8,3*8
53 movq %r9,2*8(%rsp)
54 CFI_REL_OFFSET r9,2*8
55 movq %r10,1*8(%rsp)
56 CFI_REL_OFFSET r10,1*8
57 movq %r11,(%rsp)
58 CFI_REL_OFFSET r11,0*8
59 .endif
60 .endm
61
62#define ARG_SKIP 9*8
63 .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
64 .if \skipr11
65 .else
66 movq (%rsp),%r11
67 CFI_RESTORE r11
68 .endif
69 .if \skipr8910
70 .else
71 movq 1*8(%rsp),%r10
72 CFI_RESTORE r10
73 movq 2*8(%rsp),%r9
74 CFI_RESTORE r9
75 movq 3*8(%rsp),%r8
76 CFI_RESTORE r8
77 .endif
78 .if \skiprax
79 .else
80 movq 4*8(%rsp),%rax
81 CFI_RESTORE rax
82 .endif
83 .if \skiprcx
84 .else
85 movq 5*8(%rsp),%rcx
86 CFI_RESTORE rcx
87 .endif
88 .if \skiprdx
89 .else
90 movq 6*8(%rsp),%rdx
91 CFI_RESTORE rdx
92 .endif
93 movq 7*8(%rsp),%rsi
94 CFI_RESTORE rsi
95 movq 8*8(%rsp),%rdi
96 CFI_RESTORE rdi
97 .if ARG_SKIP+\addskip > 0
98 addq $ARG_SKIP+\addskip,%rsp
99 CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
100 .endif
101 .endm
102
103 .macro LOAD_ARGS offset
104 movq \offset(%rsp),%r11
105 movq \offset+8(%rsp),%r10
106 movq \offset+16(%rsp),%r9
107 movq \offset+24(%rsp),%r8
108 movq \offset+40(%rsp),%rcx
109 movq \offset+48(%rsp),%rdx
110 movq \offset+56(%rsp),%rsi
111 movq \offset+64(%rsp),%rdi
112 movq \offset+72(%rsp),%rax
113 .endm
114
115#define REST_SKIP 6*8
116 .macro SAVE_REST
117 subq $REST_SKIP,%rsp
118 CFI_ADJUST_CFA_OFFSET REST_SKIP
119 movq %rbx,5*8(%rsp)
120 CFI_REL_OFFSET rbx,5*8
121 movq %rbp,4*8(%rsp)
122 CFI_REL_OFFSET rbp,4*8
123 movq %r12,3*8(%rsp)
124 CFI_REL_OFFSET r12,3*8
125 movq %r13,2*8(%rsp)
126 CFI_REL_OFFSET r13,2*8
127 movq %r14,1*8(%rsp)
128 CFI_REL_OFFSET r14,1*8
129 movq %r15,(%rsp)
130 CFI_REL_OFFSET r15,0*8
131 .endm
132
133 .macro RESTORE_REST
134 movq (%rsp),%r15
135 CFI_RESTORE r15
136 movq 1*8(%rsp),%r14
137 CFI_RESTORE r14
138 movq 2*8(%rsp),%r13
139 CFI_RESTORE r13
140 movq 3*8(%rsp),%r12
141 CFI_RESTORE r12
142 movq 4*8(%rsp),%rbp
143 CFI_RESTORE rbp
144 movq 5*8(%rsp),%rbx
145 CFI_RESTORE rbx
146 addq $REST_SKIP,%rsp
147 CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
148 .endm
149
150 .macro SAVE_ALL
151 SAVE_ARGS
152 SAVE_REST
153 .endm
154
155 .macro RESTORE_ALL addskip=0
156 RESTORE_REST
157 RESTORE_ARGS 0,\addskip
158 .endm
159
160 .macro icebp
161 .byte 0xf1
162 .endm
diff --git a/include/asm-x86_64/checksum.h b/include/asm-x86_64/checksum.h
deleted file mode 100644
index 419fe88a0342..000000000000
--- a/include/asm-x86_64/checksum.h
+++ /dev/null
@@ -1,195 +0,0 @@
1#ifndef _X86_64_CHECKSUM_H
2#define _X86_64_CHECKSUM_H
3
4/*
5 * Checksums for x86-64
6 * Copyright 2002 by Andi Kleen, SuSE Labs
7 * with some code from asm-i386/checksum.h
8 */
9
10#include <linux/compiler.h>
11#include <asm/uaccess.h>
12#include <asm/byteorder.h>
13
14/**
15 * csum_fold - Fold and invert a 32bit checksum.
16 * @sum: 32bit unfolded sum
17 *
18 * Fold a 32bit running checksum to 16bit and invert it. This is usually
19 * the last step before putting a checksum into a packet.
20 * Make sure not to mix with 64bit checksums.
21 */
22static inline __sum16 csum_fold(__wsum sum)
23{
24 __asm__(
25 " addl %1,%0\n"
26 " adcl $0xffff,%0"
27 : "=r" (sum)
28 : "r" ((__force u32)sum << 16),
29 "0" ((__force u32)sum & 0xffff0000)
30 );
31 return (__force __sum16)(~(__force u32)sum >> 16);
32}
33
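The asm above is equivalent to this portable fold, shown only for clarity (a sketch, not part of the header):

	static inline __sum16 csum_fold_c(__wsum sum)
	{
		u32 s = (__force u32)sum;

		s = (s & 0xffff) + (s >> 16);	/* fold high half into low half */
		s = (s & 0xffff) + (s >> 16);	/* absorb the possible carry */
		return (__force __sum16)~s;
	}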
34/*
35 * This is a version of ip_compute_csum() optimized for IP headers,
36 * which always checksum on 4 octet boundaries.
37 *
38 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
39 * Arnt Gulbrandsen.
40 */
41
42/**
43 * ip_fast_csum - Compute the IPv4 header checksum efficiently.
44 * @iph: ipv4 header
45 * @ihl: length of header / 4
46 */
47static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
48{
49 unsigned int sum;
50
51 asm( " movl (%1), %0\n"
52 " subl $4, %2\n"
53 " jbe 2f\n"
54 " addl 4(%1), %0\n"
55 " adcl 8(%1), %0\n"
56 " adcl 12(%1), %0\n"
57 "1: adcl 16(%1), %0\n"
58 " lea 4(%1), %1\n"
59 " decl %2\n"
60 " jne 1b\n"
61 " adcl $0, %0\n"
62 " movl %0, %2\n"
63 " shrl $16, %0\n"
64 " addw %w2, %w0\n"
65 " adcl $0, %0\n"
66 " notl %0\n"
67 "2:"
68 /* Since the input registers which are loaded with iph and ihl
69 are modified, we must also specify them as outputs, or gcc
70 will assume they contain their original values. */
71 : "=r" (sum), "=r" (iph), "=r" (ihl)
72 : "1" (iph), "2" (ihl)
73 : "memory");
74 return (__force __sum16)sum;
75}
76
77/**
78 * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
79 * @saddr: source address
80 * @daddr: destination address
81 * @len: length of packet
82 * @proto: ip protocol of packet
83 * @sum: initial sum to be added in (32bit unfolded)
84 *
85 * Returns the pseudo header checksum of the input data. Result is
86 * 32bit unfolded.
87 */
88static inline __wsum
89csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
90 unsigned short proto, __wsum sum)
91{
92 asm(" addl %1, %0\n"
93 " adcl %2, %0\n"
94 " adcl %3, %0\n"
95 " adcl $0, %0\n"
96 : "=r" (sum)
97 : "g" (daddr), "g" (saddr),
98 "g" ((len + proto)<<8), "0" (sum));
99 return sum;
100}
101
102
103/**
104 * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
105 * @saddr: source address
106 * @daddr: destination address
107 * @len: length of packet
108 * @proto: ip protocol of packet
109 * @sum: initial sum to be added in (32bit unfolded)
110 *
111 * Returns the 16bit pseudo header checksum of the input data, already
112 * complemented and ready to be filled in.
113 */
114static inline __sum16
115csum_tcpudp_magic(__be32 saddr, __be32 daddr,
116 unsigned short len, unsigned short proto, __wsum sum)
117{
118 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
119}
120
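A sketch of how the pieces combine when checksumming an outgoing TCP segment (the header pointer, addresses and length are hypothetical):

	__wsum csum = csum_partial(th, tcp_len, 0);	/* header + payload */
	th->check = csum_tcpudp_magic(saddr, daddr, tcp_len,
				      IPPROTO_TCP, csum);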
121/**
122 * csum_partial - Compute an internet checksum.
123 * @buff: buffer to be checksummed
124 * @len: length of buffer.
125 * @sum: initial sum to be added in (32bit unfolded)
126 *
127 * Returns the 32bit unfolded internet checksum of the buffer.
128 * Before filling it in, it needs to be csum_fold()'ed.
129 * buff should be aligned to a 64bit boundary if possible.
130 */
131extern __wsum csum_partial(const void *buff, int len, __wsum sum);
132
133#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
134#define HAVE_CSUM_COPY_USER 1
135
136
137/* Do not call this directly. Use the wrappers below */
138extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
139 int len,
140 __wsum sum,
141 int *src_err_ptr, int *dst_err_ptr);
142
143
144extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
145 int len, __wsum isum, int *errp);
146extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
147 int len, __wsum isum, int *errp);
148extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
149 __wsum sum);
150
151/* Old names. To be removed. */
152#define csum_and_copy_to_user csum_partial_copy_to_user
153#define csum_and_copy_from_user csum_partial_copy_from_user
154
155/**
156 * ip_compute_csum - Compute a 16bit IP checksum.
157 * @buff: buffer address.
158 * @len: length of buffer.
159 *
160 * Returns the 16bit folded/inverted checksum of the passed buffer.
161 * Ready to fill in.
162 */
163extern __sum16 ip_compute_csum(const void *buff, int len);
164
165/**
166 * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
167 * @saddr: source address
168 * @daddr: destination address
169 * @len: length of packet
170 * @proto: protocol of packet
171 * @sum: initial sum (32bit unfolded) to be added in
172 *
173 * Computes an IPv6 pseudo header checksum, which is added into the
174 * checksum of UDP/TCP packets and covers addresses, length and protocol.
175 * Returns the unfolded 32bit checksum.
176 */
177
178struct in6_addr;
179
180#define _HAVE_ARCH_IPV6_CSUM 1
181extern __sum16
182csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
183 __u32 len, unsigned short proto, __wsum sum);
184
185static inline unsigned add32_with_carry(unsigned a, unsigned b)
186{
187 asm("addl %2,%0\n\t"
188 "adcl $0,%0"
189 : "=r" (a)
190 : "0" (a), "r" (b));
191 return a;
192}
193
194#endif
195
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
deleted file mode 100644
index 5e182062e6ec..000000000000
--- a/include/asm-x86_64/cmpxchg.h
+++ /dev/null
@@ -1,134 +0,0 @@
1#ifndef __ASM_CMPXCHG_H
2#define __ASM_CMPXCHG_H
3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5
6#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
7
8#define __xg(x) ((volatile long *)(x))
9
10static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
11{
12 *ptr = val;
13}
14
15#define _set_64bit set_64bit
16
17/*
18 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
19 * Note 2: xchg has side effects, so the volatile attribute is necessary;
20 * strictly speaking *ptr is also an output argument. --ANK
21 */
22static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
23{
24 switch (size) {
25 case 1:
26 __asm__ __volatile__("xchgb %b0,%1"
27 :"=q" (x)
28 :"m" (*__xg(ptr)), "0" (x)
29 :"memory");
30 break;
31 case 2:
32 __asm__ __volatile__("xchgw %w0,%1"
33 :"=r" (x)
34 :"m" (*__xg(ptr)), "0" (x)
35 :"memory");
36 break;
37 case 4:
38 __asm__ __volatile__("xchgl %k0,%1"
39 :"=r" (x)
40 :"m" (*__xg(ptr)), "0" (x)
41 :"memory");
42 break;
43 case 8:
44 __asm__ __volatile__("xchgq %0,%1"
45 :"=r" (x)
46 :"m" (*__xg(ptr)), "0" (x)
47 :"memory");
48 break;
49 }
50 return x;
51}
52
53/*
54 * Atomic compare and exchange. Compare OLD with MEM, if identical,
55 * store NEW in MEM. Return the initial value in MEM. Success is
56 * indicated by comparing RETURN with OLD.
57 */
58
59#define __HAVE_ARCH_CMPXCHG 1
60
61static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
62 unsigned long new, int size)
63{
64 unsigned long prev;
65 switch (size) {
66 case 1:
67 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
68 : "=a"(prev)
69 : "q"(new), "m"(*__xg(ptr)), "0"(old)
70 : "memory");
71 return prev;
72 case 2:
73 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
74 : "=a"(prev)
75 : "r"(new), "m"(*__xg(ptr)), "0"(old)
76 : "memory");
77 return prev;
78 case 4:
79 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
80 : "=a"(prev)
81 : "r"(new), "m"(*__xg(ptr)), "0"(old)
82 : "memory");
83 return prev;
84 case 8:
85 __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
86 : "=a"(prev)
87 : "r"(new), "m"(*__xg(ptr)), "0"(old)
88 : "memory");
89 return prev;
90 }
91 return old;
92}
93
94static inline unsigned long __cmpxchg_local(volatile void *ptr,
95 unsigned long old, unsigned long new, int size)
96{
97 unsigned long prev;
98 switch (size) {
99 case 1:
100 __asm__ __volatile__("cmpxchgb %b1,%2"
101 : "=a"(prev)
102 : "q"(new), "m"(*__xg(ptr)), "0"(old)
103 : "memory");
104 return prev;
105 case 2:
106 __asm__ __volatile__("cmpxchgw %w1,%2"
107 : "=a"(prev)
108 : "r"(new), "m"(*__xg(ptr)), "0"(old)
109 : "memory");
110 return prev;
111 case 4:
112 __asm__ __volatile__("cmpxchgl %k1,%2"
113 : "=a"(prev)
114 : "r"(new), "m"(*__xg(ptr)), "0"(old)
115 : "memory");
116 return prev;
117 case 8:
118 __asm__ __volatile__("cmpxchgq %1,%2"
119 : "=a"(prev)
120 : "r"(new), "m"(*__xg(ptr)), "0"(old)
121 : "memory");
122 return prev;
123 }
124 return old;
125}
126
127#define cmpxchg(ptr,o,n)\
128 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
129 (unsigned long)(n),sizeof(*(ptr))))
130#define cmpxchg_local(ptr,o,n)\
131 ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
132 (unsigned long)(n),sizeof(*(ptr))))
133
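The classic use of cmpxchg() is an optimistic read-modify-write loop; a minimal sketch on a hypothetical counter word:

	static inline void add_example(unsigned long *p, unsigned long v)
	{
		unsigned long old;

		do {
			old = *p;
		} while (cmpxchg(p, old, old + v) != old);	/* retry on interference */
	}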
134#endif
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
deleted file mode 100644
index 53cb96b68a62..000000000000
--- a/include/asm-x86_64/compat.h
+++ /dev/null
@@ -1,212 +0,0 @@
1#ifndef _ASM_X86_64_COMPAT_H
2#define _ASM_X86_64_COMPAT_H
3
4/*
5 * Architecture specific compatibility types
6 */
7#include <linux/types.h>
8#include <linux/sched.h>
9
10#define COMPAT_USER_HZ 100
11
12typedef u32 compat_size_t;
13typedef s32 compat_ssize_t;
14typedef s32 compat_time_t;
15typedef s32 compat_clock_t;
16typedef s32 compat_pid_t;
17typedef u16 __compat_uid_t;
18typedef u16 __compat_gid_t;
19typedef u32 __compat_uid32_t;
20typedef u32 __compat_gid32_t;
21typedef u16 compat_mode_t;
22typedef u32 compat_ino_t;
23typedef u16 compat_dev_t;
24typedef s32 compat_off_t;
25typedef s64 compat_loff_t;
26typedef u16 compat_nlink_t;
27typedef u16 compat_ipc_pid_t;
28typedef s32 compat_daddr_t;
29typedef u32 compat_caddr_t;
30typedef __kernel_fsid_t compat_fsid_t;
31typedef s32 compat_timer_t;
32typedef s32 compat_key_t;
33
34typedef s32 compat_int_t;
35typedef s32 compat_long_t;
36typedef s64 __attribute__((aligned(4))) compat_s64;
37typedef u32 compat_uint_t;
38typedef u32 compat_ulong_t;
39typedef u64 __attribute__((aligned(4))) compat_u64;
40
41struct compat_timespec {
42 compat_time_t tv_sec;
43 s32 tv_nsec;
44};
45
46struct compat_timeval {
47 compat_time_t tv_sec;
48 s32 tv_usec;
49};
50
51struct compat_stat {
52 compat_dev_t st_dev;
53 u16 __pad1;
54 compat_ino_t st_ino;
55 compat_mode_t st_mode;
56 compat_nlink_t st_nlink;
57 __compat_uid_t st_uid;
58 __compat_gid_t st_gid;
59 compat_dev_t st_rdev;
60 u16 __pad2;
61 u32 st_size;
62 u32 st_blksize;
63 u32 st_blocks;
64 u32 st_atime;
65 u32 st_atime_nsec;
66 u32 st_mtime;
67 u32 st_mtime_nsec;
68 u32 st_ctime;
69 u32 st_ctime_nsec;
70 u32 __unused4;
71 u32 __unused5;
72};
73
74struct compat_flock {
75 short l_type;
76 short l_whence;
77 compat_off_t l_start;
78 compat_off_t l_len;
79 compat_pid_t l_pid;
80};
81
82#define F_GETLK64 12 /* using 'struct flock64' */
83#define F_SETLK64 13
84#define F_SETLKW64 14
85
86/*
87 * IA32 uses 4 byte alignment for 64 bit quantities,
88 * so we need to pack this structure.
89 */
90struct compat_flock64 {
91 short l_type;
92 short l_whence;
93 compat_loff_t l_start;
94 compat_loff_t l_len;
95 compat_pid_t l_pid;
96} __attribute__((packed));
97
98struct compat_statfs {
99 int f_type;
100 int f_bsize;
101 int f_blocks;
102 int f_bfree;
103 int f_bavail;
104 int f_files;
105 int f_ffree;
106 compat_fsid_t f_fsid;
107 int f_namelen; /* SunOS ignores this field. */
108 int f_frsize;
109 int f_spare[5];
110};
111
112#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
113#define COMPAT_RLIM_INFINITY 0xffffffff
114
115typedef u32 compat_old_sigset_t; /* at least 32 bits */
116
117#define _COMPAT_NSIG 64
118#define _COMPAT_NSIG_BPW 32
119
120typedef u32 compat_sigset_word;
121
122#define COMPAT_OFF_T_MAX 0x7fffffff
123#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
124
125struct compat_ipc64_perm {
126 compat_key_t key;
127 __compat_uid32_t uid;
128 __compat_gid32_t gid;
129 __compat_uid32_t cuid;
130 __compat_gid32_t cgid;
131 unsigned short mode;
132 unsigned short __pad1;
133 unsigned short seq;
134 unsigned short __pad2;
135 compat_ulong_t unused1;
136 compat_ulong_t unused2;
137};
138
139struct compat_semid64_ds {
140 struct compat_ipc64_perm sem_perm;
141 compat_time_t sem_otime;
142 compat_ulong_t __unused1;
143 compat_time_t sem_ctime;
144 compat_ulong_t __unused2;
145 compat_ulong_t sem_nsems;
146 compat_ulong_t __unused3;
147 compat_ulong_t __unused4;
148};
149
150struct compat_msqid64_ds {
151 struct compat_ipc64_perm msg_perm;
152 compat_time_t msg_stime;
153 compat_ulong_t __unused1;
154 compat_time_t msg_rtime;
155 compat_ulong_t __unused2;
156 compat_time_t msg_ctime;
157 compat_ulong_t __unused3;
158 compat_ulong_t msg_cbytes;
159 compat_ulong_t msg_qnum;
160 compat_ulong_t msg_qbytes;
161 compat_pid_t msg_lspid;
162 compat_pid_t msg_lrpid;
163 compat_ulong_t __unused4;
164 compat_ulong_t __unused5;
165};
166
167struct compat_shmid64_ds {
168 struct compat_ipc64_perm shm_perm;
169 compat_size_t shm_segsz;
170 compat_time_t shm_atime;
171 compat_ulong_t __unused1;
172 compat_time_t shm_dtime;
173 compat_ulong_t __unused2;
174 compat_time_t shm_ctime;
175 compat_ulong_t __unused3;
176 compat_pid_t shm_cpid;
177 compat_pid_t shm_lpid;
178 compat_ulong_t shm_nattch;
179 compat_ulong_t __unused4;
180 compat_ulong_t __unused5;
181};
182
183/*
184 * A pointer passed in from user mode. This should not
185 * be used for syscall parameters, just declare them
186 * as pointers because the syscall entry code will have
187 * appropriately converted them already.
188 */
189typedef u32 compat_uptr_t;
190
191static inline void __user *compat_ptr(compat_uptr_t uptr)
192{
193 return (void __user *)(unsigned long)uptr;
194}
195
196static inline compat_uptr_t ptr_to_compat(void __user *uptr)
197{
198 return (u32)(unsigned long)uptr;
199}
200
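A sketch of the round trip in a hypothetical compat syscall handler (the name and body are illustrative only):

	asmlinkage long compat_sys_example(compat_uptr_t uptr)
	{
		u32 __user *p = compat_ptr(uptr);	/* widen the 32-bit user pointer */
		u32 val;

		if (get_user(val, p))
			return -EFAULT;
		return val;
	}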
201static __inline__ void __user *compat_alloc_user_space(long len)
202{
203 struct pt_regs *regs = task_pt_regs(current);
204 return (void __user *)regs->rsp - len;
205}
206
207static inline int is_compat_task(void)
208{
209 return current_thread_info()->status & TS_COMPAT;
210}
211
212#endif /* _ASM_X86_64_COMPAT_H */
diff --git a/include/asm-x86_64/cpu.h b/include/asm-x86_64/cpu.h
deleted file mode 100644
index 8eea076525a4..000000000000
--- a/include/asm-x86_64/cpu.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/cpu.h>
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
deleted file mode 100644
index 8baefc3beb2e..000000000000
--- a/include/asm-x86_64/cpufeature.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * cpufeature.h
3 *
4 * Defines x86 CPU feature bits
5 */
6
7#ifndef __ASM_X8664_CPUFEATURE_H
8#define __ASM_X8664_CPUFEATURE_H
9
10#include <asm-i386/cpufeature.h>
11
12#undef cpu_has_vme
13#define cpu_has_vme 0
14
15#undef cpu_has_pae
16#define cpu_has_pae ___BUG___
17
18#undef cpu_has_mp
19#define cpu_has_mp 1 /* XXX */
20
21#undef cpu_has_k6_mtrr
22#define cpu_has_k6_mtrr 0
23
24#undef cpu_has_cyrix_arr
25#define cpu_has_cyrix_arr 0
26
27#undef cpu_has_centaur_mcr
28#define cpu_has_centaur_mcr 0
29
30#endif /* __ASM_X8664_CPUFEATURE_H */
diff --git a/include/asm-x86_64/cputime.h b/include/asm-x86_64/cputime.h
deleted file mode 100644
index a07012dc5a3c..000000000000
--- a/include/asm-x86_64/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __X86_64_CPUTIME_H
2#define __X86_64_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __X86_64_CPUTIME_H */
diff --git a/include/asm-x86_64/current.h b/include/asm-x86_64/current.h
deleted file mode 100644
index bc8adecee66d..000000000000
--- a/include/asm-x86_64/current.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef _X86_64_CURRENT_H
2#define _X86_64_CURRENT_H
3
4#if !defined(__ASSEMBLY__)
5struct task_struct;
6
7#include <asm/pda.h>
8
9static inline struct task_struct *get_current(void)
10{
11 struct task_struct *t = read_pda(pcurrent);
12 return t;
13}
14
15#define current get_current()
16
17#else
18
19#ifndef ASM_OFFSET_H
20#include <asm/asm-offsets.h>
21#endif
22
23#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
24
25#endif
26
27#endif /* !(_X86_64_CURRENT_H) */
diff --git a/include/asm-x86_64/debugreg.h b/include/asm-x86_64/debugreg.h
deleted file mode 100644
index bd1aab1d8c4a..000000000000
--- a/include/asm-x86_64/debugreg.h
+++ /dev/null
@@ -1,65 +0,0 @@
1#ifndef _X86_64_DEBUGREG_H
2#define _X86_64_DEBUGREG_H
3
4
5/* Register numbers for the specific debug registers.
6 Registers 0-3 contain the addresses we wish to trap on */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */
20#define DR_TRAP3 (0x8) /* db3 */
21
22#define DR_STEP (0x4000) /* single-step */
23#define DR_SWITCH (0x8000) /* task switch */
24
25/* Now define a bunch of things for manipulating the control register.
26 The top two bytes of the control register consist of 4 fields of 4
27 bits - each field corresponds to one of the four debug registers,
28 and indicates what types of access we trap on, and how large the data
29 field is that we are looking at */
30
31#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
32#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
33
34#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
35#define DR_RW_WRITE (0x1)
36#define DR_RW_READ (0x3)
37
38#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
39#define DR_LEN_2 (0x4)
40#define DR_LEN_4 (0xC)
41#define DR_LEN_8 (0x8)
42
43/* The low byte of the control register determines which registers are
44 enabled. There are 4 fields of two bits. One bit is "local", meaning
45 that the processor will reset the bit after a task switch and the other
46 is global meaning that we have to explicitly reset the bit. With linux,
47 you can use either one, since we explicitly zero the register when we enter
48 kernel mode. */
49
50#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
51#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
52#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
53
54#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
55#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
56
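Putting the fields together: a sketch of the DR7 value for a 4-byte write watchpoint in debug register 0 (purely illustrative):

	unsigned long dr7 = 0;

	/* local enable bit for register 0 */
	dr7 |= 1UL << (DR_LOCAL_ENABLE_SHIFT + 0 * DR_ENABLE_SIZE);
	/* trap on 4-byte writes */
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
		<< (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE);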
57/* The second byte of the control register has a few special things.
58 We can slow the instruction pipeline for instructions coming via the
59 gdt or the ldt if we want to. I am not sure why this is an advantage */
60
61#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
62#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
63#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
64
65#endif
diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h
deleted file mode 100644
index c2669f1f5529..000000000000
--- a/include/asm-x86_64/delay.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _X8664_DELAY_H
2#define _X8664_DELAY_H
3
4/*
5 * Copyright (C) 1993 Linus Torvalds
6 *
7 * Delay routines calling functions in arch/x86_64/lib/delay.c
8 */
9
10/* Undefined functions to get link-time errors for out-of-range constants */
11extern void __bad_udelay(void);
12extern void __bad_ndelay(void);
13
14extern void __udelay(unsigned long usecs);
15extern void __ndelay(unsigned long nsecs);
16extern void __const_udelay(unsigned long usecs);
17extern void __delay(unsigned long loops);
18
19/* 0x10c7 is 2**32 / 1000000 (rounded up) */
20#define udelay(n) (__builtin_constant_p(n) ? \
21 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
22 __udelay(n))
23
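For a constant argument the multiply is folded away at compile time; for example (hypothetical calls):

	udelay(1000);	/* becomes __const_udelay(1000 * 0x10c7) */
	udelay(n);	/* non-constant n: calls __udelay(n) at run time */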
24/* 0x5 is 2**32 / 1000000000 (rounded up) */
25#define ndelay(n) (__builtin_constant_p(n) ? \
26 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
27 __ndelay(n))
28
29
30#endif /* defined(_X8664_DELAY_H) */
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
deleted file mode 100644
index ac991b5ca0fd..000000000000
--- a/include/asm-x86_64/desc.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/* Written 2000 by Andi Kleen */
2#ifndef __ARCH_DESC_H
3#define __ARCH_DESC_H
4
5#include <linux/threads.h>
6#include <asm/ldt.h>
7
8#ifndef __ASSEMBLY__
9
10#include <linux/string.h>
11#include <linux/smp.h>
12#include <asm/desc_defs.h>
13
14#include <asm/segment.h>
15#include <asm/mmu.h>
16
17extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
18
19#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
20#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
21#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
22
23/*
24 * This is the ldt that every process will get unless we need
25 * something other than this.
26 */
27extern struct desc_struct default_ldt[];
28extern struct gate_struct idt_table[];
29extern struct desc_ptr cpu_gdt_descr[];
30
31/* the cpu gdt accessor */
32#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
33
34static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
35{
36 struct gate_struct s;
37 s.offset_low = PTR_LOW(func);
38 s.segment = __KERNEL_CS;
39 s.ist = ist;
40 s.p = 1;
41 s.dpl = dpl;
42 s.zero0 = 0;
43 s.zero1 = 0;
44 s.type = type;
45 s.offset_middle = PTR_MIDDLE(func);
46 s.offset_high = PTR_HIGH(func);
47 /* does not need to be atomic because it is only done once at setup time */
48 memcpy(adr, &s, 16);
49}
50
51static inline void set_intr_gate(int nr, void *func)
52{
53 BUG_ON((unsigned)nr > 0xFF);
54 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
55}
56
57static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
58{
59 BUG_ON((unsigned)nr > 0xFF);
60 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
61}
62
63static inline void set_system_gate(int nr, void *func)
64{
65 BUG_ON((unsigned)nr > 0xFF);
66 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
67}
68
69static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
70{
71 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
72}
73
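A sketch of installing handlers with these helpers (the vector numbers and handler are hypothetical):

	extern void my_handler(void);

	set_intr_gate(0x20, my_handler);	/* DPL 0: kernel only */
	set_system_gate(0x80, my_handler);	/* DPL 3: reachable from user mode */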
74static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
75 unsigned size)
76{
77 struct ldttss_desc d;
78 memset(&d,0,sizeof(d));
79 d.limit0 = size & 0xFFFF;
80 d.base0 = PTR_LOW(tss);
81 d.base1 = PTR_MIDDLE(tss) & 0xFF;
82 d.type = type;
83 d.p = 1;
84 d.limit1 = (size >> 16) & 0xF;
85 d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
86 d.base3 = PTR_HIGH(tss);
87 memcpy(ptr, &d, 16);
88}
89
90static inline void set_tss_desc(unsigned cpu, void *addr)
91{
92 /*
93 * sizeof(unsigned long) coming from an extra "long" at the end
94 * of the iobitmap. See tss_struct definition in processor.h
95 *
96 * The -1 is because the segment base+limit must point at the
97 * last valid byte, not one past it.
98 */
99 set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
100 (unsigned long)addr, DESC_TSS,
101 IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
102}
103
104static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
105{
106 set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
107 DESC_LDT, size * 8 - 1);
108}
109
110#define LDT_entry_a(info) \
111 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
112/* Don't allow setting of the lm bit. It is useless anyway because
113 64bit system calls require __USER_CS. */
114#define LDT_entry_b(info) \
115 (((info)->base_addr & 0xff000000) | \
116 (((info)->base_addr & 0x00ff0000) >> 16) | \
117 ((info)->limit & 0xf0000) | \
118 (((info)->read_exec_only ^ 1) << 9) | \
119 ((info)->contents << 10) | \
120 (((info)->seg_not_present ^ 1) << 15) | \
121 ((info)->seg_32bit << 22) | \
122 ((info)->limit_in_pages << 23) | \
123 ((info)->useable << 20) | \
124 /* ((info)->lm << 21) | */ \
125 0x7000)
126
127#define LDT_empty(info) (\
128 (info)->base_addr == 0 && \
129 (info)->limit == 0 && \
130 (info)->contents == 0 && \
131 (info)->read_exec_only == 1 && \
132 (info)->seg_32bit == 0 && \
133 (info)->limit_in_pages == 0 && \
134 (info)->seg_not_present == 1 && \
135 (info)->useable == 0 && \
136 (info)->lm == 0)
137
138static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
139{
140 unsigned int i;
141 u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
142
143 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
144 gdt[i] = t->tls_array[i];
145}
146
147/*
148 * load one particular LDT into the current CPU
149 */
150static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
151{
152 int count = pc->size;
153
154 if (likely(!count)) {
155 clear_LDT();
156 return;
157 }
158
159 set_ldt_desc(cpu, pc->ldt, count);
160 load_LDT_desc();
161}
162
163static inline void load_LDT(mm_context_t *pc)
164{
165 int cpu = get_cpu();
166 load_LDT_nolock(pc, cpu);
167 put_cpu();
168}
169
170extern struct desc_ptr idt_descr;
171
172#endif /* !__ASSEMBLY__ */
173
174#endif
diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h
deleted file mode 100644
index 089004070099..000000000000
--- a/include/asm-x86_64/desc_defs.h
+++ /dev/null
@@ -1,69 +0,0 @@
1/* Written 2000 by Andi Kleen */
2#ifndef __ARCH_DESC_DEFS_H
3#define __ARCH_DESC_DEFS_H
4
5/*
6 * Segment descriptor structure definitions, usable from both x86_64 and i386
7 * archs.
8 */
9
10#ifndef __ASSEMBLY__
11
12#include <linux/types.h>
13
14// 8 byte segment descriptor
15struct desc_struct {
16 u16 limit0;
17 u16 base0;
18 unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
19 unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
20} __attribute__((packed));
21
22struct n_desc_struct {
23 unsigned int a,b;
24};
25
26enum {
27 GATE_INTERRUPT = 0xE,
28 GATE_TRAP = 0xF,
29 GATE_CALL = 0xC,
30};
31
32// 16byte gate
33struct gate_struct {
34 u16 offset_low;
35 u16 segment;
36 unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
37 u16 offset_middle;
38 u32 offset_high;
39 u32 zero1;
40} __attribute__((packed));
41
42#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
43#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
44#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
45
46enum {
47 DESC_TSS = 0x9,
48 DESC_LDT = 0x2,
49};
50
51// LDT or TSS descriptor in the GDT. 16 bytes.
52struct ldttss_desc {
53 u16 limit0;
54 u16 base0;
55 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
56 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
57 u32 base3;
58 u32 zero1;
59} __attribute__((packed));
60
61struct desc_ptr {
62 unsigned short size;
63 unsigned long address;
64} __attribute__((packed)) ;
65
66
67#endif /* !__ASSEMBLY__ */
68
69#endif
diff --git a/include/asm-x86_64/device.h b/include/asm-x86_64/device.h
deleted file mode 100644
index 3afa03f33a36..000000000000
--- a/include/asm-x86_64/device.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#ifndef _ASM_X86_64_DEVICE_H
7#define _ASM_X86_64_DEVICE_H
8
9struct dev_archdata {
10#ifdef CONFIG_ACPI
11 void *acpi_handle;
12#endif
13};
14
15#endif /* _ASM_X86_64_DEVICE_H */
diff --git a/include/asm-x86_64/div64.h b/include/asm-x86_64/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/include/asm-x86_64/div64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/div64.h>
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
deleted file mode 100644
index 6897e2a436e5..000000000000
--- a/include/asm-x86_64/dma-mapping.h
+++ /dev/null
@@ -1,203 +0,0 @@
1#ifndef _X8664_DMA_MAPPING_H
2#define _X8664_DMA_MAPPING_H 1
3
4/*
5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
6 * documentation.
7 */
8
9
10#include <asm/scatterlist.h>
11#include <asm/io.h>
12#include <asm/swiotlb.h>
13
14struct dma_mapping_ops {
15 int (*mapping_error)(dma_addr_t dma_addr);
16 void* (*alloc_coherent)(struct device *dev, size_t size,
17 dma_addr_t *dma_handle, gfp_t gfp);
18 void (*free_coherent)(struct device *dev, size_t size,
19 void *vaddr, dma_addr_t dma_handle);
20 dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
21 size_t size, int direction);
22 /* like map_single, but doesn't check the device mask */
23 dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
24 size_t size, int direction);
25 void (*unmap_single)(struct device *dev, dma_addr_t addr,
26 size_t size, int direction);
27 void (*sync_single_for_cpu)(struct device *hwdev,
28 dma_addr_t dma_handle, size_t size,
29 int direction);
30 void (*sync_single_for_device)(struct device *hwdev,
31 dma_addr_t dma_handle, size_t size,
32 int direction);
33 void (*sync_single_range_for_cpu)(struct device *hwdev,
34 dma_addr_t dma_handle, unsigned long offset,
35 size_t size, int direction);
36 void (*sync_single_range_for_device)(struct device *hwdev,
37 dma_addr_t dma_handle, unsigned long offset,
38 size_t size, int direction);
39 void (*sync_sg_for_cpu)(struct device *hwdev,
40 struct scatterlist *sg, int nelems,
41 int direction);
42 void (*sync_sg_for_device)(struct device *hwdev,
43 struct scatterlist *sg, int nelems,
44 int direction);
45 int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
46 int nents, int direction);
47 void (*unmap_sg)(struct device *hwdev,
48 struct scatterlist *sg, int nents,
49 int direction);
50 int (*dma_supported)(struct device *hwdev, u64 mask);
51 int is_phys;
52};
53
54extern dma_addr_t bad_dma_address;
55extern const struct dma_mapping_ops* dma_ops;
56extern int iommu_merge;
57
58static inline int dma_mapping_error(dma_addr_t dma_addr)
59{
60 if (dma_ops->mapping_error)
61 return dma_ops->mapping_error(dma_addr);
62
63 return (dma_addr == bad_dma_address);
64}
65
66#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
67#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
68
72extern void *dma_alloc_coherent(struct device *dev, size_t size,
73 dma_addr_t *dma_handle, gfp_t gfp);
74extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
75 dma_addr_t dma_handle);
76
77static inline dma_addr_t
78dma_map_single(struct device *hwdev, void *ptr, size_t size,
79 int direction)
80{
81 BUG_ON(!valid_dma_direction(direction));
82 return dma_ops->map_single(hwdev, ptr, size, direction);
83}
84
85static inline void
86dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
87 int direction)
88{
89 BUG_ON(!valid_dma_direction(direction));
90 dma_ops->unmap_single(dev, addr, size, direction);
91}
92
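A sketch of the canonical streaming-DMA pattern these hooks implement (dev, buf and len are hypothetical):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -EIO;
	/* ... hand 'handle' to the device, wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);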
93#define dma_map_page(dev,page,offset,size,dir) \
94 dma_map_single((dev), page_address(page)+(offset), (size), (dir))
95
96#define dma_unmap_page dma_unmap_single
97
98static inline void
99dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
100 size_t size, int direction)
101{
102 BUG_ON(!valid_dma_direction(direction));
103 if (dma_ops->sync_single_for_cpu)
104 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
105 direction);
106 flush_write_buffers();
107}
108
109static inline void
110dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
111 size_t size, int direction)
112{
113 BUG_ON(!valid_dma_direction(direction));
114 if (dma_ops->sync_single_for_device)
115 dma_ops->sync_single_for_device(hwdev, dma_handle, size,
116 direction);
117 flush_write_buffers();
118}
119
120static inline void
121dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
122 unsigned long offset, size_t size, int direction)
123{
124 BUG_ON(!valid_dma_direction(direction));
125 if (dma_ops->sync_single_range_for_cpu) {
126 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
127 }
128
129 flush_write_buffers();
130}
131
132static inline void
133dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
134 unsigned long offset, size_t size, int direction)
135{
136 BUG_ON(!valid_dma_direction(direction));
137 if (dma_ops->sync_single_range_for_device)
138 dma_ops->sync_single_range_for_device(hwdev, dma_handle,
139 offset, size, direction);
140
141 flush_write_buffers();
142}
143
144static inline void
145dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
146 int nelems, int direction)
147{
148 BUG_ON(!valid_dma_direction(direction));
149 if (dma_ops->sync_sg_for_cpu)
150 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
151 flush_write_buffers();
152}
153
154static inline void
155dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
156 int nelems, int direction)
157{
158 BUG_ON(!valid_dma_direction(direction));
159 if (dma_ops->sync_sg_for_device) {
160 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
161 }
162
163 flush_write_buffers();
164}
165
166static inline int
167dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
168{
169 BUG_ON(!valid_dma_direction(direction));
170 return dma_ops->map_sg(hwdev, sg, nents, direction);
171}
172
173static inline void
174dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
175 int direction)
176{
177 BUG_ON(!valid_dma_direction(direction));
178 dma_ops->unmap_sg(hwdev, sg, nents, direction);
179}
180
181extern int dma_supported(struct device *hwdev, u64 mask);
182
183/* same for gart, swiotlb, and nommu */
184static inline int dma_get_cache_alignment(void)
185{
186 return boot_cpu_data.x86_clflush_size;
187}
188
189#define dma_is_consistent(d, h) 1
190
191extern int dma_set_mask(struct device *dev, u64 mask);
192
193static inline void
194dma_cache_sync(struct device *dev, void *vaddr, size_t size,
195 enum dma_data_direction dir)
196{
197 flush_write_buffers();
198}
199
200extern struct device fallback_dev;
201extern int panic_on_overflow;
202
203#endif /* _X8664_DMA_MAPPING_H */
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
deleted file mode 100644
index a37c16f06289..000000000000
--- a/include/asm-x86_64/dma.h
+++ /dev/null
@@ -1,304 +0,0 @@
1/*
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992.
6 */
7
8#ifndef _ASM_DMA_H
9#define _ASM_DMA_H
10
11#include <linux/spinlock.h> /* And spinlocks */
12#include <asm/io.h> /* need byte IO */
13#include <linux/delay.h>
14
15
16#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
17#define dma_outb outb_p
18#else
19#define dma_outb outb
20#endif
21
22#define dma_inb inb
23
24/*
25 * NOTES about DMA transfers:
26 *
27 * controller 1: channels 0-3, byte operations, ports 00-1F
28 * controller 2: channels 4-7, word operations, ports C0-DF
29 *
30 * - ALL registers are 8 bits only, regardless of transfer size
31 * - channel 4 is not used - cascades 1 into 2.
32 * - channels 0-3 are byte - addresses/counts are for physical bytes
33 * - channels 5-7 are word - addresses/counts are for physical words
34 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
35 * - transfer count loaded to registers is 1 less than actual count
36 * - controller 2 offsets are all even (2x offsets for controller 1)
37 * - page registers for 5-7 don't use data bit 0, represent 128K pages
38 * - page registers for 0-3 use bit 0, represent 64K pages
39 *
40 * DMA transfers are limited to the lower 16MB of _physical_ memory.
41 * Note that addresses loaded into registers must be _physical_ addresses,
42 * not logical addresses (which may differ if paging is active).
43 *
44 * Address mapping for channels 0-3:
45 *
46 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
47 * | ... | | ... | | ... |
48 * | ... | | ... | | ... |
49 * | ... | | ... | | ... |
50 * P7 ... P0 A7 ... A0 A7 ... A0
51 * | Page | Addr MSB | Addr LSB | (DMA registers)
52 *
53 * Address mapping for channels 5-7:
54 *
55 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
56 * | ... | \ \ ... \ \ \ ... \ \
57 * | ... | \ \ ... \ \ \ ... \ (not used)
58 * | ... | \ \ ... \ \ \ ... \
59 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
60 * | Page | Addr MSB | Addr LSB | (DMA registers)
61 *
62 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
63 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
64 * the hardware level, so odd-byte transfers aren't possible).
65 *
66 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
67 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
68 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
69 *
70 */
71
72#define MAX_DMA_CHANNELS 8
73
74
75/* 16MB ISA DMA zone */
76#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
77
78/* 4GB broken PCI/AGP hardware bus master zone */
79#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
80
81/* Compat define for old dma zone */
82#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
83
84/* 8237 DMA controllers */
85#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
86#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
87
88/* DMA controller registers */
89#define DMA1_CMD_REG 0x08 /* command register (w) */
90#define DMA1_STAT_REG 0x08 /* status register (r) */
91#define DMA1_REQ_REG 0x09 /* request register (w) */
92#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
93#define DMA1_MODE_REG 0x0B /* mode register (w) */
94#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
95#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
96#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
97#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
98#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
99
100#define DMA2_CMD_REG 0xD0 /* command register (w) */
101#define DMA2_STAT_REG 0xD0 /* status register (r) */
102#define DMA2_REQ_REG 0xD2 /* request register (w) */
103#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
104#define DMA2_MODE_REG 0xD6 /* mode register (w) */
105#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
106#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
107#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
108#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
109#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
110
111#define DMA_ADDR_0 0x00 /* DMA address registers */
112#define DMA_ADDR_1 0x02
113#define DMA_ADDR_2 0x04
114#define DMA_ADDR_3 0x06
115#define DMA_ADDR_4 0xC0
116#define DMA_ADDR_5 0xC4
117#define DMA_ADDR_6 0xC8
118#define DMA_ADDR_7 0xCC
119
120#define DMA_CNT_0 0x01 /* DMA count registers */
121#define DMA_CNT_1 0x03
122#define DMA_CNT_2 0x05
123#define DMA_CNT_3 0x07
124#define DMA_CNT_4 0xC2
125#define DMA_CNT_5 0xC6
126#define DMA_CNT_6 0xCA
127#define DMA_CNT_7 0xCE
128
129#define DMA_PAGE_0 0x87 /* DMA page registers */
130#define DMA_PAGE_1 0x83
131#define DMA_PAGE_2 0x81
132#define DMA_PAGE_3 0x82
133#define DMA_PAGE_5 0x8B
134#define DMA_PAGE_6 0x89
135#define DMA_PAGE_7 0x8A
136
137#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
138#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
139#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
140
141#define DMA_AUTOINIT 0x10
142
143
144extern spinlock_t dma_spin_lock;
145
146static __inline__ unsigned long claim_dma_lock(void)
147{
148 unsigned long flags;
149 spin_lock_irqsave(&dma_spin_lock, flags);
150 return flags;
151}
152
153static __inline__ void release_dma_lock(unsigned long flags)
154{
155 spin_unlock_irqrestore(&dma_spin_lock, flags);
156}
157
158/* enable/disable a specific DMA channel */
159static __inline__ void enable_dma(unsigned int dmanr)
160{
161 if (dmanr<=3)
162 dma_outb(dmanr, DMA1_MASK_REG);
163 else
164 dma_outb(dmanr & 3, DMA2_MASK_REG);
165}
166
167static __inline__ void disable_dma(unsigned int dmanr)
168{
169 if (dmanr<=3)
170 dma_outb(dmanr | 4, DMA1_MASK_REG);
171 else
172 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
173}
174
175/* Clear the 'DMA Pointer Flip Flop'.
176 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
177 * Use this once to initialize the FF to a known state.
178 * After that, keep track of it. :-)
179 * --- In order to do that, the DMA routines below should ---
180 * --- only be used while holding the DMA lock ! ---
181 */
182static __inline__ void clear_dma_ff(unsigned int dmanr)
183{
184 if (dmanr<=3)
185 dma_outb(0, DMA1_CLEAR_FF_REG);
186 else
187 dma_outb(0, DMA2_CLEAR_FF_REG);
188}
189
190/* set mode (above) for a specific DMA channel */
191static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
192{
193 if (dmanr<=3)
194 dma_outb(mode | dmanr, DMA1_MODE_REG);
195 else
196 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
197}
198
199/* Set only the page register bits of the transfer address.
200 * This is used for successive transfers when we know the contents of
201 * the lower 16 bits of the DMA current address register, but a 64k boundary
202 * may have been crossed.
203 */
204static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
205{
206 switch(dmanr) {
207 case 0:
208 dma_outb(pagenr, DMA_PAGE_0);
209 break;
210 case 1:
211 dma_outb(pagenr, DMA_PAGE_1);
212 break;
213 case 2:
214 dma_outb(pagenr, DMA_PAGE_2);
215 break;
216 case 3:
217 dma_outb(pagenr, DMA_PAGE_3);
218 break;
219 case 5:
220 dma_outb(pagenr & 0xfe, DMA_PAGE_5);
221 break;
222 case 6:
223 dma_outb(pagenr & 0xfe, DMA_PAGE_6);
224 break;
225 case 7:
226 dma_outb(pagenr & 0xfe, DMA_PAGE_7);
227 break;
228 }
229}
230
231
232/* Set transfer address & page bits for specific DMA channel.
233 * Assumes dma flipflop is clear.
234 */
235static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
236{
237 set_dma_page(dmanr, a>>16);
238 if (dmanr <= 3) {
239 dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
240 dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
241 } else {
242 dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
243 dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
244 }
245}
246
247
248/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
249 * a specific DMA channel.
250 * You must ensure the parameters are valid.
251 * NOTE: from a manual: "the number of transfers is one more
252 * than the initial word count"! This is taken into account.
253 * Assumes dma flip-flop is clear.
254 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
255 */
256static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
257{
258 count--;
259 if (dmanr <= 3) {
260 dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
261 dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
262 } else {
263 dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
264 dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
265 }
266}
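/*
 * Illustrative sketch, not from the original header: the canonical
 * programming sequence built from the helpers above. example_program_isa_dma()
 * and its "channel"/"buf"/"len" arguments are hypothetical; per the
 * flip-flop comment earlier, the whole sequence must run under the DMA
 * lock. isa_virt_to_bus() comes from <asm/io.h>.
 */
static __inline__ void example_program_isa_dma(unsigned int channel,
					       void *buf, unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(channel);			/* mask the channel first */
	clear_dma_ff(channel);			/* flip-flop to a known state */
	set_dma_mode(channel, DMA_MODE_READ);	/* device -> memory, single mode */
	set_dma_addr(channel, isa_virt_to_bus(buf));
	set_dma_count(channel, len);		/* bytes; even for channels 5-7 */
	enable_dma(channel);			/* unmask: transfers may start */

	release_dma_lock(flags);
}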
267
268
269/* Get DMA residue count. After a DMA transfer, this
270 * should return zero. Reading this while a DMA transfer is
271 * still in progress will return unpredictable results.
272 * If called before the channel has been used, it may return 1.
273 * Otherwise, it returns the number of _bytes_ left to transfer.
274 *
275 * Assumes DMA flip-flop is clear.
276 */
277static __inline__ int get_dma_residue(unsigned int dmanr)
278{
279 unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
280 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
281
282 /* using short to get 16-bit wrap around */
283 unsigned short count;
284
285 count = 1 + dma_inb(io_port);
286 count += dma_inb(io_port) << 8;
287
288 return (dmanr<=3)? count : (count<<1);
289}
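/*
 * Illustrative sketch (hypothetical caller): a completion check via the
 * residue counter above. The value is only meaningful once the controller
 * has stopped transferring, and the read must also be done under the DMA
 * lock because it goes through the shared byte-pointer flip-flop.
 */
static __inline__ int example_isa_dma_done(unsigned int channel)
{
	unsigned long flags = claim_dma_lock();
	int residue = get_dma_residue(channel);
	release_dma_lock(flags);

	return residue == 0;
}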
290
291
292/* These are in kernel/dma.c: */
293extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
294extern void free_dma(unsigned int dmanr); /* release it again */
295
296/* From PCI */
297
298#ifdef CONFIG_PCI
299extern int isa_dma_bridge_buggy;
300#else
301#define isa_dma_bridge_buggy (0)
302#endif
303
304#endif /* _ASM_DMA_H */
diff --git a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
deleted file mode 100644
index d02e32e3c3f0..000000000000
--- a/include/asm-x86_64/dmi.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_DMI_H
2#define _ASM_DMI_H 1
3
4#include <asm/io.h>
5
6#define DMI_MAX_DATA 2048
7
8extern int dmi_alloc_index;
9extern char dmi_alloc_data[DMI_MAX_DATA];
10
11/* This is so early that there is no good way to allocate dynamic memory.
12 Allocate data in a BSS array. */
13static inline void *dmi_alloc(unsigned len)
14{
15 int idx = dmi_alloc_index;
16 if ((dmi_alloc_index += len) > DMI_MAX_DATA)
17 return NULL;
18 return dmi_alloc_data + idx;
19}
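/*
 * Illustrative sketch (hypothetical helper): copying a DMI entry into
 * the BSS pool above. A NULL return means the DMI_MAX_DATA pool is
 * exhausted; memcpy() is assumed from <linux/string.h>.
 */
static inline void *example_dmi_copy(const void *src, unsigned len)
{
	void *dst = dmi_alloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}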
20
21#define dmi_ioremap early_ioremap
22#define dmi_iounmap early_iounmap
23
24#endif
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
deleted file mode 100644
index eedc08526b0b..000000000000
--- a/include/asm-x86_64/dwarf2.h
+++ /dev/null
@@ -1,57 +0,0 @@
1#ifndef _DWARF2_H
2#define _DWARF2_H 1
3
4
5#ifndef __ASSEMBLY__
6#warning "asm/dwarf2.h should only be included in pure assembly files"
7#endif
8
9/*
10 Macros for dwarf2 CFI unwind table entries.
11 See "as.info" for details on these pseudo ops. Unfortunately
12 they are only supported in very new binutils, so define them
13 away for older versions.
14 */
15
16#ifdef CONFIG_AS_CFI
17
18#define CFI_STARTPROC .cfi_startproc
19#define CFI_ENDPROC .cfi_endproc
20#define CFI_DEF_CFA .cfi_def_cfa
21#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
22#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
23#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
24#define CFI_OFFSET .cfi_offset
25#define CFI_REL_OFFSET .cfi_rel_offset
26#define CFI_REGISTER .cfi_register
27#define CFI_RESTORE .cfi_restore
28#define CFI_REMEMBER_STATE .cfi_remember_state
29#define CFI_RESTORE_STATE .cfi_restore_state
30#define CFI_UNDEFINED .cfi_undefined
31#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
32#define CFI_SIGNAL_FRAME .cfi_signal_frame
33#else
34#define CFI_SIGNAL_FRAME
35#endif
36
37#else
38
39/* use assembler line comment character # to ignore the arguments. */
40#define CFI_STARTPROC #
41#define CFI_ENDPROC #
42#define CFI_DEF_CFA #
43#define CFI_DEF_CFA_REGISTER #
44#define CFI_DEF_CFA_OFFSET #
45#define CFI_ADJUST_CFA_OFFSET #
46#define CFI_OFFSET #
47#define CFI_REL_OFFSET #
48#define CFI_REGISTER #
49#define CFI_RESTORE #
50#define CFI_REMEMBER_STATE #
51#define CFI_RESTORE_STATE #
52#define CFI_UNDEFINED #
53#define CFI_SIGNAL_FRAME #
54
55#endif
56
57#endif
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
deleted file mode 100644
index 3486e701bd86..000000000000
--- a/include/asm-x86_64/e820.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * structures and definitions for the int 15, ax=e820 memory map
3 * scheme.
4 *
5 * In a nutshell, setup.S populates a scratch table in the
6 * empty_zero_block that contains a list of usable address/size
7 * pairs. In setup.c, this information is transferred into the e820map,
8 * and in init.c/numa.c, that new information is used to mark pages
9 * reserved or not.
10 */
11#ifndef __E820_HEADER
12#define __E820_HEADER
13
14#define E820MAP 0x2d0 /* our map */
15#define E820MAX 128 /* number of entries in E820MAP */
16#define E820NR 0x1e8 /* # entries in E820MAP */
17
18#define E820_RAM 1
19#define E820_RESERVED 2
20#define E820_ACPI 3
21#define E820_NVS 4
22
23#ifndef __ASSEMBLY__
24struct e820entry {
25 u64 addr; /* start of memory segment */
26 u64 size; /* size of memory segment */
27 u32 type; /* type of memory segment */
28} __attribute__((packed));
29
30struct e820map {
31 u32 nr_map;
32 struct e820entry map[E820MAX];
33};
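/*
 * Illustrative sketch (hypothetical helper): walking the parsed map.
 * "e820" is the global instance declared at the bottom of this header.
 */
static inline unsigned long example_e820_ram_bytes(void)
{
	unsigned long bytes = 0;
	u32 i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820.map[i].type == E820_RAM)
			bytes += e820.map[i].size;
	return bytes;
}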
34
35extern unsigned long find_e820_area(unsigned long start, unsigned long end,
36 unsigned size);
37extern void add_memory_region(unsigned long start, unsigned long size,
38 int type);
39extern void setup_memory_region(void);
40extern void contig_e820_setup(void);
41extern unsigned long e820_end_of_ram(void);
42extern void e820_reserve_resources(void);
43extern void e820_mark_nosave_regions(void);
44extern void e820_print_map(char *who);
45extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
46extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
47extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
48
49extern void e820_setup_gap(void);
50extern void e820_register_active_regions(int nid,
51 unsigned long start_pfn, unsigned long end_pfn);
52
53extern void finish_e820_parsing(void);
54
55extern struct e820map e820;
56
57extern unsigned ebda_addr, ebda_size;
58extern unsigned long nodemap_addr, nodemap_size;
59#endif/*!__ASSEMBLY__*/
60
61#endif/*__E820_HEADER*/
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h
deleted file mode 100644
index cad1cd42b4ee..000000000000
--- a/include/asm-x86_64/edac.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_EDAC_H
2#define ASM_EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static __inline__ void atomic_scrub(void *va, u32 size)
7{
8 unsigned int *virt_addr = va;
9 u32 i;
10
11 for (i = 0; i < size / 4; i++, virt_addr++)
12 /* Very carefully read and write to memory atomically
13 * so we are interrupt, DMA and SMP safe.
14 */
15 __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
16}
17
18#endif
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
deleted file mode 100644
index b4fbe47f6ccd..000000000000
--- a/include/asm-x86_64/elf.h
+++ /dev/null
@@ -1,180 +0,0 @@
1#ifndef __ASM_X86_64_ELF_H
2#define __ASM_X86_64_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10
11/* x86-64 relocation types */
12#define R_X86_64_NONE 0 /* No reloc */
13#define R_X86_64_64 1 /* Direct 64 bit */
14#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
15#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
16#define R_X86_64_PLT32 4 /* 32 bit PLT address */
17#define R_X86_64_COPY 5 /* Copy symbol at runtime */
18#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
19#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
20#define R_X86_64_RELATIVE 8 /* Adjust by program base */
21#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
22 offset to GOT */
23#define R_X86_64_32 10 /* Direct 32 bit zero extended */
24#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
25#define R_X86_64_16 12 /* Direct 16 bit zero extended */
26#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
27#define R_X86_64_8 14 /* Direct 8 bit sign extended */
28#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
29
30#define R_X86_64_NUM 16
31
32typedef unsigned long elf_greg_t;
33
34#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
35typedef elf_greg_t elf_gregset_t[ELF_NGREG];
36
37typedef struct user_i387_struct elf_fpregset_t;
38
39/*
40 * These are used to set parameters in the core dumps.
41 */
42#define ELF_CLASS ELFCLASS64
43#define ELF_DATA ELFDATA2LSB
44#define ELF_ARCH EM_X86_64
45
46#ifdef __KERNEL__
47#include <asm/processor.h>
48
49/*
50 * This is used to ensure we don't load something for the wrong architecture.
51 */
52#define elf_check_arch(x) \
53 ((x)->e_machine == EM_X86_64)
54
55
56/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
57 contains a pointer to a function which might be registered using `atexit'.
58 This provides a means for the dynamic linker to call DT_FINI functions for
59 shared libraries that have been loaded before the code runs.
60
61 A value of 0 tells us we have no such handler.
62
63 We might as well make sure everything else is cleared too (except for %esp),
64 just to make things more deterministic.
65 */
66#define ELF_PLAT_INIT(_r, load_addr) do { \
67 struct task_struct *cur = current; \
68 (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
69 (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
70 (_r)->rax = 0; \
71 (_r)->r8 = 0; \
72 (_r)->r9 = 0; \
73 (_r)->r10 = 0; \
74 (_r)->r11 = 0; \
75 (_r)->r12 = 0; \
76 (_r)->r13 = 0; \
77 (_r)->r14 = 0; \
78 (_r)->r15 = 0; \
79 cur->thread.fs = 0; cur->thread.gs = 0; \
80 cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
81 cur->thread.ds = 0; cur->thread.es = 0; \
82 clear_thread_flag(TIF_IA32); \
83} while (0)
84
85#define USE_ELF_CORE_DUMP
86#define ELF_EXEC_PAGESIZE 4096
87
88/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
89 use of this is to invoke "./ld.so someprog" to test out a new version of
90 the loader. We need to make sure that it is out of the way of the program
91 that it will "exec", and that there is sufficient room for the brk. */
92
93#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
94
95/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
96 now struct user_regs; they are different). Assumes current is the process
97 getting dumped. */
98
99#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
100 unsigned v; \
101 (pr_reg)[0] = (regs)->r15; \
102 (pr_reg)[1] = (regs)->r14; \
103 (pr_reg)[2] = (regs)->r13; \
104 (pr_reg)[3] = (regs)->r12; \
105 (pr_reg)[4] = (regs)->rbp; \
106 (pr_reg)[5] = (regs)->rbx; \
107 (pr_reg)[6] = (regs)->r11; \
108 (pr_reg)[7] = (regs)->r10; \
109 (pr_reg)[8] = (regs)->r9; \
110 (pr_reg)[9] = (regs)->r8; \
111 (pr_reg)[10] = (regs)->rax; \
112 (pr_reg)[11] = (regs)->rcx; \
113 (pr_reg)[12] = (regs)->rdx; \
114 (pr_reg)[13] = (regs)->rsi; \
115 (pr_reg)[14] = (regs)->rdi; \
116 (pr_reg)[15] = (regs)->orig_rax; \
117 (pr_reg)[16] = (regs)->rip; \
118 (pr_reg)[17] = (regs)->cs; \
119 (pr_reg)[18] = (regs)->eflags; \
120 (pr_reg)[19] = (regs)->rsp; \
121 (pr_reg)[20] = (regs)->ss; \
122 (pr_reg)[21] = current->thread.fs; \
123 (pr_reg)[22] = current->thread.gs; \
124 asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
125 asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
126 asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
127 asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
128} while (0)
129
130/* This yields a mask that user programs can use to figure out what
131 instruction set this CPU supports. This could be done in user space,
132 but it's not easy, and we've already done it here. */
133
134#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
135
136/* This yields a string that ld.so will use to load implementation
137 specific libraries for optimization. This is more specific in
138 intent than poking at uname or /proc/cpuinfo.
139
140 For the moment, we have only optimizations for the Intel generations,
141 but that could change... */
142
143/* I'm not sure if we can use '-' here */
144#define ELF_PLATFORM ("x86_64")
145
146extern void set_personality_64bit(void);
147#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
148/*
149 * An executable for which elf_read_implies_exec() returns TRUE will
150 * have the READ_IMPLIES_EXEC personality flag set automatically.
151 */
152#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
153
154struct task_struct;
155
156extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
157extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
158
159#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
160#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
161
162/* 1GB for 64bit, 8MB for 32bit */
163#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
164
165
166#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
167struct linux_binprm;
168extern int arch_setup_additional_pages(struct linux_binprm *bprm,
169 int executable_stack);
170
171extern int vdso_enabled;
172
173#define ARCH_DLINFO \
174do if (vdso_enabled) { \
175 NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
176} while (0)
177
178#endif
179
180#endif
diff --git a/include/asm-x86_64/emergency-restart.h b/include/asm-x86_64/emergency-restart.h
deleted file mode 100644
index 680c39563345..000000000000
--- a/include/asm-x86_64/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4extern void machine_emergency_restart(void);
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-x86_64/errno.h b/include/asm-x86_64/errno.h
deleted file mode 100644
index 311182129e32..000000000000
--- a/include/asm-x86_64/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _X8664_ERRNO_H
2#define _X8664_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif
diff --git a/include/asm-x86_64/fb.h b/include/asm-x86_64/fb.h
deleted file mode 100644
index 60548e651d12..000000000000
--- a/include/asm-x86_64/fb.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4#include <linux/fs.h>
5#include <asm/page.h>
6
7static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
8 unsigned long off)
9{
10 if (boot_cpu_data.x86 > 3)
11 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/fcntl.h b/include/asm-x86_64/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/include/asm-x86_64/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fcntl.h>
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
deleted file mode 100644
index cdfbe4a6ae6f..000000000000
--- a/include/asm-x86_64/fixmap.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 */
10
11#ifndef _ASM_FIXMAP_H
12#define _ASM_FIXMAP_H
13
14#include <linux/kernel.h>
15#include <asm/apicdef.h>
16#include <asm/page.h>
17#include <asm/vsyscall.h>
18
19/*
20 * Here we define all the compile-time 'special' virtual
21 * addresses. The point is to have a constant address at
22 * compile time, but to set the physical address only
23 * in the boot process.
24 *
25 * These 'compile-time allocated' memory buffers are
26 * fixed-size 4k pages (or larger if used with an increment
27 * higher than 1). Use set_fixmap(idx,phys) to associate
28 * physical memory with fixmap indices.
29 *
30 * TLB entries of such buffers will not be flushed across
31 * task switches.
32 */
33
34enum fixed_addresses {
35 VSYSCALL_LAST_PAGE,
36 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
37 VSYSCALL_HPET,
38 FIX_DBGP_BASE,
39 FIX_EARLYCON_MEM_BASE,
40 FIX_HPET_BASE,
41 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
42 FIX_IO_APIC_BASE_0,
43 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
44 __end_of_fixed_addresses
45};
46
47extern void __set_fixmap (enum fixed_addresses idx,
48 unsigned long phys, pgprot_t flags);
49
50#define set_fixmap(idx, phys) \
51 __set_fixmap(idx, phys, PAGE_KERNEL)
52/*
53 * Some hardware wants to get fixmapped without caching.
54 */
55#define set_fixmap_nocache(idx, phys) \
56 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
57
58#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
59#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
60#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
61
62/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
63#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
64#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
65
66#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
67
68extern void __this_fixmap_does_not_exist(void);
69
70/*
71 * 'index to address' translation. If anyone tries to use the idx
72 * directly without translation, we catch the bug with a NULL-dereference
73 * kernel oops. Illegal ranges of incoming indices are caught too.
74 */
75static __always_inline unsigned long fix_to_virt(const unsigned int idx)
76{
77 /*
78 * this branch gets completely eliminated after inlining,
79 * except when someone tries to use fixaddr indices in an
80 * illegal way (such as mixing up address types or using
81 * out-of-range indices).
82 *
83 * If it doesn't get removed, the linker will complain
84 * loudly with a reasonably clear error message.
85 */
86 if (idx >= __end_of_fixed_addresses)
87 __this_fixmap_does_not_exist();
88
89 return __fix_to_virt(idx);
90}
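/*
 * Illustrative sketch (hypothetical physical address): how a fixmap slot
 * is typically used -- bind a physical page to a fixed index, then obtain
 * its compile-time-constant virtual address. FIX_HPET_BASE is one of the
 * indices defined above.
 */
static inline void __iomem *example_map_hpet(unsigned long hpet_phys)
{
	set_fixmap_nocache(FIX_HPET_BASE, hpet_phys);
	return (void __iomem *)fix_to_virt(FIX_HPET_BASE);
}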
91
92#endif
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
deleted file mode 100644
index 6ea13c3806f3..000000000000
--- a/include/asm-x86_64/floppy.h
+++ /dev/null
@@ -1,283 +0,0 @@
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef __ASM_X86_64_FLOPPY_H
11#define __ASM_X86_64_FLOPPY_H
12
13#include <linux/vmalloc.h>
14
15
16/*
17 * The DMA channel used by the floppy controller cannot access data at
18 * addresses >= 16MB
19 *
20 * Went back to the 1MB limit, as some people had problems with the floppy
21 * driver otherwise. It doesn't matter much for performance anyway, as most
22 * floppy accesses go through the track buffer.
23 */
24#define _CROSS_64KB(a,s,vdma) \
25(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
26
27#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
28
29
30#define SW fd_routine[use_virtual_dma&1]
31#define CSW fd_routine[can_use_virtual_dma & 1]
32
33
34#define fd_inb(port) inb_p(port)
35#define fd_outb(value,port) outb_p(value,port)
36
37#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
38#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
39#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
40#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
41#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
42#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
43#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
44#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
45
46#define FLOPPY_CAN_FALLBACK_ON_NODMA
47
48static int virtual_dma_count;
49static int virtual_dma_residue;
50static char *virtual_dma_addr;
51static int virtual_dma_mode;
52static int doing_pdma;
53
54static irqreturn_t floppy_hardint(int irq, void *dev_id)
55{
56 register unsigned char st;
57
58#undef TRACE_FLPY_INT
59
60#ifdef TRACE_FLPY_INT
61 static int calls=0;
62 static int bytes=0;
63 static int dma_wait=0;
64#endif
65 if (!doing_pdma)
66 return floppy_interrupt(irq, dev_id);
67
68#ifdef TRACE_FLPY_INT
69 if(!calls)
70 bytes = virtual_dma_count;
71#endif
72
73 {
74 register int lcount;
75 register char *lptr;
76
77 st = 1;
78 for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
79 lcount; lcount--, lptr++) {
80 st=inb(virtual_dma_port+4) & 0xa0 ;
81 if(st != 0xa0)
82 break;
83 if(virtual_dma_mode)
84 outb_p(*lptr, virtual_dma_port+5);
85 else
86 *lptr = inb_p(virtual_dma_port+5);
87 }
88 virtual_dma_count = lcount;
89 virtual_dma_addr = lptr;
90 st = inb(virtual_dma_port+4);
91 }
92
93#ifdef TRACE_FLPY_INT
94 calls++;
95#endif
96 if(st == 0x20)
97 return IRQ_HANDLED;
98 if(!(st & 0x20)) {
99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count=0;
101#ifdef TRACE_FLPY_INT
102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait);
105 calls = 0;
106 dma_wait=0;
107#endif
108 doing_pdma = 0;
109 floppy_interrupt(irq, dev_id);
110 return IRQ_HANDLED;
111 }
112#ifdef TRACE_FLPY_INT
113 if(!virtual_dma_count)
114 dma_wait++;
115#endif
116 return IRQ_HANDLED;
117}
118
119static void fd_disable_dma(void)
120{
121 if(! (can_use_virtual_dma & 1))
122 disable_dma(FLOPPY_DMA);
123 doing_pdma = 0;
124 virtual_dma_residue += virtual_dma_count;
125 virtual_dma_count=0;
126}
127
128static int vdma_request_dma(unsigned int dmanr, const char * device_id)
129{
130 return 0;
131}
132
133static void vdma_nop(unsigned int dummy)
134{
135}
136
137
138static int vdma_get_dma_residue(unsigned int dummy)
139{
140 return virtual_dma_count + virtual_dma_residue;
141}
142
143
144static int fd_request_irq(void)
145{
146 if(can_use_virtual_dma)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,
148 IRQF_DISABLED, "floppy", NULL);
149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt,
151 IRQF_DISABLED, "floppy", NULL);
152}
153
154static unsigned long dma_mem_alloc(unsigned long size)
155{
156 return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
157}
158
159
160static unsigned long vdma_mem_alloc(unsigned long size)
161{
162 return (unsigned long) vmalloc(size);
163
164}
165
166#define nodma_mem_alloc(size) vdma_mem_alloc(size)
167
168static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
169{
170 if((unsigned long) addr >= (unsigned long) high_memory)
171 vfree((void *)addr);
172 else
173 free_pages(addr, get_order(size));
174}
175
176#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
177
178static void _fd_chose_dma_mode(char *addr, unsigned long size)
179{
180 if(can_use_virtual_dma == 2) {
181 if((unsigned long) addr >= (unsigned long) high_memory ||
182 isa_virt_to_bus(addr) >= 0x1000000 ||
183 _CROSS_64KB(addr, size, 0))
184 use_virtual_dma = 1;
185 else
186 use_virtual_dma = 0;
187 } else {
188 use_virtual_dma = can_use_virtual_dma & 1;
189 }
190}
191
192#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
193
194
195static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
196{
197 doing_pdma = 1;
198 virtual_dma_port = io;
199 virtual_dma_mode = (mode == DMA_MODE_WRITE);
200 virtual_dma_addr = addr;
201 virtual_dma_count = size;
202 virtual_dma_residue = 0;
203 return 0;
204}
205
206static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
207{
208#ifdef FLOPPY_SANITY_CHECK
209 if (CROSS_64KB(addr, size)) {
210 printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
211 return -1;
212 }
213#endif
214 /* actual, physical DMA */
215 doing_pdma = 0;
216 clear_dma_ff(FLOPPY_DMA);
217 set_dma_mode(FLOPPY_DMA,mode);
218 set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
219 set_dma_count(FLOPPY_DMA,size);
220 enable_dma(FLOPPY_DMA);
221 return 0;
222}
223
224static struct fd_routine_l {
225 int (*_request_dma)(unsigned int dmanr, const char * device_id);
226 void (*_free_dma)(unsigned int dmanr);
227 int (*_get_dma_residue)(unsigned int dummy);
228 unsigned long (*_dma_mem_alloc) (unsigned long size);
229 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
230} fd_routine[] = {
231 {
232 request_dma,
233 free_dma,
234 get_dma_residue,
235 dma_mem_alloc,
236 hard_dma_setup
237 },
238 {
239 vdma_request_dma,
240 vdma_nop,
241 vdma_get_dma_residue,
242 vdma_mem_alloc,
243 vdma_dma_setup
244 }
245};
246
247
248static int FDC1 = 0x3f0;
249static int FDC2 = -1;
250
251/*
252 * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
253 * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
254 * coincides with another rtc CMOS user. Paul G.
255 */
256#define FLOPPY0_TYPE ({ \
257 unsigned long flags; \
258 unsigned char val; \
259 spin_lock_irqsave(&rtc_lock, flags); \
260 val = (CMOS_READ(0x10) >> 4) & 15; \
261 spin_unlock_irqrestore(&rtc_lock, flags); \
262 val; \
263})
264
265#define FLOPPY1_TYPE ({ \
266 unsigned long flags; \
267 unsigned char val; \
268 spin_lock_irqsave(&rtc_lock, flags); \
269 val = CMOS_READ(0x10) & 15; \
270 spin_unlock_irqrestore(&rtc_lock, flags); \
271 val; \
272})
273
274#define N_FDC 2
275#define N_DRIVE 8
276
277#define FLOPPY_MOTOR_MASK 0xf0
278
279#define AUTO_DMA
280
281#define EXTRA_FLOPPY_PARAMS
282
283#endif /* __ASM_X86_64_FLOPPY_H */
diff --git a/include/asm-x86_64/fpu32.h b/include/asm-x86_64/fpu32.h
deleted file mode 100644
index 4153db5c0c31..000000000000
--- a/include/asm-x86_64/fpu32.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _FPU32_H
2#define _FPU32_H 1
3
4struct _fpstate_ia32;
5
6int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave);
7int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf,
8 struct pt_regs *regs, int fsave);
9
10#endif
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
deleted file mode 100644
index 5cdfb08013c3..000000000000
--- a/include/asm-x86_64/futex.h
+++ /dev/null
@@ -1,125 +0,0 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/system.h>
9#include <asm/uaccess.h>
10
11#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
12 __asm__ __volatile ( \
13"1: " insn "\n" \
14"2: .section .fixup,\"ax\"\n\
153: mov %3, %1\n\
16 jmp 2b\n\
17 .previous\n\
18 .section __ex_table,\"a\"\n\
19 .align 8\n\
20 .quad 1b,3b\n\
21 .previous" \
22 : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
23 : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
24
25#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
26 __asm__ __volatile ( \
27"1: movl %2, %0\n\
28 movl %0, %3\n" \
29 insn "\n" \
30"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
31 jnz 1b\n\
323: .section .fixup,\"ax\"\n\
334: mov %5, %1\n\
34 jmp 3b\n\
35 .previous\n\
36 .section __ex_table,\"a\"\n\
37 .align 8\n\
38 .quad 1b,4b,2b,4b\n\
39 .previous" \
40 : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
41 "=&r" (tem) \
42 : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
43
44static inline int
45futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
46{
47 int op = (encoded_op >> 28) & 7;
48 int cmp = (encoded_op >> 24) & 15;
49 int oparg = (encoded_op << 8) >> 20;
50 int cmparg = (encoded_op << 20) >> 20;
51 int oldval = 0, ret, tem;
52 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
53 oparg = 1 << oparg;
54
55 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
56 return -EFAULT;
57
58 pagefault_disable();
59
60 switch (op) {
61 case FUTEX_OP_SET:
62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
63 break;
64 case FUTEX_OP_ADD:
65 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
66 uaddr, oparg);
67 break;
68 case FUTEX_OP_OR:
69 __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
70 break;
71 case FUTEX_OP_ANDN:
72 __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
73 break;
74 case FUTEX_OP_XOR:
75 __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
76 break;
77 default:
78 ret = -ENOSYS;
79 }
80
81 pagefault_enable();
82
83 if (!ret) {
84 switch (cmp) {
85 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
86 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
87 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
88 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
89 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
90 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
91 default: ret = -ENOSYS;
92 }
93 }
94 return ret;
95}
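/*
 * Illustrative sketch (hypothetical helper): the inverse of the decode at
 * the top of futex_atomic_op_inuser() -- op in bits 28-31, cmp in bits
 * 24-27, then two sign-extended 12-bit arguments (oparg in bits 12-23,
 * cmparg in bits 0-11).
 */
static inline int example_encode_futex_op(int op, int cmp,
					  int oparg, int cmparg)
{
	return (op << 28) | (cmp << 24) |
	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}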
96
97static inline int
98futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
99{
100 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
101 return -EFAULT;
102
103 __asm__ __volatile__(
104 "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
105
106 "2: .section .fixup, \"ax\" \n"
107 "3: mov %2, %0 \n"
108 " jmp 2b \n"
109 " .previous \n"
110
111 " .section __ex_table, \"a\" \n"
112 " .align 8 \n"
113 " .quad 1b,3b \n"
114 " .previous \n"
115
116 : "=a" (oldval), "=m" (*uaddr)
117 : "i" (-EFAULT), "r" (newval), "0" (oldval)
118 : "memory"
119 );
120
121 return oldval;
122}
123
124#endif
125#endif
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
deleted file mode 100644
index d7e516ccbaa4..000000000000
--- a/include/asm-x86_64/genapic.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef _ASM_GENAPIC_H
2#define _ASM_GENAPIC_H 1
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC sub-arch data struct.
9 *
10 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
11 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
12 * James Cleverdon.
13 */
14
15struct genapic {
16 char *name;
17 u32 int_delivery_mode;
18 u32 int_dest_mode;
19 int (*apic_id_registered)(void);
20 cpumask_t (*target_cpus)(void);
21 cpumask_t (*vector_allocation_domain)(int cpu);
22 void (*init_apic_ldr)(void);
23 /* ipi */
24 void (*send_IPI_mask)(cpumask_t mask, int vector);
25 void (*send_IPI_allbutself)(int vector);
26 void (*send_IPI_all)(int vector);
27 /* */
28 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
29 unsigned int (*phys_pkg_id)(int index_msb);
30};
31
32extern struct genapic *genapic;
33
34extern struct genapic apic_flat;
35extern struct genapic apic_physflat;
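/*
 * Illustrative sketch (hypothetical caller): every IPI send is dispatched
 * through whichever ops table "genapic" currently points to (apic_flat or
 * apic_physflat above).
 */
static inline void example_broadcast_ipi(int vector)
{
	genapic->send_IPI_all(vector);
}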
36
37#endif
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
deleted file mode 100644
index 95d5e090ed89..000000000000
--- a/include/asm-x86_64/hardirq.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef __ASM_HARDIRQ_H
2#define __ASM_HARDIRQ_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6#include <asm/pda.h>
7#include <asm/apic.h>
8
9/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
10#define MAX_HARDIRQS_PER_CPU NR_VECTORS
11
12#define __ARCH_IRQ_STAT 1
13
14#define local_softirq_pending() read_pda(__softirq_pending)
15
16#define __ARCH_SET_SOFTIRQ_PENDING 1
17
18#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
19#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
20
21extern void ack_bad_irq(unsigned int irq);
22
23#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
deleted file mode 100644
index 79bb950f82c5..000000000000
--- a/include/asm-x86_64/hpet.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef _ASM_X8664_HPET_H
2#define _ASM_X8664_HPET_H 1
3
4#include <asm-i386/hpet.h>
5
6#define HPET_TICK_RATE (HZ * 100000UL)
7
8extern int hpet_rtc_timer_init(void);
9extern int hpet_arch_init(void);
10extern int hpet_timer_stop_set_go(unsigned long tick);
11extern int hpet_reenable(void);
12extern unsigned int hpet_calibrate_tsc(void);
13
14extern int hpet_use_timer;
15extern unsigned long hpet_period;
16extern unsigned long hpet_tick;
17
18#endif
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
deleted file mode 100644
index 09dfc18a6dd0..000000000000
--- a/include/asm-x86_64/hw_irq.h
+++ /dev/null
@@ -1,175 +0,0 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * linux/include/asm/hw_irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * moved some of the old arch/i386/kernel/irq.h to here. VY
10 *
11 * IRQ/IPI changes taken from work by Thomas Radke
12 * <tomsoft@informatik.tu-chemnitz.de>
13 *
14 * hacked by Andi Kleen for x86-64.
15 */
16
17#ifndef __ASSEMBLY__
18#include <asm/atomic.h>
19#include <asm/irq.h>
20#include <linux/profile.h>
21#include <linux/smp.h>
22#include <linux/percpu.h>
23#endif
24
25#define NMI_VECTOR 0x02
26/*
27 * IDT vectors usable for external interrupt sources start
28 * at 0x20:
29 */
30#define FIRST_EXTERNAL_VECTOR 0x20
31
32#define IA32_SYSCALL_VECTOR 0x80
33
34
35/* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
36 * cleanup after irq migration.
37 */
38#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
39
40/*
41 * Vectors 0x30-0x3f are used for ISA interrupts.
42 */
43#define IRQ0_VECTOR FIRST_EXTERNAL_VECTOR + 0x10
44#define IRQ1_VECTOR IRQ0_VECTOR + 1
45#define IRQ2_VECTOR IRQ0_VECTOR + 2
46#define IRQ3_VECTOR IRQ0_VECTOR + 3
47#define IRQ4_VECTOR IRQ0_VECTOR + 4
48#define IRQ5_VECTOR IRQ0_VECTOR + 5
49#define IRQ6_VECTOR IRQ0_VECTOR + 6
50#define IRQ7_VECTOR IRQ0_VECTOR + 7
51#define IRQ8_VECTOR IRQ0_VECTOR + 8
52#define IRQ9_VECTOR IRQ0_VECTOR + 9
53#define IRQ10_VECTOR IRQ0_VECTOR + 10
54#define IRQ11_VECTOR IRQ0_VECTOR + 11
55#define IRQ12_VECTOR IRQ0_VECTOR + 12
56#define IRQ13_VECTOR IRQ0_VECTOR + 13
57#define IRQ14_VECTOR IRQ0_VECTOR + 14
58#define IRQ15_VECTOR IRQ0_VECTOR + 15
59
60/*
61 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
62 *
63 * some of the following vectors are 'rare', they are merged
64 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
65 * TLB, reschedule and local APIC vectors are performance-critical.
66 */
67#define SPURIOUS_APIC_VECTOR 0xff
68#define ERROR_APIC_VECTOR 0xfe
69#define RESCHEDULE_VECTOR 0xfd
70#define CALL_FUNCTION_VECTOR 0xfc
71/* fb free - please don't re-add KDB here because it's useless
72 (hint - think what an NMI bit does to a vector) */
73#define THERMAL_APIC_VECTOR 0xfa
74#define THRESHOLD_APIC_VECTOR 0xf9
75/* f8 free */
76#define INVALIDATE_TLB_VECTOR_END 0xf7
77#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
78
79#define NUM_INVALIDATE_TLB_VECTORS 8
80
81/*
82 * Local APIC timer IRQ vector is on a different priority level,
83 * to work around the 'lost local interrupt if more than 2 IRQ
84 * sources per level' errata.
85 */
86#define LOCAL_TIMER_VECTOR 0xef
87
88/*
89 * First APIC vector available to drivers: (vectors 0x30-0xee)
90 * we start at 0x41 to spread out vectors evenly between priority
91 * levels. (0x80 is the syscall vector)
92 */
93#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
94#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
95
96
97#ifndef __ASSEMBLY__
98
99/* Interrupt handlers registered during init_IRQ */
100void apic_timer_interrupt(void);
101void spurious_interrupt(void);
102void error_interrupt(void);
103void reschedule_interrupt(void);
104void call_function_interrupt(void);
105void irq_move_cleanup_interrupt(void);
106void invalidate_interrupt0(void);
107void invalidate_interrupt1(void);
108void invalidate_interrupt2(void);
109void invalidate_interrupt3(void);
110void invalidate_interrupt4(void);
111void invalidate_interrupt5(void);
112void invalidate_interrupt6(void);
113void invalidate_interrupt7(void);
114void thermal_interrupt(void);
115void threshold_interrupt(void);
116void i8254_timer_resume(void);
117
118typedef int vector_irq_t[NR_VECTORS];
119DECLARE_PER_CPU(vector_irq_t, vector_irq);
120extern void __setup_vector_irq(int cpu);
121extern spinlock_t vector_lock;
122
123/*
124 * Various low-level irq details needed by irq.c, process.c,
125 * time.c, io_apic.c and smp.c
126 *
127 * Interrupt entry/exit code at both C and assembly level
128 */
129
130extern void disable_8259A_irq(unsigned int irq);
131extern void enable_8259A_irq(unsigned int irq);
132extern int i8259A_irq_pending(unsigned int irq);
133extern void make_8259A_irq(unsigned int irq);
134extern void init_8259A(int aeoi);
135extern void send_IPI_self(int vector);
136extern void init_VISWS_APIC_irqs(void);
137extern void setup_IO_APIC(void);
138extern void disable_IO_APIC(void);
139extern void print_IO_APIC(void);
140extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
141extern void send_IPI(int dest, int vector);
142extern void setup_ioapic_dest(void);
143
144extern unsigned long io_apic_irqs;
145
146extern atomic_t irq_err_count;
147extern atomic_t irq_mis_count;
148
149#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
150
151#define __STR(x) #x
152#define STR(x) __STR(x)
153
154#include <asm/ptrace.h>
155
156#define IRQ_NAME2(nr) nr##_interrupt(void)
157#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
158
159/*
160 * SMP has a few special interrupts for IPI messages
161 */
162
163#define BUILD_IRQ(nr) \
164asmlinkage void IRQ_NAME(nr); \
165__asm__( \
166"\n.p2align\n" \
167"IRQ" #nr "_interrupt:\n\t" \
168 "push $~(" #nr ") ; " \
169 "jmp common_interrupt");
170
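/*
 * Descriptive note for the macro above: BUILD_IRQ(0x41) (hypothetical
 * vector) expands to
 *
 *	asmlinkage void IRQ0x41_interrupt(void);
 *	__asm__("\n.p2align\n"
 *		"IRQ0x41_interrupt:\n\t"
 *		"push $~(0x41) ; jmp common_interrupt");
 *
 * i.e. the complemented vector number is pushed so that common_interrupt
 * can recover which vector fired.
 */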
171#define platform_legacy_irq(irq) ((irq) < 16)
172
173#endif
174
175#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-x86_64/hypertransport.h b/include/asm-x86_64/hypertransport.h
deleted file mode 100644
index 5cbf9fa5e0b5..000000000000
--- a/include/asm-x86_64/hypertransport.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/hypertransport.h>
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
deleted file mode 100644
index 0217b74cc9fc..000000000000
--- a/include/asm-x86_64/i387.h
+++ /dev/null
@@ -1,209 +0,0 @@
1/*
2 * include/asm-x86_64/i387.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 *
6 * Pentium III FXSR, SSE support
7 * General FPU state handling cleanups
8 * Gareth Hughes <gareth@valinux.com>, May 2000
9 * x86-64 work by Andi Kleen 2002
10 */
11
12#ifndef __ASM_X86_64_I387_H
13#define __ASM_X86_64_I387_H
14
15#include <linux/sched.h>
16#include <asm/processor.h>
17#include <asm/sigcontext.h>
18#include <asm/user.h>
19#include <asm/thread_info.h>
20#include <asm/uaccess.h>
21
22extern void fpu_init(void);
23extern unsigned int mxcsr_feature_mask;
24extern void mxcsr_feature_mask_init(void);
25extern void init_fpu(struct task_struct *child);
26extern int save_i387(struct _fpstate __user *buf);
27extern asmlinkage void math_state_restore(void);
28
29/*
30 * FPU lazy state save handling...
31 */
32
33#define unlazy_fpu(tsk) do { \
34 if (task_thread_info(tsk)->status & TS_USEDFPU) \
35 save_init_fpu(tsk); \
36 else \
37 tsk->fpu_counter = 0; \
38} while (0)
39
40/* Ignore delayed exceptions from user space */
41static inline void tolerant_fwait(void)
42{
43 asm volatile("1: fwait\n"
44 "2:\n"
45 " .section __ex_table,\"a\"\n"
46 " .align 8\n"
47 " .quad 1b,2b\n"
48 " .previous\n");
49}
50
51#define clear_fpu(tsk) do { \
52 if (task_thread_info(tsk)->status & TS_USEDFPU) { \
53 tolerant_fwait(); \
54 task_thread_info(tsk)->status &= ~TS_USEDFPU; \
55 stts(); \
56 } \
57} while (0)
58
59/*
60 * ptrace request handlers...
61 */
62extern int get_fpregs(struct user_i387_struct __user *buf,
63 struct task_struct *tsk);
64extern int set_fpregs(struct task_struct *tsk,
65 struct user_i387_struct __user *buf);
66
67/*
68 * i387 state interaction
69 */
70#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
71#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
72#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
73#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
74#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
75#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
76#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
77
78#define X87_FSW_ES (1 << 7) /* Exception Summary */
79
80/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
81 is pending. Clear the x87 state here by setting it to fixed
82 values. The kernel data segment can be sometimes 0 and sometimes the
83 new user value. Both should be ok.
84 Use the PDA as a safe address because it should already be in L1. */
85static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
86{
87 if (unlikely(fx->swd & X87_FSW_ES))
88 asm volatile("fnclex");
89 alternative_input(ASM_NOP8 ASM_NOP2,
90 " emms\n" /* clear stack tags */
91 " fildl %%gs:0", /* load to clear state */
92 X86_FEATURE_FXSAVE_LEAK);
93}
94
95static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
96{
97 int err;
98
99 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
100 "2:\n"
101 ".section .fixup,\"ax\"\n"
102 "3: movl $-1,%[err]\n"
103 " jmp 2b\n"
104 ".previous\n"
105 ".section __ex_table,\"a\"\n"
106 " .align 8\n"
107 " .quad 1b,3b\n"
108 ".previous"
109 : [err] "=r" (err)
110#if 0 /* See comment in __fxsave_clear() below. */
111 : [fx] "r" (fx), "m" (*fx), "0" (0));
112#else
113 : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
114#endif
115 if (unlikely(err))
116 init_fpu(current);
117 return err;
118}
119
120static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
121{
122 int err;
123
124 asm volatile("1: rex64/fxsave (%[fx])\n\t"
125 "2:\n"
126 ".section .fixup,\"ax\"\n"
127 "3: movl $-1,%[err]\n"
128 " jmp 2b\n"
129 ".previous\n"
130 ".section __ex_table,\"a\"\n"
131 " .align 8\n"
132 " .quad 1b,3b\n"
133 ".previous"
134 : [err] "=r" (err), "=m" (*fx)
135#if 0 /* See comment in __fxsave_clear() below. */
136 : [fx] "r" (fx), "0" (0));
137#else
138 : [fx] "cdaSDb" (fx), "0" (0));
139#endif
140 if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
141 err = -EFAULT;
142 /* No need to clear here because the caller clears USED_MATH */
143 return err;
144}
145
146static inline void __fxsave_clear(struct task_struct *tsk)
147{
148 /* Using "rex64; fxsave %0" is broken because, if the memory operand
149 uses any extended registers for addressing, a second REX prefix
150 will be generated (to the assembler, rex64 followed by semicolon
151 is a separate instruction), and hence the 64-bitness is lost. */
152#if 0
153 /* Using "fxsaveq %0" would be the ideal choice, but is only supported
154 starting with gas 2.16. */
155 __asm__ __volatile__("fxsaveq %0"
156 : "=m" (tsk->thread.i387.fxsave));
157#elif 0
158 /* Using, as a workaround, the properly prefixed form below isn't
159 accepted by any binutils version so far released, complaining that
160 the same type of prefix is used twice if an extended register is
161 needed for addressing (fix submitted to mainline 2005-11-21). */
162 __asm__ __volatile__("rex64/fxsave %0"
163 : "=m" (tsk->thread.i387.fxsave));
164#else
165 /* This, however, we can work around by forcing the compiler to select
166 an addressing mode that doesn't require extended registers. */
167 __asm__ __volatile__("rex64/fxsave %P2(%1)"
168 : "=m" (tsk->thread.i387.fxsave)
169 : "cdaSDb" (tsk),
170 "i" (offsetof(__typeof__(*tsk),
171 thread.i387.fxsave)));
172#endif
173 clear_fpu_state(&tsk->thread.i387.fxsave);
174}
175
176static inline void kernel_fpu_begin(void)
177{
178 struct thread_info *me = current_thread_info();
179 preempt_disable();
180 if (me->status & TS_USEDFPU) {
181 __fxsave_clear(me->task);
182 me->status &= ~TS_USEDFPU;
183 return;
184 }
185 clts();
186}
187
188static inline void kernel_fpu_end(void)
189{
190 stts();
191 preempt_enable();
192}
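/*
 * Illustrative sketch (hypothetical caller): kernel-mode FPU/SSE use must
 * be bracketed by the pair above; preemption stays disabled for the whole
 * region, so keep it short.
 */
static inline void example_use_fpu_in_kernel(void)
{
	kernel_fpu_begin();
	/* ... FPU/SSE instructions may be issued here ... */
	kernel_fpu_end();
}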
193
194static inline void save_init_fpu(struct task_struct *tsk)
195{
196 __fxsave_clear(tsk);
197 task_thread_info(tsk)->status &= ~TS_USEDFPU;
198 stts();
199}
200
201/*
202 * This restores directly out of user space. Exceptions are handled.
203 */
204static inline int restore_i387(struct _fpstate __user *buf)
205{
206 return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
207}
208
209#endif /* __ASM_X86_64_I387_H */
diff --git a/include/asm-x86_64/i8253.h b/include/asm-x86_64/i8253.h
deleted file mode 100644
index 015d8df07690..000000000000
--- a/include/asm-x86_64/i8253.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_I8253_H__
2#define __ASM_I8253_H__
3
4extern spinlock_t i8253_lock;
5
6#endif /* __ASM_I8253_H__ */
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
deleted file mode 100644
index 0190b7c4e319..000000000000
--- a/include/asm-x86_64/ia32.h
+++ /dev/null
@@ -1,178 +0,0 @@
1#ifndef _ASM_X86_64_IA32_H
2#define _ASM_X86_64_IA32_H
3
4
5#ifdef CONFIG_IA32_EMULATION
6
7#include <linux/compat.h>
8
9/*
10 * 32 bit structures for IA32 support.
11 */
12
13#include <asm/sigcontext32.h>
14
15/* signal.h */
16struct sigaction32 {
17 unsigned int sa_handler; /* Really a pointer, but need to deal
18 with 32 bits */
19 unsigned int sa_flags;
20 unsigned int sa_restorer; /* Another 32 bit pointer */
21 compat_sigset_t sa_mask; /* A 32 bit mask */
22};
23
24struct old_sigaction32 {
25 unsigned int sa_handler; /* Really a pointer, but need to deal
26 with 32 bits */
27 compat_old_sigset_t sa_mask; /* A 32 bit mask */
28 unsigned int sa_flags;
29 unsigned int sa_restorer; /* Another 32 bit pointer */
30};
31
32typedef struct sigaltstack_ia32 {
33 unsigned int ss_sp;
34 int ss_flags;
35 unsigned int ss_size;
36} stack_ia32_t;
37
38struct ucontext_ia32 {
39 unsigned int uc_flags;
40 unsigned int uc_link;
41 stack_ia32_t uc_stack;
42 struct sigcontext_ia32 uc_mcontext;
43 compat_sigset_t uc_sigmask; /* mask last for extensibility */
44};
45
46/* This matches struct stat64 in glibc2.2, hence the absolutely
47 * insane amounts of padding around dev_t's.
48 */
49struct stat64 {
50 unsigned long long st_dev;
51 unsigned char __pad0[4];
52
53#define STAT64_HAS_BROKEN_ST_INO 1
54 unsigned int __st_ino;
55
56 unsigned int st_mode;
57 unsigned int st_nlink;
58
59 unsigned int st_uid;
60 unsigned int st_gid;
61
62 unsigned long long st_rdev;
63 unsigned char __pad3[4];
64
65 long long st_size;
66 unsigned int st_blksize;
67
68 long long st_blocks;/* Number 512-byte blocks allocated. */
69
70 unsigned st_atime;
71 unsigned st_atime_nsec;
72 unsigned st_mtime;
73 unsigned st_mtime_nsec;
74 unsigned st_ctime;
75 unsigned st_ctime_nsec;
76
77 unsigned long long st_ino;
78} __attribute__((packed));
79
80typedef struct compat_siginfo{
81 int si_signo;
82 int si_errno;
83 int si_code;
84
85 union {
86 int _pad[((128/sizeof(int)) - 3)];
87
88 /* kill() */
89 struct {
90 unsigned int _pid; /* sender's pid */
91 unsigned int _uid; /* sender's uid */
92 } _kill;
93
94 /* POSIX.1b timers */
95 struct {
96 compat_timer_t _tid; /* timer id */
97 int _overrun; /* overrun count */
98 compat_sigval_t _sigval; /* same as below */
99 int _sys_private; /* not to be passed to user */
100 int _overrun_incr; /* amount to add to overrun */
101 } _timer;
102
103 /* POSIX.1b signals */
104 struct {
105 unsigned int _pid; /* sender's pid */
106 unsigned int _uid; /* sender's uid */
107 compat_sigval_t _sigval;
108 } _rt;
109
110 /* SIGCHLD */
111 struct {
112 unsigned int _pid; /* which child */
113 unsigned int _uid; /* sender's uid */
114 int _status; /* exit code */
115 compat_clock_t _utime;
116 compat_clock_t _stime;
117 } _sigchld;
118
119 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
120 struct {
121 unsigned int _addr; /* faulting insn/memory ref. */
122 } _sigfault;
123
124 /* SIGPOLL */
125 struct {
126 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
127 int _fd;
128 } _sigpoll;
129 } _sifields;
130} compat_siginfo_t;
131
132struct sigframe32
133{
134 u32 pretcode;
135 int sig;
136 struct sigcontext_ia32 sc;
137 struct _fpstate_ia32 fpstate;
138 unsigned int extramask[_COMPAT_NSIG_WORDS-1];
139};
140
141struct rt_sigframe32
142{
143 u32 pretcode;
144 int sig;
145 u32 pinfo;
146 u32 puc;
147 compat_siginfo_t info;
148 struct ucontext_ia32 uc;
149 struct _fpstate_ia32 fpstate;
150};
151
152struct ustat32 {
153 __u32 f_tfree;
154 compat_ino_t f_tinode;
155 char f_fname[6];
156 char f_fpack[6];
157};
158
159#define IA32_STACK_TOP IA32_PAGE_OFFSET
160
161#ifdef __KERNEL__
162struct user_desc;
163struct siginfo_t;
164int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
165int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
166int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
167
168struct linux_binprm;
169extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
170 unsigned long stack_top, int exec_stack);
171struct mm_struct;
172extern void ia32_pick_mmap_layout(struct mm_struct *mm);
173
174#endif
175
176#endif /* CONFIG_IA32_EMULATION */
177
178#endif
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
deleted file mode 100644
index 5b52ce507338..000000000000
--- a/include/asm-x86_64/ia32_unistd.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef _ASM_X86_64_IA32_UNISTD_H_
2#define _ASM_X86_64_IA32_UNISTD_H_
3
4/*
5 * This file contains the system call numbers of the ia32 port;
6 * this is for the kernel only.
7 * Only add syscalls here where some part of the kernel needs to know
8 * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK
9 */
10
11#define __NR_ia32_restart_syscall 0
12#define __NR_ia32_exit 1
13#define __NR_ia32_read 3
14#define __NR_ia32_write 4
15#define __NR_ia32_sigreturn 119
16#define __NR_ia32_rt_sigreturn 173
17
18#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/ide.h b/include/asm-x86_64/ide.h
deleted file mode 100644
index 4cef0ef61878..000000000000
--- a/include/asm-x86_64/ide.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/ide.h>
diff --git a/include/asm-x86_64/idle.h b/include/asm-x86_64/idle.h
deleted file mode 100644
index 6bd47dcf2067..000000000000
--- a/include/asm-x86_64/idle.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _ASM_X86_64_IDLE_H
2#define _ASM_X86_64_IDLE_H 1
3
4#define IDLE_START 1
5#define IDLE_END 2
6
7struct notifier_block;
8void idle_notifier_register(struct notifier_block *n);
9void idle_notifier_unregister(struct notifier_block *n);
10
11void enter_idle(void);
12void exit_idle(void);
13
14#endif
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
deleted file mode 100644
index 8633331420ec..000000000000
--- a/include/asm-x86_64/intel_arch_perfmon.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef X86_64_INTEL_ARCH_PERFMON_H
2#define X86_64_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
18#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
19 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
20
21union cpuid10_eax {
22 struct {
23 unsigned int version_id:8;
24 unsigned int num_counters:8;
25 unsigned int bit_width:8;
26 unsigned int mask_length:8;
27 } split;
28 unsigned int full;
29};
30
31#endif /* X86_64_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
deleted file mode 100644
index 7475095c5061..000000000000
--- a/include/asm-x86_64/io.h
+++ /dev/null
@@ -1,276 +0,0 @@
1#ifndef _ASM_IO_H
2#define _ASM_IO_H
3
4
5/*
6 * This file contains the definitions for the x86 IO instructions
7 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
8 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
9 * versions of the single-IO instructions (inb_p/inw_p/..).
10 *
11 * This file is not meant to be obfuscating: it's just complicated
12 * to (a) handle it all in a way that makes gcc able to optimize it
13 * as well as possible and (b) avoid writing the same thing
14 * over and over again with slight variations and possibly making a
15 * mistake somewhere.
16 */
17
18/*
19 * Thanks to James van Artsdalen for a better timing-fix than
20 * the two short jumps: using outb's to a nonexistent port seems
21 * to guarantee better timings even on fast machines.
22 *
23 * On the other hand, I'd like to be sure of a non-existent port:
24 * I feel a bit unsafe about using 0x80 (should be safe, though)
25 *
26 * Linus
27 */
28
29 /*
30 * A bit simplified and optimized by Jan Hubicka
31 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
32 *
33 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
34 * isa_read[wl] and isa_write[wl] fixed
35 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
36 */
37
38#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
39
40#ifdef REALLY_SLOW_IO
41#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
42#else
43#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
44#endif
45
46/*
47 * Talk about misusing macros..
48 */
49#define __OUT1(s,x) \
50static inline void out##s(unsigned x value, unsigned short port) {
51
52#define __OUT2(s,s1,s2) \
53__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
54
55#define __OUT(s,s1,x) \
56__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
57__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
58
59#define __IN1(s) \
60static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
61
62#define __IN2(s,s1,s2) \
63__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
64
65#define __IN(s,s1,i...) \
66__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
67__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
68
69#define __INS(s) \
70static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
71{ __asm__ __volatile__ ("rep ; ins" #s \
72: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
73
74#define __OUTS(s) \
75static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
76{ __asm__ __volatile__ ("rep ; outs" #s \
77: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
78
79#define RETURN_TYPE unsigned char
80__IN(b,"")
81#undef RETURN_TYPE
82#define RETURN_TYPE unsigned short
83__IN(w,"")
84#undef RETURN_TYPE
85#define RETURN_TYPE unsigned int
86__IN(l,"")
87#undef RETURN_TYPE
88
89__OUT(b,"b",char)
90__OUT(w,"w",short)
91__OUT(l,,int)
92
93__INS(b)
94__INS(w)
95__INS(l)
96
97__OUTS(b)
98__OUTS(w)
99__OUTS(l)
100
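/*
 * Descriptive note for the instantiations above: they generate the usual
 * port-accessor family. For example, __OUT(b,"b",char) expands (roughly) to
 *
 *	static inline void outb(unsigned char value, unsigned short port)
 *	{ __asm__ __volatile__("outb %b0,%w1" : : "a" (value), "Nd" (port)); }
 *
 * plus an outb_p() variant with the 0x80 slow-down write appended.
 */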
101#define IO_SPACE_LIMIT 0xffff
102
103#if defined(__KERNEL__) && defined(__x86_64__)
104
105#include <linux/vmalloc.h>
106
107#ifndef __i386__
108/*
109 * Change virtual addresses to physical addresses and vv.
110 * These are pretty trivial
111 */
112static inline unsigned long virt_to_phys(volatile void * address)
113{
114 return __pa(address);
115}
116
117static inline void * phys_to_virt(unsigned long address)
118{
119 return __va(address);
120}
121#endif
122
123/*
124 * Change "struct page" to physical address.
125 */
126#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
127
128#include <asm-generic/iomap.h>
129
130extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
131
132static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
133{
134 return __ioremap(offset, size, 0);
135}
136
137extern void *early_ioremap(unsigned long addr, unsigned long size);
138extern void early_iounmap(void *addr, unsigned long size);
139
140/*
141 * This one maps high address device memory and turns off caching for that area.
142 * it's useful if some control registers are in such an area and write combining
143 * or read caching is not desirable:
144 */
145extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
146extern void iounmap(volatile void __iomem *addr);
147extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
148
149/*
150 * ISA I/O bus memory addresses are 1:1 with the physical address.
151 */
152#define isa_virt_to_bus virt_to_phys
153#define isa_page_to_bus page_to_phys
154#define isa_bus_to_virt phys_to_virt
155
156/*
157 * However PCI ones are not necessarily 1:1 and therefore these interfaces
158 * are forbidden in portable PCI drivers.
159 *
160 * Allow them on x86 for legacy drivers, though.
161 */
162#define virt_to_bus virt_to_phys
163#define bus_to_virt phys_to_virt
164
165/*
166 * readX/writeX() are used to access memory mapped devices. On some
167 * architectures the memory mapped IO stuff needs to be accessed
168 * differently. On the x86 architecture, we just read/write the
169 * memory location directly.
170 */
171
172static inline __u8 __readb(const volatile void __iomem *addr)
173{
174 return *(__force volatile __u8 *)addr;
175}
176static inline __u16 __readw(const volatile void __iomem *addr)
177{
178 return *(__force volatile __u16 *)addr;
179}
180static __always_inline __u32 __readl(const volatile void __iomem *addr)
181{
182 return *(__force volatile __u32 *)addr;
183}
184static inline __u64 __readq(const volatile void __iomem *addr)
185{
186 return *(__force volatile __u64 *)addr;
187}
188#define readb(x) __readb(x)
189#define readw(x) __readw(x)
190#define readl(x) __readl(x)
191#define readq(x) __readq(x)
192#define readb_relaxed(a) readb(a)
193#define readw_relaxed(a) readw(a)
194#define readl_relaxed(a) readl(a)
195#define readq_relaxed(a) readq(a)
196#define __raw_readb readb
197#define __raw_readw readw
198#define __raw_readl readl
199#define __raw_readq readq
200
201#define mmiowb()
202
203static inline void __writel(__u32 b, volatile void __iomem *addr)
204{
205 *(__force volatile __u32 *)addr = b;
206}
207static inline void __writeq(__u64 b, volatile void __iomem *addr)
208{
209 *(__force volatile __u64 *)addr = b;
210}
211static inline void __writeb(__u8 b, volatile void __iomem *addr)
212{
213 *(__force volatile __u8 *)addr = b;
214}
215static inline void __writew(__u16 b, volatile void __iomem *addr)
216{
217 *(__force volatile __u16 *)addr = b;
218}
219#define writeq(val,addr) __writeq((val),(addr))
220#define writel(val,addr) __writel((val),(addr))
221#define writew(val,addr) __writew((val),(addr))
222#define writeb(val,addr) __writeb((val),(addr))
223#define __raw_writeb writeb
224#define __raw_writew writew
225#define __raw_writel writel
226#define __raw_writeq writeq
227
228void __memcpy_fromio(void*,unsigned long,unsigned);
229void __memcpy_toio(unsigned long,const void*,unsigned);
230
231static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
232{
233 __memcpy_fromio(to,(unsigned long)from,len);
234}
235static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
236{
237 __memcpy_toio((unsigned long)to,from,len);
238}
239
240void memset_io(volatile void __iomem *a, int b, size_t c);
241
242/*
243 * ISA space is 'always mapped' on a typical x86 system, no need to
244 * explicitly ioremap() it. The fact that the ISA IO space is mapped
245 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
246 * are physical addresses. The following constant pointer can be
247 * used as the IO-area pointer (it can be iounmapped as well, so the
248 * analogy with PCI is quite close):
249 */
250#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
251
252/* Nothing to do */
253
254#define dma_cache_inv(_start,_size) do { } while (0)
255#define dma_cache_wback(_start,_size) do { } while (0)
256#define dma_cache_wback_inv(_start,_size) do { } while (0)
257
258#define flush_write_buffers()
259
260extern int iommu_bio_merge;
261#define BIO_VMERGE_BOUNDARY iommu_bio_merge
262
263/*
264 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
265 * access
266 */
267#define xlate_dev_mem_ptr(p) __va(p)
268
269/*
270 * Convert a virtual cached pointer to an uncached pointer
271 */
272#define xlate_dev_kmem_ptr(p) p
273
274#endif /* __KERNEL__ */
275
276#endif
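
A minimal usage sketch for the MMIO helpers above: ioremap_nocache() to map a
device aperture, readl()/writel() for register access, iounmap() to tear the
mapping down. All DEMO_* addresses and offsets are invented for illustration;
no real device is assumed.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/io.h>

#define DEMO_PHYS_BASE  0xfebf0000UL    /* made-up physical address */
#define DEMO_MMIO_SIZE  0x1000
#define DEMO_REG_CTRL   0x00            /* made-up register offsets */
#define DEMO_REG_STATUS 0x04

static int __init demo_mmio_init(void)
{
        void __iomem *base = ioremap_nocache(DEMO_PHYS_BASE, DEMO_MMIO_SIZE);
        u32 status;

        if (!base)
                return -ENOMEM;

        writel(1, base + DEMO_REG_CTRL);        /* poke a control register */
        status = readl(base + DEMO_REG_STATUS); /* read back device status */
        printk(KERN_INFO "demo: status=%#x\n", status);

        iounmap(base);
        return 0;
}
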
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
deleted file mode 100644
index d9f2e54324d5..000000000000
--- a/include/asm-x86_64/io_apic.h
+++ /dev/null
@@ -1,136 +0,0 @@
1#ifndef __ASM_IO_APIC_H
2#define __ASM_IO_APIC_H
3
4#include <asm/types.h>
5#include <asm/mpspec.h>
6#include <asm/apicdef.h>
7
8/*
9 * Intel IO-APIC support for SMP and UP systems.
10 *
11 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
12 */
13
14#define APIC_MISMATCH_DEBUG
15
16/*
17 * The structure of the IO-APIC:
18 */
19union IO_APIC_reg_00 {
20 u32 raw;
21 struct {
22 u32 __reserved_2 : 14,
23 LTS : 1,
24 delivery_type : 1,
25 __reserved_1 : 8,
26 ID : 8;
27 } __attribute__ ((packed)) bits;
28};
29
30union IO_APIC_reg_01 {
31 u32 raw;
32 struct {
33 u32 version : 8,
34 __reserved_2 : 7,
35 PRQ : 1,
36 entries : 8,
37 __reserved_1 : 8;
38 } __attribute__ ((packed)) bits;
39};
40
41union IO_APIC_reg_02 {
42 u32 raw;
43 struct {
44 u32 __reserved_2 : 24,
45 arbitration : 4,
46 __reserved_1 : 4;
47 } __attribute__ ((packed)) bits;
48};
49
50union IO_APIC_reg_03 {
51 u32 raw;
52 struct {
53 u32 boot_DT : 1,
54 __reserved_1 : 31;
55 } __attribute__ ((packed)) bits;
56};
57
58/*
59 * # of IO-APICs and # of IRQ routing registers
60 */
61extern int nr_ioapics;
62extern int nr_ioapic_registers[MAX_IO_APICS];
63
64enum ioapic_irq_destination_types {
65 dest_Fixed = 0,
66 dest_LowestPrio = 1,
67 dest_SMI = 2,
68 dest__reserved_1 = 3,
69 dest_NMI = 4,
70 dest_INIT = 5,
71 dest__reserved_2 = 6,
72 dest_ExtINT = 7
73};
74
75struct IO_APIC_route_entry {
76 __u32 vector : 8,
77 delivery_mode : 3, /* 000: FIXED
78 * 001: lowest prio
79 * 111: ExtINT
80 */
81 dest_mode : 1, /* 0: physical, 1: logical */
82 delivery_status : 1,
83 polarity : 1,
84 irr : 1,
85 trigger : 1, /* 0: edge, 1: level */
86 mask : 1, /* 0: enabled, 1: disabled */
87 __reserved_2 : 15;
88
89 __u32 __reserved_3 : 24,
90 dest : 8;
91} __attribute__ ((packed));
92
93/*
94 * MP-BIOS irq configuration table structures:
95 */
96
97/* I/O APIC entries */
98extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
99
100/* # of MP IRQ source entries */
101extern int mp_irq_entries;
102
103/* MP IRQ source entries */
104extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
105
106/* non-0 if default (table-less) MP configuration */
107extern int mpc_default_type;
108
109/* 1 if "noapic" boot option passed */
110extern int skip_ioapic_setup;
111
112static inline void disable_ioapic_setup(void)
113{
114 skip_ioapic_setup = 1;
115}
116
117
118/*
119 * If we use the IO-APIC for IRQ routing, disable automatic
120 * assignment of PCI IRQ's.
121 */
122#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
123
124#ifdef CONFIG_ACPI
125extern int io_apic_get_version (int ioapic);
126extern int io_apic_get_redir_entries (int ioapic);
127extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
128#endif
129
130extern int sis_apic_bug; /* dummy */
131
132void enable_NMI_through_LVT0 (void * dummy);
133
134extern spinlock_t i8259A_lock;
135
136#endif
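
As a sketch of how these register unions are consumed: read the raw 32-bit
value of IO-APIC register 1 into union IO_APIC_reg_01 and pick the bitfields
apart. io_apic_read() is an assumption here; the real accessor lives in the
io_apic code, not in this header.

static void demo_print_ioapic_info(void)
{
        union IO_APIC_reg_01 reg_01;

        reg_01.raw = io_apic_read(0, 1);        /* accessor assumed from io_apic.c */

        /* "entries" holds the number of redirection entries minus one */
        printk(KERN_INFO "IO-APIC version %#x, %d redirection entries\n",
               reg_01.bits.version, reg_01.bits.entries + 1);
}
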
diff --git a/include/asm-x86_64/ioctl.h b/include/asm-x86_64/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/include/asm-x86_64/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/include/asm-x86_64/ioctls.h b/include/asm-x86_64/ioctls.h
deleted file mode 100644
index 3fc0b15a0d7e..000000000000
--- a/include/asm-x86_64/ioctls.h
+++ /dev/null
@@ -1,86 +0,0 @@
1#ifndef __ARCH_X8664_IOCTLS_H__
2#define __ARCH_X8664_IOCTLS_H__
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCSBRK 0x5427 /* BSD compatibility */
47#define TIOCCBRK 0x5428 /* BSD compatibility */
48#define TIOCGSID 0x5429 /* Return the session ID of FD */
49#define TCGETS2 _IOR('T',0x2A, struct termios2)
50#define TCSETS2 _IOW('T',0x2B, struct termios2)
51#define TCSETSW2 _IOW('T',0x2C, struct termios2)
52#define TCSETSF2 _IOW('T',0x2D, struct termios2)
53#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
54#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
55
56#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
57#define FIOCLEX 0x5451
58#define FIOASYNC 0x5452
59#define TIOCSERCONFIG 0x5453
60#define TIOCSERGWILD 0x5454
61#define TIOCSERSWILD 0x5455
62#define TIOCGLCKTRMIOS 0x5456
63#define TIOCSLCKTRMIOS 0x5457
64#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
65#define TIOCSERGETLSR 0x5459 /* Get line status register */
66#define TIOCSERGETMULTI 0x545A /* Get multiport config */
67#define TIOCSERSETMULTI 0x545B /* Set multiport config */
68
69#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
70#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
71#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
72#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
73#define FIOQSIZE 0x5460
74
75/* Used for packet mode */
76#define TIOCPKT_DATA 0
77#define TIOCPKT_FLUSHREAD 1
78#define TIOCPKT_FLUSHWRITE 2
79#define TIOCPKT_STOP 4
80#define TIOCPKT_START 8
81#define TIOCPKT_NOSTOP 16
82#define TIOCPKT_DOSTOP 32
83
84#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
85
86#endif
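
From user space these constants are consumed through ioctl(2); a small sketch
querying the terminal window size with TIOCGWINSZ:

#include <stdio.h>
#include <sys/ioctl.h>  /* TIOCGWINSZ, struct winsize */
#include <unistd.h>

int main(void)
{
        struct winsize ws;

        if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == -1) {
                perror("TIOCGWINSZ");
                return 1;
        }
        printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
        return 0;
}
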
diff --git a/include/asm-x86_64/iommu.h b/include/asm-x86_64/iommu.h
deleted file mode 100644
index 5af471f228ee..000000000000
--- a/include/asm-x86_64/iommu.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef _ASM_X8664_IOMMU_H
2#define _ASM_X8664_IOMMU_H 1
3
4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void);
6extern int force_iommu, no_iommu;
7extern int iommu_detected;
8#ifdef CONFIG_IOMMU
9extern void gart_iommu_init(void);
10extern void gart_iommu_shutdown(void);
11extern void __init gart_parse_options(char *);
12extern void iommu_hole_init(void);
13extern int fallback_aper_order;
14extern int fallback_aper_force;
15extern int iommu_aperture;
16extern int iommu_aperture_allowed;
17extern int iommu_aperture_disabled;
18extern int fix_aperture;
19#else
20#define iommu_aperture 0
21#define iommu_aperture_allowed 0
22
23static inline void gart_iommu_shutdown(void)
24{
25}
26
27#endif
28
29#endif
diff --git a/include/asm-x86_64/ipcbuf.h b/include/asm-x86_64/ipcbuf.h
deleted file mode 100644
index 470cf85e3ba8..000000000000
--- a/include/asm-x86_64/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __x86_64_IPCBUF_H__
2#define __x86_64_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for x86_64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid32_t uid;
18 __kernel_gid32_t gid;
19 __kernel_uid32_t cuid;
20 __kernel_gid32_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned short __pad2;
25 unsigned long __unused1;
26 unsigned long __unused2;
27};
28
29#endif /* __x86_64_IPCBUF_H__ */
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
deleted file mode 100644
index a7c75ea408a8..000000000000
--- a/include/asm-x86_64/ipi.h
+++ /dev/null
@@ -1,128 +0,0 @@
1#ifndef __ASM_IPI_H
2#define __ASM_IPI_H
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC InterProcessor Interrupt code.
9 *
10 * Moved to include file by James Cleverdon from
11 * arch/x86-64/kernel/smp.c
12 *
13 * Copyrights from kernel/smp.c:
14 *
15 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
16 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
17 * (c) 2002,2003 Andi Kleen, SuSE Labs.
18 * Subject to the GNU Public License, v.2
19 */
20
21#include <asm/hw_irq.h>
22#include <asm/apic.h>
23
24/*
25 * the following functions deal with sending IPIs between CPUs.
26 *
27 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
28 */
29
30static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
31{
32 unsigned int icr = shortcut | dest;
33
34 switch (vector) {
35 default:
36 icr |= APIC_DM_FIXED | vector;
37 break;
38 case NMI_VECTOR:
39 icr |= APIC_DM_NMI;
40 break;
41 }
42 return icr;
43}
44
45static inline int __prepare_ICR2 (unsigned int mask)
46{
47 return SET_APIC_DEST_FIELD(mask);
48}
49
50static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
51{
52 /*
53 * Subtle. In the case of the 'never do double writes' workaround
54 * we have to lock out interrupts to be safe. As we don't care
55 * about the value read, we use an atomic rmw access to avoid costly
56 * cli/sti. Otherwise we use an even cheaper single atomic write
57 * to the APIC.
58 */
59 unsigned int cfg;
60
61 /*
62 * Wait for idle.
63 */
64 apic_wait_icr_idle();
65
66 /*
67 * No need to touch the target chip field
68 */
69 cfg = __prepare_ICR(shortcut, vector, dest);
70
71 /*
72 * Send the IPI. The write to APIC_ICR fires this off.
73 */
74 apic_write(APIC_ICR, cfg);
75}
76
77/*
78 * This is used to send an IPI with no shorthand notation (the destination is
79 * specified in bits 56 to 63 of the ICR).
80 */
81static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
82{
83 unsigned long cfg;
84
85 /*
86 * Wait for idle.
87 */
88 if (unlikely(vector == NMI_VECTOR))
89 safe_apic_wait_icr_idle();
90 else
91 apic_wait_icr_idle();
92
93 /*
94 * prepare target chip field
95 */
96 cfg = __prepare_ICR2(mask);
97 apic_write(APIC_ICR2, cfg);
98
99 /*
100 * program the ICR
101 */
102 cfg = __prepare_ICR(0, vector, dest);
103
104 /*
105 * Send the IPI. The write to APIC_ICR fires this off.
106 */
107 apic_write(APIC_ICR, cfg);
108}
109
110static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
111{
112 unsigned long flags;
113 unsigned long query_cpu;
114
115 /*
116 * Hack. The clustered APIC addressing mode doesn't allow us to send
117 * to an arbitrary mask, so I do a unicast to each CPU instead.
118 * - mbligh
119 */
120 local_irq_save(flags);
121 for_each_cpu_mask(query_cpu, mask) {
122 __send_IPI_dest_field(x86_cpu_to_apicid[query_cpu],
123 vector, APIC_DEST_PHYSICAL);
124 }
125 local_irq_restore(flags);
126}
127
128#endif /* __ASM_IPI_H */
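
A hedged sketch of a caller sending a vector to a set of CPUs with
send_IPI_mask_sequence(); CALL_FUNCTION_VECTOR is assumed to come from
asm/hw_irq.h, and the chosen CPUs are arbitrary.

static void demo_kick_cpus(void)
{
        cpumask_t mask = CPU_MASK_NONE;

        cpu_set(1, mask);                       /* target CPUs 1 and 2 */
        cpu_set(2, mask);
        send_IPI_mask_sequence(mask, CALL_FUNCTION_VECTOR);
}
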
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
deleted file mode 100644
index 5006c6e75656..000000000000
--- a/include/asm-x86_64/irq.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H
3
4/*
5 * linux/include/asm/irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 */
12
13#define TIMER_IRQ 0
14
15/*
16 * 16 8259A IRQ's, 208 potential APIC interrupt sources.
17 * Right now the APIC is mostly only used for SMP.
18 * 256 vectors is an architectural limit. (we can have
19 * more than 256 devices theoretically, but they will
20 * have to use shared interrupts)
21 * Since vectors 0x00-0x1f are used/reserved for the CPU,
22 * the usable vector space is 0x20-0xff (224 vectors)
23 */
24
25/*
26 * The number of vectors supported by x86_64 processors is
27 * limited to 256. For processors other than x86_64, NR_VECTORS
28 * should be changed accordingly.
29 */
30#define NR_VECTORS 256
31
32#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
33
34#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS))
35#define NR_IRQ_VECTORS NR_IRQS
36
37static __inline__ int irq_canonicalize(int irq)
38{
39 return ((irq == 2) ? 9 : irq);
40}
41
42#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
43
44#ifdef CONFIG_HOTPLUG_CPU
45#include <linux/cpumask.h>
46extern void fixup_irqs(cpumask_t map);
47#endif
48
49#define __ARCH_HAS_DO_SOFTIRQ 1
50
51#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86_64/irq_regs.h b/include/asm-x86_64/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/include/asm-x86_64/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
deleted file mode 100644
index 86e70fe23659..000000000000
--- a/include/asm-x86_64/irqflags.h
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * include/asm-x86_64/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12#include <asm/processor-flags.h>
13
14#ifndef __ASSEMBLY__
15/*
16 * Interrupt control:
17 */
18
19static inline unsigned long __raw_local_save_flags(void)
20{
21 unsigned long flags;
22
23 __asm__ __volatile__(
24 "# __raw_save_flags\n\t"
25 "pushfq ; popq %q0"
26 : "=g" (flags)
27 : /* no input */
28 : "memory"
29 );
30
31 return flags;
32}
33
34#define raw_local_save_flags(flags) \
35 do { (flags) = __raw_local_save_flags(); } while (0)
36
37static inline void raw_local_irq_restore(unsigned long flags)
38{
39 __asm__ __volatile__(
40 "pushq %0 ; popfq"
41 : /* no output */
42 :"g" (flags)
43 :"memory", "cc"
44 );
45}
46
47#ifdef CONFIG_X86_VSMP
48
49/*
50 * Interrupt control for the VSMP architecture:
51 */
52
53static inline void raw_local_irq_disable(void)
54{
55 unsigned long flags = __raw_local_save_flags();
56
57 raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
58}
59
60static inline void raw_local_irq_enable(void)
61{
62 unsigned long flags = __raw_local_save_flags();
63
64 raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
65}
66
67static inline int raw_irqs_disabled_flags(unsigned long flags)
68{
69 return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
70}
71
72#else /* CONFIG_X86_VSMP */
73
74static inline void raw_local_irq_disable(void)
75{
76 __asm__ __volatile__("cli" : : : "memory");
77}
78
79static inline void raw_local_irq_enable(void)
80{
81 __asm__ __volatile__("sti" : : : "memory");
82}
83
84static inline int raw_irqs_disabled_flags(unsigned long flags)
85{
86 return !(flags & X86_EFLAGS_IF);
87}
88
89#endif
90
91/*
92 * For spinlocks, etc.:
93 */
94
95static inline unsigned long __raw_local_irq_save(void)
96{
97 unsigned long flags = __raw_local_save_flags();
98
99 raw_local_irq_disable();
100
101 return flags;
102}
103
104#define raw_local_irq_save(flags) \
105 do { (flags) = __raw_local_irq_save(); } while (0)
106
107static inline int raw_irqs_disabled(void)
108{
109 unsigned long flags = __raw_local_save_flags();
110
111 return raw_irqs_disabled_flags(flags);
112}
113
114/*
115 * Used in the idle loop; sti enables interrupts only after the
116 * next instruction, so "sti; hlt" cannot be split by an interrupt:
117 */
118static inline void raw_safe_halt(void)
119{
120 __asm__ __volatile__("sti; hlt" : : : "memory");
121}
122
123/*
124 * Used when interrupts are already enabled or to
125 * shutdown the processor:
126 */
127static inline void halt(void)
128{
129 __asm__ __volatile__("hlt": : :"memory");
130}
131
132#else /* __ASSEMBLY__: */
133# ifdef CONFIG_TRACE_IRQFLAGS
134# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
135# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
136# else
137# define TRACE_IRQS_ON
138# define TRACE_IRQS_OFF
139# endif
140#endif
141
142#endif
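
Typical use of the raw helpers: save the flags, disable interrupts, run a
short critical section, restore. A sketch only; real kernel code normally
goes through the local_irq_save()/local_irq_restore() wrappers instead.

static void demo_critical_section(void)
{
        unsigned long flags;

        raw_local_irq_save(flags);      /* save IF, then disable interrupts */
        /* ... short, non-sleeping critical section ... */
        raw_local_irq_restore(flags);   /* restore the saved IF state */
}
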
diff --git a/include/asm-x86_64/ist.h b/include/asm-x86_64/ist.h
deleted file mode 100644
index 338857ecbc68..000000000000
--- a/include/asm-x86_64/ist.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/ist.h>
diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h
deleted file mode 100644
index 699dd6961eda..000000000000
--- a/include/asm-x86_64/k8.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _ASM_K8_H
2#define _ASM_K8_H 1
3
4#include <linux/pci.h>
5
6extern struct pci_device_id k8_nb_ids[];
7
8extern int early_is_k8_nb(u32 value);
9extern struct pci_dev **k8_northbridges;
10extern int num_k8_northbridges;
11extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void);
13
14#endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
deleted file mode 100644
index d7e2bcf49e4f..000000000000
--- a/include/asm-x86_64/kdebug.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef _X86_64_KDEBUG_H
2#define _X86_64_KDEBUG_H 1
3
4#include <linux/notifier.h>
5
6struct pt_regs;
7
8extern int register_page_fault_notifier(struct notifier_block *);
9extern int unregister_page_fault_notifier(struct notifier_block *);
10
11/* Grossly misnamed. */
12enum die_val {
13 DIE_OOPS = 1,
14 DIE_INT3,
15 DIE_DEBUG,
16 DIE_PANIC,
17 DIE_NMI,
18 DIE_DIE,
19 DIE_NMIWATCHDOG,
20 DIE_KERNELDEBUG,
21 DIE_TRAP,
22 DIE_GPF,
23 DIE_CALL,
24 DIE_NMI_IPI,
25 DIE_PAGE_FAULT,
26};
27
28extern void printk_address(unsigned long address);
29extern void die(const char *,struct pt_regs *,long);
30extern void __die(const char *,struct pt_regs *,long);
31extern void show_registers(struct pt_regs *regs);
32extern void dump_pagetable(unsigned long);
33extern unsigned long oops_begin(void);
34extern void oops_end(unsigned long);
35
36#endif
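
A sketch of hooking the page-fault notifier chain declared above, following
the notifier_block convention from linux/notifier.h; the struct die_args cast
is an assumption, since that type is not declared in this header.

static int demo_pf_notify(struct notifier_block *self,
                          unsigned long val, void *data)
{
        if (val == DIE_PAGE_FAULT)
                ;       /* inspect (struct die_args *)data here */
        return NOTIFY_DONE;
}

static struct notifier_block demo_pf_nb = {
        .notifier_call = demo_pf_notify,
};

/* in some init path: register_page_fault_notifier(&demo_pf_nb); */
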
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
deleted file mode 100644
index 738e581b67f8..000000000000
--- a/include/asm-x86_64/kexec.h
+++ /dev/null
@@ -1,94 +0,0 @@
1#ifndef _X86_64_KEXEC_H
2#define _X86_64_KEXEC_H
3
4#define PA_CONTROL_PAGE 0
5#define VA_CONTROL_PAGE 1
6#define PA_PGD 2
7#define VA_PGD 3
8#define PA_PUD_0 4
9#define VA_PUD_0 5
10#define PA_PMD_0 6
11#define VA_PMD_0 7
12#define PA_PTE_0 8
13#define VA_PTE_0 9
14#define PA_PUD_1 10
15#define VA_PUD_1 11
16#define PA_PMD_1 12
17#define VA_PMD_1 13
18#define PA_PTE_1 14
19#define VA_PTE_1 15
20#define PA_TABLE_PAGE 16
21#define PAGES_NR 17
22
23#ifndef __ASSEMBLY__
24
25#include <linux/string.h>
26
27#include <asm/page.h>
28#include <asm/ptrace.h>
29
30/*
31 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
32 * i.e. the highest page that is mapped directly into kernel memory
33 * so that kmap is not required.
34 *
35 * So far x86_64 is limited to 40 physical address bits.
36 */
37
38/* Maximum physical address we can use pages from */
39#define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL)
40/* Maximum address we can reach in physical address mode */
41#define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
42/* Maximum address we can use for the control pages */
43#define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
44
45/* Allocate one page for the pdp and the second for the code */
46#define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL)
47
48/* The native architecture */
49#define KEXEC_ARCH KEXEC_ARCH_X86_64
50
51/*
52 * Save the registers of the cpu on which the panic occurred in
53 * crash_kexec so as to save a valid sp. The registers of the other cpus
54 * will be saved in machine_crash_shutdown while shooting them down.
55 */
56
57static inline void crash_setup_regs(struct pt_regs *newregs,
58 struct pt_regs *oldregs)
59{
60 if (oldregs)
61 memcpy(newregs, oldregs, sizeof(*newregs));
62 else {
63 __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
64 __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
65 __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
66 __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
67 __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
68 __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
69 __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
70 __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
71 __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
72 __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
73 __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
74 __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
75 __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
76 __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
77 __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
78 __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
79 __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
80 __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
81 __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
82
83 newregs->rip = (unsigned long)current_text_addr();
84 }
85}
86
87NORET_TYPE void
88relocate_kernel(unsigned long indirection_page,
89 unsigned long page_list,
90 unsigned long start_address) ATTRIB_NORET;
91
92#endif /* __ASSEMBLY__ */
93
94#endif /* _X86_64_KEXEC_H */
diff --git a/include/asm-x86_64/kmap_types.h b/include/asm-x86_64/kmap_types.h
deleted file mode 100644
index 7486338c6cea..000000000000
--- a/include/asm-x86_64/kmap_types.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H
3
4enum km_type {
5 KM_BOUNCE_READ,
6 KM_SKB_SUNRPC_DATA,
7 KM_SKB_DATA_SOFTIRQ,
8 KM_USER0,
9 KM_USER1,
10 KM_BIO_SRC_IRQ,
11 KM_BIO_DST_IRQ,
12 KM_IRQ0,
13 KM_IRQ1,
14 KM_SOFTIRQ0,
15 KM_SOFTIRQ1,
16 KM_TYPE_NR
17};
18
19#endif
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
deleted file mode 100644
index 7db825403e01..000000000000
--- a/include/asm-x86_64/kprobes.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _ASM_KPROBES_H
2#define _ASM_KPROBES_H
3/*
4 * Kernel Probes (KProbes)
5 * include/asm-x86_64/kprobes.h
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * Copyright (C) IBM Corporation, 2002, 2004
22 *
23 * 2004-Oct Prasanna S Panchamukhi <prasanna@in.ibm.com> and Jim Keniston
24 * kenistoj@us.ibm.com adopted from i386.
25 */
26#include <linux/types.h>
27#include <linux/ptrace.h>
28#include <linux/percpu.h>
29
30#define __ARCH_WANT_KPROBES_INSN_SLOT
31
32struct pt_regs;
33struct kprobe;
34
35typedef u8 kprobe_opcode_t;
36#define BREAKPOINT_INSTRUCTION 0xcc
37#define MAX_INSN_SIZE 15
38#define MAX_STACK_SIZE 64
39#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
40 (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
41 ? (MAX_STACK_SIZE) \
42 : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
43
44#define ARCH_SUPPORTS_KRETPROBES
45#define ARCH_INACTIVE_KPROBE_COUNT 1
46
47void kretprobe_trampoline(void);
48extern void arch_remove_kprobe(struct kprobe *p);
49#define flush_insn_slot(p) do { } while (0)
50
51/* Architecture specific copy of original instruction*/
52struct arch_specific_insn {
53 /* copy of the original instruction */
54 kprobe_opcode_t *insn;
55};
56
57struct prev_kprobe {
58 struct kprobe *kp;
59 unsigned long status;
60 unsigned long old_rflags;
61 unsigned long saved_rflags;
62};
63
64/* per-cpu kprobe control block */
65struct kprobe_ctlblk {
66 unsigned long kprobe_status;
67 unsigned long kprobe_old_rflags;
68 unsigned long kprobe_saved_rflags;
69 long *jprobe_saved_rsp;
70 struct pt_regs jprobe_saved_regs;
71 kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
72 struct prev_kprobe prev_kprobe;
73};
74
75/* The int3/debug traps are interrupt gates for kprobes, so restore the status
76 * of IF, if necessary, before executing the original int3/debug (trap) handler.
77 */
78static inline void restore_interrupts(struct pt_regs *regs)
79{
80 if (regs->eflags & IF_MASK)
81 local_irq_enable();
82}
83
84extern int post_kprobe_handler(struct pt_regs *regs);
85extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
86extern int kprobe_handler(struct pt_regs *regs);
87
88extern int kprobe_exceptions_notify(struct notifier_block *self,
89 unsigned long val, void *data);
90#endif /* _ASM_KPROBES_H */
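
For context, a minimal kprobe client of that era; register_kprobe(), the
.symbol_name field, and the pre-handler signature come from linux/kprobes.h
rather than this header, so treat those details as assumptions.

#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "hit %s, rip=%lx\n", p->symbol_name, regs->rip);
        return 0;
}

static struct kprobe demo_kp = {
        .symbol_name = "do_fork",       /* probed symbol chosen for illustration */
        .pre_handler = demo_pre,
};

/* register_kprobe(&demo_kp) in module init; unregister_kprobe(&demo_kp) in exit */
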
diff --git a/include/asm-x86_64/ldt.h b/include/asm-x86_64/ldt.h
deleted file mode 100644
index 9ef647b890d2..000000000000
--- a/include/asm-x86_64/ldt.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * ldt.h
3 *
4 * Definitions of structures used with the modify_ldt system call.
5 */
6#ifndef _LINUX_LDT_H
7#define _LINUX_LDT_H
8
9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192
11/* The size of each LDT entry. */
12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15/* Note: on 64bit, base and limit are ignored, and you cannot set
16 DS/ES/CS to anything but the default values if you still want to do
17 syscalls. This call is therefore mostly useful for 32bit mode. */
18struct user_desc {
19 unsigned int entry_number;
20 unsigned int base_addr;
21 unsigned int limit;
22 unsigned int seg_32bit:1;
23 unsigned int contents:2;
24 unsigned int read_exec_only:1;
25 unsigned int limit_in_pages:1;
26 unsigned int seg_not_present:1;
27 unsigned int useable:1;
28 unsigned int lm:1;
29};
30
31#define MODIFY_LDT_CONTENTS_DATA 0
32#define MODIFY_LDT_CONTENTS_STACK 1
33#define MODIFY_LDT_CONTENTS_CODE 2
34
35#endif /* !__ASSEMBLY__ */
36#endif
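
A user-space sketch installing a 32-bit data segment with modify_ldt(2),
filling in the user_desc layout above; there is no glibc wrapper, so the raw
syscall is used.

#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ldt.h>    /* struct user_desc */

int main(void)
{
        struct user_desc d;

        memset(&d, 0, sizeof(d));
        d.entry_number   = 0;
        d.base_addr      = 0;           /* ignored in 64-bit mode, per the note above */
        d.limit          = 0xfffff;
        d.seg_32bit      = 1;
        d.limit_in_pages = 1;
        d.contents       = MODIFY_LDT_CONTENTS_DATA;

        /* func == 1: write one LDT entry */
        return syscall(SYS_modify_ldt, 1, &d, sizeof(d)) == 0 ? 0 : 1;
}
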
diff --git a/include/asm-x86_64/linkage.h b/include/asm-x86_64/linkage.h
deleted file mode 100644
index b5f39d0189ce..000000000000
--- a/include/asm-x86_64/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H
3
4#define __ALIGN .p2align 4,,15
5
6#endif
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
deleted file mode 100644
index e87492bb0693..000000000000
--- a/include/asm-x86_64/local.h
+++ /dev/null
@@ -1,222 +0,0 @@
1#ifndef _ARCH_X8664_LOCAL_H
2#define _ARCH_X8664_LOCAL_H
3
4#include <linux/percpu.h>
5#include <asm/atomic.h>
6
7typedef struct
8{
9 atomic_long_t a;
10} local_t;
11
12#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13
14#define local_read(l) atomic_long_read(&(l)->a)
15#define local_set(l,i) atomic_long_set(&(l)->a, (i))
16
17static inline void local_inc(local_t *l)
18{
19 __asm__ __volatile__(
20 "incq %0"
21 :"=m" (l->a.counter)
22 :"m" (l->a.counter));
23}
24
25static inline void local_dec(local_t *l)
26{
27 __asm__ __volatile__(
28 "decq %0"
29 :"=m" (l->a.counter)
30 :"m" (l->a.counter));
31}
32
33static inline void local_add(long i, local_t *l)
34{
35 __asm__ __volatile__(
36 "addq %1,%0"
37 :"=m" (l->a.counter)
38 :"ir" (i), "m" (l->a.counter));
39}
40
41static inline void local_sub(long i, local_t *l)
42{
43 __asm__ __volatile__(
44 "subq %1,%0"
45 :"=m" (l->a.counter)
46 :"ir" (i), "m" (l->a.counter));
47}
48
49/**
50 * local_sub_and_test - subtract value from variable and test result
51 * @i: integer value to subtract
52 * @l: pointer to type local_t
53 *
54 * Atomically subtracts @i from @l and returns
55 * true if the result is zero, or false for all
56 * other cases.
57 */
58static __inline__ int local_sub_and_test(long i, local_t *l)
59{
60 unsigned char c;
61
62 __asm__ __volatile__(
63 "subq %2,%0; sete %1"
64 :"=m" (l->a.counter), "=qm" (c)
65 :"ir" (i), "m" (l->a.counter) : "memory");
66 return c;
67}
68
69/**
70 * local_dec_and_test - decrement and test
71 * @l: pointer to type local_t
72 *
73 * Atomically decrements @l by 1 and
74 * returns true if the result is 0, or false for all other
75 * cases.
76 */
77static __inline__ int local_dec_and_test(local_t *l)
78{
79 unsigned char c;
80
81 __asm__ __volatile__(
82 "decq %0; sete %1"
83 :"=m" (l->a.counter), "=qm" (c)
84 :"m" (l->a.counter) : "memory");
85 return c != 0;
86}
87
88/**
89 * local_inc_and_test - increment and test
90 * @l: pointer to type local_t
91 *
92 * Atomically increments @l by 1
93 * and returns true if the result is zero, or false for all
94 * other cases.
95 */
96static __inline__ int local_inc_and_test(local_t *l)
97{
98 unsigned char c;
99
100 __asm__ __volatile__(
101 "incq %0; sete %1"
102 :"=m" (l->a.counter), "=qm" (c)
103 :"m" (l->a.counter) : "memory");
104 return c != 0;
105}
106
107/**
108 * local_add_negative - add and test if negative
109 * @i: integer value to add
110 * @l: pointer to type local_t
111 *
112 * Atomically adds @i to @l and returns true
113 * if the result is negative, or false when
114 * result is greater than or equal to zero.
115 */
116static __inline__ int local_add_negative(long i, local_t *l)
117{
118 unsigned char c;
119
120 __asm__ __volatile__(
121 "addq %2,%0; sets %1"
122 :"=m" (l->a.counter), "=qm" (c)
123 :"ir" (i), "m" (l->a.counter) : "memory");
124 return c;
125}
126
127/**
128 * local_add_return - add and return
129 * @i: integer value to add
130 * @l: pointer to type local_t
131 *
132 * Atomically adds @i to @l and returns @i + @l
133 */
134static __inline__ long local_add_return(long i, local_t *l)
135{
136 long __i = i;
137 __asm__ __volatile__(
138 "xaddq %0, %1;"
139 :"+r" (i), "+m" (l->a.counter)
140 : : "memory");
141 return i + __i;
142}
143
144static __inline__ long local_sub_return(long i, local_t *l)
145{
146 return local_add_return(-i,l);
147}
148
149#define local_inc_return(l) (local_add_return(1,l))
150#define local_dec_return(l) (local_sub_return(1,l))
151
152#define local_cmpxchg(l, o, n) \
153 (cmpxchg_local(&((l)->a.counter), (o), (n)))
154/* Always has a lock prefix */
155#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
156
157/**
158 * local_add_unless - add unless the number is a given value
159 * @l: pointer of type local_t
160 * @a: the amount to add to l...
161 * @u: ...unless l is equal to u.
162 *
163 * Atomically adds @a to @l, so long as it was not @u.
164 * Returns non-zero if @l was not @u, and zero otherwise.
165 */
166#define local_add_unless(l, a, u) \
167({ \
168 long c, old; \
169 c = local_read(l); \
170 for (;;) { \
171 if (unlikely(c == (u))) \
172 break; \
173 old = local_cmpxchg((l), c, c + (a)); \
174 if (likely(old == c)) \
175 break; \
176 c = old; \
177 } \
178 c != (u); \
179})
180#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
181
182/* On x86-64 these are better than the atomic variants on SMP kernels
183 because they don't use a lock prefix. */
184#define __local_inc(l) local_inc(l)
185#define __local_dec(l) local_dec(l)
186#define __local_add(i,l) local_add((i),(l))
187#define __local_sub(i,l) local_sub((i),(l))
188
189/* Use these for per-cpu local_t variables: on some archs they are
190 * much more efficient than these naive implementations. Note they take
191 * a variable, not an address.
192 *
193 * This could be done better if we moved the per cpu data directly
194 * after GS.
195 */
196
197/* Need to disable preemption for the cpu local counters, otherwise we could
198 still access a variable of a previous CPU in a non-atomic way. */
199#define cpu_local_wrap_v(l) \
200 ({ local_t res__; \
201 preempt_disable(); \
202 res__ = (l); \
203 preempt_enable(); \
204 res__; })
205#define cpu_local_wrap(l) \
206 ({ preempt_disable(); \
207 l; \
208 preempt_enable(); }) \
209
210#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
211#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
212#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
213#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
214#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
215#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
216
217#define __cpu_local_inc(l) cpu_local_inc(l)
218#define __cpu_local_dec(l) cpu_local_dec(l)
219#define __cpu_local_add(i, l) cpu_local_add((i), (l))
220#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
221
222#endif /* _ARCH_X8664_LOCAL_H */
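
A usage sketch for a per-cpu event counter built on local_t, cheap on x86-64
precisely because, as the comment above notes, no lock prefix is needed.

static DEFINE_PER_CPU(local_t, demo_events) = LOCAL_INIT(0);

static void demo_count_event(void)
{
        /* cpu_local_inc() does the preempt_disable()/enable() dance itself */
        cpu_local_inc(demo_events);
}

static long demo_read_cpu(int cpu)
{
        return local_read(&per_cpu(demo_events, cpu));
}
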
diff --git a/include/asm-x86_64/mach_apic.h b/include/asm-x86_64/mach_apic.h
deleted file mode 100644
index 7b7115a0c1c9..000000000000
--- a/include/asm-x86_64/mach_apic.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC sub-arch defines.
9 *
10 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
11 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
12 * James Cleverdon.
13 */
14
15#include <asm/genapic.h>
16
17#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
18#define INT_DEST_MODE (genapic->int_dest_mode)
19#define TARGET_CPUS (genapic->target_cpus())
20#define vector_allocation_domain (genapic->vector_allocation_domain)
21#define apic_id_registered (genapic->apic_id_registered)
22#define init_apic_ldr (genapic->init_apic_ldr)
23#define send_IPI_mask (genapic->send_IPI_mask)
24#define send_IPI_allbutself (genapic->send_IPI_allbutself)
25#define send_IPI_all (genapic->send_IPI_all)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define phys_pkg_id (genapic->phys_pkg_id)
28
29#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86_64/mc146818rtc.h b/include/asm-x86_64/mc146818rtc.h
deleted file mode 100644
index d6e3009430c1..000000000000
--- a/include/asm-x86_64/mc146818rtc.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef _ASM_MC146818RTC_H
5#define _ASM_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#define RTC_PORT(x) (0x70 + (x))
11#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
12#endif
13
14/*
15 * All machines supported so far access the RTC index register via
16 * an ISA port access, but the way to access the data register differs ...
17 */
18#define CMOS_READ(addr) ({ \
19outb_p((addr),RTC_PORT(0)); \
20inb_p(RTC_PORT(1)); \
21})
22#define CMOS_WRITE(val, addr) ({ \
23outb_p((addr),RTC_PORT(0)); \
24outb_p((val),RTC_PORT(1)); \
25})
26
27#define RTC_IRQ 8
28
29#endif /* _ASM_MC146818RTC_H */
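
For instance, reading the seconds byte out of CMOS under rtc_lock; RTC_SECONDS
and rtc_lock are assumed to come from linux/mc146818rtc.h, and BCD_TO_BIN from
linux/bcd.h.

static unsigned char demo_read_rtc_seconds(void)
{
        unsigned long flags;
        unsigned char sec;

        spin_lock_irqsave(&rtc_lock, flags);
        sec = CMOS_READ(RTC_SECONDS);
        spin_unlock_irqrestore(&rtc_lock, flags);

        if (RTC_ALWAYS_BCD)
                BCD_TO_BIN(sec);        /* the RTC stores the value in BCD */
        return sec;
}
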
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
deleted file mode 100644
index 7bc030a1996d..000000000000
--- a/include/asm-x86_64/mce.h
+++ /dev/null
@@ -1,115 +0,0 @@
1#ifndef _ASM_MCE_H
2#define _ASM_MCE_H 1
3
4#include <asm/ioctls.h>
5#include <asm/types.h>
6
7/*
8 * Machine Check support for x86
9 */
10
11#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
12
13#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
14#define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */
15#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
16
17#define MCI_STATUS_VAL (1UL<<63) /* valid error */
18#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
19#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
20#define MCI_STATUS_EN (1UL<<60) /* error enabled */
21#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
22#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
23#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
24
25/* Fields are zero when not available */
26struct mce {
27 __u64 status;
28 __u64 misc;
29 __u64 addr;
30 __u64 mcgstatus;
31 __u64 rip;
32 __u64 tsc; /* cpu time stamp counter */
33 __u64 res1; /* for future extension */
34 __u64 res2; /* ditto */
35 __u8 cs; /* code segment */
36 __u8 bank; /* machine check bank */
37 __u8 cpu; /* cpu that raised the error */
38 __u8 finished; /* entry is valid */
39 __u32 pad;
40};
41
42/*
43 * This structure contains all data related to the MCE log.
44 * It also carries a signature to make it easier to find with external debugging tools.
45 * Each entry is only valid when its finished flag is set.
46 */
47
48#define MCE_LOG_LEN 32
49
50struct mce_log {
51 char signature[12]; /* "MACHINECHECK" */
52 unsigned len; /* = MCE_LOG_LEN */
53 unsigned next;
54 unsigned flags;
55 unsigned pad0;
56 struct mce entry[MCE_LOG_LEN];
57};
58
59#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
60
61#define MCE_LOG_SIGNATURE "MACHINECHECK"
62
63#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
64#define MCE_GET_LOG_LEN _IOR('M', 2, int)
65#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
66
67/* Software defined banks */
68#define MCE_EXTENDED_BANK 128
69#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
70
71#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
72#define K8_MCE_THRESHOLD_BANK_0 (K8_MCE_THRESHOLD_BASE + 0 * 9)
73#define K8_MCE_THRESHOLD_BANK_1 (K8_MCE_THRESHOLD_BASE + 1 * 9)
74#define K8_MCE_THRESHOLD_BANK_2 (K8_MCE_THRESHOLD_BASE + 2 * 9)
75#define K8_MCE_THRESHOLD_BANK_3 (K8_MCE_THRESHOLD_BASE + 3 * 9)
76#define K8_MCE_THRESHOLD_BANK_4 (K8_MCE_THRESHOLD_BASE + 4 * 9)
77#define K8_MCE_THRESHOLD_BANK_5 (K8_MCE_THRESHOLD_BASE + 5 * 9)
78#define K8_MCE_THRESHOLD_DRAM_ECC (K8_MCE_THRESHOLD_BANK_4 + 0)
79
80#ifdef __KERNEL__
81#include <asm/atomic.h>
82
83void mce_log(struct mce *m);
84DECLARE_PER_CPU(struct sys_device, device_mce);
85
86#ifdef CONFIG_X86_MCE_INTEL
87void mce_intel_feature_init(struct cpuinfo_x86 *c);
88#else
89static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
90{
91}
92#endif
93
94#ifdef CONFIG_X86_MCE_AMD
95void mce_amd_feature_init(struct cpuinfo_x86 *c);
96#else
97static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
98{
99}
100#endif
101
102void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
103
104extern atomic_t mce_entry;
105
106extern void do_machine_check(struct pt_regs *, long);
107
108extern int mce_notify_user(void);
109
110extern void stop_mce(void);
111extern void restart_mce(void);
112
113#endif
114
115#endif
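
User space consumes these records through a character device; a hedged sketch
of the read loop the mcelog daemon of that era used, assuming the device node
is /dev/mcelog and that read() returns whole struct mce records.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/mce.h>    /* struct mce, MCE_GET_RECORD_LEN */

int main(void)
{
        struct mce m;
        int len = 0;
        int fd = open("/dev/mcelog", O_RDONLY);

        if (fd < 0 || ioctl(fd, MCE_GET_RECORD_LEN, &len) < 0)
                return 1;

        while (read(fd, &m, len) == len)
                printf("bank %u status %#llx addr %#llx\n",
                       m.bank, (unsigned long long)m.status,
                       (unsigned long long)m.addr);
        return close(fd) ? 1 : 0;
}
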
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
deleted file mode 100644
index dd5cb0534d37..000000000000
--- a/include/asm-x86_64/mman.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef __X8664_MMAN_H__
2#define __X8664_MMAN_H__
3
4#include <asm-generic/mman.h>
5
6#define MAP_32BIT 0x40 /* only give out 32bit addresses */
7
8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
10#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
11#define MAP_LOCKED 0x2000 /* pages are locked */
12#define MAP_NORESERVE 0x4000 /* don't check for reservations */
13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
15
16#define MCL_CURRENT 1 /* lock all current mappings */
17#define MCL_FUTURE 2 /* lock all future mappings */
18
19#endif
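
MAP_32BIT is the one x86-64-specific flag here; a user-space sketch requesting
an anonymous mapping at a 32-bit-addressable location:

#define _GNU_SOURCE     /* for MAP_32BIT and MAP_ANONYMOUS */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        printf("mapped at %p (fits in 32 bits)\n", p);
        return munmap(p, 4096);
}
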
diff --git a/include/asm-x86_64/mmsegment.h b/include/asm-x86_64/mmsegment.h
deleted file mode 100644
index d3f80c996330..000000000000
--- a/include/asm-x86_64/mmsegment.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _ASM_MMSEGMENT_H
2#define _ASM_MMSEGMENT_H 1
3
4typedef struct {
5 unsigned long seg;
6} mm_segment_t;
7
8#endif
diff --git a/include/asm-x86_64/mmu.h b/include/asm-x86_64/mmu.h
deleted file mode 100644
index d2cd4a9d984d..000000000000
--- a/include/asm-x86_64/mmu.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __x86_64_MMU_H
2#define __x86_64_MMU_H
3
4#include <linux/spinlock.h>
5#include <asm/semaphore.h>
6
7/*
8 * The x86_64 doesn't have an mmu context, but
9 * we put the segment information here.
10 *
11 * cpu_vm_mask is used to optimize ldt flushing.
12 */
13typedef struct {
14 void *ldt;
15 rwlock_t ldtlock;
16 int size;
17 struct semaphore sem;
18 void *vdso;
19} mm_context_t;
20
21#endif
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
deleted file mode 100644
index 0cce83a78378..000000000000
--- a/include/asm-x86_64/mmu_context.h
+++ /dev/null
@@ -1,74 +0,0 @@
1#ifndef __X86_64_MMU_CONTEXT_H
2#define __X86_64_MMU_CONTEXT_H
3
4#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/pda.h>
8#include <asm/pgtable.h>
9#include <asm/tlbflush.h>
10#include <asm-generic/mm_hooks.h>
11
12/*
13 * possibly do the LDT unload here?
14 */
15int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
16void destroy_context(struct mm_struct *mm);
17
18static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
19{
20#ifdef CONFIG_SMP
21 if (read_pda(mmu_state) == TLBSTATE_OK)
22 write_pda(mmu_state, TLBSTATE_LAZY);
23#endif
24}
25
26static inline void load_cr3(pgd_t *pgd)
27{
28 asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
29}
30
31static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
32 struct task_struct *tsk)
33{
34 unsigned cpu = smp_processor_id();
35 if (likely(prev != next)) {
36 /* stop flush ipis for the previous mm */
37 cpu_clear(cpu, prev->cpu_vm_mask);
38#ifdef CONFIG_SMP
39 write_pda(mmu_state, TLBSTATE_OK);
40 write_pda(active_mm, next);
41#endif
42 cpu_set(cpu, next->cpu_vm_mask);
43 load_cr3(next->pgd);
44
45 if (unlikely(next->context.ldt != prev->context.ldt))
46 load_LDT_nolock(&next->context, cpu);
47 }
48#ifdef CONFIG_SMP
49 else {
50 write_pda(mmu_state, TLBSTATE_OK);
51 if (read_pda(active_mm) != next)
52 out_of_line_bug();
53 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
54 /* We were in lazy tlb mode and leave_mm disabled
55 * tlb flush IPI delivery. We must reload CR3
56 * to make sure to use no freed page tables.
57 */
58 load_cr3(next->pgd);
59 load_LDT_nolock(&next->context, cpu);
60 }
61 }
62#endif
63}
64
65#define deactivate_mm(tsk,mm) do { \
66 load_gs_index(0); \
67 asm volatile("movl %0,%%fs"::"r"(0)); \
68} while(0)
69
70#define activate_mm(prev, next) \
71 switch_mm((prev),(next),NULL)
72
73
74#endif
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
deleted file mode 100644
index 19a89377b123..000000000000
--- a/include/asm-x86_64/mmzone.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/* K8 NUMA support */
2/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
3/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
4#ifndef _ASM_X86_64_MMZONE_H
5#define _ASM_X86_64_MMZONE_H 1
6
7
8#ifdef CONFIG_NUMA
9
10#define VIRTUAL_BUG_ON(x)
11
12#include <asm/smp.h>
13
14/* Simple perfect hash to map physical addresses to node numbers */
15struct memnode {
16 int shift;
17 unsigned int mapsize;
18 u8 *map;
19 u8 embedded_map[64-16];
20} ____cacheline_aligned; /* total size = 64 bytes */
21extern struct memnode memnode;
22#define memnode_shift memnode.shift
23#define memnodemap memnode.map
24#define memnodemapsize memnode.mapsize
25
26extern struct pglist_data *node_data[];
27
28static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
29{
30 unsigned nid;
31 VIRTUAL_BUG_ON(!memnodemap);
32 VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize);
33 nid = memnodemap[addr >> memnode_shift];
34 VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
35 return nid;
36}
37
38#define NODE_DATA(nid) (node_data[nid])
39
40#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
41#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
42 NODE_DATA(nid)->node_spanned_pages)
43
44#ifdef CONFIG_DISCONTIGMEM
45#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
46
47extern int pfn_valid(unsigned long pfn);
48#endif
49
50#ifdef CONFIG_NUMA_EMU
51#define FAKE_NODE_MIN_SIZE (64*1024*1024)
52#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL))
53#endif
54
55#endif
56#endif
diff --git a/include/asm-x86_64/module.h b/include/asm-x86_64/module.h
deleted file mode 100644
index 67f8f69fa7b1..000000000000
--- a/include/asm-x86_64/module.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _ASM_X8664_MODULE_H
2#define _ASM_X8664_MODULE_H
3
4struct mod_arch_specific {};
5
6#define Elf_Shdr Elf64_Shdr
7#define Elf_Sym Elf64_Sym
8#define Elf_Ehdr Elf64_Ehdr
9
10#endif
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
deleted file mode 100644
index 017fddb61dc5..000000000000
--- a/include/asm-x86_64/mpspec.h
+++ /dev/null
@@ -1,233 +0,0 @@
1#ifndef __ASM_MPSPEC_H
2#define __ASM_MPSPEC_H
3
4/*
5 * Structure definitions for SMP machines following the
6 * Intel Multiprocessing Specification 1.1 and 1.4.
7 */
8
9/*
10 * This tag identifies where the SMP configuration
11 * information is.
12 */
13
14#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
15
16/*
17 * A maximum of 255 APICs with the current APIC ID architecture.
18 */
19#define MAX_APICS 255
20
21struct intel_mp_floating
22{
23 char mpf_signature[4]; /* "_MP_" */
24 unsigned int mpf_physptr; /* Configuration table address */
25 unsigned char mpf_length; /* Our length (paragraphs) */
26 unsigned char mpf_specification;/* Specification version */
27 unsigned char mpf_checksum; /* Checksum (makes sum 0) */
28 unsigned char mpf_feature1; /* Standard or configuration ? */
29 unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
30 unsigned char mpf_feature3; /* Unused (0) */
31 unsigned char mpf_feature4; /* Unused (0) */
32 unsigned char mpf_feature5; /* Unused (0) */
33};
34
35struct mp_config_table
36{
37 char mpc_signature[4];
38#define MPC_SIGNATURE "PCMP"
39 unsigned short mpc_length; /* Size of table */
40 char mpc_spec; /* 0x01 */
41 char mpc_checksum;
42 char mpc_oem[8];
43 char mpc_productid[12];
44 unsigned int mpc_oemptr; /* 0 if not present */
45 unsigned short mpc_oemsize; /* 0 if not present */
46 unsigned short mpc_oemcount;
47 unsigned int mpc_lapic; /* APIC address */
48 unsigned int reserved;
49};
50
51/* Followed by entries */
52
53#define MP_PROCESSOR 0
54#define MP_BUS 1
55#define MP_IOAPIC 2
56#define MP_INTSRC 3
57#define MP_LINTSRC 4
58
59struct mpc_config_processor
60{
61 unsigned char mpc_type;
62 unsigned char mpc_apicid; /* Local APIC number */
63 unsigned char mpc_apicver; /* Its version */
64 unsigned char mpc_cpuflag;
65#define CPU_ENABLED 1 /* Processor is available */
66#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
67 unsigned int mpc_cpufeature;
68#define CPU_STEPPING_MASK 0x0F
69#define CPU_MODEL_MASK 0xF0
70#define CPU_FAMILY_MASK 0xF00
71 unsigned int mpc_featureflag; /* CPUID feature value */
72 unsigned int mpc_reserved[2];
73};
74
75struct mpc_config_bus
76{
77 unsigned char mpc_type;
78 unsigned char mpc_busid;
79 unsigned char mpc_bustype[6];
80};
81
82/* List of Bus Type string values, Intel MP Spec. */
83#define BUSTYPE_EISA "EISA"
84#define BUSTYPE_ISA "ISA"
85#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
86#define BUSTYPE_MCA "MCA"
87#define BUSTYPE_VL "VL" /* Local bus */
88#define BUSTYPE_PCI "PCI"
89#define BUSTYPE_PCMCIA "PCMCIA"
90#define BUSTYPE_CBUS "CBUS"
91#define BUSTYPE_CBUSII "CBUSII"
92#define BUSTYPE_FUTURE "FUTURE"
93#define BUSTYPE_MBI "MBI"
94#define BUSTYPE_MBII "MBII"
95#define BUSTYPE_MPI "MPI"
96#define BUSTYPE_MPSA "MPSA"
97#define BUSTYPE_NUBUS "NUBUS"
98#define BUSTYPE_TC "TC"
99#define BUSTYPE_VME "VME"
100#define BUSTYPE_XPRESS "XPRESS"
101
102struct mpc_config_ioapic
103{
104 unsigned char mpc_type;
105 unsigned char mpc_apicid;
106 unsigned char mpc_apicver;
107 unsigned char mpc_flags;
108#define MPC_APIC_USABLE 0x01
109 unsigned int mpc_apicaddr;
110};
111
112struct mpc_config_intsrc
113{
114 unsigned char mpc_type;
115 unsigned char mpc_irqtype;
116 unsigned short mpc_irqflag;
117 unsigned char mpc_srcbus;
118 unsigned char mpc_srcbusirq;
119 unsigned char mpc_dstapic;
120 unsigned char mpc_dstirq;
121};
122
123enum mp_irq_source_types {
124 mp_INT = 0,
125 mp_NMI = 1,
126 mp_SMI = 2,
127 mp_ExtINT = 3
128};
129
130#define MP_IRQDIR_DEFAULT 0
131#define MP_IRQDIR_HIGH 1
132#define MP_IRQDIR_LOW 3
133
134
135struct mpc_config_lintsrc
136{
137 unsigned char mpc_type;
138 unsigned char mpc_irqtype;
139 unsigned short mpc_irqflag;
140 unsigned char mpc_srcbusid;
141 unsigned char mpc_srcbusirq;
142 unsigned char mpc_destapic;
143#define MP_APIC_ALL 0xFF
144 unsigned char mpc_destapiclint;
145};
146
147/*
148 * Default configurations
149 *
150 * 1 2 CPU ISA 82489DX
151 * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
152 * 3 2 CPU EISA 82489DX
153 * 4 2 CPU MCA 82489DX
154 * 5 2 CPU ISA+PCI
155 * 6 2 CPU EISA+PCI
156 * 7 2 CPU MCA+PCI
157 */
158
159#define MAX_MP_BUSSES 256
160/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
161#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
162extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
163extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
164
165extern unsigned int boot_cpu_physical_apicid;
166extern int smp_found_config;
167extern void find_smp_config (void);
168extern void get_smp_config (void);
169extern int nr_ioapics;
170extern unsigned char apic_version [MAX_APICS];
171extern int mp_irq_entries;
172extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
173extern int mpc_default_type;
174extern unsigned long mp_lapic_addr;
175
176#ifdef CONFIG_ACPI
177extern void mp_register_lapic (u8 id, u8 enabled);
178extern void mp_register_lapic_address (u64 address);
179
180extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
181extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
182extern void mp_config_acpi_legacy_irqs (void);
183extern int mp_register_gsi (u32 gsi, int triggering, int polarity);
184#endif
185
186extern int using_apic_timer;
187
188#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
189
190struct physid_mask
191{
192 unsigned long mask[PHYSID_ARRAY_SIZE];
193};
194
195typedef struct physid_mask physid_mask_t;
196
197#define physid_set(physid, map) set_bit(physid, (map).mask)
198#define physid_clear(physid, map) clear_bit(physid, (map).mask)
199#define physid_isset(physid, map) test_bit(physid, (map).mask)
200#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
201
202#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
203#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
204#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
205#define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS)
206#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
207#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
208#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
209#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
210#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
211#define physids_coerce(map) ((map).mask[0])
212
213#define physids_promote(physids) \
214 ({ \
215 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
216 __physid_mask.mask[0] = physids; \
217 __physid_mask; \
218 })
219
220#define physid_mask_of_physid(physid) \
221 ({ \
222 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
223 physid_set(physid, __physid_mask); \
224 __physid_mask; \
225 })
226
227#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
228#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
229
230extern physid_mask_t phys_cpu_present_map;
231
232#endif
233
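
For reference, the physid accessors defined above compose like this. A minimal
sketch assuming kernel context (printk, set_bit and the bitmap helpers in
scope); the APIC IDs are made up for illustration:

    /* Hypothetical example: track two present local APICs. */
    static void physid_example(void)
    {
            physid_mask_t present = PHYSID_MASK_NONE;    /* start empty */
            physid_mask_t low = physids_promote(0x3UL);  /* APIC IDs 0 and 1 */

            physid_set(0, present);          /* boot CPU's APIC ID */
            physid_set(3, present);          /* a second, made-up APIC ID */

            if (physid_isset(3, present))
                    printk("APIC 3 is present\n");

            physids_and(present, present, low);  /* intersect: only ID 0 remains */
    }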
diff --git a/include/asm-x86_64/msgbuf.h b/include/asm-x86_64/msgbuf.h
deleted file mode 100644
index cd6f95dd54da..000000000000
--- a/include/asm-x86_64/msgbuf.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef _X8664_MSGBUF_H
2#define _X8664_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for x86-64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct msqid64_ds {
14 struct ipc64_perm msg_perm;
15 __kernel_time_t msg_stime; /* last msgsnd time */
16 __kernel_time_t msg_rtime; /* last msgrcv time */
17 __kernel_time_t msg_ctime; /* last change time */
18 unsigned long msg_cbytes; /* current number of bytes on queue */
19 unsigned long msg_qnum; /* number of messages in queue */
20 unsigned long msg_qbytes; /* max number of bytes on queue */
21 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
22 __kernel_pid_t msg_lrpid; /* last receive pid */
23 unsigned long __unused4;
24 unsigned long __unused5;
25};
26
27#endif
diff --git a/include/asm-x86_64/msidef.h b/include/asm-x86_64/msidef.h
deleted file mode 100644
index 083ad5827e48..000000000000
--- a/include/asm-x86_64/msidef.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/msidef.h>
diff --git a/include/asm-x86_64/msr-index.h b/include/asm-x86_64/msr-index.h
deleted file mode 100644
index d77a63f1ddf2..000000000000
--- a/include/asm-x86_64/msr-index.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/msr-index.h>
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
deleted file mode 100644
index d5c55b80da54..000000000000
--- a/include/asm-x86_64/msr.h
+++ /dev/null
@@ -1,187 +0,0 @@
1#ifndef X86_64_MSR_H
2#define X86_64_MSR_H 1
3
4#include <asm/msr-index.h>
5
6#ifndef __ASSEMBLY__
7#include <linux/errno.h>
8/*
9 * Access to machine-specific registers (available only on 586 and later).
10 * Note: the rd* operations modify the parameters directly (without using
11 * pointer indirection), which allows gcc to optimize better.
12 */
13
14#define rdmsr(msr,val1,val2) \
15 __asm__ __volatile__("rdmsr" \
16 : "=a" (val1), "=d" (val2) \
17 : "c" (msr))
18
19
20#define rdmsrl(msr,val) do { unsigned long a__,b__; \
21 __asm__ __volatile__("rdmsr" \
22 : "=a" (a__), "=d" (b__) \
23 : "c" (msr)); \
24 val = a__ | (b__<<32); \
25} while(0)
26
27#define wrmsr(msr,val1,val2) \
28 __asm__ __volatile__("wrmsr" \
29 : /* no outputs */ \
30 : "c" (msr), "a" (val1), "d" (val2))
31
32#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
33
34/* wrmsr with exception handling */
35#define wrmsr_safe(msr,a,b) ({ int ret__; \
36 asm volatile("2: wrmsr ; xorl %0,%0\n" \
37 "1:\n\t" \
38 ".section .fixup,\"ax\"\n\t" \
39 "3: movl %4,%0 ; jmp 1b\n\t" \
40 ".previous\n\t" \
41 ".section __ex_table,\"a\"\n" \
42 " .align 8\n\t" \
43 " .quad 2b,3b\n\t" \
44 ".previous" \
45 : "=a" (ret__) \
46 : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
47 ret__; })
48
49#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
50
51#define rdmsr_safe(msr,a,b) \
52 ({ int ret__; \
53 asm volatile ("1: rdmsr\n" \
54 "2:\n" \
55 ".section .fixup,\"ax\"\n" \
56 "3: movl %4,%0\n" \
57 " jmp 2b\n" \
58 ".previous\n" \
59 ".section __ex_table,\"a\"\n" \
60 " .align 8\n" \
61 " .quad 1b,3b\n" \
62 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
63 :"c"(msr), "i"(-EIO), "0"(0)); \
64 ret__; })
65
66#define rdtsc(low,high) \
67 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
68
69#define rdtscl(low) \
70 __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
71
72#define rdtscp(low,high,aux) \
73 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
74
75#define rdtscll(val) do { \
76 unsigned int __a,__d; \
77 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
78 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
79} while(0)
80
81#define rdtscpll(val, aux) do { \
82 unsigned long __a, __d; \
83 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
84 (val) = (__d << 32) | __a; \
85} while (0)
86
87#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
88
89#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
90
91#define rdpmc(counter,low,high) \
92 __asm__ __volatile__("rdpmc" \
93 : "=a" (low), "=d" (high) \
94 : "c" (counter))
95
96static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
97 unsigned int *ecx, unsigned int *edx)
98{
99 __asm__("cpuid"
100 : "=a" (*eax),
101 "=b" (*ebx),
102 "=c" (*ecx),
103 "=d" (*edx)
104 : "0" (op));
105}
106
107/* Some CPUID calls want 'count' to be placed in ecx */
108static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
109 int *edx)
110{
111 __asm__("cpuid"
112 : "=a" (*eax),
113 "=b" (*ebx),
114 "=c" (*ecx),
115 "=d" (*edx)
116 : "0" (op), "c" (count));
117}
118
119/*
120 * CPUID functions returning a single datum
121 */
122static inline unsigned int cpuid_eax(unsigned int op)
123{
124 unsigned int eax;
125
126 __asm__("cpuid"
127 : "=a" (eax)
128 : "0" (op)
129 : "bx", "cx", "dx");
130 return eax;
131}
132static inline unsigned int cpuid_ebx(unsigned int op)
133{
134 unsigned int eax, ebx;
135
136 __asm__("cpuid"
137 : "=a" (eax), "=b" (ebx)
138 : "0" (op)
139 : "cx", "dx" );
140 return ebx;
141}
142static inline unsigned int cpuid_ecx(unsigned int op)
143{
144 unsigned int eax, ecx;
145
146 __asm__("cpuid"
147 : "=a" (eax), "=c" (ecx)
148 : "0" (op)
149 : "bx", "dx" );
150 return ecx;
151}
152static inline unsigned int cpuid_edx(unsigned int op)
153{
154 unsigned int eax, edx;
155
156 __asm__("cpuid"
157 : "=a" (eax), "=d" (edx)
158 : "0" (op)
159 : "bx", "cx");
160 return edx;
161}
162
163#ifdef CONFIG_SMP
164void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
165void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
166int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
167int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
168#else /* CONFIG_SMP */
169static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
170{
171 rdmsr(msr_no, *l, *h);
172}
173static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
174{
175 wrmsr(msr_no, l, h);
176}
177static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
178{
179 return rdmsr_safe(msr_no, l, h);
180}
181static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
182{
183 return wrmsr_safe(msr_no, l, h);
184}
185#endif /* CONFIG_SMP */
186#endif /* __ASSEMBLY__ */
187#endif /* X86_64_MSR_H */
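
For reference, the MSR and TSC read macros in the header removed above were
used like this. A minimal sketch assuming kernel context; IA32_APIC_BASE
(0x1b) is a real MSR number, the printing is illustrative:

    /* Hypothetical example: read the TSC and one MSR two ways. */
    static void msr_example(void)
    {
            unsigned long tsc, apic_base;
            unsigned int lo, hi;

            rdtscll(tsc);               /* full 64-bit timestamp counter */
            rdmsrl(0x1b, apic_base);    /* IA32_APIC_BASE as one 64-bit value */
            rdmsr(0x1b, lo, hi);        /* same MSR, split into EDX:EAX halves */

            printk("tsc=%lx apic_base=%lx (lo=%x hi=%x)\n",
                   tsc, apic_base, lo, hi);
    }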
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
deleted file mode 100644
index b557c486bef8..000000000000
--- a/include/asm-x86_64/mtrr.h
+++ /dev/null
@@ -1,152 +0,0 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef _LINUX_MTRR_H
24#define _LINUX_MTRR_H
25
26#include <linux/ioctl.h>
27
28#define MTRR_IOCTL_BASE 'M'
29
30struct mtrr_sentry
31{
32 unsigned long base; /* Base address */
33 unsigned int size; /* Size of region */
34 unsigned int type; /* Type of region */
35};
36
37/* Warning: this structure has a different field order on x86-64
38 than on i386. The 32-bit emulation code takes care of that, but
39 you need to use this layout for 64-bit code, otherwise your X
40 server will break. */
41struct mtrr_gentry
42{
43 unsigned long base; /* Base address */
44 unsigned int size; /* Size of region */
45 unsigned int regnum; /* Register number */
46 unsigned int type; /* Type of region */
47};
48
49/* These are the various ioctls */
50#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
51#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
52#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
53#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
54#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
55#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
56#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
57#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
58#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
59#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
60
61/* These are the region types */
62#define MTRR_TYPE_UNCACHABLE 0
63#define MTRR_TYPE_WRCOMB 1
64/*#define MTRR_TYPE_ 2*/
65/*#define MTRR_TYPE_ 3*/
66#define MTRR_TYPE_WRTHROUGH 4
67#define MTRR_TYPE_WRPROT 5
68#define MTRR_TYPE_WRBACK 6
69#define MTRR_NUM_TYPES 7
70
71#ifdef __KERNEL__
72
73/* The following functions are for use by other drivers */
74# ifdef CONFIG_MTRR
75extern int mtrr_add (unsigned long base, unsigned long size,
76 unsigned int type, char increment);
77extern int mtrr_add_page (unsigned long base, unsigned long size,
78 unsigned int type, char increment);
79extern int mtrr_del (int reg, unsigned long base, unsigned long size);
80extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
81# else
82static __inline__ int mtrr_add (unsigned long base, unsigned long size,
83 unsigned int type, char increment)
84{
85 return -ENODEV;
86}
87static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
88 unsigned int type, char increment)
89{
90 return -ENODEV;
91}
92static __inline__ int mtrr_del (int reg, unsigned long base,
93 unsigned long size)
94{
95 return -ENODEV;
96}
97static __inline__ int mtrr_del_page (int reg, unsigned long base,
98 unsigned long size)
99{
100 return -ENODEV;
101}
102
103#endif /* CONFIG_MTRR */
104
105#ifdef CONFIG_COMPAT
106#include <linux/compat.h>
107
108struct mtrr_sentry32
109{
110 compat_ulong_t base; /* Base address */
111 compat_uint_t size; /* Size of region */
112 compat_uint_t type; /* Type of region */
113};
114
115struct mtrr_gentry32
116{
117 compat_ulong_t regnum; /* Register number */
118 compat_uint_t base; /* Base address */
119 compat_uint_t size; /* Size of region */
120 compat_uint_t type; /* Type of region */
121};
122
123#define MTRR_IOCTL_BASE 'M'
124
125#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
126#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
127#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
128#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
129#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
130#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
131#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
132#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
133#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
134#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
135
136#endif /* CONFIG_COMPAT */
137
138#ifdef CONFIG_MTRR
139extern void mtrr_ap_init(void);
140extern void mtrr_bp_init(void);
141extern void mtrr_save_fixed_ranges(void *);
142extern void mtrr_save_state(void);
143#else
144#define mtrr_ap_init() do {} while (0)
145#define mtrr_bp_init() do {} while (0)
146#define mtrr_save_fixed_ranges(arg) do {} while (0)
147#define mtrr_save_state() do {} while (0)
148#endif
149
150#endif /* __KERNEL__ */
151
152#endif /* _LINUX_MTRR_H */
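
The ioctls above are driven from user space through /proc/mtrr. A minimal,
hypothetical sketch: the base and size are made up and must come from a real
device (e.g. a framebuffer BAR) in practice:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/mtrr.h>

    int main(void)
    {
            struct mtrr_sentry s = {
                    .base = 0xf0000000UL,      /* made-up framebuffer base */
                    .size = 0x01000000,        /* 16MB */
                    .type = MTRR_TYPE_WRCOMB,  /* write-combining */
            };
            int fd = open("/proc/mtrr", O_RDWR);

            if (fd < 0 || ioctl(fd, MTRRIOC_ADD_ENTRY, &s) < 0) {
                    perror("mtrr");
                    return 1;
            }
            close(fd);
            return 0;
    }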
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
deleted file mode 100644
index 6c2949a3c677..000000000000
--- a/include/asm-x86_64/mutex.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Assembly implementation of the mutex fastpath, based on atomic
3 * decrement/increment.
4 *
5 * started by Ingo Molnar:
6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */
9#ifndef _ASM_MUTEX_H
10#define _ASM_MUTEX_H
11
12/**
13 * __mutex_fastpath_lock - decrement and call function if negative
14 * @v: pointer of type atomic_t
15 * @fail_fn: function to call if the result is negative
16 *
17 * Atomically decrements @v and calls <fail_fn> if the result is negative.
18 */
19#define __mutex_fastpath_lock(v, fail_fn) \
20do { \
21 unsigned long dummy; \
22 \
23 typecheck(atomic_t *, v); \
24 typecheck_fn(void (*)(atomic_t *), fail_fn); \
25 \
26 __asm__ __volatile__( \
27 LOCK_PREFIX " decl (%%rdi) \n" \
28 " jns 1f \n" \
29 " call "#fail_fn" \n" \
30 "1:" \
31 \
32 :"=D" (dummy) \
33 : "D" (v) \
34 : "rax", "rsi", "rdx", "rcx", \
35 "r8", "r9", "r10", "r11", "memory"); \
36} while (0)
37
38/**
39 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
40 * from 1 to a 0 value
41 * @count: pointer of type atomic_t
42 * @fail_fn: function to call if the original value was not 1
43 *
44 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
45 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
46 * or anything the slow path function returns
47 */
48static inline int
49__mutex_fastpath_lock_retval(atomic_t *count,
50 int (*fail_fn)(atomic_t *))
51{
52 if (unlikely(atomic_dec_return(count) < 0))
53 return fail_fn(count);
54 else
55 return 0;
56}
57
58/**
59 * __mutex_fastpath_unlock - increment and call function if nonpositive
60 * @v: pointer of type atomic_t
61 * @fail_fn: function to call if the result is nonpositive
62 *
63 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
64 */
65#define __mutex_fastpath_unlock(v, fail_fn) \
66do { \
67 unsigned long dummy; \
68 \
69 typecheck(atomic_t *, v); \
70 typecheck_fn(void (*)(atomic_t *), fail_fn); \
71 \
72 __asm__ __volatile__( \
73 LOCK_PREFIX " incl (%%rdi) \n" \
74 " jg 1f \n" \
75 " call "#fail_fn" \n" \
76 "1: " \
77 \
78 :"=D" (dummy) \
79 : "D" (v) \
80 : "rax", "rsi", "rdx", "rcx", \
81 "r8", "r9", "r10", "r11", "memory"); \
82} while (0)
83
84#define __mutex_slowpath_needs_to_unlock() 1
85
86/**
87 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
88 *
89 * @count: pointer of type atomic_t
90 * @fail_fn: fallback function
91 *
92 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
93 * if it wasn't 1 originally. [the fallback function is never used on
94 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
95 */
96static inline int
97__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
98{
99 if (likely(atomic_cmpxchg(count, 1, 0) == 1))
100 return 1;
101 else
102 return 0;
103}
104
105#endif
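
Semantically, the lock fastpath above is equivalent to the following C sketch
(illustrative only; the real fastpath is asm so the decrement and the
conditional slowpath call compile to a tight LOCK DECL + JNS + CALL sequence):

    /* What __mutex_fastpath_lock(v, fail_fn) does, in plain C. */
    static inline void mutex_fastpath_lock_equiv(atomic_t *v,
                                                 void (*fail_fn)(atomic_t *))
    {
            if (atomic_dec_return(v) < 0)   /* the LOCK DECL + JNS pair */
                    fail_fn(v);             /* contended: take the slowpath */
    }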
diff --git a/include/asm-x86_64/namei.h b/include/asm-x86_64/namei.h
deleted file mode 100644
index bef239f5318f..000000000000
--- a/include/asm-x86_64/namei.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __X8664_NAMEI_H
2#define __X8664_NAMEI_H
3
4/* This dummy routine may be changed to something useful
5 * for /usr/gnemul/ emulation stuff.
6 * Look at asm-sparc/namei.h for details.
7 */
8
9#define __emul_prefix() NULL
10
11#endif
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
deleted file mode 100644
index 65b6acf3bb59..000000000000
--- a/include/asm-x86_64/nmi.h
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * linux/include/asm-x86_64/nmi.h
3 */
4#ifndef ASM_NMI_H
5#define ASM_NMI_H
6
7#include <linux/pm.h>
8#include <asm/io.h>
9
10/**
11 * do_nmi_callback
12 *
13 * Check whether a callback exists and, if so, execute it. Return 1
14 * if a handler exists and handled the NMI successfully.
15 */
16int do_nmi_callback(struct pt_regs *regs, int cpu);
17
18#ifdef CONFIG_PM
19
20/** Replace the PM callback routine for NMI. */
21struct pm_dev * set_nmi_pm_callback(pm_callback callback);
22
23/** Unset the PM callback routine back to the default. */
24void unset_nmi_pm_callback(struct pm_dev * dev);
25
26#else
27
28static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
29{
30 return 0;
31}
32
33static inline void unset_nmi_pm_callback(struct pm_dev * dev)
34{
35}
36
37#endif /* CONFIG_PM */
38
39extern void default_do_nmi(struct pt_regs *);
40extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
41
42#define get_nmi_reason() inb(0x61)
43
44extern int panic_on_timeout;
45extern int unknown_nmi_panic;
46extern int nmi_watchdog_enabled;
47
48extern int check_nmi_watchdog(void);
49extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
50extern int avail_to_resrv_perfctr_nmi(unsigned int);
51extern int reserve_perfctr_nmi(unsigned int);
52extern void release_perfctr_nmi(unsigned int);
53extern int reserve_evntsel_nmi(unsigned int);
54extern void release_evntsel_nmi(unsigned int);
55
56extern void setup_apic_nmi_watchdog (void *);
57extern void stop_apic_nmi_watchdog (void *);
58extern void disable_timer_nmi_watchdog(void);
59extern void enable_timer_nmi_watchdog(void);
60extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
61
62extern void nmi_watchdog_default(void);
63extern int setup_nmi_watchdog(char *);
64
65extern atomic_t nmi_active;
66extern unsigned int nmi_watchdog;
67#define NMI_DISABLED -1
68#define NMI_NONE 0
69#define NMI_IO_APIC 1
70#define NMI_LOCAL_APIC 2
71#define NMI_INVALID 3
72#define NMI_DEFAULT NMI_DISABLED
73
74struct ctl_table;
75struct file;
76extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
77 void __user *, size_t *, loff_t *);
78
79extern int unknown_nmi_panic;
80
81void __trigger_all_cpu_backtrace(void);
82#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
83
84
85void lapic_watchdog_stop(void);
86int lapic_watchdog_init(unsigned nmi_hz);
87int lapic_wd_event(unsigned nmi_hz);
88unsigned lapic_adjust_nmi_hz(unsigned hz);
89int lapic_watchdog_ok(void);
90void disable_lapic_nmi_watchdog(void);
91void enable_lapic_nmi_watchdog(void);
92void stop_nmi(void);
93void restart_nmi(void);
94
95#endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
deleted file mode 100644
index 933ff11ece15..000000000000
--- a/include/asm-x86_64/numa.h
+++ /dev/null
@@ -1,38 +0,0 @@
1#ifndef _ASM_X8664_NUMA_H
2#define _ASM_X8664_NUMA_H 1
3
4#include <linux/nodemask.h>
5
6struct bootnode {
7 u64 start,end;
8};
9
10extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
11
12#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
13
14extern void numa_add_cpu(int cpu);
15extern void numa_init_array(void);
16extern int numa_off;
17
18extern void numa_set_node(int cpu, int node);
19extern void srat_reserve_add_area(int nodeid);
20extern int hotadd_percent;
21
22extern unsigned char apicid_to_node[256];
23#ifdef CONFIG_NUMA
24extern void __init init_cpu_to_node(void);
25
26static inline void clear_node_cpumask(int cpu)
27{
28 clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
29}
30
31#else
32#define init_cpu_to_node() do {} while (0)
33#define clear_node_cpumask(cpu) do {} while (0)
34#endif
35
36#define NUMA_NO_NODE 0xff
37
38#endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
deleted file mode 100644
index 88adf1afb0a2..000000000000
--- a/include/asm-x86_64/page.h
+++ /dev/null
@@ -1,143 +0,0 @@
1#ifndef _X86_64_PAGE_H
2#define _X86_64_PAGE_H
3
4#include <linux/const.h>
5
6/* PAGE_SHIFT determines the page size */
7#define PAGE_SHIFT 12
8#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
9#define PAGE_MASK (~(PAGE_SIZE-1))
10#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
11
12#define THREAD_ORDER 1
13#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
14#define CURRENT_MASK (~(THREAD_SIZE-1))
15
16#define EXCEPTION_STACK_ORDER 0
17#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
18
19#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
20#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
21
22#define IRQSTACK_ORDER 2
23#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
24
25#define STACKFAULT_STACK 1
26#define DOUBLEFAULT_STACK 2
27#define NMI_STACK 3
28#define DEBUG_STACK 4
29#define MCE_STACK 5
30#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
31
32#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
33#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
34
35#define HPAGE_SHIFT PMD_SHIFT
36#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
37#define HPAGE_MASK (~(HPAGE_SIZE - 1))
38#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
39
40#ifdef __KERNEL__
41#ifndef __ASSEMBLY__
42
43extern unsigned long end_pfn;
44
45void clear_page(void *);
46void copy_page(void *, void *);
47
48#define clear_user_page(page, vaddr, pg) clear_page(page)
49#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
50
51#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
52 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
53#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
54/*
55 * These are used to make use of C type-checking..
56 */
57typedef struct { unsigned long pte; } pte_t;
58typedef struct { unsigned long pmd; } pmd_t;
59typedef struct { unsigned long pud; } pud_t;
60typedef struct { unsigned long pgd; } pgd_t;
61#define PTE_MASK PHYSICAL_PAGE_MASK
62
63typedef struct { unsigned long pgprot; } pgprot_t;
64
65extern unsigned long phys_base;
66
67#define pte_val(x) ((x).pte)
68#define pmd_val(x) ((x).pmd)
69#define pud_val(x) ((x).pud)
70#define pgd_val(x) ((x).pgd)
71#define pgprot_val(x) ((x).pgprot)
72
73#define __pte(x) ((pte_t) { (x) } )
74#define __pmd(x) ((pmd_t) { (x) } )
75#define __pud(x) ((pud_t) { (x) } )
76#define __pgd(x) ((pgd_t) { (x) } )
77#define __pgprot(x) ((pgprot_t) { (x) } )
78
79#endif /* !__ASSEMBLY__ */
80
81#define __PHYSICAL_START CONFIG_PHYSICAL_START
82#define __KERNEL_ALIGN 0x200000
83
84/*
85 * Make sure the kernel is aligned to a 2MB address. Catching this at
86 * compile time is better than failing at boot. Change your config file
87 * and compile the kernel for a 2MB-aligned address (CONFIG_PHYSICAL_START).
88 */
89#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
90#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
91#endif
92
93#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
94#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
95#define __PAGE_OFFSET _AC(0xffff810000000000, UL)
96
97/* to align the pointer to the (next) page boundary */
98#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
99
100/* See Documentation/x86_64/mm.txt for a description of the memory map. */
101#define __PHYSICAL_MASK_SHIFT 46
102#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
103#define __VIRTUAL_MASK_SHIFT 48
104#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
105
106#define KERNEL_TEXT_SIZE (40*1024*1024)
107#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
108#define PAGE_OFFSET __PAGE_OFFSET
109
110#ifndef __ASSEMBLY__
111
112#include <asm/bug.h>
113
114extern unsigned long __phys_addr(unsigned long);
115
116#endif /* __ASSEMBLY__ */
117
118#define __pa(x) __phys_addr((unsigned long)(x))
119#define __pa_symbol(x) __phys_addr((unsigned long)(x))
120
121#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
122#define __boot_va(x) __va(x)
123#define __boot_pa(x) __pa(x)
124#ifdef CONFIG_FLATMEM
125#define pfn_valid(pfn) ((pfn) < end_pfn)
126#endif
127
128#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
129#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
130#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
131
132#define VM_DATA_DEFAULT_FLAGS \
133 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
134 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
135
136#define __HAVE_ARCH_GATE_AREA 1
137
138#include <asm-generic/memory_model.h>
139#include <asm-generic/page.h>
140
141#endif /* __KERNEL__ */
142
143#endif /* _X86_64_PAGE_H */
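
For reference, the direct-mapping helpers above round-trip like this. A
minimal sketch assuming kernel context; 'buf' stands for any pointer into the
direct mapping (e.g. from kmalloc):

    /* Hypothetical example: __pa/__va round trip and page alignment. */
    static void page_example(void *buf)
    {
            unsigned long phys = __pa(buf);    /* virtual -> physical */
            void *virt = __va(phys);           /* physical -> virtual */

            BUG_ON(virt != buf);               /* the direct map is 1:1 */
            /* PAGE_ALIGN rounds up: PAGE_ALIGN(0x1001) == 0x2000 */
            printk("phys=%lx next boundary=%lx\n",
                   phys, PAGE_ALIGN(phys + 1));
    }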
diff --git a/include/asm-x86_64/param.h b/include/asm-x86_64/param.h
deleted file mode 100644
index a728786c3c7c..000000000000
--- a/include/asm-x86_64/param.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASMx86_64_PARAM_H
2#define _ASMx86_64_PARAM_H
3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
7#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif
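
The HZ/USER_HZ split above lets the internal tick rate change while user space
keeps seeing 100 ticks per second. A sketch of the conversion, assuming HZ is
a whole multiple of USER_HZ (true for HZ=100 or HZ=1000; the kernel's real
conversion also handles the remaining cases):

    /* Convert internal jiffies to the USER_HZ ticks reported by times(). */
    static inline unsigned long jiffies_to_user_ticks(unsigned long j)
    {
            return j / (HZ / USER_HZ);  /* HZ=1000: 1000 jiffies -> 100 ticks */
    }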
diff --git a/include/asm-x86_64/parport.h b/include/asm-x86_64/parport.h
deleted file mode 100644
index 7135ef977c96..000000000000
--- a/include/asm-x86_64/parport.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * parport.h: x86-64-specific parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_X8664_PARPORT_H
10#define _ASM_X8664_PARPORT_H 1
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
14{
15 return parport_pc_find_isa_ports (autoirq, autodma);
16}
17
18#endif
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h
deleted file mode 100644
index 6823fa4f1afa..000000000000
--- a/include/asm-x86_64/pci-direct.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef ASM_PCI_DIRECT_H
2#define ASM_PCI_DIRECT_H 1
3
4#include <linux/types.h>
5
6/* Direct PCI access. This is used for PCI accesses in early boot before
7 the PCI subsystem works. */
8
9extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
10extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
13extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
14
15extern int early_pci_allowed(void);
16
17#endif
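
A minimal, hypothetical use of the early accessors above: probe the host
bridge before the PCI subsystem is up. Register 0 of any function holds the
vendor ID (low 16 bits) and device ID (high 16 bits):

    static void early_pci_example(void)
    {
            u32 id;

            if (!early_pci_allowed())
                    return;

            id = read_pci_config(0, 0, 0, 0);  /* bus 0, slot 0, func 0 */
            if (id != 0xffffffff)              /* all-ones means no device */
                    printk("host bridge %04x:%04x\n",
                           id & 0xffff, id >> 16);
    }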
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
deleted file mode 100644
index 5da8cb0c0599..000000000000
--- a/include/asm-x86_64/pci.h
+++ /dev/null
@@ -1,126 +0,0 @@
1#ifndef __x8664_PCI_H
2#define __x8664_PCI_H
3
4#include <asm/io.h>
5
6#ifdef __KERNEL__
7
8struct pci_sysdata {
9 int node; /* NUMA node */
10 void* iommu; /* IOMMU private data */
11};
12
13extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
14
15#ifdef CONFIG_CALGARY_IOMMU
16static inline void* pci_iommu(struct pci_bus *bus)
17{
18 struct pci_sysdata *sd = bus->sysdata;
19 return sd->iommu;
20}
21
22static inline void set_pci_iommu(struct pci_bus *bus, void *val)
23{
24 struct pci_sysdata *sd = bus->sysdata;
25 sd->iommu = val;
26}
27#endif /* CONFIG_CALGARY_IOMMU */
28
29#include <linux/mm.h> /* for struct page */
30
31/* Can be used to override the logic in pci_scan_bus for skipping
32 already-configured bus numbers - to be used for buggy BIOSes
33 or architectures with incomplete PCI setup by the loader */
34
35#ifdef CONFIG_PCI
36extern unsigned int pcibios_assign_all_busses(void);
37#else
38#define pcibios_assign_all_busses() 0
39#endif
40#define pcibios_scan_all_fns(a, b) 0
41
42extern unsigned long pci_mem_start;
43#define PCIBIOS_MIN_IO 0x1000
44#define PCIBIOS_MIN_MEM (pci_mem_start)
45
46#define PCIBIOS_MIN_CARDBUS_IO 0x4000
47
48void pcibios_config_init(void);
49struct pci_bus * pcibios_scan_root(int bus);
50extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
51extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
52
53void pcibios_set_master(struct pci_dev *dev);
54void pcibios_penalize_isa_irq(int irq, int active);
55struct irq_routing_table *pcibios_get_irq_routing_table(void);
56int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
57
58#include <linux/types.h>
59#include <linux/slab.h>
60#include <asm/scatterlist.h>
61#include <linux/string.h>
62#include <asm/page.h>
63
64extern void pci_iommu_alloc(void);
65extern int iommu_setup(char *opt);
66
67/* The PCI address space does equal the physical memory
68 * address space. The networking and block device layers use
69 * this boolean for bounce buffer decisions
70 *
71 * On AMD64 it mostly holds, but we set it to zero if a hardware
72 * IOMMU (gart) or a software IOMMU (swiotlb) is available.
73 */
74#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
75
76#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
77
78#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
79 dma_addr_t ADDR_NAME;
80#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
81 __u32 LEN_NAME;
82#define pci_unmap_addr(PTR, ADDR_NAME) \
83 ((PTR)->ADDR_NAME)
84#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
85 (((PTR)->ADDR_NAME) = (VAL))
86#define pci_unmap_len(PTR, LEN_NAME) \
87 ((PTR)->LEN_NAME)
88#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
89 (((PTR)->LEN_NAME) = (VAL))
90
91#else
92/* No IOMMU */
93
94#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
95#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
96#define pci_unmap_addr(PTR, ADDR_NAME) (0)
97#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
98#define pci_unmap_len(PTR, LEN_NAME) (0)
99#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
100
101#endif
102
103#include <asm-generic/pci-dma-compat.h>
104
105#ifdef CONFIG_PCI
106static inline void pci_dma_burst_advice(struct pci_dev *pdev,
107 enum pci_dma_burst_strategy *strat,
108 unsigned long *strategy_parameter)
109{
110 *strat = PCI_DMA_BURST_INFINITY;
111 *strategy_parameter = ~0UL;
112}
113#endif
114
115#define HAVE_PCI_MMAP
116extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
117 enum pci_mmap_state mmap_state, int write_combine);
118
119#endif /* __KERNEL__ */
120
121/* generic pci stuff */
122#ifdef CONFIG_PCI
123#include <asm-generic/pci.h>
124#endif
125
126#endif /* __x8664_PCI_H */
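
The DECLARE_PCI_UNMAP_* machinery above exists so a driver's unmap bookkeeping
compiles away entirely when no IOMMU is configured. A hypothetical sketch
(hyp_ring and its fields are made up; note the macros supply their own
semicolons):

    struct hyp_ring {
            struct sk_buff *skb;
            DECLARE_PCI_UNMAP_ADDR(mapping)
            DECLARE_PCI_UNMAP_LEN(len)
    };

    static void hyp_save_mapping(struct hyp_ring *r, dma_addr_t dma, u32 bytes)
    {
            pci_unmap_addr_set(r, mapping, dma);   /* no-op without IOMMU */
            pci_unmap_len_set(r, len, bytes);
    }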
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
deleted file mode 100644
index 5642634843c4..000000000000
--- a/include/asm-x86_64/pda.h
+++ /dev/null
@@ -1,125 +0,0 @@
1#ifndef X86_64_PDA_H
2#define X86_64_PDA_H
3
4#ifndef __ASSEMBLY__
5#include <linux/stddef.h>
6#include <linux/types.h>
7#include <linux/cache.h>
8#include <asm/page.h>
9
10/* Per-processor data structure. %gs points to it while the kernel runs */
11struct x8664_pda {
12 struct task_struct *pcurrent; /* 0 Current process */
13 unsigned long data_offset; /* 8 Per cpu data offset from linker
14 address */
15 unsigned long kernelstack; /* 16 top of kernel stack for current */
16 unsigned long oldrsp; /* 24 user rsp for system call */
17 int irqcount; /* 32 Irq nesting counter. Starts with -1 */
18 int cpunumber; /* 36 Logical CPU number */
19#ifdef CONFIG_CC_STACKPROTECTOR
20 unsigned long stack_canary; /* 40 stack canary value */
21 /* gcc-ABI: this canary MUST be at
22 offset 40!!! */
23#endif
24 char *irqstackptr;
25 int nodenumber; /* number of current node */
26 unsigned int __softirq_pending;
27 unsigned int __nmi_count; /* number of NMIs on this CPU */
28 short mmu_state;
29 short isidle;
30 struct mm_struct *active_mm;
31 unsigned apic_timer_irqs;
32} ____cacheline_aligned_in_smp;
33
34extern struct x8664_pda *_cpu_pda[];
35extern struct x8664_pda boot_cpu_pda[];
36
37#define cpu_pda(i) (_cpu_pda[i])
38
39/*
40 * There is no fast way to get the base address of the PDA, all the accesses
41 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
42 */
43extern void __bad_pda_field(void) __attribute__((noreturn));
44
45/*
46 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
47 * all PDA accesses so it gets read/write dependencies right.
48 */
49extern struct x8664_pda _proxy_pda;
50
51#define pda_offset(field) offsetof(struct x8664_pda, field)
52
53#define pda_to_op(op,field,val) do { \
54 typedef typeof(_proxy_pda.field) T__; \
55 if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
56 switch (sizeof(_proxy_pda.field)) { \
57 case 2: \
58 asm(op "w %1,%%gs:%c2" : \
59 "+m" (_proxy_pda.field) : \
60 "ri" ((T__)val), \
61 "i"(pda_offset(field))); \
62 break; \
63 case 4: \
64 asm(op "l %1,%%gs:%c2" : \
65 "+m" (_proxy_pda.field) : \
66 "ri" ((T__)val), \
67 "i" (pda_offset(field))); \
68 break; \
69 case 8: \
70 asm(op "q %1,%%gs:%c2": \
71 "+m" (_proxy_pda.field) : \
72 "ri" ((T__)val), \
73 "i"(pda_offset(field))); \
74 break; \
75 default: \
76 __bad_pda_field(); \
77 } \
78 } while (0)
79
80#define pda_from_op(op,field) ({ \
81 typeof(_proxy_pda.field) ret__; \
82 switch (sizeof(_proxy_pda.field)) { \
83 case 2: \
84 asm(op "w %%gs:%c1,%0" : \
85 "=r" (ret__) : \
86 "i" (pda_offset(field)), \
87 "m" (_proxy_pda.field)); \
88 break; \
89 case 4: \
90 asm(op "l %%gs:%c1,%0": \
91 "=r" (ret__): \
92 "i" (pda_offset(field)), \
93 "m" (_proxy_pda.field)); \
94 break; \
95 case 8: \
96 asm(op "q %%gs:%c1,%0": \
97 "=r" (ret__) : \
98 "i" (pda_offset(field)), \
99 "m" (_proxy_pda.field)); \
100 break; \
101 default: \
102 __bad_pda_field(); \
103 } \
104 ret__; })
105
106#define read_pda(field) pda_from_op("mov",field)
107#define write_pda(field,val) pda_to_op("mov",field,val)
108#define add_pda(field,val) pda_to_op("add",field,val)
109#define sub_pda(field,val) pda_to_op("sub",field,val)
110#define or_pda(field,val) pda_to_op("or",field,val)
111
112/* This is not atomic against other CPUs -- CPU preemption needs to be off */
113#define test_and_clear_bit_pda(bit,field) ({ \
114 int old__; \
115 asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
116 : "=r" (old__), "+m" (_proxy_pda.field) \
117 : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
118 old__; \
119})
120
121#endif
122
123#define PDA_STACKOFFSET (5*8)
124
125#endif
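
For reference, the generated accessors above were used like this. A minimal
sketch assuming preemption is disabled, so the task cannot migrate between
the two reads; the printing is illustrative:

    static void pda_example(void)
    {
            int cpu = read_pda(cpunumber);          /* one gs-relative movl */
            unsigned long stk = read_pda(kernelstack);

            add_pda(apic_timer_irqs, 1);  /* gs-relative addl; per-CPU data
                                             needs no LOCK prefix */
            printk("cpu %d kernel stack top %lx\n", cpu, stk);
    }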
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
deleted file mode 100644
index 5abd48270101..000000000000
--- a/include/asm-x86_64/percpu.h
+++ /dev/null
@@ -1,68 +0,0 @@
1#ifndef _ASM_X8664_PERCPU_H_
2#define _ASM_X8664_PERCPU_H_
3#include <linux/compiler.h>
4
5/* Same as asm-generic/percpu.h, except that we store the per-cpu offset
6 in the PDA. Longer term, the PDA and every per-cpu variable should
7 just be placed in a single section and referenced directly
8 from %gs */
9
10#ifdef CONFIG_SMP
11
12#include <asm/pda.h>
13
14#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
15#define __my_cpu_offset() read_pda(data_offset)
16
17#define per_cpu_offset(x) (__per_cpu_offset(x))
18
19/* Separate out the type, so (int[3], foo) works. */
20#define DEFINE_PER_CPU(type, name) \
21 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
22
23#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
24 __attribute__((__section__(".data.percpu.shared_aligned"))) \
25 __typeof__(type) per_cpu__##name \
26 ____cacheline_internodealigned_in_smp
27
28/* var is in discarded region: offset to particular copy we want */
29#define per_cpu(var, cpu) (*({ \
30 extern int simple_identifier_##var(void); \
31 RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)); }))
32#define __get_cpu_var(var) (*({ \
33 extern int simple_identifier_##var(void); \
34 RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
35#define __raw_get_cpu_var(var) (*({ \
36 extern int simple_identifier_##var(void); \
37 RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
38
39/* A macro to avoid #include hell... */
40#define percpu_modcopy(pcpudst, src, size) \
41do { \
42 unsigned int __i; \
43 for_each_possible_cpu(__i) \
44 memcpy((pcpudst)+__per_cpu_offset(__i), \
45 (src), (size)); \
46} while (0)
47
48extern void setup_per_cpu_areas(void);
49
50#else /* ! SMP */
51
52#define DEFINE_PER_CPU(type, name) \
53 __typeof__(type) per_cpu__##name
54#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
55 DEFINE_PER_CPU(type, name)
56
57#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
58#define __get_cpu_var(var) per_cpu__##var
59#define __raw_get_cpu_var(var) per_cpu__##var
60
61#endif /* SMP */
62
63#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
64
65#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
66#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
67
68#endif /* _ASM_X8664_PERCPU_H_ */
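
A minimal, hypothetical use of the definitions above (the variable name is
made up; __get_cpu_var requires preemption to be disabled):

    DEFINE_PER_CPU(unsigned long, hyp_event_count);

    static void percpu_example(void)
    {
            /* this CPU's copy, located via the PDA data_offset on SMP */
            __get_cpu_var(hyp_event_count)++;

            /* another CPU's copy, e.g. for a stats dump */
            printk("cpu0 count %lu\n", per_cpu(hyp_event_count, 0));
    }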
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
deleted file mode 100644
index 8bb564687860..000000000000
--- a/include/asm-x86_64/pgalloc.h
+++ /dev/null
@@ -1,119 +0,0 @@
1#ifndef _X86_64_PGALLOC_H
2#define _X86_64_PGALLOC_H
3
4#include <asm/pda.h>
5#include <linux/threads.h>
6#include <linux/mm.h>
7
8#define pmd_populate_kernel(mm, pmd, pte) \
9 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
10#define pud_populate(mm, pud, pmd) \
11 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
12#define pgd_populate(mm, pgd, pud) \
13 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
14
15static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
16{
17 set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
18}
19
20static inline void pmd_free(pmd_t *pmd)
21{
22 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
23 free_page((unsigned long)pmd);
24}
25
26static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
27{
28 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
29}
30
31static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
32{
33 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
34}
35
36static inline void pud_free (pud_t *pud)
37{
38 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
39 free_page((unsigned long)pud);
40}
41
42static inline void pgd_list_add(pgd_t *pgd)
43{
44 struct page *page = virt_to_page(pgd);
45
46 spin_lock(&pgd_lock);
47 list_add(&page->lru, &pgd_list);
48 spin_unlock(&pgd_lock);
49}
50
51static inline void pgd_list_del(pgd_t *pgd)
52{
53 struct page *page = virt_to_page(pgd);
54
55 spin_lock(&pgd_lock);
56 list_del(&page->lru);
57 spin_unlock(&pgd_lock);
58}
59
60static inline pgd_t *pgd_alloc(struct mm_struct *mm)
61{
62 unsigned boundary;
63 pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
64 if (!pgd)
65 return NULL;
66 pgd_list_add(pgd);
67 /*
68 * Copy kernel pointers in from init.
69 * Could keep a freelist or slab cache of those because the kernel
70 * part never changes.
71 */
72 boundary = pgd_index(__PAGE_OFFSET);
73 memset(pgd, 0, boundary * sizeof(pgd_t));
74 memcpy(pgd + boundary,
75 init_level4_pgt + boundary,
76 (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
77 return pgd;
78}
79
80static inline void pgd_free(pgd_t *pgd)
81{
82 BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
83 pgd_list_del(pgd);
84 free_page((unsigned long)pgd);
85}
86
87static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
88{
89 return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
90}
91
92static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
93{
94 void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
95 if (!p)
96 return NULL;
97 return virt_to_page(p);
98}
99
100/* Should really implement gc for free page table pages. This could be
101 done with a reference count in struct page. */
102
103static inline void pte_free_kernel(pte_t *pte)
104{
105 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
106 free_page((unsigned long)pte);
107}
108
109static inline void pte_free(struct page *pte)
110{
111 __free_page(pte);
112}
113
114#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
115
116#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
117#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
118
119#endif /* _X86_64_PGALLOC_H */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
deleted file mode 100644
index 57dd6b3107ea..000000000000
--- a/include/asm-x86_64/pgtable.h
+++ /dev/null
@@ -1,432 +0,0 @@
1#ifndef _X86_64_PGTABLE_H
2#define _X86_64_PGTABLE_H
3
4#include <linux/const.h>
5#ifndef __ASSEMBLY__
6
7/*
8 * This file contains the functions and defines necessary to modify and use
9 * the x86-64 page table tree.
10 */
11#include <asm/processor.h>
12#include <asm/bitops.h>
13#include <linux/threads.h>
14#include <asm/pda.h>
15
16extern pud_t level3_kernel_pgt[512];
17extern pud_t level3_ident_pgt[512];
18extern pmd_t level2_kernel_pgt[512];
19extern pgd_t init_level4_pgt[];
20extern unsigned long __supported_pte_mask;
21
22#define swapper_pg_dir init_level4_pgt
23
24extern void paging_init(void);
25extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
26
27/*
28 * ZERO_PAGE is a global shared page that is always zero: used
29 * for zero-mapped memory areas etc..
30 */
31extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
32#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
33
34#endif /* !__ASSEMBLY__ */
35
36/*
37 * PGDIR_SHIFT determines what a top-level page table entry can map
38 */
39#define PGDIR_SHIFT 39
40#define PTRS_PER_PGD 512
41
42/*
43 * 3rd level page
44 */
45#define PUD_SHIFT 30
46#define PTRS_PER_PUD 512
47
48/*
49 * PMD_SHIFT determines the size of the area a middle-level
50 * page table can map
51 */
52#define PMD_SHIFT 21
53#define PTRS_PER_PMD 512
54
55/*
56 * entries per page directory level
57 */
58#define PTRS_PER_PTE 512
59
60#ifndef __ASSEMBLY__
61
62#define pte_ERROR(e) \
63 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
64#define pmd_ERROR(e) \
65 printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
66#define pud_ERROR(e) \
67 printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
68#define pgd_ERROR(e) \
69 printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
70
71#define pgd_none(x) (!pgd_val(x))
72#define pud_none(x) (!pud_val(x))
73
74static inline void set_pte(pte_t *dst, pte_t val)
75{
76 pte_val(*dst) = pte_val(val);
77}
78#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
79
80static inline void set_pmd(pmd_t *dst, pmd_t val)
81{
82 pmd_val(*dst) = pmd_val(val);
83}
84
85static inline void set_pud(pud_t *dst, pud_t val)
86{
87 pud_val(*dst) = pud_val(val);
88}
89
90static inline void pud_clear (pud_t *pud)
91{
92 set_pud(pud, __pud(0));
93}
94
95static inline void set_pgd(pgd_t *dst, pgd_t val)
96{
97 pgd_val(*dst) = pgd_val(val);
98}
99
100static inline void pgd_clear (pgd_t * pgd)
101{
102 set_pgd(pgd, __pgd(0));
103}
104
105#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0))
106
107struct mm_struct;
108
109static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
110{
111 pte_t pte;
112 if (full) {
113 pte = *ptep;
114 *ptep = __pte(0);
115 } else {
116 pte = ptep_get_and_clear(mm, addr, ptep);
117 }
118 return pte;
119}
120
121#define pte_same(a, b) ((a).pte == (b).pte)
122
123#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
124
125#endif /* !__ASSEMBLY__ */
126
127#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
128#define PMD_MASK (~(PMD_SIZE-1))
129#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
130#define PUD_MASK (~(PUD_SIZE-1))
131#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
132#define PGDIR_MASK (~(PGDIR_SIZE-1))
133
134#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
135#define FIRST_USER_ADDRESS 0
136
137#define MAXMEM _AC(0x3fffffffffff, UL)
138#define VMALLOC_START _AC(0xffffc20000000000, UL)
139#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
140#define MODULES_VADDR _AC(0xffffffff88000000, UL)
141#define MODULES_END _AC(0xfffffffffff00000, UL)
142#define MODULES_LEN (MODULES_END - MODULES_VADDR)
143
144#define _PAGE_BIT_PRESENT 0
145#define _PAGE_BIT_RW 1
146#define _PAGE_BIT_USER 2
147#define _PAGE_BIT_PWT 3
148#define _PAGE_BIT_PCD 4
149#define _PAGE_BIT_ACCESSED 5
150#define _PAGE_BIT_DIRTY 6
151#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
152#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
153#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
154
155#define _PAGE_PRESENT 0x001
156#define _PAGE_RW 0x002
157#define _PAGE_USER 0x004
158#define _PAGE_PWT 0x008
159#define _PAGE_PCD 0x010
160#define _PAGE_ACCESSED 0x020
161#define _PAGE_DIRTY 0x040
162#define _PAGE_PSE 0x080 /* 2MB page */
163#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
164#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
165
166#define _PAGE_PROTNONE 0x080 /* If not present */
167#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
168
169#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
170#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
171
172#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
173
174#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
175#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
176#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
177#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
178#define PAGE_COPY PAGE_COPY_NOEXEC
179#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
180#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
181#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
182#define __PAGE_KERNEL \
183 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
184#define __PAGE_KERNEL_EXEC \
185 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
186#define __PAGE_KERNEL_NOCACHE \
187 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
188#define __PAGE_KERNEL_RO \
189 (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
190#define __PAGE_KERNEL_VSYSCALL \
191 (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
192#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
193 (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
194#define __PAGE_KERNEL_LARGE \
195 (__PAGE_KERNEL | _PAGE_PSE)
196#define __PAGE_KERNEL_LARGE_EXEC \
197 (__PAGE_KERNEL_EXEC | _PAGE_PSE)
198
199#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
200
201#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
202#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
203#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
204#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
205#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
206#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
207#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
208#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
209
210/* xwr */
211#define __P000 PAGE_NONE
212#define __P001 PAGE_READONLY
213#define __P010 PAGE_COPY
214#define __P011 PAGE_COPY
215#define __P100 PAGE_READONLY_EXEC
216#define __P101 PAGE_READONLY_EXEC
217#define __P110 PAGE_COPY_EXEC
218#define __P111 PAGE_COPY_EXEC
219
220#define __S000 PAGE_NONE
221#define __S001 PAGE_READONLY
222#define __S010 PAGE_SHARED
223#define __S011 PAGE_SHARED
224#define __S100 PAGE_READONLY_EXEC
225#define __S101 PAGE_READONLY_EXEC
226#define __S110 PAGE_SHARED_EXEC
227#define __S111 PAGE_SHARED_EXEC
228
229#ifndef __ASSEMBLY__
230
231static inline unsigned long pgd_bad(pgd_t pgd)
232{
233 return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
234}
235
236static inline unsigned long pud_bad(pud_t pud)
237{
238 return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
239}
240
241static inline unsigned long pmd_bad(pmd_t pmd)
242{
243 return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
244}
245
246#define pte_none(x) (!pte_val(x))
247#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
248#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
249
250#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
251 right? */
252#define pte_page(x) pfn_to_page(pte_pfn(x))
253#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
254
255static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
256{
257 pte_t pte;
258 pte_val(pte) = (page_nr << PAGE_SHIFT);
259 pte_val(pte) |= pgprot_val(pgprot);
260 pte_val(pte) &= __supported_pte_mask;
261 return pte;
262}
263
264/*
265 * The following only work if pte_present() is true.
266 * Undefined behaviour if not..
267 */
268#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
269static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
270static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
271static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
272static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
273static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
274
275static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
276static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
277static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
278static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
279static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
280static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
281static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
282static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
283static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
284
285struct vm_area_struct;
286
287static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
288{
289 if (!pte_young(*ptep))
290 return 0;
291 return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
292}
293
294static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
295{
296 clear_bit(_PAGE_BIT_RW, &ptep->pte);
297}
298
299/*
300 * Macro to mark a page protection value as "uncacheable".
301 */
302#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
303
304static inline int pmd_large(pmd_t pte) {
305 return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
306}
307
308
309/*
310 * Conversion functions: convert a page and protection to a page entry,
311 * and a page entry and page directory to the page they refer to.
312 */
313
314/*
315 * Level 4 access.
316 */
317#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
318#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
319#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
320#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
321#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
322#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
323#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
324
325/* PUD - Level3 access */
326/* to find an entry in a page-table-directory. */
327#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
328#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
329#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
330#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
331#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
332
333/* PMD - Level 2 access */
334#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
335#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
336
337#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
338#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
339 pmd_index(address))
340#define pmd_none(x) (!pmd_val(x))
341#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
342#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
343#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
344#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
345
346#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
347#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
348#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
349
350/* PTE - Level 1 access. */
351
352/* page, protection -> pte */
353#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
354#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
355
356/* Change flags of a PTE */
357static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
358{
359 pte_val(pte) &= _PAGE_CHG_MASK;
360 pte_val(pte) |= pgprot_val(newprot);
361 pte_val(pte) &= __supported_pte_mask;
362 return pte;
363}
364
365#define pte_index(address) \
366 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
367#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
368 pte_index(address))
369
370/* x86-64 always has all page tables mapped. */
371#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
372#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
373#define pte_unmap(pte) /* NOP */
374#define pte_unmap_nested(pte) /* NOP */
375
376#define update_mmu_cache(vma,address,pte) do { } while (0)
377
378/* We only update the dirty/accessed state if we set
379 * the dirty bit by hand in the kernel, since the hardware
380 * will do the accessed bit for us, and we don't want to
381 * race with other CPUs that might be updating the dirty
382 * bit at the same time. */
383#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
384#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
385({ \
386 int __changed = !pte_same(*(__ptep), __entry); \
387 if (__changed && __dirty) { \
388 set_pte(__ptep, __entry); \
389 flush_tlb_page(__vma, __address); \
390 } \
391 __changed; \
392})
393
394/* Encode and de-code a swap entry */
395#define __swp_type(x) (((x).val >> 1) & 0x3f)
396#define __swp_offset(x) ((x).val >> 8)
397#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
398#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
399#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
400
401extern spinlock_t pgd_lock;
402extern struct list_head pgd_list;
403
404extern int kern_addr_valid(unsigned long addr);
405
406pte_t *lookup_address(unsigned long addr);
407
408#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
409 remap_pfn_range(vma, vaddr, pfn, size, prot)
410
411#define HAVE_ARCH_UNMAPPED_AREA
412
413#define pgtable_cache_init() do { } while (0)
414#define check_pgt_cache() do { } while (0)
415
416#define PAGE_AGP PAGE_KERNEL_NOCACHE
417#define HAVE_PAGE_AGP 1
418
419/* fs/proc/kcore.c */
420#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
421#define kc_offset_to_vaddr(o) \
422 (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
423
424#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
425#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
426#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
427#define __HAVE_ARCH_PTEP_SET_WRPROTECT
428#define __HAVE_ARCH_PTE_SAME
429#include <asm-generic/pgtable.h>
430#endif /* !__ASSEMBLY__ */
431
432#endif /* _X86_64_PGTABLE_H */
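
For reference, the level accessors above compose into a software page-table
walk, which is roughly what lookup_address() does internally. A minimal
sketch for kernel addresses:

    /* Return the PTE for a kernel address, or NULL if a level is absent
     * or the address is covered by a 2MB page (no PTE level exists). */
    static pte_t *walk_kernel_address(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd))
                    return NULL;
            pud = pud_offset(pgd, addr);
            if (pud_none(*pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd) || pmd_large(*pmd))
                    return NULL;
            return pte_offset_kernel(pmd, addr);
    }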
diff --git a/include/asm-x86_64/poll.h b/include/asm-x86_64/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/include/asm-x86_64/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/include/asm-x86_64/posix_types.h b/include/asm-x86_64/posix_types.h
deleted file mode 100644
index 9926aa43775b..000000000000
--- a/include/asm-x86_64/posix_types.h
+++ /dev/null
@@ -1,119 +0,0 @@
1#ifndef _ASM_X86_64_POSIX_TYPES_H
2#define _ASM_X86_64_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef unsigned long __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef int __kernel_ipc_pid_t;
16typedef unsigned int __kernel_uid_t;
17typedef unsigned int __kernel_gid_t;
18typedef unsigned long __kernel_size_t;
19typedef long __kernel_ssize_t;
20typedef long __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30
31#ifdef __GNUC__
32typedef long long __kernel_loff_t;
33#endif
34
35typedef struct {
36 int val[2];
37} __kernel_fsid_t;
38
39typedef unsigned short __kernel_old_uid_t;
40typedef unsigned short __kernel_old_gid_t;
41typedef __kernel_uid_t __kernel_uid32_t;
42typedef __kernel_gid_t __kernel_gid32_t;
43
44typedef unsigned long __kernel_old_dev_t;
45
46#ifdef __KERNEL__
47
48#undef __FD_SET
49static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
50{
51 unsigned long _tmp = fd / __NFDBITS;
52 unsigned long _rem = fd % __NFDBITS;
53 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
54}
55
56#undef __FD_CLR
57static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
58{
59 unsigned long _tmp = fd / __NFDBITS;
60 unsigned long _rem = fd % __NFDBITS;
61 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
62}
63
64#undef __FD_ISSET
65static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
66{
67 unsigned long _tmp = fd / __NFDBITS;
68 unsigned long _rem = fd % __NFDBITS;
69 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
70}
71
72/*
73 * This will unroll the loop for the normal constant cases (8 or 32 longs,
74 * for 256 and 1024-bit fd_sets respectively)
75 */
76#undef __FD_ZERO
77static __inline__ void __FD_ZERO(__kernel_fd_set *p)
78{
79 unsigned long *tmp = p->fds_bits;
80 int i;
81
82 if (__builtin_constant_p(__FDSET_LONGS)) {
83 switch (__FDSET_LONGS) {
84 case 32:
85 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
86 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
87 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
88 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
89 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
90 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
91 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
92 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
93 return;
94 case 16:
95 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
96 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
97 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
98 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
99 return;
100 case 8:
101 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
102 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
103 return;
104 case 4:
105 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
106 return;
107 }
108 }
109 i = __FDSET_LONGS;
110 while (i) {
111 i--;
112 *tmp = 0;
113 tmp++;
114 }
115}
116
117#endif /* defined(__KERNEL__) */
118
119#endif
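
The __FD_SET family above splits a descriptor into a word index (fd / __NFDBITS) and a bit index (fd % __NFDBITS). A stand-alone sketch of that arithmetic, assuming 64-bit longs as on x86-64:

#include <stdio.h>

#define NFDBITS (8 * (int)sizeof(unsigned long))        /* 64 on x86-64 */

int main(void)
{
        unsigned long bits[16] = { 0 };                 /* room for 1024 fds */
        int fd = 130;                                   /* word 2, bit 2 */

        bits[fd / NFDBITS] |= 1UL << (fd % NFDBITS);    /* __FD_SET */
        printf("word=%d bit=%d set=%d\n", fd / NFDBITS, fd % NFDBITS,
               !!(bits[fd / NFDBITS] & (1UL << (fd % NFDBITS))));
        return 0;
}
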
diff --git a/include/asm-x86_64/prctl.h b/include/asm-x86_64/prctl.h
deleted file mode 100644
index 52952adef1ca..000000000000
--- a/include/asm-x86_64/prctl.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef X86_64_PRCTL_H
2#define X86_64_PRCTL_H 1
3
4#define ARCH_SET_GS 0x1001
5#define ARCH_SET_FS 0x1002
6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004
8
9
10#endif
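
These ARCH_* codes are the argument to the arch_prctl system call, which reads or writes the FS/GS base registers. A hedged user-space sketch, assuming an x86-64 Linux host and going through syscall(2) since glibc historically shipped no wrapper:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ARCH_GET_FS 0x1003      /* from the header above */

int main(void)
{
        unsigned long fsbase = 0;

        if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
                printf("fs base = %#lx\n", fsbase);     /* usually the TLS block */
        return 0;
}
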
diff --git a/include/asm-x86_64/processor-flags.h b/include/asm-x86_64/processor-flags.h
deleted file mode 100644
index ec99a57b2c6a..000000000000
--- a/include/asm-x86_64/processor-flags.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/processor-flags.h>
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
deleted file mode 100644
index 31f579b828f2..000000000000
--- a/include/asm-x86_64/processor.h
+++ /dev/null
@@ -1,439 +0,0 @@
1/*
2 * include/asm-x86_64/processor.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 */
6
7#ifndef __ASM_X86_64_PROCESSOR_H
8#define __ASM_X86_64_PROCESSOR_H
9
10#include <asm/segment.h>
11#include <asm/page.h>
12#include <asm/types.h>
13#include <asm/sigcontext.h>
14#include <asm/cpufeature.h>
15#include <linux/threads.h>
16#include <asm/msr.h>
17#include <asm/current.h>
18#include <asm/system.h>
19#include <asm/mmsegment.h>
20#include <asm/percpu.h>
21#include <linux/personality.h>
22#include <linux/cpumask.h>
23#include <asm/processor-flags.h>
24
25#define TF_MASK 0x00000100
26#define IF_MASK 0x00000200
27#define IOPL_MASK 0x00003000
28#define NT_MASK 0x00004000
29#define VM_MASK 0x00020000
30#define AC_MASK 0x00040000
31#define VIF_MASK 0x00080000 /* virtual interrupt flag */
32#define VIP_MASK 0x00100000 /* virtual interrupt pending */
33#define ID_MASK 0x00200000
34
35#define desc_empty(desc) \
36 (!((desc)->a | (desc)->b))
37
38#define desc_equal(desc1, desc2) \
39 (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
40
41/*
42 * Default implementation of macro that returns current
43 * instruction pointer ("program counter").
44 */
45#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
46
47/*
48 * CPU type and hardware bug flags. Kept separately for each CPU.
49 */
50
51struct cpuinfo_x86 {
52 __u8 x86; /* CPU family */
53 __u8 x86_vendor; /* CPU vendor */
54 __u8 x86_model;
55 __u8 x86_mask;
56 int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
57 __u32 x86_capability[NCAPINTS];
58 char x86_vendor_id[16];
59 char x86_model_id[64];
60 int x86_cache_size; /* in KB */
61 int x86_clflush_size;
62 int x86_cache_alignment;
63	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
64 __u8 x86_virt_bits, x86_phys_bits;
65 __u8 x86_max_cores; /* cpuid returned max cores value */
66 __u32 x86_power;
67 __u32 extended_cpuid_level; /* Max extended CPUID function supported */
68 unsigned long loops_per_jiffy;
69#ifdef CONFIG_SMP
70 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
71#endif
72 __u8 apicid;
73#ifdef CONFIG_SMP
74 __u8 booted_cores; /* number of cores as seen by OS */
75 __u8 phys_proc_id; /* Physical Processor id. */
76 __u8 cpu_core_id; /* Core id. */
77#endif
78} ____cacheline_aligned;
79
80#define X86_VENDOR_INTEL 0
81#define X86_VENDOR_CYRIX 1
82#define X86_VENDOR_AMD 2
83#define X86_VENDOR_UMC 3
84#define X86_VENDOR_NEXGEN 4
85#define X86_VENDOR_CENTAUR 5
86#define X86_VENDOR_TRANSMETA 7
87#define X86_VENDOR_NUM 8
88#define X86_VENDOR_UNKNOWN 0xff
89
90#ifdef CONFIG_SMP
91extern struct cpuinfo_x86 cpu_data[];
92#define current_cpu_data cpu_data[smp_processor_id()]
93#else
94#define cpu_data (&boot_cpu_data)
95#define current_cpu_data boot_cpu_data
96#endif
97
98extern char ignore_irq13;
99
100extern void identify_cpu(struct cpuinfo_x86 *);
101extern void print_cpu_info(struct cpuinfo_x86 *);
102extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
103extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
104extern unsigned short num_cache_leaves;
105
106/*
107 * Save the cr4 feature set we're using (i.e.
108 * Pentium 4MB enable and PPro Global page
109 * enable), so that any CPUs that boot up
110 * after us can get the correct flags.
111 */
112extern unsigned long mmu_cr4_features;
113
114static inline void set_in_cr4 (unsigned long mask)
115{
116 mmu_cr4_features |= mask;
117 __asm__("movq %%cr4,%%rax\n\t"
118 "orq %0,%%rax\n\t"
119 "movq %%rax,%%cr4\n"
120 : : "irg" (mask)
121 :"ax");
122}
123
124static inline void clear_in_cr4 (unsigned long mask)
125{
126 mmu_cr4_features &= ~mask;
127 __asm__("movq %%cr4,%%rax\n\t"
128 "andq %0,%%rax\n\t"
129 "movq %%rax,%%cr4\n"
130 : : "irg" (~mask)
131 :"ax");
132}
133
134
135/*
136 * User space process size: 47 bits minus one guard page.
137 */
138#define TASK_SIZE64 (0x800000000000UL - 4096)
139
140/* This decides where the kernel will search for a free chunk of vm
141 * space during mmaps.
142 */
143#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
144
145#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
146#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
147
148#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
149
150/*
151 * Size of io_bitmap.
152 */
153#define IO_BITMAP_BITS 65536
154#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
155#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
156#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
157#define INVALID_IO_BITMAP_OFFSET 0x8000
158
159struct i387_fxsave_struct {
160 u16 cwd;
161 u16 swd;
162 u16 twd;
163 u16 fop;
164 u64 rip;
165 u64 rdp;
166 u32 mxcsr;
167 u32 mxcsr_mask;
168 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
169 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
170 u32 padding[24];
171} __attribute__ ((aligned (16)));
172
173union i387_union {
174 struct i387_fxsave_struct fxsave;
175};
176
177struct tss_struct {
178 u32 reserved1;
179 u64 rsp0;
180 u64 rsp1;
181 u64 rsp2;
182 u64 reserved2;
183 u64 ist[7];
184 u32 reserved3;
185 u32 reserved4;
186 u16 reserved5;
187 u16 io_bitmap_base;
188 /*
189 * The extra 1 is there because the CPU will access an
190 * additional byte beyond the end of the IO permission
191 * bitmap. The extra byte must be all 1 bits, and must
192 * be within the limit. Thus we have:
193 *
194 * 128 bytes, the bitmap itself, for ports 0..0x3ff
195 * 8 bytes, for an extra "long" of ~0UL
196 */
197 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
198} __attribute__((packed)) ____cacheline_aligned;
199
200
201extern struct cpuinfo_x86 boot_cpu_data;
202DECLARE_PER_CPU(struct tss_struct,init_tss);
203/* Save the original ist values for checking stack pointers during debugging */
204struct orig_ist {
205 unsigned long ist[7];
206};
207DECLARE_PER_CPU(struct orig_ist, orig_ist);
208
209#ifdef CONFIG_X86_VSMP
210#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
211#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
212#else
213#define ARCH_MIN_TASKALIGN 16
214#define ARCH_MIN_MMSTRUCT_ALIGN 0
215#endif
216
217struct thread_struct {
218 unsigned long rsp0;
219 unsigned long rsp;
220 unsigned long userrsp; /* Copy from PDA */
221 unsigned long fs;
222 unsigned long gs;
223 unsigned short es, ds, fsindex, gsindex;
224/* Hardware debugging registers */
225 unsigned long debugreg0;
226 unsigned long debugreg1;
227 unsigned long debugreg2;
228 unsigned long debugreg3;
229 unsigned long debugreg6;
230 unsigned long debugreg7;
231/* fault info */
232 unsigned long cr2, trap_no, error_code;
233/* floating point info */
234 union i387_union i387 __attribute__((aligned(16)));
235/* IO permissions. The bitmap could be moved into the GDT, which would make
236 context switches faster for a limited number of ioperm-using tasks. -AK */
237 int ioperm;
238 unsigned long *io_bitmap_ptr;
239 unsigned io_bitmap_max;
240/* cached TLS descriptors. */
241 u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
242} __attribute__((aligned(16)));
243
244#define INIT_THREAD { \
245 .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
246}
247
248#define INIT_TSS { \
249 .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
250}
251
252#define INIT_MMAP \
253{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
254
255#define start_thread(regs,new_rip,new_rsp) do { \
256 asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
257 load_gs_index(0); \
258 (regs)->rip = (new_rip); \
259 (regs)->rsp = (new_rsp); \
260 write_pda(oldrsp, (new_rsp)); \
261 (regs)->cs = __USER_CS; \
262 (regs)->ss = __USER_DS; \
263 (regs)->eflags = 0x200; \
264 set_fs(USER_DS); \
265} while(0)
266
267#define get_debugreg(var, register) \
268 __asm__("movq %%db" #register ", %0" \
269 :"=r" (var))
270#define set_debugreg(value, register) \
271 __asm__("movq %0,%%db" #register \
272 : /* no output */ \
273 :"r" (value))
274
275struct task_struct;
276struct mm_struct;
277
278/* Free all resources held by a thread. */
279extern void release_thread(struct task_struct *);
280
281/* Prepare to copy thread state - unlazy all lazy status */
282extern void prepare_to_copy(struct task_struct *tsk);
283
284/*
285 * create a kernel thread without removing it from tasklists
286 */
287extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
288
289/*
290 * Return saved PC of a blocked thread.
291 * What is this good for? It will always be the scheduler or ret_from_fork.
292 */
293#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
294
295extern unsigned long get_wchan(struct task_struct *p);
296#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
297#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
298#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
299
300
301struct microcode_header {
302 unsigned int hdrver;
303 unsigned int rev;
304 unsigned int date;
305 unsigned int sig;
306 unsigned int cksum;
307 unsigned int ldrver;
308 unsigned int pf;
309 unsigned int datasize;
310 unsigned int totalsize;
311 unsigned int reserved[3];
312};
313
314struct microcode {
315 struct microcode_header hdr;
316 unsigned int bits[0];
317};
318
319typedef struct microcode microcode_t;
320typedef struct microcode_header microcode_header_t;
321
322/* microcode format is extended from Prescott processors */
323struct extended_signature {
324 unsigned int sig;
325 unsigned int pf;
326 unsigned int cksum;
327};
328
329struct extended_sigtable {
330 unsigned int count;
331 unsigned int cksum;
332 unsigned int reserved[3];
333 struct extended_signature sigs[0];
334};
335
336
337#define ASM_NOP1 K8_NOP1
338#define ASM_NOP2 K8_NOP2
339#define ASM_NOP3 K8_NOP3
340#define ASM_NOP4 K8_NOP4
341#define ASM_NOP5 K8_NOP5
342#define ASM_NOP6 K8_NOP6
343#define ASM_NOP7 K8_NOP7
344#define ASM_NOP8 K8_NOP8
345
346/* Opteron nops */
347#define K8_NOP1 ".byte 0x90\n"
348#define K8_NOP2 ".byte 0x66,0x90\n"
349#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
350#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
351#define K8_NOP5 K8_NOP3 K8_NOP2
352#define K8_NOP6 K8_NOP3 K8_NOP3
353#define K8_NOP7 K8_NOP4 K8_NOP3
354#define K8_NOP8 K8_NOP4 K8_NOP4
355
356#define ASM_NOP_MAX 8
357
358/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
359static inline void rep_nop(void)
360{
361 __asm__ __volatile__("rep;nop": : :"memory");
362}
363
364/* Stop speculative execution */
365static inline void sync_core(void)
366{
367 int tmp;
368 asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
369}
370
371#define ARCH_HAS_PREFETCH
372static inline void prefetch(void *x)
373{
374 asm volatile("prefetcht0 (%0)" :: "r" (x));
375}
376
377#define ARCH_HAS_PREFETCHW 1
378static inline void prefetchw(void *x)
379{
380 alternative_input("prefetcht0 (%1)",
381 "prefetchw (%1)",
382 X86_FEATURE_3DNOW,
383 "r" (x));
384}
385
386#define ARCH_HAS_SPINLOCK_PREFETCH 1
387
388#define spin_lock_prefetch(x) prefetchw(x)
389
390#define cpu_relax() rep_nop()
391
392static inline void serialize_cpu(void)
393{
394 __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
395}
396
397static inline void __monitor(const void *eax, unsigned long ecx,
398 unsigned long edx)
399{
400 /* "monitor %eax,%ecx,%edx;" */
401 asm volatile(
402 ".byte 0x0f,0x01,0xc8;"
403 : :"a" (eax), "c" (ecx), "d"(edx));
404}
405
406static inline void __mwait(unsigned long eax, unsigned long ecx)
407{
408 /* "mwait %eax,%ecx;" */
409 asm volatile(
410 ".byte 0x0f,0x01,0xc9;"
411 : :"a" (eax), "c" (ecx));
412}
413
414static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
415{
416 /* "mwait %eax,%ecx;" */
417 asm volatile(
418 "sti; .byte 0x0f,0x01,0xc9;"
419 : :"a" (eax), "c" (ecx));
420}
421
422extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
423
424#define stack_current() \
425({ \
426 struct thread_info *ti; \
427 asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
428 ti->task; \
429})
430
431#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
432
433extern unsigned long boot_option_idle_override;
434/* Boot loader type from the setup header */
435extern int bootloader_type;
436
437#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
438
439#endif /* __ASM_X86_64_PROCESSOR_H */
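
rep_nop()/cpu_relax() above wrap the PAUSE instruction (encoded as rep;nop), which tells the CPU it is inside a spin loop so it can back off and avoid memory-order mis-speculation. A minimal sketch of the busy-wait idiom they serve; spin_until_set() and the atomic flag are illustrative, not kernel API:

#include <stdatomic.h>

static inline void cpu_relax_sketch(void)
{
        __asm__ __volatile__("rep; nop" ::: "memory");  /* == PAUSE */
}

static void spin_until_set(atomic_int *flag)
{
        while (!atomic_load_explicit(flag, memory_order_acquire))
                cpu_relax_sketch();     /* polite busy-wait */
}
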
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
deleted file mode 100644
index 31f20ad65876..000000000000
--- a/include/asm-x86_64/proto.h
+++ /dev/null
@@ -1,104 +0,0 @@
1#ifndef _ASM_X8664_PROTO_H
2#define _ASM_X8664_PROTO_H 1
3
4#include <asm/ldt.h>
5
6/* misc architecture specific prototypes */
7
8struct cpuinfo_x86;
9struct pt_regs;
10
11extern void start_kernel(void);
12extern void pda_init(int);
13
14extern void early_idt_handler(void);
15
16extern void mcheck_init(struct cpuinfo_x86 *c);
17extern void init_memory_mapping(unsigned long start, unsigned long end);
18
19extern void system_call(void);
20extern int kernel_syscall(void);
21extern void syscall_init(void);
22
23extern void ia32_syscall(void);
24extern void ia32_cstar_target(void);
25extern void ia32_sysenter_target(void);
26
27extern void config_acpi_tables(void);
28extern void ia32_syscall(void);
29
30extern int pmtimer_mark_offset(void);
31extern void pmtimer_resume(void);
32extern void pmtimer_wait(unsigned);
33extern unsigned int do_gettimeoffset_pm(void);
34#ifdef CONFIG_X86_PM_TIMER
35extern u32 pmtmr_ioport;
36#else
37#define pmtmr_ioport 0
38#endif
39extern int nohpet;
40
41extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
42
43extern void early_identify_cpu(struct cpuinfo_x86 *c);
44
45extern int k8_scan_nodes(unsigned long start, unsigned long end);
46
47extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
48extern unsigned long numa_free_all_bootmem(void);
49
50extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
51
52extern void load_gs_index(unsigned gs);
53
54extern void stop_timer_interrupt(void);
55extern void main_timer_handler(void);
56
57extern unsigned long end_pfn_map;
58
59extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp);
60extern void show_registers(struct pt_regs *regs);
61
62extern void exception_table_check(void);
63
64extern void acpi_reserve_bootmem(void);
65
66extern void swap_low_mappings(void);
67
68extern void __show_regs(struct pt_regs * regs);
69extern void show_regs(struct pt_regs * regs);
70
71extern void syscall32_cpu_init(void);
72
73extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
74
75extern void early_quirks(void);
76extern void check_efer(void);
77
78extern void select_idle_routine(const struct cpuinfo_x86 *c);
79
80extern unsigned long table_start, table_end;
81
82extern int exception_trace;
83extern unsigned cpu_khz;
84extern unsigned tsc_khz;
85
86extern int reboot_force;
87extern int notsc_setup(char *);
88
89extern int timer_over_8254;
90
91extern int gsi_irq_sharing(int gsi);
92
93extern void smp_local_timer_interrupt(void);
94
95extern int force_mwait;
96
97long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
98
99void i8254_timer_resume(void);
100
101#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
102#define round_down(x,y) ((x) & ~((y)-1))
103
104#endif
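
round_up()/round_down() above assume a power-of-two y: adding y-1 before masking rounds up, masking alone rounds down. A tiny worked example:

#include <assert.h>

#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
#define round_down(x,y) ((x) & ~((y)-1))

int main(void)
{
        assert(round_up(4097UL, 4096UL) == 8192UL);
        assert(round_up(4096UL, 4096UL) == 4096UL);     /* already aligned */
        assert(round_down(8191UL, 4096UL) == 4096UL);
        return 0;
}
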
diff --git a/include/asm-x86_64/ptrace-abi.h b/include/asm-x86_64/ptrace-abi.h
deleted file mode 100644
index 19184b0806b1..000000000000
--- a/include/asm-x86_64/ptrace-abi.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _X86_64_PTRACE_ABI_H
2#define _X86_64_PTRACE_ABI_H
3
4#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
5#define R15 0
6#define R14 8
7#define R13 16
8#define R12 24
9#define RBP 32
10#define RBX 40
11/* arguments: interrupts/non-tracing syscalls only save up to here */
12#define R11 48
13#define R10 56
14#define R9 64
15#define R8 72
16#define RAX 80
17#define RCX 88
18#define RDX 96
19#define RSI 104
20#define RDI 112
21#define ORIG_RAX 120 /* = ERROR */
22/* end of arguments */
23/* cpu exception frame or undefined in case of fast syscall. */
24#define RIP 128
25#define CS 136
26#define EFLAGS 144
27#define RSP 152
28#define SS 160
29#define ARGOFFSET R11
30#endif /* __ASSEMBLY__ */
31
32/* top of stack page */
33#define FRAME_SIZE 168
34
35#define PTRACE_OLDSETOPTIONS 21
36
37/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
38#define PTRACE_GETREGS 12
39#define PTRACE_SETREGS 13
40#define PTRACE_GETFPREGS 14
41#define PTRACE_SETFPREGS 15
42#define PTRACE_GETFPXREGS 18
43#define PTRACE_SETFPXREGS 19
44
45/* only useful for accessing 32bit programs */
46#define PTRACE_GET_THREAD_AREA 25
47#define PTRACE_SET_THREAD_AREA 26
48
49#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
50
51#endif
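
These constants are byte offsets of the saved registers in the traced child's user area; that is how a debugger addresses them through PTRACE_PEEKUSER. A hedged sketch (error handling elided, and it assumes the child is already ptrace-stopped):

#include <sys/ptrace.h>
#include <sys/types.h>

#define RIP 128 /* byte offset from the table above */

/* Read the stopped child's instruction pointer. */
static long peek_rip(pid_t child)
{
        return ptrace(PTRACE_PEEKUSER, child, (void *)RIP, NULL);
}
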
diff --git a/include/asm-x86_64/ptrace.h b/include/asm-x86_64/ptrace.h
deleted file mode 100644
index 7f166ccb0606..000000000000
--- a/include/asm-x86_64/ptrace.h
+++ /dev/null
@@ -1,78 +0,0 @@
1#ifndef _X86_64_PTRACE_H
2#define _X86_64_PTRACE_H
3
4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h>
6
7#ifndef __ASSEMBLY__
8
9struct pt_regs {
10 unsigned long r15;
11 unsigned long r14;
12 unsigned long r13;
13 unsigned long r12;
14 unsigned long rbp;
15 unsigned long rbx;
16/* arguments: non-interrupt/non-tracing syscalls only save up to here */
17 unsigned long r11;
18 unsigned long r10;
19 unsigned long r9;
20 unsigned long r8;
21 unsigned long rax;
22 unsigned long rcx;
23 unsigned long rdx;
24 unsigned long rsi;
25 unsigned long rdi;
26 unsigned long orig_rax;
27/* end of arguments */
28/* cpu exception frame or undefined */
29 unsigned long rip;
30 unsigned long cs;
31 unsigned long eflags;
32 unsigned long rsp;
33 unsigned long ss;
34/* top of stack page */
35};
36
37#endif
38
39#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
40#define user_mode(regs) (!!((regs)->cs & 3))
41#define user_mode_vm(regs) user_mode(regs)
42#define instruction_pointer(regs) ((regs)->rip)
43#define regs_return_value(regs) ((regs)->rax)
44
45extern unsigned long profile_pc(struct pt_regs *regs);
46void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
47
48struct task_struct;
49
50extern unsigned long
51convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
52
53enum {
54 EF_CF = 0x00000001,
55 EF_PF = 0x00000004,
56 EF_AF = 0x00000010,
57 EF_ZF = 0x00000040,
58 EF_SF = 0x00000080,
59 EF_TF = 0x00000100,
60 EF_IE = 0x00000200,
61 EF_DF = 0x00000400,
62 EF_OF = 0x00000800,
63 EF_IOPL = 0x00003000,
64 EF_IOPL_RING0 = 0x00000000,
65 EF_IOPL_RING1 = 0x00001000,
66 EF_IOPL_RING2 = 0x00002000,
67 EF_NT = 0x00004000, /* nested task */
68 EF_RF = 0x00010000, /* resume */
69 EF_VM = 0x00020000, /* virtual mode */
70 EF_AC = 0x00040000, /* alignment */
71 EF_VIF = 0x00080000, /* virtual interrupt */
72 EF_VIP = 0x00100000, /* virtual interrupt pending */
73 EF_ID = 0x00200000, /* id */
74};
75
76#endif
77
78#endif
diff --git a/include/asm-x86_64/required-features.h b/include/asm-x86_64/required-features.h
deleted file mode 100644
index e80d5761b00a..000000000000
--- a/include/asm-x86_64/required-features.h
+++ /dev/null
@@ -1,46 +0,0 @@
1#ifndef _ASM_REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1
3
4/* Define the minimum CPUID feature set for the kernel. These bits are checked
5 really early to actually display a visible error message before the
6 kernel dies. Make sure to assign features to the proper mask!
7
8 The real information is in arch/x86_64/Kconfig.cpu; this just converts
9 the CONFIGs into a bitmask */
10
11/* x86-64 baseline features */
12#define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
13#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
14#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
15#define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
16#define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
17#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
18#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
19#define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
20#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
21#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
22
23#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
24 NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
25 NEED_XMM|NEED_XMM2)
26#define SSE_MASK (NEED_XMM|NEED_XMM2)
27
28/* x86-64 baseline features */
29#define NEED_LM (1<<(X86_FEATURE_LM & 31))
30
31#ifdef CONFIG_X86_USE_3DNOW
32# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
33#else
34# define NEED_3DNOW 0
35#endif
36
37#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
38
39#define REQUIRED_MASK2 0
40#define REQUIRED_MASK3 0
41#define REQUIRED_MASK4 0
42#define REQUIRED_MASK5 0
43#define REQUIRED_MASK6 0
44#define REQUIRED_MASK7 0
45
46#endif
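
REQUIRED_MASK0 covers word 0 of the feature flags, i.e. CPUID leaf 1 EDX. A user-space sketch of the kind of check the early boot code performs; the bit numbers below follow the standard CPUID encoding and are stated here as assumptions, not copied from cpufeature.h:

#include <cpuid.h>
#include <stdio.h>

/* CPUID leaf 1, EDX bit numbers (standard encoding, assumed) */
enum { FPU = 0, PSE = 3, MSR = 5, PAE = 6, CX8 = 8,
       PGE = 13, CMOV = 15, FXSR = 24, XMM = 25, XMM2 = 26 };

#define NEED(f) (1u << (f))
#define MASK0 (NEED(FPU)|NEED(PSE)|NEED(MSR)|NEED(PAE)|NEED(CX8)| \
               NEED(PGE)|NEED(CMOV)|NEED(FXSR)|NEED(XMM)|NEED(XMM2))

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        __get_cpuid(1, &eax, &ebx, &ecx, &edx);
        printf("baseline features %s\n",
               (edx & MASK0) == MASK0 ? "present" : "MISSING");
        return 0;
}
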
diff --git a/include/asm-x86_64/resource.h b/include/asm-x86_64/resource.h
deleted file mode 100644
index f40b40623234..000000000000
--- a/include/asm-x86_64/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _X8664_RESOURCE_H
2#define _X8664_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif
diff --git a/include/asm-x86_64/resume-trace.h b/include/asm-x86_64/resume-trace.h
deleted file mode 100644
index 34bf998fdf62..000000000000
--- a/include/asm-x86_64/resume-trace.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#define TRACE_RESUME(user) do { \
2 if (pm_trace_enabled) { \
3 void *tracedata; \
4 asm volatile("movq $1f,%0\n" \
5 ".section .tracedata,\"a\"\n" \
6 "1:\t.word %c1\n" \
7 "\t.quad %c2\n" \
8 ".previous" \
9 :"=r" (tracedata) \
10 : "i" (__LINE__), "i" (__FILE__)); \
11 generate_resume_trace(tracedata, user); \
12 } \
13} while (0)
diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h
deleted file mode 100644
index c7350f6d2015..000000000000
--- a/include/asm-x86_64/rio.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Derived from include/asm-i386/mach-summit/mach_mpparse.h
3 * and include/asm-i386/mach-default/bios_ebda.h
4 *
5 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
6 */
7
8#ifndef __ASM_RIO_H
9#define __ASM_RIO_H
10
11#define RIO_TABLE_VERSION 3
12
13struct rio_table_hdr {
14 u8 version; /* Version number of this data structure */
15 u8 num_scal_dev; /* # of Scalability devices */
16 u8 num_rio_dev; /* # of RIO I/O devices */
17} __attribute__((packed));
18
19struct scal_detail {
20 u8 node_id; /* Scalability Node ID */
21 u32 CBAR; /* Address of 1MB register space */
22 u8 port0node; /* Node ID port connected to: 0xFF=None */
23 u8 port0port; /* Port num port connected to: 0,1,2, or */
24 /* 0xFF=None */
25 u8 port1node; /* Node ID port connected to: 0xFF = None */
26 u8 port1port; /* Port num port connected to: 0,1,2, or */
27 /* 0xFF=None */
28 u8 port2node; /* Node ID port connected to: 0xFF = None */
29 u8 port2port; /* Port num port connected to: 0,1,2, or */
30 /* 0xFF=None */
31 u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
32} __attribute__((packed));
33
34struct rio_detail {
35 u8 node_id; /* RIO Node ID */
36 u32 BBAR; /* Address of 1MB register space */
37 u8 type; /* Type of device */
38 u8 owner_id; /* Node ID of Hurricane that owns this */
39 /* node */
40 u8 port0node; /* Node ID port connected to: 0xFF=None */
41 u8 port0port; /* Port num port connected to: 0,1,2, or */
42 /* 0xFF=None */
43 u8 port1node; /* Node ID port connected to: 0xFF=None */
44 u8 port1port; /* Port num port connected to: 0,1,2, or */
45 /* 0xFF=None */
46 u8 first_slot; /* Lowest slot number below this Calgary */
47 u8 status; /* Bit 0 = 1 : the XAPIC is used */
48 /* = 0 : the XAPIC is not used, i.e.: */
49 /* ints fwded to another XAPIC */
50 /* Bits 1:7 Reserved */
51 u8 WP_index; /* instance index - lower ones have */
52 /* lower slot numbers/PCI bus numbers */
53 u8 chassis_num; /* 1 based Chassis number */
54} __attribute__((packed));
55
56enum {
57 HURR_SCALABILTY = 0, /* Hurricane Scalability info */
58 HURR_RIOIB = 2, /* Hurricane RIOIB info */
59 COMPAT_CALGARY = 4, /* Compatibility Calgary */
60 ALT_CALGARY = 5, /* Second Planar Calgary */
61};
62
63/*
64 * A real-mode segmented pointer to the 4K EBDA area
65 * is stored at 0x40E.
66 */
67static inline unsigned long get_bios_ebda(void)
68{
69 unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
70 address <<= 4;
71 return address;
72}
73
74#endif /* __ASM_RIO_H */
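
get_bios_ebda() above decodes a classic real-mode segmented pointer: the 16-bit value at physical 0x40E is a segment, and shifting it left by 4 yields the linear address. A worked example with an illustrative BIOS value:

/* 0x9FC0 << 4 == 0x9FC00: the EBDA sits just under 640K. */
static unsigned long ebda_from_segment(unsigned short seg)
{
        return (unsigned long)seg << 4;
}
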
diff --git a/include/asm-x86_64/rtc.h b/include/asm-x86_64/rtc.h
deleted file mode 100644
index 18ed713ac7de..000000000000
--- a/include/asm-x86_64/rtc.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _X86_64_RTC_H
2#define _X86_64_RTC_H
3
4/*
5 * x86 uses the default access methods for the RTC.
6 */
7
8#include <asm-generic/rtc.h>
9
10#endif
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
deleted file mode 100644
index 72aeebed920b..000000000000
--- a/include/asm-x86_64/rwlock.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* include/asm-x86_64/rwlock.h
2 *
3 * Helpers used by both rw spinlocks and rw semaphores.
4 *
5 * Based in part on code from semaphore.h and
6 * spinlock.h Copyright 1996 Linus Torvalds.
7 *
8 * Copyright 1999 Red Hat, Inc.
9 * Copyright 2001,2002 SuSE labs
10 *
11 * Written by Benjamin LaHaise.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifndef _ASM_X86_64_RWLOCK_H
19#define _ASM_X86_64_RWLOCK_H
20
21#define RW_LOCK_BIAS 0x01000000
22#define RW_LOCK_BIAS_STR "0x01000000"
23
24/* Actual code is in asm/spinlock.h or in arch/x86_64/lib/rwlock.S */
25
26#endif
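
RW_LOCK_BIAS drives a counting scheme: the lock word starts at the bias, each reader subtracts 1, and a writer needs the whole bias, so it only wins when the lock is idle. A hedged plain-C sketch of that arithmetic; the real implementation lives in asm/spinlock.h and arch/x86_64/lib/rwlock.S, not here:

#include <stdatomic.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000 /* the lock word starts at this value */

static bool try_read_lock(atomic_int *lock)
{
        if (atomic_fetch_sub(lock, 1) > 0)      /* old value positive: no writer */
                return true;
        atomic_fetch_add(lock, 1);              /* undo and fail */
        return false;
}

static bool try_write_lock(atomic_int *lock)
{
        int expected = RW_LOCK_BIAS;            /* succeeds only when idle */
        return atomic_compare_exchange_strong(lock, &expected, 0);
}
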
diff --git a/include/asm-x86_64/scatterlist.h b/include/asm-x86_64/scatterlist.h
deleted file mode 100644
index eaf7ada27e14..000000000000
--- a/include/asm-x86_64/scatterlist.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _X8664_SCATTERLIST_H
2#define _X8664_SCATTERLIST_H
3
4#include <asm/types.h>
5
6struct scatterlist {
7 struct page *page;
8 unsigned int offset;
9 unsigned int length;
10 dma_addr_t dma_address;
11 unsigned int dma_length;
12};
13
14#define ISA_DMA_THRESHOLD (0x00ffffff)
15
16/* These macros should be used after a pci_map_sg call has been done
17 * to get bus addresses of each of the SG entries and their lengths.
18 * You should only work with the number of sg entries pci_map_sg
19 * returns.
20 */
21#define sg_dma_address(sg) ((sg)->dma_address)
22#define sg_dma_len(sg) ((sg)->dma_length)
23
24#endif
diff --git a/include/asm-x86_64/seccomp.h b/include/asm-x86_64/seccomp.h
deleted file mode 100644
index 553af65a2287..000000000000
--- a/include/asm-x86_64/seccomp.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_SECCOMP_H
2
3#include <linux/thread_info.h>
4
5#ifdef TIF_32BIT
6#error "unexpected TIF_32BIT on x86_64"
7#else
8#define TIF_32BIT TIF_IA32
9#endif
10
11#include <linux/unistd.h>
12#include <asm/ia32_unistd.h>
13
14#define __NR_seccomp_read __NR_read
15#define __NR_seccomp_write __NR_write
16#define __NR_seccomp_exit __NR_exit
17#define __NR_seccomp_sigreturn __NR_rt_sigreturn
18
19#define __NR_seccomp_read_32 __NR_ia32_read
20#define __NR_seccomp_write_32 __NR_ia32_write
21#define __NR_seccomp_exit_32 __NR_ia32_exit
22#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
23
24#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-x86_64/sections.h b/include/asm-x86_64/sections.h
deleted file mode 100644
index c746d9f1e70c..000000000000
--- a/include/asm-x86_64/sections.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _X8664_SECTIONS_H
2#define _X8664_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7#endif
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
deleted file mode 100644
index 04b8ab21328f..000000000000
--- a/include/asm-x86_64/segment.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef _ASM_SEGMENT_H
2#define _ASM_SEGMENT_H
3
4#include <asm/cache.h>
5
6/* Simple and small GDT entries for booting only */
7
8#define GDT_ENTRY_BOOT_CS 2
9#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
10
11#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
12#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
13
14#define __KERNEL_CS 0x10
15#define __KERNEL_DS 0x18
16
17#define __KERNEL32_CS 0x08
18
19/*
20 * we cannot use the same code segment descriptor for user and kernel
21 * -- not even in the long flat mode, because of different DPL /kkeil
22 * The segment offset needs to contain an RPL. Grr. -AK
23 * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
24 */
25
26#define __USER32_CS 0x23 /* 4*8+3 */
27#define __USER_DS 0x2b /* 5*8+3 */
28#define __USER_CS 0x33 /* 6*8+3 */
29#define __USER32_DS __USER_DS
30
31#define GDT_ENTRY_TSS 8 /* needs two entries */
32#define GDT_ENTRY_LDT 10 /* needs two entries */
33#define GDT_ENTRY_TLS_MIN 12
34#define GDT_ENTRY_TLS_MAX 14
35
36#define GDT_ENTRY_TLS_ENTRIES 3
37
38#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
39#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
40
41/* TLS indexes for 64bit - hardcoded in arch_prctl */
42#define FS_TLS 0
43#define GS_TLS 1
44
45#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
46#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
47
48#define IDT_ENTRIES 256
49#define GDT_ENTRIES 16
50#define GDT_SIZE (GDT_ENTRIES * 8)
51#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
52
53#endif
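
The selector values above follow the usual encoding, (GDT index << 3) | RPL, which is why the comments read "4*8+3" and so on. A tiny check of the constants in this file:

#include <assert.h>

#define SELECTOR(index, rpl) (((index) << 3) | (rpl))

int main(void)
{
        assert(SELECTOR(6, 3) == 0x33); /* __USER_CS */
        assert(SELECTOR(5, 3) == 0x2b); /* __USER_DS */
        assert(SELECTOR(2, 0) == 0x10); /* __KERNEL_CS */
        return 0;
}
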
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
deleted file mode 100644
index 1194888536b9..000000000000
--- a/include/asm-x86_64/semaphore.h
+++ /dev/null
@@ -1,181 +0,0 @@
1#ifndef _X86_64_SEMAPHORE_H
2#define _X86_64_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * (C) Copyright 1996 Linus Torvalds
12 *
13 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
14 * the original code and to make semaphore waits
15 * interruptible so that processes waiting on
16 * semaphores can be killed.
17 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
18 * functions in asm/semaphore-helper.h while fixing a
19 * potential and subtle race discovered by Ulrich Schmid
20 * in down_interruptible(). Since I started to play here I
21 * also implemented the `trylock' semaphore operation.
22 * 1999-07-02 Artur Skawina <skawina@geocities.com>
23 * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
24 * do this). Changed calling sequences from push/jmp to
25 * traditional call/ret.
26 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
27 * Some hacks to ensure compatibility with recent
28 * GCC snapshots, to avoid stack corruption when compiling
29 * with -fomit-frame-pointer. It's unclear whether this will
30 * be fixed in GCC, as our previous implementation was a
31 * bit dubious.
32 *
33 * If you would like to see an analysis of this implementation, please
34 * ftp to gcom.com and download the file
35 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
36 *
37 */
38
39#include <asm/system.h>
40#include <asm/atomic.h>
41#include <asm/rwlock.h>
42#include <linux/wait.h>
43#include <linux/rwsem.h>
44#include <linux/stringify.h>
45
46struct semaphore {
47 atomic_t count;
48 int sleepers;
49 wait_queue_head_t wait;
50};
51
52#define __SEMAPHORE_INITIALIZER(name, n) \
53{ \
54 .count = ATOMIC_INIT(n), \
55 .sleepers = 0, \
56 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
57}
58
59#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
60 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
61
62#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
63#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
64
65static inline void sema_init (struct semaphore *sem, int val)
66{
67/*
68 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
69 *
70 * I'd rather use the more flexible initialization above, but sadly
71 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
72 */
73 atomic_set(&sem->count, val);
74 sem->sleepers = 0;
75 init_waitqueue_head(&sem->wait);
76}
77
78static inline void init_MUTEX (struct semaphore *sem)
79{
80 sema_init(sem, 1);
81}
82
83static inline void init_MUTEX_LOCKED (struct semaphore *sem)
84{
85 sema_init(sem, 0);
86}
87
88asmlinkage void __down_failed(void /* special register calling convention */);
89asmlinkage int __down_failed_interruptible(void /* params in registers */);
90asmlinkage int __down_failed_trylock(void /* params in registers */);
91asmlinkage void __up_wakeup(void /* special register calling convention */);
92
93asmlinkage void __down(struct semaphore * sem);
94asmlinkage int __down_interruptible(struct semaphore * sem);
95asmlinkage int __down_trylock(struct semaphore * sem);
96asmlinkage void __up(struct semaphore * sem);
97
98/*
99 * This is ugly, but we want the default case to fall through.
100 * "__down_failed" is a special asm handler that calls the C
101 * routine that actually waits. See arch/x86_64/kernel/semaphore.c
102 */
103static inline void down(struct semaphore * sem)
104{
105 might_sleep();
106
107 __asm__ __volatile__(
108 "# atomic down operation\n\t"
109 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
110 "jns 1f\n\t"
111 "call __down_failed\n"
112 "1:"
113 :"=m" (sem->count)
114 :"D" (sem)
115 :"memory");
116}
117
118/*
119 * Interruptibly try to acquire a semaphore. If we obtain
120 * it, return zero; if we were interrupted, return -EINTR.
121 */
122static inline int down_interruptible(struct semaphore * sem)
123{
124 int result;
125
126 might_sleep();
127
128 __asm__ __volatile__(
129 "# atomic interruptible down operation\n\t"
130 "xorl %0,%0\n\t"
131 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
132 "jns 2f\n\t"
133 "call __down_failed_interruptible\n"
134 "2:\n"
135 :"=&a" (result), "=m" (sem->count)
136 :"D" (sem)
137 :"memory");
138 return result;
139}
140
141/*
142 * Non-blocking attempt to down() a semaphore.
143 * Returns zero if we acquired it.
144 */
145static inline int down_trylock(struct semaphore * sem)
146{
147 int result;
148
149 __asm__ __volatile__(
150 "# atomic interruptible down operation\n\t"
151 "xorl %0,%0\n\t"
152 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
153 "jns 2f\n\t"
154 "call __down_failed_trylock\n\t"
155 "2:\n"
156 :"=&a" (result), "=m" (sem->count)
157 :"D" (sem)
158 :"memory","cc");
159 return result;
160}
161
162/*
163 * Note! This is subtle. We jump to wake people up only if
164 * the semaphore was negative (== somebody was waiting on it).
165 * The default case (no contention) will result in NO
166 * jumps for both down() and up().
167 */
168static inline void up(struct semaphore * sem)
169{
170 __asm__ __volatile__(
171 "# atomic up operation\n\t"
172 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
173 "jg 1f\n\t"
174 "call __up_wakeup\n"
175 "1:"
176 :"=m" (sem->count)
177 :"D" (sem)
178 :"memory");
179}
180#endif /* __KERNEL__ */
181#endif
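
A plain-C sketch of the fast path the inline asm above implements: down() decrements the count and calls into the slow path only when it goes negative; up() increments and wakes only when someone was queued. slow_down()/slow_wake() are stand-ins for __down_failed()/__up_wakeup(), which would park and wake callers:

#include <stdatomic.h>

struct sem_sketch { atomic_int count; };

static void slow_down(struct sem_sketch *s) { (void)s; /* would park the caller */ }
static void slow_wake(struct sem_sketch *s) { (void)s; /* would wake a sleeper */ }

static void down_sketch(struct sem_sketch *s)
{
        if (atomic_fetch_sub(&s->count, 1) <= 0)        /* went negative: contended */
                slow_down(s);
}

static void up_sketch(struct sem_sketch *s)
{
        if (atomic_fetch_add(&s->count, 1) < 0)         /* someone was waiting */
                slow_wake(s);
}
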
diff --git a/include/asm-x86_64/sembuf.h b/include/asm-x86_64/sembuf.h
deleted file mode 100644
index 63b52925ae2a..000000000000
--- a/include/asm-x86_64/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _X86_64_SEMBUF_H
2#define _X86_64_SEMBUF_H
3
4/*
5 * The semid64_ds structure for x86_64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16 __kernel_time_t sem_otime; /* last semop time */
17 unsigned long __unused1;
18 __kernel_time_t sem_ctime; /* last change time */
19 unsigned long __unused2;
20 unsigned long sem_nsems; /* no. of semaphores in array */
21 unsigned long __unused3;
22 unsigned long __unused4;
23};
24
25#endif /* _X86_64_SEMBUF_H */
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
deleted file mode 100644
index b0496e0d72a6..000000000000
--- a/include/asm-x86_64/serial.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * include/asm-x86_64/serial.h
3 */
4
5
6/*
7 * This assumes you have a 1.8432 MHz clock for your UART.
8 *
9 * It'd be nice if someone built a serial card with a 24.576 MHz
10 * clock, since the 16550A is capable of handling a top speed of 1.5
11 * megabits/second; but this requires the faster clock.
12 */
13#define BASE_BAUD ( 1843200 / 16 )
14
15/* Standard COM flags (except for COM4, because of the 8514 problem) */
16#ifdef CONFIG_SERIAL_DETECT_IRQ
17#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
18#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
19#else
20#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
21#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
22#endif
23
24#define SERIAL_PORT_DFNS \
25 /* UART CLK PORT IRQ FLAGS */ \
26 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
27 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
28 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
29 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
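
The BASE_BAUD arithmetic above: a 16550-class UART divides its input clock by 16 per bit, so 1.8432 MHz / 16 = 115200, the familiar top standard rate. Trivially checkable:

#include <stdio.h>

int main(void)
{
        printf("BASE_BAUD = %d\n", 1843200 / 16);       /* prints 115200 */
        return 0;
}
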
diff --git a/include/asm-x86_64/setup.h b/include/asm-x86_64/setup.h
deleted file mode 100644
index eaeff73d6c10..000000000000
--- a/include/asm-x86_64/setup.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _x8664_SETUP_H
2#define _x8664_SETUP_H
3
4#define COMMAND_LINE_SIZE 2048
5
6#endif
diff --git a/include/asm-x86_64/shmbuf.h b/include/asm-x86_64/shmbuf.h
deleted file mode 100644
index 5a6d6dda7c48..000000000000
--- a/include/asm-x86_64/shmbuf.h
+++ /dev/null
@@ -1,38 +0,0 @@
1#ifndef _X8664_SHMBUF_H
2#define _X8664_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for x8664 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct shmid64_ds {
14 struct ipc64_perm shm_perm; /* operation perms */
15 size_t shm_segsz; /* size of segment (bytes) */
16 __kernel_time_t shm_atime; /* last attach time */
17 __kernel_time_t shm_dtime; /* last detach time */
18 __kernel_time_t shm_ctime; /* last change time */
19 __kernel_pid_t shm_cpid; /* pid of creator */
20 __kernel_pid_t shm_lpid; /* pid of last operator */
21 unsigned long shm_nattch; /* no. of current attaches */
22 unsigned long __unused4;
23 unsigned long __unused5;
24};
25
26struct shminfo64 {
27 unsigned long shmmax;
28 unsigned long shmmin;
29 unsigned long shmmni;
30 unsigned long shmseg;
31 unsigned long shmall;
32 unsigned long __unused1;
33 unsigned long __unused2;
34 unsigned long __unused3;
35 unsigned long __unused4;
36};
37
38#endif
diff --git a/include/asm-x86_64/shmparam.h b/include/asm-x86_64/shmparam.h
deleted file mode 100644
index d7021620dcb7..000000000000
--- a/include/asm-x86_64/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASMX8664_SHMPARAM_H
2#define _ASMX8664_SHMPARAM_H
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* _ASMX8664_SHMPARAM_H */
diff --git a/include/asm-x86_64/sigcontext.h b/include/asm-x86_64/sigcontext.h
deleted file mode 100644
index b4e40236666c..000000000000
--- a/include/asm-x86_64/sigcontext.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_X86_64_SIGCONTEXT_H
2#define _ASM_X86_64_SIGCONTEXT_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7/* FXSAVE frame */
8/* Note: reserved1/2 may someday contain valuable data. Always save/restore
9 them when you change signal frames. */
10struct _fpstate {
11 __u16 cwd;
12 __u16 swd;
13 __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
14 __u16 fop;
15 __u64 rip;
16 __u64 rdp;
17 __u32 mxcsr;
18 __u32 mxcsr_mask;
19 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
20 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
21 __u32 reserved2[24];
22};
23
24struct sigcontext {
25 unsigned long r8;
26 unsigned long r9;
27 unsigned long r10;
28 unsigned long r11;
29 unsigned long r12;
30 unsigned long r13;
31 unsigned long r14;
32 unsigned long r15;
33 unsigned long rdi;
34 unsigned long rsi;
35 unsigned long rbp;
36 unsigned long rbx;
37 unsigned long rdx;
38 unsigned long rax;
39 unsigned long rcx;
40 unsigned long rsp;
41 unsigned long rip;
42 unsigned long eflags; /* RFLAGS */
43 unsigned short cs;
44 unsigned short gs;
45 unsigned short fs;
46 unsigned short __pad0;
47 unsigned long err;
48 unsigned long trapno;
49 unsigned long oldmask;
50 unsigned long cr2;
51 struct _fpstate __user *fpstate; /* zero when no FPU context */
52 unsigned long reserved1[8];
53};
54
55#endif
diff --git a/include/asm-x86_64/sigcontext32.h b/include/asm-x86_64/sigcontext32.h
deleted file mode 100644
index 3d657038ab7c..000000000000
--- a/include/asm-x86_64/sigcontext32.h
+++ /dev/null
@@ -1,71 +0,0 @@
1#ifndef _SIGCONTEXT32_H
2#define _SIGCONTEXT32_H 1
3
4/* signal context for 32bit programs. */
5
6#define X86_FXSR_MAGIC 0x0000
7
8struct _fpreg {
9 unsigned short significand[4];
10 unsigned short exponent;
11};
12
13struct _fpxreg {
14 unsigned short significand[4];
15 unsigned short exponent;
16 unsigned short padding[3];
17};
18
19struct _xmmreg {
20 __u32 element[4];
21};
22
23/* FSAVE frame with extensions */
24struct _fpstate_ia32 {
25 /* Regular FPU environment */
26 __u32 cw;
27 __u32 sw;
28 __u32 tag; /* not compatible to 64bit twd */
29 __u32 ipoff;
30 __u32 cssel;
31 __u32 dataoff;
32 __u32 datasel;
33 struct _fpreg _st[8];
34 unsigned short status;
35 unsigned short magic; /* 0xffff = regular FPU data only */
36
37 /* FXSR FPU environment */
38 __u32 _fxsr_env[6];
39 __u32 mxcsr;
40 __u32 reserved;
41 struct _fpxreg _fxsr_st[8];
42 struct _xmmreg _xmm[8]; /* It's actually 16 */
43 __u32 padding[56];
44};
45
46struct sigcontext_ia32 {
47 unsigned short gs, __gsh;
48 unsigned short fs, __fsh;
49 unsigned short es, __esh;
50 unsigned short ds, __dsh;
51 unsigned int edi;
52 unsigned int esi;
53 unsigned int ebp;
54 unsigned int esp;
55 unsigned int ebx;
56 unsigned int edx;
57 unsigned int ecx;
58 unsigned int eax;
59 unsigned int trapno;
60 unsigned int err;
61 unsigned int eip;
62 unsigned short cs, __csh;
63 unsigned int eflags;
64 unsigned int esp_at_signal;
65 unsigned short ss, __ssh;
66 unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
67 unsigned int oldmask;
68 unsigned int cr2;
69};
70
71#endif
diff --git a/include/asm-x86_64/siginfo.h b/include/asm-x86_64/siginfo.h
deleted file mode 100644
index d09a1e6e7246..000000000000
--- a/include/asm-x86_64/siginfo.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _X8664_SIGINFO_H
2#define _X8664_SIGINFO_H
3
4#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
5
6#include <asm-generic/siginfo.h>
7
8#endif
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
deleted file mode 100644
index 4581f978b299..000000000000
--- a/include/asm-x86_64/signal.h
+++ /dev/null
@@ -1,181 +0,0 @@
1#ifndef _ASMx8664_SIGNAL_H
2#define _ASMx8664_SIGNAL_H
3
4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#include <linux/time.h>
7
8/* Avoid too many header ordering problems. */
9struct siginfo;
10
11#ifdef __KERNEL__
12#include <linux/linkage.h>
13/* Most things should be clean enough to redefine this at will, if care
14 is taken to make libc match. */
15
16#define _NSIG 64
17#define _NSIG_BPW 64
18#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
19
20typedef unsigned long old_sigset_t; /* at least 32 bits */
21
22typedef struct {
23 unsigned long sig[_NSIG_WORDS];
24} sigset_t;
25
26
27#else
28/* Here we must cater to libcs that poke about in kernel headers. */
29
30#define NSIG 32
31typedef unsigned long sigset_t;
32
33#endif /* __KERNEL__ */
34#endif
35
36#define SIGHUP 1
37#define SIGINT 2
38#define SIGQUIT 3
39#define SIGILL 4
40#define SIGTRAP 5
41#define SIGABRT 6
42#define SIGIOT 6
43#define SIGBUS 7
44#define SIGFPE 8
45#define SIGKILL 9
46#define SIGUSR1 10
47#define SIGSEGV 11
48#define SIGUSR2 12
49#define SIGPIPE 13
50#define SIGALRM 14
51#define SIGTERM 15
52#define SIGSTKFLT 16
53#define SIGCHLD 17
54#define SIGCONT 18
55#define SIGSTOP 19
56#define SIGTSTP 20
57#define SIGTTIN 21
58#define SIGTTOU 22
59#define SIGURG 23
60#define SIGXCPU 24
61#define SIGXFSZ 25
62#define SIGVTALRM 26
63#define SIGPROF 27
64#define SIGWINCH 28
65#define SIGIO 29
66#define SIGPOLL SIGIO
67/*
68#define SIGLOST 29
69*/
70#define SIGPWR 30
71#define SIGSYS 31
72#define SIGUNUSED 31
73
74/* These should not be considered constants from userland. */
75#define SIGRTMIN 32
76#define SIGRTMAX _NSIG
77
78/*
79 * SA_FLAGS values:
80 *
81 * SA_ONSTACK indicates that a registered stack_t will be used.
82 * SA_RESTART flag to get restarting signals (which were the default long ago)
83 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
84 * SA_RESETHAND clears the handler when the signal is delivered.
85 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
86 * SA_NODEFER prevents the current signal from being masked in the handler.
87 *
88 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
89 * Unix names RESETHAND and NODEFER respectively.
90 */
91#define SA_NOCLDSTOP 0x00000001
92#define SA_NOCLDWAIT 0x00000002
93#define SA_SIGINFO 0x00000004
94#define SA_ONSTACK 0x08000000
95#define SA_RESTART 0x10000000
96#define SA_NODEFER 0x40000000
97#define SA_RESETHAND 0x80000000
98
99#define SA_NOMASK SA_NODEFER
100#define SA_ONESHOT SA_RESETHAND
101
102#define SA_RESTORER 0x04000000
103
104/*
105 * sigaltstack controls
106 */
107#define SS_ONSTACK 1
108#define SS_DISABLE 2
109
110#define MINSIGSTKSZ 2048
111#define SIGSTKSZ 8192
112
113#include <asm-generic/signal.h>
114
115#ifndef __ASSEMBLY__
116
117struct sigaction {
118 __sighandler_t sa_handler;
119 unsigned long sa_flags;
120 __sigrestore_t sa_restorer;
121 sigset_t sa_mask; /* mask last for extensibility */
122};
123
124struct k_sigaction {
125 struct sigaction sa;
126};
127
128typedef struct sigaltstack {
129 void __user *ss_sp;
130 int ss_flags;
131 size_t ss_size;
132} stack_t;
133
134#ifdef __KERNEL__
135#include <asm/sigcontext.h>
136
137#undef __HAVE_ARCH_SIG_BITOPS
138#if 0
139
140static inline void sigaddset(sigset_t *set, int _sig)
141{
142 __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
143}
144
145static inline void sigdelset(sigset_t *set, int _sig)
146{
147 __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
148}
149
150static inline int __const_sigismember(sigset_t *set, int _sig)
151{
152 unsigned long sig = _sig - 1;
153 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & (_NSIG_BPW-1)));
154}
155
156static inline int __gen_sigismember(sigset_t *set, int _sig)
157{
158 int ret;
159 __asm__("btq %2,%1\n\tsbbq %0,%0"
160 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
161 return ret;
162}
163
164#define sigismember(set,sig) \
165 (__builtin_constant_p(sig) ? \
166 __const_sigismember((set),(sig)) : \
167 __gen_sigismember((set),(sig)))
168
169static inline int sigfindinword(unsigned long word)
170{
171 __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
172 return word;
173}
174#endif
175#endif
176
177#define ptrace_signal_deliver(regs, cookie) do { } while (0)
178
179#endif /* __KERNEL__ */
180
181#endif
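
The disabled (#if 0) helpers above do sigset bit operations with btsq/btrq/btq. Portable C equivalents of the word/bit split they rely on, as a sketch; the _sketch names are illustrative, not the kernel's:

#define NSIG_SKETCH     64
#define NSIG_BPW_SKETCH 64

typedef struct { unsigned long sig[NSIG_SKETCH / NSIG_BPW_SKETCH]; } sigset_sketch_t;

static inline void sigaddset_sketch(sigset_sketch_t *set, int _sig)
{
        unsigned long sig = _sig - 1;
        set->sig[sig / NSIG_BPW_SKETCH] |= 1UL << (sig % NSIG_BPW_SKETCH);
}

static inline int sigismember_sketch(const sigset_sketch_t *set, int _sig)
{
        unsigned long sig = _sig - 1;
        return 1 & (set->sig[sig / NSIG_BPW_SKETCH] >> (sig % NSIG_BPW_SKETCH));
}
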
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
deleted file mode 100644
index 3f303d2365ed..000000000000
--- a/include/asm-x86_64/smp.h
+++ /dev/null
@@ -1,117 +0,0 @@
1#ifndef __ASM_SMP_H
2#define __ASM_SMP_H
3
4/*
5 * We need the APIC definitions automatically as part of 'smp.h'
6 */
7#include <linux/threads.h>
8#include <linux/cpumask.h>
9#include <linux/bitops.h>
10#include <linux/init.h>
11extern int disable_apic;
12
13#include <asm/mpspec.h>
14#include <asm/apic.h>
15#include <asm/io_apic.h>
16#include <asm/thread_info.h>
17
18#ifdef CONFIG_SMP
19
20#include <asm/pda.h>
21
22struct pt_regs;
23
24extern cpumask_t cpu_present_mask;
25extern cpumask_t cpu_possible_map;
26extern cpumask_t cpu_online_map;
27extern cpumask_t cpu_callout_map;
28extern cpumask_t cpu_initialized;
29
30/*
31 * Private routines/data
32 */
33
34extern void smp_alloc_memory(void);
35extern volatile unsigned long smp_invalidate_needed;
36extern void lock_ipi_call_lock(void);
37extern void unlock_ipi_call_lock(void);
38extern int smp_num_siblings;
39extern void smp_send_reschedule(int cpu);
40
41extern cpumask_t cpu_sibling_map[NR_CPUS];
42extern cpumask_t cpu_core_map[NR_CPUS];
43extern u8 cpu_llc_id[NR_CPUS];
44
45#define SMP_TRAMPOLINE_BASE 0x6000
46
47/*
48 * On x86 all CPUs are mapped 1:1 to the APIC space.
49 * This simplifies scheduling and IPI sending and
50 * compresses data structures.
51 */
52
53static inline int num_booting_cpus(void)
54{
55 return cpus_weight(cpu_callout_map);
56}
57
58#define raw_smp_processor_id() read_pda(cpunumber)
59
60extern int __cpu_disable(void);
61extern void __cpu_die(unsigned int cpu);
62extern void prefill_possible_map(void);
63extern unsigned num_processors;
64extern unsigned __cpuinitdata disabled_cpus;
65
66#define NO_PROC_ID 0xFF /* No processor magic marker */
67
68#endif /* CONFIG_SMP */
69
70static inline int hard_smp_processor_id(void)
71{
72 /* we don't want to mark this access volatile - bad code generation */
73 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
74}
75
76/*
77 * Some low-level functions might want to know about
78 * the real APIC ID <-> CPU # mapping.
79 */
80extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
81extern u8 x86_cpu_to_log_apicid[NR_CPUS];
82extern u8 bios_cpu_apicid[];
83
84static inline int cpu_present_to_apicid(int mps_cpu)
85{
86 if (mps_cpu < NR_CPUS)
87 return (int)bios_cpu_apicid[mps_cpu];
88 else
89 return BAD_APICID;
90}
91
92#ifndef CONFIG_SMP
93#define stack_smp_processor_id() 0
94#define cpu_logical_map(x) (x)
95#else
96#include <asm/thread_info.h>
97#define stack_smp_processor_id() \
98({ \
99 struct thread_info *ti; \
100 __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
101 ti->cpu; \
102})
103#endif
104
105static __inline int logical_smp_processor_id(void)
106{
107 /* we don't want to mark this access volatile - bad code generation */
108 return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
109}
110
111#ifdef CONFIG_SMP
112#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
113#else
114#define cpu_physical_id(cpu) boot_cpu_id
115#endif /* !CONFIG_SMP */
116#endif
117
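
stack_smp_processor_id() above masks %rsp with CURRENT_MASK to find the thread_info at the base of the THREAD_SIZE-aligned kernel stack. A sketch of that computation; THREAD_SIZE and the struct layout here are assumptions for illustration:

#define THREAD_SIZE_SKETCH 8192UL
#define CURRENT_MASK_SKETCH (~(THREAD_SIZE_SKETCH - 1))

struct thread_info_sketch { int cpu; };

static inline struct thread_info_sketch *ti_from_sp(unsigned long sp)
{
        /* stacks are THREAD_SIZE-aligned, thread_info sits at the base */
        return (struct thread_info_sketch *)(sp & CURRENT_MASK_SKETCH);
}
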
diff --git a/include/asm-x86_64/socket.h b/include/asm-x86_64/socket.h
deleted file mode 100644
index 90af60cf3c0e..000000000000
--- a/include/asm-x86_64/socket.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_SOCKET_H
2#define _ASM_SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
19#define SO_KEEPALIVE 9
20#define SO_OOBINLINE 10
21#define SO_NO_CHECK 11
22#define SO_PRIORITY 12
23#define SO_LINGER 13
24#define SO_BSDCOMPAT 14
25/* To add: #define SO_REUSEPORT 15 */
26#define SO_PASSCRED 16
27#define SO_PEERCRED 17
28#define SO_RCVLOWAT 18
29#define SO_SNDLOWAT 19
30#define SO_RCVTIMEO 20
31#define SO_SNDTIMEO 21
32
33/* Security levels - as per NRL IPv6 - don't actually do anything */
34#define SO_SECURITY_AUTHENTICATION 22
35#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
36#define SO_SECURITY_ENCRYPTION_NETWORK 24
37
38#define SO_BINDTODEVICE 25
39
40/* Socket filtering */
41#define SO_ATTACH_FILTER 26
42#define SO_DETACH_FILTER 27
43
44#define SO_PEERNAME 28
45#define SO_TIMESTAMP 29
46#define SCM_TIMESTAMP SO_TIMESTAMP
47
48#define SO_ACCEPTCONN 30
49
50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
52#define SO_TIMESTAMPNS 35
53#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
54
55#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-x86_64/sockios.h b/include/asm-x86_64/sockios.h
deleted file mode 100644
index d726ba2513e3..000000000000
--- a/include/asm-x86_64/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ARCH_X8664_SOCKIOS__
2#define __ARCH_X8664_SOCKIOS__
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif
diff --git a/include/asm-x86_64/sparsemem.h b/include/asm-x86_64/sparsemem.h
deleted file mode 100644
index dabb16714a71..000000000000
--- a/include/asm-x86_64/sparsemem.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef _ASM_X86_64_SPARSEMEM_H
2#define _ASM_X86_64_SPARSEMEM_H 1
3
4#ifdef CONFIG_SPARSEMEM
5
6/*
7 * generic non-linear memory support:
8 *
9 * 1) we will not split memory into more chunks than will fit into the flags
10 * field of the struct page
11 *
12 * SECTION_SIZE_BITS 2^n: size of each section
13 * MAX_PHYSADDR_BITS 2^n: max size of physical address space
14 * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space
15 *
16 */
17
18#define SECTION_SIZE_BITS 27 /* 2^27 = 128 MB per section is convenient right now */
19#define MAX_PHYSADDR_BITS 40
20#define MAX_PHYSMEM_BITS 40
21
22extern int early_pfn_to_nid(unsigned long pfn);
23
24#endif /* CONFIG_SPARSEMEM */
25
26#endif /* _ASM_X86_64_SPARSEMEM_H */
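
The constants above fix the section geometry: with SECTION_SIZE_BITS = 27 each section spans 2^27 bytes (128 MB), and MAX_PHYSMEM_BITS = 40 caps physical memory at 2^40 bytes (1 TB), so at most 2^(40-27) = 8192 sections can exist. A small check of that arithmetic:

    #include <stdio.h>

    #define SECTION_SIZE_BITS 27
    #define MAX_PHYSMEM_BITS  40

    int main(void)
    {
        unsigned long long section_bytes = 1ULL << SECTION_SIZE_BITS;
        unsigned long long max_sections  =
            1ULL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS);
        printf("section size: %llu MB\n", section_bytes >> 20);  /* 128 */
        printf("max sections: %llu\n", max_sections);            /* 8192 */
        return 0;
    }
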
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
deleted file mode 100644
index 88bf981e73cf..000000000000
--- a/include/asm-x86_64/spinlock.h
+++ /dev/null
@@ -1,167 +0,0 @@
1#ifndef __ASM_SPINLOCK_H
2#define __ASM_SPINLOCK_H
3
4#include <asm/atomic.h>
5#include <asm/rwlock.h>
6#include <asm/page.h>
7#include <asm/processor.h>
8
9/*
10 * Your basic SMP spinlocks, allowing only a single CPU anywhere
11 *
12 * Simple spin lock operations. There are two variants, one clears IRQs
13 * on the local processor, one does not.
14 *
15 * We make no fairness assumptions. They have a cost.
16 *
17 * (the type definitions are in asm/spinlock_types.h)
18 */
19
20static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
21{
22 return *(volatile signed int *)(&(lock)->slock) <= 0;
23}
24
25static inline void __raw_spin_lock(raw_spinlock_t *lock)
26{
27 asm volatile(
28 "\n1:\t"
29 LOCK_PREFIX " ; decl %0\n\t"
30 "jns 2f\n"
31 "3:\n"
32 "rep;nop\n\t"
33 "cmpl $0,%0\n\t"
34 "jle 3b\n\t"
35 "jmp 1b\n"
36 "2:\t" : "=m" (lock->slock) : : "memory");
37}
38
39/*
40 * Same as __raw_spin_lock, but reenable interrupts during spinning.
41 */
42#ifndef CONFIG_PROVE_LOCKING
43static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
44{
45 asm volatile(
46 "\n1:\t"
47 LOCK_PREFIX " ; decl %0\n\t"
48 "jns 5f\n"
49 "testl $0x200, %1\n\t" /* interrupts were disabled? */
50 "jz 4f\n\t"
51 "sti\n"
52 "3:\t"
53 "rep;nop\n\t"
54 "cmpl $0, %0\n\t"
55 "jle 3b\n\t"
56 "cli\n\t"
57 "jmp 1b\n"
58 "4:\t"
59 "rep;nop\n\t"
60 "cmpl $0, %0\n\t"
61 "jg 1b\n\t"
62 "jmp 4b\n"
63 "5:\n\t"
64 : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
65}
66#endif
67
68static inline int __raw_spin_trylock(raw_spinlock_t *lock)
69{
70 int oldval;
71
72 asm volatile(
73 "xchgl %0,%1"
74 :"=q" (oldval), "=m" (lock->slock)
75 :"0" (0) : "memory");
76
77 return oldval > 0;
78}
79
80static inline void __raw_spin_unlock(raw_spinlock_t *lock)
81{
82 asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
83}
84
85static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
86{
87 while (__raw_spin_is_locked(lock))
88 cpu_relax();
89}
90
91/*
92 * Read-write spinlocks, allowing multiple readers
93 * but only one writer.
94 *
95 * NOTE! it is quite common to have readers in interrupts
96 * but no interrupt writers. For those circumstances we
97 * can "mix" irq-safe locks - any writer needs to get an
98 * irq-safe write-lock, but readers can get non-irq-safe
99 * read-locks.
100 *
101 * On x86, we implement read-write locks as a 32-bit counter
102 * with the high bit (sign) being the "contended" bit.
103 */
104
105static inline int __raw_read_can_lock(raw_rwlock_t *lock)
106{
107 return (int)(lock)->lock > 0;
108}
109
110static inline int __raw_write_can_lock(raw_rwlock_t *lock)
111{
112 return (lock)->lock == RW_LOCK_BIAS;
113}
114
115static inline void __raw_read_lock(raw_rwlock_t *rw)
116{
117 asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
118 "jns 1f\n"
119 "call __read_lock_failed\n"
120 "1:\n"
121 ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
122}
123
124static inline void __raw_write_lock(raw_rwlock_t *rw)
125{
126 asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
127 "jz 1f\n"
128 "\tcall __write_lock_failed\n\t"
129 "1:\n"
130 ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
131}
132
133static inline int __raw_read_trylock(raw_rwlock_t *lock)
134{
135 atomic_t *count = (atomic_t *)lock;
136 atomic_dec(count);
137 if (atomic_read(count) >= 0)
138 return 1;
139 atomic_inc(count);
140 return 0;
141}
142
143static inline int __raw_write_trylock(raw_rwlock_t *lock)
144{
145 atomic_t *count = (atomic_t *)lock;
146 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
147 return 1;
148 atomic_add(RW_LOCK_BIAS, count);
149 return 0;
150}
151
152static inline void __raw_read_unlock(raw_rwlock_t *rw)
153{
154 asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
155}
156
157static inline void __raw_write_unlock(raw_rwlock_t *rw)
158{
159 asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
160 : "=m" (rw->lock) : : "memory");
161}
162
163#define _raw_spin_relax(lock) cpu_relax()
164#define _raw_read_relax(lock) cpu_relax()
165#define _raw_write_relax(lock) cpu_relax()
166
167#endif /* __ASM_SPINLOCK_H */
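
The read-write lock above encodes its whole state in one counter initialized to RW_LOCK_BIAS: each reader subtracts 1, a writer subtracts the entire bias, so the count reaches exactly 0 only when one writer and no readers hold the lock, and goes negative when a reader collides with a writer. A portable C11 sketch of the trylock paths, assuming the RW_LOCK_BIAS value from asm-x86_64/rwlock.h of this era:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RW_LOCK_BIAS 0x01000000   /* assumed from asm-x86_64/rwlock.h */

    typedef struct { atomic_int lock; } rwlock_sketch_t;

    /* Reader: take one unit; succeed while the count stays non-negative. */
    static bool read_trylock(rwlock_sketch_t *rw)
    {
        if (atomic_fetch_sub(&rw->lock, 1) - 1 >= 0)
            return true;
        atomic_fetch_add(&rw->lock, 1);        /* undo, like __raw_read_trylock */
        return false;
    }

    /* Writer: take the whole bias; succeed only if the count hits exactly 0. */
    static bool write_trylock(rwlock_sketch_t *rw)
    {
        if (atomic_fetch_sub(&rw->lock, RW_LOCK_BIAS) - RW_LOCK_BIAS == 0)
            return true;
        atomic_fetch_add(&rw->lock, RW_LOCK_BIAS);
        return false;
    }

    int main(void)
    {
        rwlock_sketch_t rw = { RW_LOCK_BIAS };
        printf("reader: %d\n", read_trylock(&rw));   /* 1: readers may share  */
        printf("writer: %d\n", write_trylock(&rw));  /* 0: blocked by reader  */
        return 0;
    }
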
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h
deleted file mode 100644
index 4da9345c1500..000000000000
--- a/include/asm-x86_64/spinlock_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef __ASM_SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H
3
4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly"
6#endif
7
8typedef struct {
9 unsigned int slock;
10} raw_spinlock_t;
11
12#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
13
14typedef struct {
15 unsigned int lock;
16} raw_rwlock_t;
17
18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19
20#endif
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h
deleted file mode 100644
index 6f0b54594307..000000000000
--- a/include/asm-x86_64/stacktrace.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _ASM_STACKTRACE_H
2#define _ASM_STACKTRACE_H 1
3
4extern int kstack_depth_to_print;
5
6/* Generic stack tracer with callbacks */
7
8struct stacktrace_ops {
9 void (*warning)(void *data, char *msg);
10 /* msg must contain %s for the symbol */
11 void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
12 void (*address)(void *data, unsigned long address);
13	/* Stop dumping on a negative return */
14 int (*stack)(void *data, char *name);
15};
16
17void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
18 struct stacktrace_ops *ops, void *data);
19
20#endif
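
The stacktrace_ops table above decouples the frame walker from its output: dump_trace() walks the stacks and fires one callback per address, per warning, and per stack transition. A userspace illustration of how a client fills in and drives the callbacks (the walker call here is simulated, not the real dump_trace()):

    #include <stdio.h>

    /* Reduced mirror of the callback table above, for illustration. */
    struct stacktrace_ops_sketch {
        void (*warning)(void *data, char *msg);
        void (*address)(void *data, unsigned long address);
        int  (*stack)(void *data, char *name);  /* negative return stops dump */
    };

    static void print_warning(void *data, char *msg)
    {
        fprintf(stderr, "WARNING: %s\n", msg);
    }

    static void print_address(void *data, unsigned long addr)
    {
        printf("  [<%016lx>]\n", addr);
    }

    static int enter_stack(void *data, char *name)
    {
        printf("<%s>\n", name);
        return 0;                               /* keep dumping */
    }

    int main(void)
    {
        struct stacktrace_ops_sketch ops = {
            .warning = print_warning,
            .address = print_address,
            .stack   = enter_stack,
        };
        /* A dump_trace()-style walker would invoke the callbacks like this: */
        if (ops.stack(NULL, "IRQ") >= 0)
            ops.address(NULL, 0xffffffff80123456UL);
        return 0;
    }
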
diff --git a/include/asm-x86_64/stat.h b/include/asm-x86_64/stat.h
deleted file mode 100644
index fd9f00d560f8..000000000000
--- a/include/asm-x86_64/stat.h
+++ /dev/null
@@ -1,44 +0,0 @@
1#ifndef _ASM_X86_64_STAT_H
2#define _ASM_X86_64_STAT_H
3
4#define STAT_HAVE_NSEC 1
5
6struct stat {
7 unsigned long st_dev;
8 unsigned long st_ino;
9 unsigned long st_nlink;
10
11 unsigned int st_mode;
12 unsigned int st_uid;
13 unsigned int st_gid;
14 unsigned int __pad0;
15 unsigned long st_rdev;
16 long st_size;
17 long st_blksize;
18	long st_blocks; /* Number of 512-byte blocks allocated. */
19
20 unsigned long st_atime;
21 unsigned long st_atime_nsec;
22 unsigned long st_mtime;
23 unsigned long st_mtime_nsec;
24 unsigned long st_ctime;
25 unsigned long st_ctime_nsec;
26 long __unused[3];
27};
28
29/* For 32bit emulation */
30struct __old_kernel_stat {
31 unsigned short st_dev;
32 unsigned short st_ino;
33 unsigned short st_mode;
34 unsigned short st_nlink;
35 unsigned short st_uid;
36 unsigned short st_gid;
37 unsigned short st_rdev;
38 unsigned int st_size;
39 unsigned int st_atime;
40 unsigned int st_mtime;
41 unsigned int st_ctime;
42};
43
44#endif
diff --git a/include/asm-x86_64/statfs.h b/include/asm-x86_64/statfs.h
deleted file mode 100644
index b3f4718af30b..000000000000
--- a/include/asm-x86_64/statfs.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _X86_64_STATFS_H
2#define _X86_64_STATFS_H
3
4#ifndef __KERNEL_STRICT_NAMES
5
6#include <linux/types.h>
7
8typedef __kernel_fsid_t fsid_t;
9
10#endif
11
12/*
13 * This is ugly -- we're already 64-bit clean, so just duplicate the
14 * definitions.
15 */
16struct statfs {
17 long f_type;
18 long f_bsize;
19 long f_blocks;
20 long f_bfree;
21 long f_bavail;
22 long f_files;
23 long f_ffree;
24 __kernel_fsid_t f_fsid;
25 long f_namelen;
26 long f_frsize;
27 long f_spare[5];
28};
29
30struct statfs64 {
31 long f_type;
32 long f_bsize;
33 long f_blocks;
34 long f_bfree;
35 long f_bavail;
36 long f_files;
37 long f_ffree;
38 __kernel_fsid_t f_fsid;
39 long f_namelen;
40 long f_frsize;
41 long f_spare[5];
42};
43
44struct compat_statfs64 {
45 __u32 f_type;
46 __u32 f_bsize;
47 __u64 f_blocks;
48 __u64 f_bfree;
49 __u64 f_bavail;
50 __u64 f_files;
51 __u64 f_ffree;
52 __kernel_fsid_t f_fsid;
53 __u32 f_namelen;
54 __u32 f_frsize;
55 __u32 f_spare[5];
56} __attribute__((packed));
57
58#endif
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
deleted file mode 100644
index e583da7918fb..000000000000
--- a/include/asm-x86_64/string.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef _X86_64_STRING_H_
2#define _X86_64_STRING_H_
3
4#ifdef __KERNEL__
5
6/* Written 2002 by Andi Kleen */
7
8/* Only used for special circumstances. Stolen from i386/string.h */
9static __always_inline void *
10__inline_memcpy(void * to, const void * from, size_t n)
11{
12unsigned long d0, d1, d2;
13__asm__ __volatile__(
14 "rep ; movsl\n\t"
15 "testb $2,%b4\n\t"
16 "je 1f\n\t"
17 "movsw\n"
18 "1:\ttestb $1,%b4\n\t"
19 "je 2f\n\t"
20 "movsb\n"
21 "2:"
22 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
23 :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
24 : "memory");
25return (to);
26}
27
28/* Even with __builtin_ variants the compiler may decide to call the out-of-line
29 function. */
30
31#define __HAVE_ARCH_MEMCPY 1
32#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
33extern void *memcpy(void *to, const void *from, size_t len);
34#else
35extern void *__memcpy(void *to, const void *from, size_t len);
36#define memcpy(dst,src,len) \
37 ({ size_t __len = (len); \
38 void *__ret; \
39 if (__builtin_constant_p(len) && __len >= 64) \
40 __ret = __memcpy((dst),(src),__len); \
41 else \
42 __ret = __builtin_memcpy((dst),(src),__len); \
43 __ret; })
44#endif
45
46#define __HAVE_ARCH_MEMSET
47void *memset(void *s, int c, size_t n);
48
49#define __HAVE_ARCH_MEMMOVE
50void * memmove(void * dest,const void *src,size_t count);
51
52int memcmp(const void * cs,const void * ct,size_t count);
53size_t strlen(const char * s);
54char *strcpy(char * dest,const char *src);
55char *strcat(char * dest, const char * src);
56int strcmp(const char * cs,const char * ct);
57
58#endif /* __KERNEL__ */
59
60#endif
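
The memcpy macro above is a dispatch: large copies whose size is a compile-time constant go to the kernel's out-of-line __memcpy, while small or variable-size copies are left to __builtin_memcpy so gcc can inline them. A sketch of the same dispatch pattern, with an ordinary function standing in for the out-of-line routine:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the out-of-line __memcpy (illustrative, not the kernel's). */
    static void *big_memcpy(void *to, const void *from, size_t len)
    {
        return memcpy(to, from, len);
    }

    #define memcpy_dispatch(dst, src, len)                        \
        ({  size_t __len = (len);                                 \
            void *__ret;                                          \
            if (__builtin_constant_p(len) && __len >= 64)         \
                __ret = big_memcpy((dst), (src), __len);          \
            else                                                  \
                __ret = __builtin_memcpy((dst), (src), __len);    \
            __ret; })

    int main(void)
    {
        char a[128] = "hello", b[128];
        memcpy_dispatch(b, a, sizeof(a));  /* constant 128 >= 64: out-of-line */
        memcpy_dispatch(b, a, 6);          /* small constant: gcc inlines it  */
        puts(b);
        return 0;
    }
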
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
deleted file mode 100644
index b897e8cb55fb..000000000000
--- a/include/asm-x86_64/suspend.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright 2001-2003 Pavel Machek <pavel@suse.cz>
3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */
6#include <asm/desc.h>
7#include <asm/i387.h>
8
9static inline int
10arch_prepare_suspend(void)
11{
12 return 0;
13}
14
15/* Image of the saved processor state. If you touch this, fix acpi_wakeup.S. */
16struct saved_context {
17 u16 ds, es, fs, gs, ss;
18 unsigned long gs_base, gs_kernel_base, fs_base;
19 unsigned long cr0, cr2, cr3, cr4, cr8;
20 unsigned long efer;
21 u16 gdt_pad;
22 u16 gdt_limit;
23 unsigned long gdt_base;
24 u16 idt_pad;
25 u16 idt_limit;
26 unsigned long idt_base;
27 u16 ldt;
28 u16 tss;
29 unsigned long tr;
30 unsigned long safety;
31 unsigned long return_address;
32 unsigned long eflags;
33} __attribute__((packed));
34
35/* We'll access these from assembly, so we'd better have them outside the struct */
36extern unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
37extern unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
38extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
39extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
40extern unsigned long saved_context_eflags;
41
42#define loaddebug(thread,register) \
43 set_debugreg((thread)->debugreg##register, register)
44
45extern void fix_processor_context(void);
46
47extern unsigned long saved_rip;
48extern unsigned long saved_rsp;
49extern unsigned long saved_rbp;
50extern unsigned long saved_rbx;
51extern unsigned long saved_rsi;
52extern unsigned long saved_rdi;
53
54/* routines for saving/restoring kernel state */
55extern int acpi_save_state_mem(void);
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
deleted file mode 100644
index f9c589539a82..000000000000
--- a/include/asm-x86_64/swiotlb.h
+++ /dev/null
@@ -1,56 +0,0 @@
1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWIOTLB_H 1
3
4#include <asm/dma-mapping.h>
5
6/* SWIOTLB interface */
7
8extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
9 size_t size, int dir);
10extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
11 dma_addr_t *dma_handle, gfp_t flags);
12extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
13 size_t size, int dir);
14extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
15 dma_addr_t dev_addr,
16 size_t size, int dir);
17extern void swiotlb_sync_single_for_device(struct device *hwdev,
18 dma_addr_t dev_addr,
19 size_t size, int dir);
20extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
21 dma_addr_t dev_addr,
22 unsigned long offset,
23 size_t size, int dir);
24extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
25 dma_addr_t dev_addr,
26 unsigned long offset,
27 size_t size, int dir);
28extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
29 struct scatterlist *sg, int nelems,
30 int dir);
31extern void swiotlb_sync_sg_for_device(struct device *hwdev,
32 struct scatterlist *sg, int nelems,
33 int dir);
34extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
35 int nents, int direction);
36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
37 int nents, int direction);
38extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
39extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
40 void *vaddr, dma_addr_t dma_handle);
41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
42extern void swiotlb_init(void);
43
44extern int swiotlb_force;
45
46#ifdef CONFIG_SWIOTLB
47extern int swiotlb;
48#else
49#define swiotlb 0
50#endif
51
52extern void pci_swiotlb_init(void);
53
54static inline void dma_mark_clean(void *addr, size_t size) {}
55
56#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
deleted file mode 100644
index 02175aa1d16a..000000000000
--- a/include/asm-x86_64/system.h
+++ /dev/null
@@ -1,180 +0,0 @@
1#ifndef __ASM_SYSTEM_H
2#define __ASM_SYSTEM_H
3
4#include <linux/kernel.h>
5#include <asm/segment.h>
6#include <asm/cmpxchg.h>
7
8#ifdef __KERNEL__
9
10#define __STR(x) #x
11#define STR(x) __STR(x)
12
13#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
14#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
15
16/* frame pointer must be last for get_wchan */
17#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
18#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
19
20#define __EXTRA_CLOBBER \
21 ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
22
23/* Save and restore flags to clear and handle the leaking NT bit */
24#define switch_to(prev,next,last) \
25 asm volatile(SAVE_CONTEXT \
26 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
27 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
28 "call __switch_to\n\t" \
29 ".globl thread_return\n" \
30 "thread_return:\n\t" \
31 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
32 "movq %P[thread_info](%%rsi),%%r8\n\t" \
33 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
34 "movq %%rax,%%rdi\n\t" \
35 "jc ret_from_fork\n\t" \
36 RESTORE_CONTEXT \
37 : "=a" (last) \
38 : [next] "S" (next), [prev] "D" (prev), \
39 [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
40 [ti_flags] "i" (offsetof(struct thread_info, flags)),\
41 [tif_fork] "i" (TIF_FORK), \
42 [thread_info] "i" (offsetof(struct task_struct, stack)), \
43 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
44 : "memory", "cc" __EXTRA_CLOBBER)
45
46extern void load_gs_index(unsigned);
47
48/*
49 * Load a segment. Fall back on loading the zero
50 * segment if something goes wrong.
51 */
52#define loadsegment(seg,value) \
53 asm volatile("\n" \
54 "1:\t" \
55 "movl %k0,%%" #seg "\n" \
56 "2:\n" \
57 ".section .fixup,\"ax\"\n" \
58 "3:\t" \
59 "movl %1,%%" #seg "\n\t" \
60 "jmp 2b\n" \
61 ".previous\n" \
62 ".section __ex_table,\"a\"\n\t" \
63 ".align 8\n\t" \
64 ".quad 1b,3b\n" \
65 ".previous" \
66 : :"r" (value), "r" (0))
67
68/*
69 * Clear and set the 'TS' bit, respectively
70 */
71#define clts() __asm__ __volatile__ ("clts")
72
73static inline unsigned long read_cr0(void)
74{
75 unsigned long cr0;
76 asm volatile("movq %%cr0,%0" : "=r" (cr0));
77 return cr0;
78}
79
80static inline void write_cr0(unsigned long val)
81{
82 asm volatile("movq %0,%%cr0" :: "r" (val));
83}
84
85static inline unsigned long read_cr2(void)
86{
87 unsigned long cr2;
88 asm("movq %%cr2,%0" : "=r" (cr2));
89 return cr2;
90}
91
92static inline void write_cr2(unsigned long val)
93{
94 asm volatile("movq %0,%%cr2" :: "r" (val));
95}
96
97static inline unsigned long read_cr3(void)
98{
99 unsigned long cr3;
100 asm("movq %%cr3,%0" : "=r" (cr3));
101 return cr3;
102}
103
104static inline void write_cr3(unsigned long val)
105{
106 asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
107}
108
109static inline unsigned long read_cr4(void)
110{
111 unsigned long cr4;
112 asm("movq %%cr4,%0" : "=r" (cr4));
113 return cr4;
114}
115
116static inline void write_cr4(unsigned long val)
117{
118 asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
119}
120
121static inline unsigned long read_cr8(void)
122{
123 unsigned long cr8;
124 asm("movq %%cr8,%0" : "=r" (cr8));
125 return cr8;
126}
127
128static inline void write_cr8(unsigned long val)
129{
130 asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
131}
132
133#define stts() write_cr0(8 | read_cr0())
134
135#define wbinvd() \
136 __asm__ __volatile__ ("wbinvd": : :"memory")
137
138#endif /* __KERNEL__ */
139
140#define nop() __asm__ __volatile__ ("nop")
141
142#ifdef CONFIG_SMP
143#define smp_mb() mb()
144#define smp_rmb() rmb()
145#define smp_wmb() wmb()
146#define smp_read_barrier_depends() do {} while(0)
147#else
148#define smp_mb() barrier()
149#define smp_rmb() barrier()
150#define smp_wmb() barrier()
151#define smp_read_barrier_depends() do {} while(0)
152#endif
153
154
155/*
156 * Force strict CPU ordering.
157 * And yes, this is required on UP too when we're talking
158 * to devices.
159 */
160#define mb() asm volatile("mfence":::"memory")
161#define rmb() asm volatile("lfence":::"memory")
162
163#ifdef CONFIG_UNORDERED_IO
164#define wmb() asm volatile("sfence" ::: "memory")
165#else
166#define wmb() asm volatile("" ::: "memory")
167#endif
168#define read_barrier_depends() do {} while(0)
169#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
170
171#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
172
173#include <linux/irqflags.h>
174
175void cpu_idle_wait(void);
176
177extern unsigned long arch_align_stack(unsigned long sp);
178extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
179
180#endif
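
The mb()/rmb()/wmb() macros above compile to mfence/lfence/sfence; the canonical use is pairing a write barrier on the producer side with a read barrier on the consumer side so the data is visible before the flag that publishes it. The same pairing rendered with C11 fences (the fences stand in for wmb()/rmb(); this is an illustration, not kernel code):

    #include <stdatomic.h>

    static int payload;
    static atomic_int ready;

    void producer(void)
    {
        payload = 42;                               /* write the data first    */
        atomic_thread_fence(memory_order_release);  /* plays the role of wmb() */
        atomic_store_explicit(&ready, 1, memory_order_relaxed);
    }

    int consumer(void)
    {
        while (!atomic_load_explicit(&ready, memory_order_relaxed))
            ;                                       /* spin until published    */
        atomic_thread_fence(memory_order_acquire);  /* plays the role of rmb() */
        return payload;                             /* guaranteed to see 42    */
    }

    int main(void)
    {
        producer();                   /* single-threaded demonstration */
        return consumer() == 42 ? 0 : 1;
    }
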
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
deleted file mode 100644
index cd955d3d112f..000000000000
--- a/include/asm-x86_64/tce.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * This file is derived from asm-powerpc/tce.h.
3 *
4 * Copyright (C) IBM Corporation, 2006
5 *
6 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
7 * Author: Jon Mason <jdmason@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef _ASM_X86_64_TCE_H
25#define _ASM_X86_64_TCE_H
26
27extern unsigned int specified_table_size;
28struct iommu_table;
29
30#define TCE_ENTRY_SIZE 8 /* in bytes */
31
32#define TCE_READ_SHIFT 0
33#define TCE_WRITE_SHIFT 1
34#define TCE_HUBID_SHIFT 2 /* unused */
35#define TCE_RSVD_SHIFT 8 /* unused */
36#define TCE_RPN_SHIFT 12
37#define TCE_UNUSED_SHIFT 48 /* unused */
38
39#define TCE_RPN_MASK 0x0000fffffffff000ULL
40
41extern void tce_build(struct iommu_table *tbl, unsigned long index,
42 unsigned int npages, unsigned long uaddr, int direction);
43extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
44extern void * __init alloc_tce_table(void);
45extern void __init free_tce_table(void *tbl);
46extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
47
48#endif /* _ASM_X86_64_TCE_H */
diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h
deleted file mode 100644
index 7405756dd41b..000000000000
--- a/include/asm-x86_64/termbits.h
+++ /dev/null
@@ -1,198 +0,0 @@
1#ifndef __ARCH_X8664_TERMBITS_H__
2#define __ARCH_X8664_TERMBITS_H__
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000 /* non-standard rate */
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000 /* input baud rate */
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif
diff --git a/include/asm-x86_64/termios.h b/include/asm-x86_64/termios.h
deleted file mode 100644
index 35ee59b78329..000000000000
--- a/include/asm-x86_64/termios.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _X8664_TERMIOS_H
2#define _X8664_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42#ifdef __KERNEL__
43
44/* intr=^C quit=^\ erase=del kill=^U
45 eof=^D vtime=\0 vmin=\1 sxtc=\0
46 start=^Q stop=^S susp=^Z eol=\0
47 reprint=^R discard=^U werase=^W lnext=^V
48 eol2=\0
49*/
50#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
51
52/*
53 * Translate a "termio" structure into a "termios". Ugh.
54 */
55#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
56 unsigned short __tmp; \
57 get_user(__tmp,&(termio)->x); \
58 *(unsigned short *) &(termios)->x = __tmp; \
59}
60
61#define user_termio_to_kernel_termios(termios, termio) \
62({ \
63 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
64 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
65 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
66 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
67 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
68})
69
70/*
71 * Translate a "termios" structure into a "termio". Ugh.
72 */
73#define kernel_termios_to_user_termio(termio, termios) \
74({ \
75 put_user((termios)->c_iflag, &(termio)->c_iflag); \
76 put_user((termios)->c_oflag, &(termio)->c_oflag); \
77 put_user((termios)->c_cflag, &(termio)->c_cflag); \
78 put_user((termios)->c_lflag, &(termio)->c_lflag); \
79 put_user((termios)->c_line, &(termio)->c_line); \
80 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
81})
82
83#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
84#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
85#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
86#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
87
88#endif /* __KERNEL__ */
89
90#endif /* _X8664_TERMIOS_H */
diff --git a/include/asm-x86_64/therm_throt.h b/include/asm-x86_64/therm_throt.h
deleted file mode 100644
index 5aac059007ba..000000000000
--- a/include/asm-x86_64/therm_throt.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/therm_throt.h>
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
deleted file mode 100644
index beae2bfb62ca..000000000000
--- a/include/asm-x86_64/thread_info.h
+++ /dev/null
@@ -1,169 +0,0 @@
1/* thread_info.h: x86_64 low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H
9
10#ifdef __KERNEL__
11
12#include <asm/page.h>
13#include <asm/types.h>
14#include <asm/pda.h>
15
16/*
17 * low-level task data that entry.S needs immediate access to
18 * - this struct should fit entirely inside of one cache line
19 * - this struct shares the supervisor stack pages
20 */
21#ifndef __ASSEMBLY__
22struct task_struct;
23struct exec_domain;
24#include <asm/mmsegment.h>
25
26struct thread_info {
27 struct task_struct *task; /* main task structure */
28 struct exec_domain *exec_domain; /* execution domain */
29 __u32 flags; /* low level flags */
30 __u32 status; /* thread synchronous flags */
31 __u32 cpu; /* current CPU */
32 int preempt_count; /* 0 => preemptable, <0 => BUG */
33
34 mm_segment_t addr_limit;
35 struct restart_block restart_block;
36};
37#endif
38
39/*
40 * macros/functions for gaining access to the thread information structure
41 * preempt_count needs to be 1 initially, until the scheduler is functional.
42 */
43#ifndef __ASSEMBLY__
44#define INIT_THREAD_INFO(tsk) \
45{ \
46 .task = &tsk, \
47 .exec_domain = &default_exec_domain, \
48 .flags = 0, \
49 .cpu = 0, \
50 .preempt_count = 1, \
51 .addr_limit = KERNEL_DS, \
52 .restart_block = { \
53 .fn = do_no_restart_syscall, \
54 }, \
55}
56
57#define init_thread_info (init_thread_union.thread_info)
58#define init_stack (init_thread_union.stack)
59
60static inline struct thread_info *current_thread_info(void)
61{
62 struct thread_info *ti;
63 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
64 return ti;
65}
66
67/* do not use in interrupt context */
68static inline struct thread_info *stack_thread_info(void)
69{
70 struct thread_info *ti;
71 __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
72 return ti;
73}
74
75/* thread information allocation */
76#ifdef CONFIG_DEBUG_STACK_USAGE
77#define alloc_thread_info(tsk) \
78 ({ \
79 struct thread_info *ret; \
80 \
81 ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \
82 if (ret) \
83 memset(ret, 0, THREAD_SIZE); \
84 ret; \
85 })
86#else
87#define alloc_thread_info(tsk) \
88 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
89#endif
90
91#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
92
93#else /* !__ASSEMBLY__ */
94
95/* how to get the thread information struct from ASM */
96#define GET_THREAD_INFO(reg) \
97 movq %gs:pda_kernelstack,reg ; \
98 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
99
100#endif
101
102/*
103 * thread information flags
104 * - these are process state flags that various assembly files may need to access
105 * - pending work-to-be-done flags are in LSW
106 * - other flags in MSW
107 * Warning: layout of LSW is hardcoded in entry.S
108 */
109#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
110#define TIF_SIGPENDING 2 /* signal pending */
111#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
112#define TIF_SINGLESTEP 4 /* reenable singlestep on user return */
113#define TIF_IRET 5 /* force IRET */
114#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
115#define TIF_SECCOMP 8 /* secure computing */
116#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
117#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
118/* 16 free */
119#define TIF_IA32 17 /* 32bit process */
120#define TIF_FORK 18 /* ret_from_fork */
121#define TIF_ABI_PENDING 19
122#define TIF_MEMDIE 20
123#define TIF_DEBUG 21 /* uses debug registers */
124#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
125#define TIF_FREEZE 23 /* is freezing for suspend */
126
127#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
128#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
129#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
130#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
131#define _TIF_IRET (1<<TIF_IRET)
132#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
133#define _TIF_SECCOMP (1<<TIF_SECCOMP)
134#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
135#define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY)
136#define _TIF_IA32 (1<<TIF_IA32)
137#define _TIF_FORK (1<<TIF_FORK)
138#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
139#define _TIF_DEBUG (1<<TIF_DEBUG)
140#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
141#define _TIF_FREEZE (1<<TIF_FREEZE)
142
143/* work to do on interrupt/exception return */
144#define _TIF_WORK_MASK \
145 (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
146/* work to do on any return to user space */
147#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
148
149/* flags to check in __switch_to() */
150#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
151
152#define PREEMPT_ACTIVE 0x10000000
153
154/*
155 * Thread-synchronous status.
156 *
157 * This is different from the flags in that nobody else
158 * ever touches our thread-synchronous status, so we don't
159 * have to worry about atomic accesses.
160 */
161#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
162#define TS_COMPAT 0x0002 /* 32bit syscall active */
163#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
164
165#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
166
167#endif /* __KERNEL__ */
168
169#endif /* _ASM_THREAD_INFO_H */
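
The TIF_* values above are bit numbers and the _TIF_* values are the corresponding one-bit masks, so work-pending checks reduce to a single AND against a composite mask such as _TIF_WORK_MASK. A small demonstration of how those masks compose and are tested:

    #include <stdio.h>

    #define TIF_SYSCALL_TRACE 0
    #define TIF_SIGPENDING    2
    #define TIF_NEED_RESCHED  3

    #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
    #define _TIF_SIGPENDING    (1 << TIF_SIGPENDING)
    #define _TIF_NEED_RESCHED  (1 << TIF_NEED_RESCHED)

    /* Pending-work bits live in the low 16; tracing is handled separately. */
    #define _TIF_WORK_MASK (0x0000FFFF & ~_TIF_SYSCALL_TRACE)

    int main(void)
    {
        unsigned int flags = 0;

        flags |= _TIF_NEED_RESCHED;             /* mark rescheduling needed */
        if (flags & _TIF_WORK_MASK)
            puts("work pending on return to user space");
        flags &= ~_TIF_NEED_RESCHED;            /* clear once handled       */
        printf("work mask = %#x\n", _TIF_WORK_MASK);
        return 0;
    }
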
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
deleted file mode 100644
index 6ed21f44d308..000000000000
--- a/include/asm-x86_64/timex.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * linux/include/asm-x86_64/timex.h
3 *
4 * x86-64 architecture timex specifications
5 */
6#ifndef _ASMx8664_TIMEX_H
7#define _ASMx8664_TIMEX_H
8
9#include <asm/8253pit.h>
10#include <asm/msr.h>
11#include <asm/vsyscall.h>
12#include <asm/system.h>
13#include <asm/processor.h>
14#include <asm/tsc.h>
15#include <linux/compiler.h>
16
17#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
18
19extern int read_current_timer(unsigned long *timer_value);
20#define ARCH_HAS_READ_CURRENT_TIMER 1
21
22#define USEC_PER_TICK (USEC_PER_SEC / HZ)
23#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
24#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
25
26#define NS_SCALE 10 /* 2^10, carefully chosen */
27#define US_SCALE 32 /* 2^32, arbitrarily chosen */
28
29extern void mark_tsc_unstable(char *msg);
30extern void set_cyc2ns_scale(unsigned long khz);
31#endif
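
NS_SCALE is the fractional precision of the fixed-point cycles-to-nanoseconds conversion that set_cyc2ns_scale() feeds: the scale is precomputed as (1,000,000 << NS_SCALE) / cpu_khz and each conversion is then just a multiply and a shift. A sketch of the scheme, along the lines kernels of this era used rather than the exact code:

    #include <stdio.h>

    #define NS_SCALE 10   /* 2^10, carefully chosen: see the comment above */

    static unsigned long cyc2ns_scale;

    static void set_scale(unsigned long cpu_khz)
    {
        /* ns per cycle = 1e6 / khz, kept with NS_SCALE fractional bits */
        cyc2ns_scale = (1000000UL << NS_SCALE) / cpu_khz;
    }

    static unsigned long long cycles_to_ns(unsigned long long cycles)
    {
        return (cycles * cyc2ns_scale) >> NS_SCALE;
    }

    int main(void)
    {
        set_scale(2000000);   /* a 2 GHz TSC: scale = 512 */
        /* 2e9 cycles at 2 GHz is one second: */
        printf("%llu ns\n", cycles_to_ns(2000000000ULL));   /* 1000000000 */
        return 0;
    }
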
diff --git a/include/asm-x86_64/tlb.h b/include/asm-x86_64/tlb.h
deleted file mode 100644
index cd4c3c590a0e..000000000000
--- a/include/asm-x86_64/tlb.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef TLB_H
2#define TLB_H 1
3
4
5#define tlb_start_vma(tlb, vma) do { } while (0)
6#define tlb_end_vma(tlb, vma) do { } while (0)
7#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
8
9#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
10
11#include <asm-generic/tlb.h>
12
13#endif
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
deleted file mode 100644
index 888eb4abdd07..000000000000
--- a/include/asm-x86_64/tlbflush.h
+++ /dev/null
@@ -1,109 +0,0 @@
1#ifndef _X8664_TLBFLUSH_H
2#define _X8664_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6#include <asm/processor.h>
7#include <asm/system.h>
8
9static inline void __flush_tlb(void)
10{
11 write_cr3(read_cr3());
12}
13
14static inline void __flush_tlb_all(void)
15{
16 unsigned long cr4 = read_cr4();
17 write_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */
18 write_cr4(cr4); /* write old PGE again and flush TLBs */
19}
20
21#define __flush_tlb_one(addr) \
22 __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
23
24
25/*
26 * TLB flushing:
27 *
28 * - flush_tlb() flushes the current mm struct TLBs
30 * - flush_tlb_all() flushes all processes' TLBs
31 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
31 * - flush_tlb_page(vma, vmaddr) flushes one page
32 * - flush_tlb_range(vma, start, end) flushes a range of pages
33 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
34 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
35 *
36 * x86-64 can only flush individual pages or full VMs. For a range flush
37 * we always do the full VM. It might be worth checking whether a few
38 * INVLPGs in a row are a win for small ranges.
39 */
40
41#ifndef CONFIG_SMP
42
43#define flush_tlb() __flush_tlb()
44#define flush_tlb_all() __flush_tlb_all()
45#define local_flush_tlb() __flush_tlb()
46
47static inline void flush_tlb_mm(struct mm_struct *mm)
48{
49 if (mm == current->active_mm)
50 __flush_tlb();
51}
52
53static inline void flush_tlb_page(struct vm_area_struct *vma,
54 unsigned long addr)
55{
56 if (vma->vm_mm == current->active_mm)
57 __flush_tlb_one(addr);
58}
59
60static inline void flush_tlb_range(struct vm_area_struct *vma,
61 unsigned long start, unsigned long end)
62{
63 if (vma->vm_mm == current->active_mm)
64 __flush_tlb();
65}
66
67#else
68
69#include <asm/smp.h>
70
71#define local_flush_tlb() \
72 __flush_tlb()
73
74extern void flush_tlb_all(void);
75extern void flush_tlb_current_task(void);
76extern void flush_tlb_mm(struct mm_struct *);
77extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
78
79#define flush_tlb() flush_tlb_current_task()
80
81static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
82{
83 flush_tlb_mm(vma->vm_mm);
84}
85
86#define TLBSTATE_OK 1
87#define TLBSTATE_LAZY 2
88
89/* Roughly an IPI every 20 MB with 4 KB pages when freeing page-table
90 ranges. Cost is about 42 KB of memory for each CPU. */
91#define ARCH_FREE_PTE_NR 5350
92
93#endif
94
95static inline void flush_tlb_kernel_range(unsigned long start,
96 unsigned long end)
97{
98 flush_tlb_all();
99}
100
101static inline void flush_tlb_pgtables(struct mm_struct *mm,
102 unsigned long start, unsigned long end)
103{
104	/* x86_64 does not keep any page table caches in a software TLB.
105	   The CPUs do cache them in their hardware TLBs, but those are
106	   handled by the normal TLB flushing algorithms. */
107}
108
109#endif /* _X8664_TLBFLUSH_H */
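
The ARCH_FREE_PTE_NR figure above can be checked directly: batching 5350 4 KB pages covers 5350 * 4096 bytes, about 20.9 MB between IPIs, and holding 5350 page pointers at 8 bytes each costs about 41.8 KB per CPU, matching the comment's rough numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr = 5350;   /* ARCH_FREE_PTE_NR */
        printf("batch covers %.1f MB\n", nr * 4096.0 / (1 << 20));
        printf("pointer array: %.1f KB per CPU\n", nr * 8.0 / 1024);
        return 0;
    }
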
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
deleted file mode 100644
index 36e52fba7960..000000000000
--- a/include/asm-x86_64/topology.h
+++ /dev/null
@@ -1,71 +0,0 @@
1#ifndef _ASM_X86_64_TOPOLOGY_H
2#define _ASM_X86_64_TOPOLOGY_H
3
4
5#ifdef CONFIG_NUMA
6
7#include <asm/mpspec.h>
8#include <asm/bitops.h>
9
10extern cpumask_t cpu_online_map;
11
12extern unsigned char cpu_to_node[];
13extern cpumask_t node_to_cpumask[];
14
15#ifdef CONFIG_ACPI_NUMA
16extern int __node_distance(int, int);
17#define node_distance(a,b) __node_distance(a,b)
18/* #else fallback version */
19#endif
20
21#define cpu_to_node(cpu) (cpu_to_node[cpu])
22#define parent_node(node) (node)
23#define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node]))
24#define node_to_cpumask(node) (node_to_cpumask[node])
25#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node
26#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus));
27
28#define numa_node_id() read_pda(nodenumber)
29
30/* sched_domains SD_NODE_INIT for x86_64 machines */
31#define SD_NODE_INIT (struct sched_domain) { \
32 .span = CPU_MASK_NONE, \
33 .parent = NULL, \
34 .child = NULL, \
35 .groups = NULL, \
36 .min_interval = 8, \
37 .max_interval = 32, \
38 .busy_factor = 32, \
39 .imbalance_pct = 125, \
40 .cache_nice_tries = 2, \
41 .busy_idx = 3, \
42 .idle_idx = 2, \
43 .newidle_idx = 0, \
44 .wake_idx = 1, \
45 .forkexec_idx = 1, \
46 .flags = SD_LOAD_BALANCE \
47 | SD_BALANCE_FORK \
48 | SD_BALANCE_EXEC \
49 | SD_SERIALIZE \
50 | SD_WAKE_BALANCE, \
51 .last_balance = jiffies, \
52 .balance_interval = 1, \
53 .nr_balance_failed = 0, \
54}
55
56#endif
57
58#ifdef CONFIG_SMP
59#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
60#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
61#define topology_core_siblings(cpu) (cpu_core_map[cpu])
62#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
63#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
64#define smt_capable() (smp_num_siblings > 1)
65#endif
66
67#include <asm-generic/topology.h>
68
69extern cpumask_t cpu_coregroup_map(int cpu);
70
71#endif
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h
deleted file mode 100644
index d66ba6ef25f6..000000000000
--- a/include/asm-x86_64/tsc.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-i386/tsc.h>
diff --git a/include/asm-x86_64/types.h b/include/asm-x86_64/types.h
deleted file mode 100644
index 2d4491aae281..000000000000
--- a/include/asm-x86_64/types.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _X86_64_TYPES_H
2#define _X86_64_TYPES_H
3
4#ifndef __ASSEMBLY__
5
6typedef unsigned short umode_t;
7
8/*
9 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
10 * header files exported to user space
11 */
12
13typedef __signed__ char __s8;
14typedef unsigned char __u8;
15
16typedef __signed__ short __s16;
17typedef unsigned short __u16;
18
19typedef __signed__ int __s32;
20typedef unsigned int __u32;
21
22typedef __signed__ long long __s64;
23typedef unsigned long long __u64;
24
25#endif /* __ASSEMBLY__ */
26
27/*
28 * These aren't exported outside the kernel to avoid namespace clashes
29 */
30#ifdef __KERNEL__
31
32#define BITS_PER_LONG 64
33
34#ifndef __ASSEMBLY__
35
36typedef signed char s8;
37typedef unsigned char u8;
38
39typedef signed short s16;
40typedef unsigned short u16;
41
42typedef signed int s32;
43typedef unsigned int u32;
44
45typedef signed long long s64;
46typedef unsigned long long u64;
47
48typedef u64 dma64_addr_t;
49typedef u64 dma_addr_t;
50
51#endif /* __ASSEMBLY__ */
52
53#endif /* __KERNEL__ */
54
55#endif
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
deleted file mode 100644
index f4ce8768ad44..000000000000
--- a/include/asm-x86_64/uaccess.h
+++ /dev/null
@@ -1,384 +0,0 @@
1#ifndef __X86_64_UACCESS_H
2#define __X86_64_UACCESS_H
3
4/*
5 * User space memory access functions
6 */
7#include <linux/compiler.h>
8#include <linux/errno.h>
9#include <linux/prefetch.h>
10#include <asm/page.h>
11
12#define VERIFY_READ 0
13#define VERIFY_WRITE 1
14
15/*
16 * The fs value determines whether argument validity checking should be
17 * performed or not. If get_fs() == USER_DS, checking is performed, with
18 * get_fs() == KERNEL_DS, checking is bypassed.
19 *
20 * For historical reasons, these macros are grossly misnamed.
21 */
22
23#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
24
25#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
26#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
27
28#define get_ds() (KERNEL_DS)
29#define get_fs() (current_thread_info()->addr_limit)
30#define set_fs(x) (current_thread_info()->addr_limit = (x))
31
32#define segment_eq(a,b) ((a).seg == (b).seg)
33
34#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
35
36/*
37 * Uhhuh, this needs 65-bit arithmetic. We have a carry to handle.
38 */
39#define __range_not_ok(addr,size) ({ \
40 unsigned long flag,roksum; \
41 __chk_user_ptr(addr); \
42 asm("# range_ok\n\r" \
43 "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
44 :"=&r" (flag), "=r" (roksum) \
45 :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
46 flag; })
47
48#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
49
50/*
51 * The exception table consists of pairs of addresses: the first is the
52 * address of an instruction that is allowed to fault, and the second is
53 * the address at which the program should continue. No registers are
54 * modified, so it is entirely up to the continuation code to figure out
55 * what to do.
56 *
57 * All the routines below use bits of fixup code that are out of line
58 * with the main instruction path. This means when everything is well,
59 * we don't even have to jump over them. Further, they do not intrude
60 * on our cache or tlb entries.
61 */
62
63struct exception_table_entry
64{
65 unsigned long insn, fixup;
66};
67
68#define ARCH_HAS_SEARCH_EXTABLE
69
70/*
71 * These are the main single-value transfer routines. They automatically
72 * use the right size if we just have the right pointer type.
73 *
74 * This gets kind of ugly. We want to return _two_ values in "get_user()"
75 * and yet we don't want to do any pointers, because that is too much
76 * of a performance impact. Thus we have a few rather ugly macros here,
77 * and hide all the ugliness from the user.
78 *
79 * The "__xxx" versions of the user access functions are versions that
80 * do not verify the address space, that must have been done previously
81 * with a separate "access_ok()" call (this is used when we do multiple
82 * accesses to the same area of user memory).
83 */
84
85#define __get_user_x(size,ret,x,ptr) \
86 asm volatile("call __get_user_" #size \
87 :"=a" (ret),"=d" (x) \
88 :"c" (ptr) \
89 :"r8")
90
91/* Careful: we have to cast the result to the type of the pointer for sign reasons */
92#define get_user(x,ptr) \
93({ unsigned long __val_gu; \
94 int __ret_gu; \
95 __chk_user_ptr(ptr); \
96 switch(sizeof (*(ptr))) { \
97 case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
98 case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
99 case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
100 case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
101 default: __get_user_bad(); break; \
102 } \
103 (x) = (__force typeof(*(ptr)))__val_gu; \
104 __ret_gu; \
105})
106
107extern void __put_user_1(void);
108extern void __put_user_2(void);
109extern void __put_user_4(void);
110extern void __put_user_8(void);
111extern void __put_user_bad(void);
112
113#define __put_user_x(size,ret,x,ptr) \
114 asm volatile("call __put_user_" #size \
115 :"=a" (ret) \
116 :"c" (ptr),"d" (x) \
117 :"r8")
118
119#define put_user(x,ptr) \
120 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
121
122#define __get_user(x,ptr) \
123 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
124#define __put_user(x,ptr) \
125 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
126
127#define __get_user_unaligned __get_user
128#define __put_user_unaligned __put_user
129
130#define __put_user_nocheck(x,ptr,size) \
131({ \
132 int __pu_err; \
133 __put_user_size((x),(ptr),(size),__pu_err); \
134 __pu_err; \
135})
136
137
138#define __put_user_check(x,ptr,size) \
139({ \
140 int __pu_err; \
141 typeof(*(ptr)) __user *__pu_addr = (ptr); \
142 switch (size) { \
143 case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
144 case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
145 case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \
146 case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \
147 default: __put_user_bad(); \
148 } \
149 __pu_err; \
150})
151
152#define __put_user_size(x,ptr,size,retval) \
153do { \
154 retval = 0; \
155 __chk_user_ptr(ptr); \
156 switch (size) { \
157 case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
158 case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
159 case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
160 case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
161 default: __put_user_bad(); \
162 } \
163} while (0)
164
165/* FIXME: this hack is definitely wrong -AK */
166struct __large_struct { unsigned long buf[100]; };
167#define __m(x) (*(struct __large_struct __user *)(x))
168
169/*
170 * Tell gcc we read from memory instead of writing: this is because
171 * we do not write to any memory gcc knows about, so there are no
172 * aliasing issues.
173 */
174#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
175 asm volatile( \
176 "1: mov"itype" %"rtype"1,%2\n" \
177 "2:\n" \
178 ".section .fixup,\"ax\"\n" \
179 "3: mov %3,%0\n" \
180 " jmp 2b\n" \
181 ".previous\n" \
182 ".section __ex_table,\"a\"\n" \
183 " .align 8\n" \
184 " .quad 1b,3b\n" \
185 ".previous" \
186 : "=r"(err) \
187 : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
188
189
190#define __get_user_nocheck(x,ptr,size) \
191({ \
192 int __gu_err; \
193 unsigned long __gu_val; \
194 __get_user_size(__gu_val,(ptr),(size),__gu_err); \
195 (x) = (__force typeof(*(ptr)))__gu_val; \
196 __gu_err; \
197})
198
199extern int __get_user_1(void);
200extern int __get_user_2(void);
201extern int __get_user_4(void);
202extern int __get_user_8(void);
203extern int __get_user_bad(void);
204
205#define __get_user_size(x,ptr,size,retval) \
206do { \
207 retval = 0; \
208 __chk_user_ptr(ptr); \
209 switch (size) { \
210 case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
211 case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
212 case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
213 case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
214 default: (x) = __get_user_bad(); \
215 } \
216} while (0)
217
218#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
219 asm volatile( \
220 "1: mov"itype" %2,%"rtype"1\n" \
221 "2:\n" \
222 ".section .fixup,\"ax\"\n" \
223 "3: mov %3,%0\n" \
224 " xor"itype" %"rtype"1,%"rtype"1\n" \
225 " jmp 2b\n" \
226 ".previous\n" \
227 ".section __ex_table,\"a\"\n" \
228 " .align 8\n" \
229 " .quad 1b,3b\n" \
230 ".previous" \
231 : "=r"(err), ltype (x) \
232 : "m"(__m(addr)), "i"(errno), "0"(err))
233
234/*
235 * Copy To/From Userspace
236 */
237
238/* Handles exceptions in both to and from, but doesn't do access_ok */
239__must_check unsigned long
240copy_user_generic(void *to, const void *from, unsigned len);
241
242__must_check unsigned long
243copy_to_user(void __user *to, const void *from, unsigned len);
244__must_check unsigned long
245copy_from_user(void *to, const void __user *from, unsigned len);
246__must_check unsigned long
247copy_in_user(void __user *to, const void __user *from, unsigned len);
248
249static __always_inline __must_check
250int __copy_from_user(void *dst, const void __user *src, unsigned size)
251{
252 int ret = 0;
253 if (!__builtin_constant_p(size))
254 return copy_user_generic(dst,(__force void *)src,size);
255 switch (size) {
256 case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
257 return ret;
258 case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
259 return ret;
260 case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
261 return ret;
262 case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
263 return ret;
264 case 10:
265 __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
266 if (unlikely(ret)) return ret;
267 __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
268 return ret;
269 case 16:
270 __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
271 if (unlikely(ret)) return ret;
272 __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
273 return ret;
274 default:
275 return copy_user_generic(dst,(__force void *)src,size);
276 }
277}
278
279static __always_inline __must_check
280int __copy_to_user(void __user *dst, const void *src, unsigned size)
281{
282 int ret = 0;
283 if (!__builtin_constant_p(size))
284 return copy_user_generic((__force void *)dst,src,size);
285 switch (size) {
286 case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
287 return ret;
288 case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
289 return ret;
290 case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
291 return ret;
292 case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
293 return ret;
294 case 10:
295 __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
296 if (unlikely(ret)) return ret;
297 asm("":::"memory");
298 __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
299 return ret;
300 case 16:
301 __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
302 if (unlikely(ret)) return ret;
303 asm("":::"memory");
304 __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
305 return ret;
306 default:
307 return copy_user_generic((__force void *)dst,src,size);
308 }
309}
310
311static __always_inline __must_check
312int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
313{
314 int ret = 0;
315 if (!__builtin_constant_p(size))
316 return copy_user_generic((__force void *)dst,(__force void *)src,size);
317 switch (size) {
318 case 1: {
319 u8 tmp;
320 __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
321 if (likely(!ret))
322 __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
323 return ret;
324 }
325 case 2: {
326 u16 tmp;
327 __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
328 if (likely(!ret))
329 __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
330 return ret;
331 }
332
333 case 4: {
334 u32 tmp;
335 __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
336 if (likely(!ret))
337 __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
338 return ret;
339 }
340 case 8: {
341 u64 tmp;
342 __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
343 if (likely(!ret))
344 __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
345 return ret;
346 }
347 default:
348 return copy_user_generic((__force void *)dst,(__force void *)src,size);
349 }
350}
351
352__must_check long
353strncpy_from_user(char *dst, const char __user *src, long count);
354__must_check long
355__strncpy_from_user(char *dst, const char __user *src, long count);
356__must_check long strnlen_user(const char __user *str, long n);
357__must_check long __strnlen_user(const char __user *str, long n);
358__must_check long strlen_user(const char __user *str);
359__must_check unsigned long clear_user(void __user *mem, unsigned long len);
360__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
361
362__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
363
364static __must_check __always_inline int
365__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
366{
367 return copy_user_generic((__force void *)dst, src, size);
368}
369
370#define ARCH_HAS_NOCACHE_UACCESS 1
371extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);
372
373static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
374{
375 might_sleep();
376 return __copy_user_nocache(dst, src, size, 1);
377}
378
379static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
380{
381 return __copy_user_nocache(dst, src, size, 0);
382}
383
384#endif /* __X86_64_UACCESS_H */
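/*
 * Illustrative sketch, not part of the original header: how the two
 * nocache variants above differ.  The zerorest argument selects whether
 * __copy_user_nocache() clears the unwritten tail of the destination
 * after a partial fault, so callers of the sleeping variant never see
 * uninitialized bytes, while the inatomic variant leaves the tail for
 * the caller to handle.  copy_sample() is a hypothetical caller.
 */
static int copy_sample(void *kbuf, const void __user *ubuf, unsigned len)
{
	/* may sleep; on a fault the unwritten tail of kbuf is zeroed */
	if (__copy_from_user_nocache(kbuf, ubuf, len))
		return -EFAULT;
	return 0;
}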
diff --git a/include/asm-x86_64/ucontext.h b/include/asm-x86_64/ucontext.h
deleted file mode 100644
index 159a3da9e112..000000000000
--- a/include/asm-x86_64/ucontext.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASMX8664_UCONTEXT_H
2#define _ASMX8664_UCONTEXT_H
3
4struct ucontext {
5 unsigned long uc_flags;
6 struct ucontext *uc_link;
7 stack_t uc_stack;
8 struct sigcontext uc_mcontext;
9 sigset_t uc_sigmask; /* mask last for extensibility */
10};
11
12#endif
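/*
 * Sketch (not from the patch) of how userspace typically reaches this
 * structure: the third argument of an SA_SIGINFO signal handler points
 * at a struct ucontext, and uc_mcontext carries the interrupted
 * register state.  The rip member is assumed from the x86-64 struct
 * sigcontext layout.
 */
static void sample_handler(int sig, siginfo_t *info, void *ctx)
{
	struct ucontext *uc = ctx;
	unsigned long at = uc->uc_mcontext.rip;	/* interrupted instruction */

	(void)sig; (void)info; (void)at;
}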
diff --git a/include/asm-x86_64/unaligned.h b/include/asm-x86_64/unaligned.h
deleted file mode 100644
index d4bf78dc6f39..000000000000
--- a/include/asm-x86_64/unaligned.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef __X8664_UNALIGNED_H
2#define __X8664_UNALIGNED_H
3
4/*
5 * The x86-64 can do unaligned accesses itself.
6 *
7 * The strange macros are there to make sure these can't
8 * be misused in a way that breaks them on other
9 * architectures where unaligned accesses aren't as simple.
10 */
11
12/**
13 * get_unaligned - get value from possibly mis-aligned location
14 * @ptr: pointer to value
15 *
16 * This macro should be used for accessing values larger in size than
17 * single bytes at locations that are expected to be improperly aligned,
18 * e.g. retrieving a u16 value from a location not u16-aligned.
19 *
20 * Note that unaligned accesses can be very expensive on some architectures.
21 */
22#define get_unaligned(ptr) (*(ptr))
23
24/**
25 * put_unaligned - put value to a possibly mis-aligned location
26 * @val: value to place
27 * @ptr: pointer to location
28 *
29 * This macro should be used for placing values larger in size than
30 * single bytes at locations that are expected to be improperly aligned,
31 * e.g. writing a u16 value to a location not u16-aligned.
32 *
33 * Note that unaligned accesses can be very expensive on some architectures.
34 */
35#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
36
37#endif
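/*
 * Worked example of the macros above (an illustration, not from the
 * patch): pulling a u16 field out of a packed buffer at an odd offset.
 * On x86-64 this compiles to a plain load, but spelling it
 * get_unaligned() keeps the code correct on architectures that trap on
 * misaligned accesses.
 */
static u16 sample_read_len(const u8 *pkt)
{
	/* bytes 3..4 of the packet hold a length field */
	return get_unaligned((const u16 *)(pkt + 3));
}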
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
deleted file mode 100644
index fc4e73f5f1fa..000000000000
--- a/include/asm-x86_64/unistd.h
+++ /dev/null
@@ -1,687 +0,0 @@
1#ifndef _ASM_X86_64_UNISTD_H_
2#define _ASM_X86_64_UNISTD_H_
3
4#ifndef __SYSCALL
5#define __SYSCALL(a,b)
6#endif
7
8/*
9 * This file contains the system call numbers.
10 *
11 * Note: holes are not allowed.
12 */
13
14/* at least 8 syscalls per cacheline */
15#define __NR_read 0
16__SYSCALL(__NR_read, sys_read)
17#define __NR_write 1
18__SYSCALL(__NR_write, sys_write)
19#define __NR_open 2
20__SYSCALL(__NR_open, sys_open)
21#define __NR_close 3
22__SYSCALL(__NR_close, sys_close)
23#define __NR_stat 4
24__SYSCALL(__NR_stat, sys_newstat)
25#define __NR_fstat 5
26__SYSCALL(__NR_fstat, sys_newfstat)
27#define __NR_lstat 6
28__SYSCALL(__NR_lstat, sys_newlstat)
29#define __NR_poll 7
30__SYSCALL(__NR_poll, sys_poll)
31
32#define __NR_lseek 8
33__SYSCALL(__NR_lseek, sys_lseek)
34#define __NR_mmap 9
35__SYSCALL(__NR_mmap, sys_mmap)
36#define __NR_mprotect 10
37__SYSCALL(__NR_mprotect, sys_mprotect)
38#define __NR_munmap 11
39__SYSCALL(__NR_munmap, sys_munmap)
40#define __NR_brk 12
41__SYSCALL(__NR_brk, sys_brk)
42#define __NR_rt_sigaction 13
43__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction)
44#define __NR_rt_sigprocmask 14
45__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
46#define __NR_rt_sigreturn 15
47__SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn)
48
49#define __NR_ioctl 16
50__SYSCALL(__NR_ioctl, sys_ioctl)
51#define __NR_pread64 17
52__SYSCALL(__NR_pread64, sys_pread64)
53#define __NR_pwrite64 18
54__SYSCALL(__NR_pwrite64, sys_pwrite64)
55#define __NR_readv 19
56__SYSCALL(__NR_readv, sys_readv)
57#define __NR_writev 20
58__SYSCALL(__NR_writev, sys_writev)
59#define __NR_access 21
60__SYSCALL(__NR_access, sys_access)
61#define __NR_pipe 22
62__SYSCALL(__NR_pipe, sys_pipe)
63#define __NR_select 23
64__SYSCALL(__NR_select, sys_select)
65
66#define __NR_sched_yield 24
67__SYSCALL(__NR_sched_yield, sys_sched_yield)
68#define __NR_mremap 25
69__SYSCALL(__NR_mremap, sys_mremap)
70#define __NR_msync 26
71__SYSCALL(__NR_msync, sys_msync)
72#define __NR_mincore 27
73__SYSCALL(__NR_mincore, sys_mincore)
74#define __NR_madvise 28
75__SYSCALL(__NR_madvise, sys_madvise)
76#define __NR_shmget 29
77__SYSCALL(__NR_shmget, sys_shmget)
78#define __NR_shmat 30
79__SYSCALL(__NR_shmat, sys_shmat)
80#define __NR_shmctl 31
81__SYSCALL(__NR_shmctl, sys_shmctl)
82
83#define __NR_dup 32
84__SYSCALL(__NR_dup, sys_dup)
85#define __NR_dup2 33
86__SYSCALL(__NR_dup2, sys_dup2)
87#define __NR_pause 34
88__SYSCALL(__NR_pause, sys_pause)
89#define __NR_nanosleep 35
90__SYSCALL(__NR_nanosleep, sys_nanosleep)
91#define __NR_getitimer 36
92__SYSCALL(__NR_getitimer, sys_getitimer)
93#define __NR_alarm 37
94__SYSCALL(__NR_alarm, sys_alarm)
95#define __NR_setitimer 38
96__SYSCALL(__NR_setitimer, sys_setitimer)
97#define __NR_getpid 39
98__SYSCALL(__NR_getpid, sys_getpid)
99
100#define __NR_sendfile 40
101__SYSCALL(__NR_sendfile, sys_sendfile64)
102#define __NR_socket 41
103__SYSCALL(__NR_socket, sys_socket)
104#define __NR_connect 42
105__SYSCALL(__NR_connect, sys_connect)
106#define __NR_accept 43
107__SYSCALL(__NR_accept, sys_accept)
108#define __NR_sendto 44
109__SYSCALL(__NR_sendto, sys_sendto)
110#define __NR_recvfrom 45
111__SYSCALL(__NR_recvfrom, sys_recvfrom)
112#define __NR_sendmsg 46
113__SYSCALL(__NR_sendmsg, sys_sendmsg)
114#define __NR_recvmsg 47
115__SYSCALL(__NR_recvmsg, sys_recvmsg)
116
117#define __NR_shutdown 48
118__SYSCALL(__NR_shutdown, sys_shutdown)
119#define __NR_bind 49
120__SYSCALL(__NR_bind, sys_bind)
121#define __NR_listen 50
122__SYSCALL(__NR_listen, sys_listen)
123#define __NR_getsockname 51
124__SYSCALL(__NR_getsockname, sys_getsockname)
125#define __NR_getpeername 52
126__SYSCALL(__NR_getpeername, sys_getpeername)
127#define __NR_socketpair 53
128__SYSCALL(__NR_socketpair, sys_socketpair)
129#define __NR_setsockopt 54
130__SYSCALL(__NR_setsockopt, sys_setsockopt)
131#define __NR_getsockopt 55
132__SYSCALL(__NR_getsockopt, sys_getsockopt)
133
134#define __NR_clone 56
135__SYSCALL(__NR_clone, stub_clone)
136#define __NR_fork 57
137__SYSCALL(__NR_fork, stub_fork)
138#define __NR_vfork 58
139__SYSCALL(__NR_vfork, stub_vfork)
140#define __NR_execve 59
141__SYSCALL(__NR_execve, stub_execve)
142#define __NR_exit 60
143__SYSCALL(__NR_exit, sys_exit)
144#define __NR_wait4 61
145__SYSCALL(__NR_wait4, sys_wait4)
146#define __NR_kill 62
147__SYSCALL(__NR_kill, sys_kill)
148#define __NR_uname 63
149__SYSCALL(__NR_uname, sys_uname)
150
151#define __NR_semget 64
152__SYSCALL(__NR_semget, sys_semget)
153#define __NR_semop 65
154__SYSCALL(__NR_semop, sys_semop)
155#define __NR_semctl 66
156__SYSCALL(__NR_semctl, sys_semctl)
157#define __NR_shmdt 67
158__SYSCALL(__NR_shmdt, sys_shmdt)
159#define __NR_msgget 68
160__SYSCALL(__NR_msgget, sys_msgget)
161#define __NR_msgsnd 69
162__SYSCALL(__NR_msgsnd, sys_msgsnd)
163#define __NR_msgrcv 70
164__SYSCALL(__NR_msgrcv, sys_msgrcv)
165#define __NR_msgctl 71
166__SYSCALL(__NR_msgctl, sys_msgctl)
167
168#define __NR_fcntl 72
169__SYSCALL(__NR_fcntl, sys_fcntl)
170#define __NR_flock 73
171__SYSCALL(__NR_flock, sys_flock)
172#define __NR_fsync 74
173__SYSCALL(__NR_fsync, sys_fsync)
174#define __NR_fdatasync 75
175__SYSCALL(__NR_fdatasync, sys_fdatasync)
176#define __NR_truncate 76
177__SYSCALL(__NR_truncate, sys_truncate)
178#define __NR_ftruncate 77
179__SYSCALL(__NR_ftruncate, sys_ftruncate)
180#define __NR_getdents 78
181__SYSCALL(__NR_getdents, sys_getdents)
182#define __NR_getcwd 79
183__SYSCALL(__NR_getcwd, sys_getcwd)
184
185#define __NR_chdir 80
186__SYSCALL(__NR_chdir, sys_chdir)
187#define __NR_fchdir 81
188__SYSCALL(__NR_fchdir, sys_fchdir)
189#define __NR_rename 82
190__SYSCALL(__NR_rename, sys_rename)
191#define __NR_mkdir 83
192__SYSCALL(__NR_mkdir, sys_mkdir)
193#define __NR_rmdir 84
194__SYSCALL(__NR_rmdir, sys_rmdir)
195#define __NR_creat 85
196__SYSCALL(__NR_creat, sys_creat)
197#define __NR_link 86
198__SYSCALL(__NR_link, sys_link)
199#define __NR_unlink 87
200__SYSCALL(__NR_unlink, sys_unlink)
201
202#define __NR_symlink 88
203__SYSCALL(__NR_symlink, sys_symlink)
204#define __NR_readlink 89
205__SYSCALL(__NR_readlink, sys_readlink)
206#define __NR_chmod 90
207__SYSCALL(__NR_chmod, sys_chmod)
208#define __NR_fchmod 91
209__SYSCALL(__NR_fchmod, sys_fchmod)
210#define __NR_chown 92
211__SYSCALL(__NR_chown, sys_chown)
212#define __NR_fchown 93
213__SYSCALL(__NR_fchown, sys_fchown)
214#define __NR_lchown 94
215__SYSCALL(__NR_lchown, sys_lchown)
216#define __NR_umask 95
217__SYSCALL(__NR_umask, sys_umask)
218
219#define __NR_gettimeofday 96
220__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
221#define __NR_getrlimit 97
222__SYSCALL(__NR_getrlimit, sys_getrlimit)
223#define __NR_getrusage 98
224__SYSCALL(__NR_getrusage, sys_getrusage)
225#define __NR_sysinfo 99
226__SYSCALL(__NR_sysinfo, sys_sysinfo)
227#define __NR_times 100
228__SYSCALL(__NR_times, sys_times)
229#define __NR_ptrace 101
230__SYSCALL(__NR_ptrace, sys_ptrace)
231#define __NR_getuid 102
232__SYSCALL(__NR_getuid, sys_getuid)
233#define __NR_syslog 103
234__SYSCALL(__NR_syslog, sys_syslog)
235
236/* at the very end, the stuff that never runs during the benchmarks */
237#define __NR_getgid 104
238__SYSCALL(__NR_getgid, sys_getgid)
239#define __NR_setuid 105
240__SYSCALL(__NR_setuid, sys_setuid)
241#define __NR_setgid 106
242__SYSCALL(__NR_setgid, sys_setgid)
243#define __NR_geteuid 107
244__SYSCALL(__NR_geteuid, sys_geteuid)
245#define __NR_getegid 108
246__SYSCALL(__NR_getegid, sys_getegid)
247#define __NR_setpgid 109
248__SYSCALL(__NR_setpgid, sys_setpgid)
249#define __NR_getppid 110
250__SYSCALL(__NR_getppid, sys_getppid)
251#define __NR_getpgrp 111
252__SYSCALL(__NR_getpgrp, sys_getpgrp)
253
254#define __NR_setsid 112
255__SYSCALL(__NR_setsid, sys_setsid)
256#define __NR_setreuid 113
257__SYSCALL(__NR_setreuid, sys_setreuid)
258#define __NR_setregid 114
259__SYSCALL(__NR_setregid, sys_setregid)
260#define __NR_getgroups 115
261__SYSCALL(__NR_getgroups, sys_getgroups)
262#define __NR_setgroups 116
263__SYSCALL(__NR_setgroups, sys_setgroups)
264#define __NR_setresuid 117
265__SYSCALL(__NR_setresuid, sys_setresuid)
266#define __NR_getresuid 118
267__SYSCALL(__NR_getresuid, sys_getresuid)
268#define __NR_setresgid 119
269__SYSCALL(__NR_setresgid, sys_setresgid)
270
271#define __NR_getresgid 120
272__SYSCALL(__NR_getresgid, sys_getresgid)
273#define __NR_getpgid 121
274__SYSCALL(__NR_getpgid, sys_getpgid)
275#define __NR_setfsuid 122
276__SYSCALL(__NR_setfsuid, sys_setfsuid)
277#define __NR_setfsgid 123
278__SYSCALL(__NR_setfsgid, sys_setfsgid)
279#define __NR_getsid 124
280__SYSCALL(__NR_getsid, sys_getsid)
281#define __NR_capget 125
282__SYSCALL(__NR_capget, sys_capget)
283#define __NR_capset 126
284__SYSCALL(__NR_capset, sys_capset)
285
286#define __NR_rt_sigpending 127
287__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
288#define __NR_rt_sigtimedwait 128
289__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
290#define __NR_rt_sigqueueinfo 129
291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
292#define __NR_rt_sigsuspend 130
293__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend)
294#define __NR_sigaltstack 131
295__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
296#define __NR_utime 132
297__SYSCALL(__NR_utime, sys_utime)
298#define __NR_mknod 133
299__SYSCALL(__NR_mknod, sys_mknod)
300
301/* Only needed for a.out */
302#define __NR_uselib 134
303__SYSCALL(__NR_uselib, sys_ni_syscall)
304#define __NR_personality 135
305__SYSCALL(__NR_personality, sys_personality)
306
307#define __NR_ustat 136
308__SYSCALL(__NR_ustat, sys_ustat)
309#define __NR_statfs 137
310__SYSCALL(__NR_statfs, sys_statfs)
311#define __NR_fstatfs 138
312__SYSCALL(__NR_fstatfs, sys_fstatfs)
313#define __NR_sysfs 139
314__SYSCALL(__NR_sysfs, sys_sysfs)
315
316#define __NR_getpriority 140
317__SYSCALL(__NR_getpriority, sys_getpriority)
318#define __NR_setpriority 141
319__SYSCALL(__NR_setpriority, sys_setpriority)
320#define __NR_sched_setparam 142
321__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
322#define __NR_sched_getparam 143
323__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
324#define __NR_sched_setscheduler 144
325__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
326#define __NR_sched_getscheduler 145
327__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
328#define __NR_sched_get_priority_max 146
329__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
330#define __NR_sched_get_priority_min 147
331__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
332#define __NR_sched_rr_get_interval 148
333__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
334
335#define __NR_mlock 149
336__SYSCALL(__NR_mlock, sys_mlock)
337#define __NR_munlock 150
338__SYSCALL(__NR_munlock, sys_munlock)
339#define __NR_mlockall 151
340__SYSCALL(__NR_mlockall, sys_mlockall)
341#define __NR_munlockall 152
342__SYSCALL(__NR_munlockall, sys_munlockall)
343
344#define __NR_vhangup 153
345__SYSCALL(__NR_vhangup, sys_vhangup)
346
347#define __NR_modify_ldt 154
348__SYSCALL(__NR_modify_ldt, sys_modify_ldt)
349
350#define __NR_pivot_root 155
351__SYSCALL(__NR_pivot_root, sys_pivot_root)
352
353#define __NR__sysctl 156
354__SYSCALL(__NR__sysctl, sys_sysctl)
355
356#define __NR_prctl 157
357__SYSCALL(__NR_prctl, sys_prctl)
358#define __NR_arch_prctl 158
359__SYSCALL(__NR_arch_prctl, sys_arch_prctl)
360
361#define __NR_adjtimex 159
362__SYSCALL(__NR_adjtimex, sys_adjtimex)
363
364#define __NR_setrlimit 160
365__SYSCALL(__NR_setrlimit, sys_setrlimit)
366
367#define __NR_chroot 161
368__SYSCALL(__NR_chroot, sys_chroot)
369
370#define __NR_sync 162
371__SYSCALL(__NR_sync, sys_sync)
372
373#define __NR_acct 163
374__SYSCALL(__NR_acct, sys_acct)
375
376#define __NR_settimeofday 164
377__SYSCALL(__NR_settimeofday, sys_settimeofday)
378
379#define __NR_mount 165
380__SYSCALL(__NR_mount, sys_mount)
381#define __NR_umount2 166
382__SYSCALL(__NR_umount2, sys_umount)
383
384#define __NR_swapon 167
385__SYSCALL(__NR_swapon, sys_swapon)
386#define __NR_swapoff 168
387__SYSCALL(__NR_swapoff, sys_swapoff)
388
389#define __NR_reboot 169
390__SYSCALL(__NR_reboot, sys_reboot)
391
392#define __NR_sethostname 170
393__SYSCALL(__NR_sethostname, sys_sethostname)
394#define __NR_setdomainname 171
395__SYSCALL(__NR_setdomainname, sys_setdomainname)
396
397#define __NR_iopl 172
398__SYSCALL(__NR_iopl, stub_iopl)
399#define __NR_ioperm 173
400__SYSCALL(__NR_ioperm, sys_ioperm)
401
402#define __NR_create_module 174
403__SYSCALL(__NR_create_module, sys_ni_syscall)
404#define __NR_init_module 175
405__SYSCALL(__NR_init_module, sys_init_module)
406#define __NR_delete_module 176
407__SYSCALL(__NR_delete_module, sys_delete_module)
408#define __NR_get_kernel_syms 177
409__SYSCALL(__NR_get_kernel_syms, sys_ni_syscall)
410#define __NR_query_module 178
411__SYSCALL(__NR_query_module, sys_ni_syscall)
412
413#define __NR_quotactl 179
414__SYSCALL(__NR_quotactl, sys_quotactl)
415
416#define __NR_nfsservctl 180
417__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
418
419#define __NR_getpmsg 181 /* reserved for LiS/STREAMS */
420__SYSCALL(__NR_getpmsg, sys_ni_syscall)
421#define __NR_putpmsg 182 /* reserved for LiS/STREAMS */
422__SYSCALL(__NR_putpmsg, sys_ni_syscall)
423
424#define __NR_afs_syscall 183 /* reserved for AFS */
425__SYSCALL(__NR_afs_syscall, sys_ni_syscall)
426
427#define __NR_tuxcall 184 /* reserved for tux */
428__SYSCALL(__NR_tuxcall, sys_ni_syscall)
429
430#define __NR_security 185
431__SYSCALL(__NR_security, sys_ni_syscall)
432
433#define __NR_gettid 186
434__SYSCALL(__NR_gettid, sys_gettid)
435
436#define __NR_readahead 187
437__SYSCALL(__NR_readahead, sys_readahead)
438#define __NR_setxattr 188
439__SYSCALL(__NR_setxattr, sys_setxattr)
440#define __NR_lsetxattr 189
441__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
442#define __NR_fsetxattr 190
443__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
444#define __NR_getxattr 191
445__SYSCALL(__NR_getxattr, sys_getxattr)
446#define __NR_lgetxattr 192
447__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
448#define __NR_fgetxattr 193
449__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
450#define __NR_listxattr 194
451__SYSCALL(__NR_listxattr, sys_listxattr)
452#define __NR_llistxattr 195
453__SYSCALL(__NR_llistxattr, sys_llistxattr)
454#define __NR_flistxattr 196
455__SYSCALL(__NR_flistxattr, sys_flistxattr)
456#define __NR_removexattr 197
457__SYSCALL(__NR_removexattr, sys_removexattr)
458#define __NR_lremovexattr 198
459__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
460#define __NR_fremovexattr 199
461__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
462#define __NR_tkill 200
463__SYSCALL(__NR_tkill, sys_tkill)
464#define __NR_time 201
465__SYSCALL(__NR_time, sys_time)
466#define __NR_futex 202
467__SYSCALL(__NR_futex, sys_futex)
468#define __NR_sched_setaffinity 203
469__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
470#define __NR_sched_getaffinity 204
471__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
472#define __NR_set_thread_area 205
473__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */
474#define __NR_io_setup 206
475__SYSCALL(__NR_io_setup, sys_io_setup)
476#define __NR_io_destroy 207
477__SYSCALL(__NR_io_destroy, sys_io_destroy)
478#define __NR_io_getevents 208
479__SYSCALL(__NR_io_getevents, sys_io_getevents)
480#define __NR_io_submit 209
481__SYSCALL(__NR_io_submit, sys_io_submit)
482#define __NR_io_cancel 210
483__SYSCALL(__NR_io_cancel, sys_io_cancel)
484#define __NR_get_thread_area 211
485__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */
486#define __NR_lookup_dcookie 212
487__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
488#define __NR_epoll_create 213
489__SYSCALL(__NR_epoll_create, sys_epoll_create)
490#define __NR_epoll_ctl_old 214
491__SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall)
492#define __NR_epoll_wait_old 215
493__SYSCALL(__NR_epoll_wait_old, sys_ni_syscall)
494#define __NR_remap_file_pages 216
495__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
496#define __NR_getdents64 217
497__SYSCALL(__NR_getdents64, sys_getdents64)
498#define __NR_set_tid_address 218
499__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
500#define __NR_restart_syscall 219
501__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
502#define __NR_semtimedop 220
503__SYSCALL(__NR_semtimedop, sys_semtimedop)
504#define __NR_fadvise64 221
505__SYSCALL(__NR_fadvise64, sys_fadvise64)
506#define __NR_timer_create 222
507__SYSCALL(__NR_timer_create, sys_timer_create)
508#define __NR_timer_settime 223
509__SYSCALL(__NR_timer_settime, sys_timer_settime)
510#define __NR_timer_gettime 224
511__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
512#define __NR_timer_getoverrun 225
513__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
514#define __NR_timer_delete 226
515__SYSCALL(__NR_timer_delete, sys_timer_delete)
516#define __NR_clock_settime 227
517__SYSCALL(__NR_clock_settime, sys_clock_settime)
518#define __NR_clock_gettime 228
519__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
520#define __NR_clock_getres 229
521__SYSCALL(__NR_clock_getres, sys_clock_getres)
522#define __NR_clock_nanosleep 230
523__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
524#define __NR_exit_group 231
525__SYSCALL(__NR_exit_group, sys_exit_group)
526#define __NR_epoll_wait 232
527__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
528#define __NR_epoll_ctl 233
529__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
530#define __NR_tgkill 234
531__SYSCALL(__NR_tgkill, sys_tgkill)
532#define __NR_utimes 235
533__SYSCALL(__NR_utimes, sys_utimes)
534#define __NR_vserver 236
535__SYSCALL(__NR_vserver, sys_ni_syscall)
536#define __NR_mbind 237
537__SYSCALL(__NR_mbind, sys_mbind)
538#define __NR_set_mempolicy 238
539__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
540#define __NR_get_mempolicy 239
541__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
542#define __NR_mq_open 240
543__SYSCALL(__NR_mq_open, sys_mq_open)
544#define __NR_mq_unlink 241
545__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
546#define __NR_mq_timedsend 242
547__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
548#define __NR_mq_timedreceive 243
549__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
550#define __NR_mq_notify 244
551__SYSCALL(__NR_mq_notify, sys_mq_notify)
552#define __NR_mq_getsetattr 245
553__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
554#define __NR_kexec_load 246
555__SYSCALL(__NR_kexec_load, sys_kexec_load)
556#define __NR_waitid 247
557__SYSCALL(__NR_waitid, sys_waitid)
558#define __NR_add_key 248
559__SYSCALL(__NR_add_key, sys_add_key)
560#define __NR_request_key 249
561__SYSCALL(__NR_request_key, sys_request_key)
562#define __NR_keyctl 250
563__SYSCALL(__NR_keyctl, sys_keyctl)
564#define __NR_ioprio_set 251
565__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
566#define __NR_ioprio_get 252
567__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
568#define __NR_inotify_init 253
569__SYSCALL(__NR_inotify_init, sys_inotify_init)
570#define __NR_inotify_add_watch 254
571__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
572#define __NR_inotify_rm_watch 255
573__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
574#define __NR_migrate_pages 256
575__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
576#define __NR_openat 257
577__SYSCALL(__NR_openat, sys_openat)
578#define __NR_mkdirat 258
579__SYSCALL(__NR_mkdirat, sys_mkdirat)
580#define __NR_mknodat 259
581__SYSCALL(__NR_mknodat, sys_mknodat)
582#define __NR_fchownat 260
583__SYSCALL(__NR_fchownat, sys_fchownat)
584#define __NR_futimesat 261
585__SYSCALL(__NR_futimesat, sys_futimesat)
586#define __NR_newfstatat 262
587__SYSCALL(__NR_newfstatat, sys_newfstatat)
588#define __NR_unlinkat 263
589__SYSCALL(__NR_unlinkat, sys_unlinkat)
590#define __NR_renameat 264
591__SYSCALL(__NR_renameat, sys_renameat)
592#define __NR_linkat 265
593__SYSCALL(__NR_linkat, sys_linkat)
594#define __NR_symlinkat 266
595__SYSCALL(__NR_symlinkat, sys_symlinkat)
596#define __NR_readlinkat 267
597__SYSCALL(__NR_readlinkat, sys_readlinkat)
598#define __NR_fchmodat 268
599__SYSCALL(__NR_fchmodat, sys_fchmodat)
600#define __NR_faccessat 269
601__SYSCALL(__NR_faccessat, sys_faccessat)
602#define __NR_pselect6 270
603__SYSCALL(__NR_pselect6, sys_pselect6)
604#define __NR_ppoll 271
605__SYSCALL(__NR_ppoll, sys_ppoll)
606#define __NR_unshare 272
607__SYSCALL(__NR_unshare, sys_unshare)
608#define __NR_set_robust_list 273
609__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
610#define __NR_get_robust_list 274
611__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
612#define __NR_splice 275
613__SYSCALL(__NR_splice, sys_splice)
614#define __NR_tee 276
615__SYSCALL(__NR_tee, sys_tee)
616#define __NR_sync_file_range 277
617__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
618#define __NR_vmsplice 278
619__SYSCALL(__NR_vmsplice, sys_vmsplice)
620#define __NR_move_pages 279
621__SYSCALL(__NR_move_pages, sys_move_pages)
622#define __NR_utimensat 280
623__SYSCALL(__NR_utimensat, sys_utimensat)
624#define __IGNORE_getcpu /* implemented as a vsyscall */
625#define __NR_epoll_pwait 281
626__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
627#define __NR_signalfd 282
628__SYSCALL(__NR_signalfd, sys_signalfd)
629#define __NR_timerfd 283
630__SYSCALL(__NR_timerfd, sys_timerfd)
631#define __NR_eventfd 284
632__SYSCALL(__NR_eventfd, sys_eventfd)
633#define __NR_fallocate 285
634__SYSCALL(__NR_fallocate, sys_fallocate)
635
636#ifndef __NO_STUBS
637#define __ARCH_WANT_OLD_READDIR
638#define __ARCH_WANT_OLD_STAT
639#define __ARCH_WANT_SYS_ALARM
640#define __ARCH_WANT_SYS_GETHOSTNAME
641#define __ARCH_WANT_SYS_PAUSE
642#define __ARCH_WANT_SYS_SGETMASK
643#define __ARCH_WANT_SYS_SIGNAL
644#define __ARCH_WANT_SYS_UTIME
645#define __ARCH_WANT_SYS_WAITPID
646#define __ARCH_WANT_SYS_SOCKETCALL
647#define __ARCH_WANT_SYS_FADVISE64
648#define __ARCH_WANT_SYS_GETPGRP
649#define __ARCH_WANT_SYS_LLSEEK
650#define __ARCH_WANT_SYS_NICE
651#define __ARCH_WANT_SYS_OLD_GETRLIMIT
652#define __ARCH_WANT_SYS_OLDUMOUNT
653#define __ARCH_WANT_SYS_SIGPENDING
654#define __ARCH_WANT_SYS_SIGPROCMASK
655#define __ARCH_WANT_SYS_RT_SIGACTION
656#define __ARCH_WANT_SYS_RT_SIGSUSPEND
657#define __ARCH_WANT_SYS_TIME
658#define __ARCH_WANT_COMPAT_SYS_TIME
659
660#ifdef __KERNEL__
661#ifndef __ASSEMBLY__
662
663#include <linux/linkage.h>
664#include <linux/compiler.h>
665#include <linux/types.h>
666#include <asm/ptrace.h>
667
668asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs);
669struct sigaction;
670asmlinkage long sys_rt_sigaction(int sig,
671 const struct sigaction __user *act,
672 struct sigaction __user *oact,
673 size_t sigsetsize);
674
675#endif /* __ASSEMBLY__ */
676#endif /* __KERNEL__ */
677#endif /* __NO_STUBS */
678
679/*
680 * "Conditional" syscalls
681 *
682 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
683 * but it doesn't work on all toolchains, so we just do it by hand
684 */
685#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
686
687#endif /* _ASM_X86_64_UNISTD_H_ */
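/*
 * Sketch of the consumer this header is built for (a reconstruction;
 * the in-tree table builder lives in arch/x86_64/kernel).  Because
 * __SYSCALL() defaults to nothing, the header can be used for the
 * numbers alone; redefining the macro and re-including the header
 * (after undefining the guard) expands the same list first into
 * declarations and then into table initializers.  __NR_syscall_max is
 * assumed to be provided by the build.
 */
#define __SYSCALL(nr, sym) extern asmlinkage void sym(void);
#undef _ASM_X86_64_UNISTD_H_
#include <asm-x86_64/unistd.h>		/* first pass: declarations */

#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
#undef _ASM_X86_64_UNISTD_H_

typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);

const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
	[0 ... __NR_syscall_max] = sys_ni_syscall,	/* default */
#include <asm-x86_64/unistd.h>		/* second pass: initializers */
};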
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
deleted file mode 100644
index 02710f6a4560..000000000000
--- a/include/asm-x86_64/unwind.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_64_UNWIND_H
2#define _ASM_X86_64_UNWIND_H
3
4#define UNW_PC(frame) ((void)(frame), 0UL)
5#define UNW_SP(frame) ((void)(frame), 0UL)
6
7static inline int arch_unw_user_mode(const void *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_X86_64_UNWIND_H */
diff --git a/include/asm-x86_64/user.h b/include/asm-x86_64/user.h
deleted file mode 100644
index 12785c649ac5..000000000000
--- a/include/asm-x86_64/user.h
+++ /dev/null
@@ -1,114 +0,0 @@
1#ifndef _X86_64_USER_H
2#define _X86_64_USER_H
3
4#include <asm/types.h>
5#include <asm/page.h>
6/* Core file format: The core file is written in such a way that gdb
7 can understand it and provide useful information to the user.
8 There are quite a number of obstacles to being able to view the
9 contents of the floating point registers, and until these are
10 solved you will not be able to view their contents.
11 Actually, you can read in the core file and look at the contents of
12 the user struct to find out what the floating point registers
13 contain.
14
15 The actual file contents are as follows:
16 UPAGE: 1 page consisting of a user struct that tells gdb what is present
17 in the file. Directly after this is a copy of the task_struct, which
18 is currently not used by gdb, but it may come in useful at some point.
19 All of the registers are stored as part of the upage. The upage should
20 always be only one page.
21 DATA: The data area is stored. We use current->end_text to
22 current->brk to pick up all of the user variables, plus any memory
23 that may have been malloced. No attempt is made to determine if a page
24 is demand-zero or if a page is totally unused; we just cover the entire
25 range. All of the addresses are rounded in such a way that an integral
26 number of pages is written.
27 STACK: We need the stack information in order to get a meaningful
28 backtrace. We need to write the data from (esp) to
29 current->start_stack, so we round each of these off in order to be able
30 to write an integer number of pages.
31 The minimum core file size is 3 pages, or 12288 bytes. */
32
33/*
34 * Pentium III FXSR, SSE support
35 * Gareth Hughes <gareth@valinux.com>, May 2000
36 *
37 * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
38 * interacting with the FXSR-format floating point environment. Floating
39 * point data can be accessed in the regular format in the usual manner,
40 * and both the standard and SIMD floating point data can be accessed via
41 * the new ptrace requests. In either case, changes to the FPU environment
42 * will be reflected in the task's state as expected.
43 *
44 * x86-64 support by Andi Kleen.
45 */
46
47/* This matches the 64bit FXSAVE format as defined by AMD. It is the same
48 as the 32bit format defined by Intel, except that the selector:offset pairs for
49 data and eip are replaced with flat 64bit pointers. */
50struct user_i387_struct {
51 unsigned short cwd;
52 unsigned short swd;
53 unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
54 unsigned short fop;
55 __u64 rip;
56 __u64 rdp;
57 __u32 mxcsr;
58 __u32 mxcsr_mask;
59 __u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
60 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
61 __u32 padding[24];
62};
63
64/*
65 * Segment register layout in coredumps.
66 */
67struct user_regs_struct {
68 unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10;
69 unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
70 unsigned long rip,cs,eflags;
71 unsigned long rsp,ss;
72 unsigned long fs_base, gs_base;
73 unsigned long ds,es,fs,gs;
74};
75
76/* When the kernel dumps core, it starts by dumping the user struct -
77 this will be used by gdb to figure out where the data and stack segments
78 are within the file, and what virtual addresses to use. */
79struct user{
80/* We start with the registers, to mimic the way that "memory" is returned
81 from the ptrace(3,...) function. */
82 struct user_regs_struct regs; /* Where the registers are actually stored */
83/* ptrace does not yet supply these. Someday.... */
84 int u_fpvalid; /* True if math co-processor being used. */
85 /* Not yet used. */
86 int pad0;
87 struct user_i387_struct i387; /* Math Co-processor registers. */
88/* The rest of this junk is to help gdb figure out what goes where */
89 unsigned long int u_tsize; /* Text segment size (pages). */
90 unsigned long int u_dsize; /* Data segment size (pages). */
91 unsigned long int u_ssize; /* Stack segment size (pages). */
92 unsigned long start_code; /* Starting virtual address of text. */
93 unsigned long start_stack; /* Starting virtual address of stack area.
94 This is actually the bottom of the stack,
95 the top of the stack is always found in the
96 esp register. */
97 long int signal; /* Signal that caused the core dump. */
98 int reserved; /* No longer used */
99 int pad1;
100 struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
101 /* the registers. */
102 struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
103 unsigned long magic; /* To uniquely identify a core file */
104 char u_comm[32]; /* User command that was responsible */
105 unsigned long u_debugreg[8];
106 unsigned long error_code; /* CPU error code or 0 */
107 unsigned long fault_address; /* CR2 (faulting address) or 0 */
108};
109#define NBPG PAGE_SIZE
110#define UPAGES 1
111#define HOST_TEXT_START_ADDR (u.start_code)
112#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
113
114#endif /* _X86_64_USER_H */
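/*
 * Small worked example of the constants above (illustration only): the
 * minimum core file described in the comment is the UPAGE plus one
 * rounded data page and one rounded stack page.
 */
static unsigned long min_core_bytes(void)
{
	return (UPAGES + 2) * NBPG;	/* 3 * 4096 = 12288 bytes */
}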
diff --git a/include/asm-x86_64/user32.h b/include/asm-x86_64/user32.h
deleted file mode 100644
index f769872debea..000000000000
--- a/include/asm-x86_64/user32.h
+++ /dev/null
@@ -1,69 +0,0 @@
1#ifndef USER32_H
2#define USER32_H 1
3
4/* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */
5
6struct user_i387_ia32_struct {
7 u32 cwd;
8 u32 swd;
9 u32 twd;
10 u32 fip;
11 u32 fcs;
12 u32 foo;
13 u32 fos;
14 u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
15};
16
17/* FSAVE frame with extensions */
18struct user32_fxsr_struct {
19 unsigned short cwd;
20 unsigned short swd;
21 unsigned short twd; /* not compatible with the 64bit twd */
22 unsigned short fop;
23 int fip;
24 int fcs;
25 int foo;
26 int fos;
27 int mxcsr;
28 int reserved;
29 int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
30 int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
31 int padding[56];
32};
33
34struct user_regs_struct32 {
35 __u32 ebx, ecx, edx, esi, edi, ebp, eax;
36 unsigned short ds, __ds, es, __es;
37 unsigned short fs, __fs, gs, __gs;
38 __u32 orig_eax, eip;
39 unsigned short cs, __cs;
40 __u32 eflags, esp;
41 unsigned short ss, __ss;
42};
43
44struct user32 {
45 struct user_regs_struct32 regs; /* Where the registers are actually stored */
46 int u_fpvalid; /* True if math co-processor being used. */
47 /* Not yet used. */
48 struct user_i387_ia32_struct i387; /* Math Co-processor registers. */
49/* The rest of this junk is to help gdb figure out what goes where */
50 __u32 u_tsize; /* Text segment size (pages). */
51 __u32 u_dsize; /* Data segment size (pages). */
52 __u32 u_ssize; /* Stack segment size (pages). */
53 __u32 start_code; /* Starting virtual address of text. */
54 __u32 start_stack; /* Starting virtual address of stack area.
55 This is actually the bottom of the stack,
56 the top of the stack is always found in the
57 esp register. */
58 __u32 signal; /* Signal that caused the core dump. */
59 int reserved; /* No longer used */
60 __u32 u_ar0; /* Used by gdb to help find the values for */
61 /* the registers. */
62 __u32 u_fpstate; /* Math Co-processor pointer. */
63 __u32 magic; /* To uniquely identify a core file */
64 char u_comm[32]; /* User command that was responsible */
65 int u_debugreg[8];
66};
67
68
69#endif
diff --git a/include/asm-x86_64/vga.h b/include/asm-x86_64/vga.h
deleted file mode 100644
index 0ecf68ac03aa..000000000000
--- a/include/asm-x86_64/vga.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6
7#ifndef _LINUX_ASM_VGA_H_
8#define _LINUX_ASM_VGA_H_
9
10/*
11 * On the PC, we can just recalculate addresses and then
12 * access the videoram directly without any black magic.
13 */
14
15#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
16
17#define vga_readb(x) (*(x))
18#define vga_writeb(x,y) (*(y) = (x))
19
20#endif
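/*
 * Illustration (not from the patch): the macros above applied to the
 * classic VGA text buffer at physical 0xb8000.
 */
static void sample_putc(char c)
{
	char *screen = (char *)VGA_MAP_MEM(0xb8000, 0x8000);

	vga_writeb(c, screen);		/* top-left character cell */
	vga_writeb(0x07, screen + 1);	/* grey-on-black attribute */
}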
diff --git a/include/asm-x86_64/vgtod.h b/include/asm-x86_64/vgtod.h
deleted file mode 100644
index 3301f0929342..000000000000
--- a/include/asm-x86_64/vgtod.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef _ASM_VGTOD_H
2#define _ASM_VGTOD_H 1
3
4#include <asm/vsyscall.h>
5#include <linux/clocksource.h>
6
7struct vsyscall_gtod_data {
8 seqlock_t lock;
9
10 /* open coded 'struct timespec' */
11 time_t wall_time_sec;
12 u32 wall_time_nsec;
13
14 int sysctl_enabled;
15 struct timezone sys_tz;
16 struct { /* extract of a clocksource struct */
17 cycle_t (*vread)(void);
18 cycle_t cycle_last;
19 cycle_t mask;
20 u32 mult;
21 u32 shift;
22 } clock;
23 struct timespec wall_to_monotonic;
24};
25extern struct vsyscall_gtod_data __vsyscall_gtod_data
26__section_vsyscall_gtod_data;
27extern struct vsyscall_gtod_data vsyscall_gtod_data;
28
29#endif
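/*
 * Sketch of the read side the seqlock enables (a reconstruction, not
 * the in-tree vgettimeofday): readers snapshot the fields and retry if
 * a writer raced with them, so user context never takes a lock.
 */
static void sample_read_walltime(struct timespec *ts)
{
	struct vsyscall_gtod_data *gtod = &__vsyscall_gtod_data;
	unsigned seq;

	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
	} while (read_seqretry(&gtod->lock, seq));
}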
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
deleted file mode 100644
index 3b8ceb4af2cf..000000000000
--- a/include/asm-x86_64/vsyscall.h
+++ /dev/null
@@ -1,44 +0,0 @@
1#ifndef _ASM_X86_64_VSYSCALL_H_
2#define _ASM_X86_64_VSYSCALL_H_
3
4enum vsyscall_num {
5 __NR_vgettimeofday,
6 __NR_vtime,
7 __NR_vgetcpu,
8};
9
10#define VSYSCALL_START (-10UL << 20)
11#define VSYSCALL_SIZE 1024
12#define VSYSCALL_END (-2UL << 20)
13#define VSYSCALL_MAPPED_PAGES 1
14#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
15
16#ifdef __KERNEL__
17#include <linux/seqlock.h>
18
19#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
20#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
21
22/* Definitions for CONFIG_GENERIC_TIME */
23#define __section_vsyscall_gtod_data __attribute__ \
24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
25#define __section_vsyscall_clock __attribute__ \
26 ((unused, __section__ (".vsyscall_clock"),aligned(16)))
27#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
28
29#define VGETCPU_RDTSCP 1
30#define VGETCPU_LSL 2
31
32#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
33#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
34
35extern int __vgetcpu_mode;
36extern volatile unsigned long __jiffies;
37
38/* kernel space (writeable) */
39extern int vgetcpu_mode;
40extern struct timezone sys_tz;
41
42#endif /* __KERNEL__ */
43
44#endif /* _ASM_X86_64_VSYSCALL_H_ */
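/*
 * Worked example of the address arithmetic above: VSYSCALL_START is
 * -10UL << 20, i.e. 0xffffffffff600000, and each entry occupies a
 * fixed 1024-byte slot, so
 *
 *	VSYSCALL_ADDR(__NR_vgettimeofday) == 0xffffffffff600000
 *	VSYSCALL_ADDR(__NR_vtime)         == 0xffffffffff600400
 *	VSYSCALL_ADDR(__NR_vgetcpu)       == 0xffffffffff600800
 *
 * which is why these addresses form a stable userspace ABI.
 */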
diff --git a/include/asm-x86_64/vsyscall32.h b/include/asm-x86_64/vsyscall32.h
deleted file mode 100644
index c631c082f8f7..000000000000
--- a/include/asm-x86_64/vsyscall32.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _ASM_VSYSCALL32_H
2#define _ASM_VSYSCALL32_H 1
3
4/* Values need to match arch/x86_64/ia32/vsyscall.lds */
5
6#ifdef __ASSEMBLY__
7#define VSYSCALL32_BASE 0xffffe000
8#define VSYSCALL32_SYSEXIT (VSYSCALL32_BASE + 0x410)
9#else
10#define VSYSCALL32_BASE 0xffffe000UL
11#define VSYSCALL32_END (VSYSCALL32_BASE + PAGE_SIZE)
12#define VSYSCALL32_EHDR ((const struct elf32_hdr *) VSYSCALL32_BASE)
13
14#define VSYSCALL32_VSYSCALL ((void *)VSYSCALL32_BASE + 0x400)
15#define VSYSCALL32_SYSEXIT ((void *)VSYSCALL32_BASE + 0x410)
16#define VSYSCALL32_SIGRETURN ((void __user *)VSYSCALL32_BASE + 0x500)
17#define VSYSCALL32_RTSIGRETURN ((void __user *)VSYSCALL32_BASE + 0x600)
18#endif
19
20#endif
diff --git a/include/asm-x86_64/xor.h b/include/asm-x86_64/xor.h
deleted file mode 100644
index f942fcc21831..000000000000
--- a/include/asm-x86_64/xor.h
+++ /dev/null
@@ -1,354 +0,0 @@
1/*
2 * include/asm-x86_64/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for MMX and SSE.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16
17/*
18 * Cache avoiding checksumming functions utilizing KNI instructions
19 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
20 */
21
22/*
23 * Based on
24 * High-speed RAID5 checksumming functions utilizing SSE instructions.
25 * Copyright (C) 1998 Ingo Molnar.
26 */
27
28/*
29 * x86-64 changes / gcc fixes from Andi Kleen.
30 * Copyright 2002 Andi Kleen, SuSE Labs.
31 *
32 * This hasn't been optimized for the hammer yet, but there are likely
33 * no advantages to be gained from x86-64 here anyway.
34 */
35
36typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
37
38/* We don't use gcc to save the XMM registers, because there is no easy way to
39 tell it to do a clts before saving the registers. */
40#define XMMS_SAVE do { \
41 preempt_disable(); \
42 asm volatile ( \
43 "movq %%cr0,%0 ;\n\t" \
44 "clts ;\n\t" \
45 "movups %%xmm0,(%1) ;\n\t" \
46 "movups %%xmm1,0x10(%1) ;\n\t" \
47 "movups %%xmm2,0x20(%1) ;\n\t" \
48 "movups %%xmm3,0x30(%1) ;\n\t" \
49 : "=&r" (cr0) \
50 : "r" (xmm_save) \
51 : "memory"); \
52} while(0)
53
54#define XMMS_RESTORE do { \
55 asm volatile ( \
56 "sfence ;\n\t" \
57 "movups (%1),%%xmm0 ;\n\t" \
58 "movups 0x10(%1),%%xmm1 ;\n\t" \
59 "movups 0x20(%1),%%xmm2 ;\n\t" \
60 "movups 0x30(%1),%%xmm3 ;\n\t" \
61 "movq %0,%%cr0 ;\n\t" \
62 : \
63 : "r" (cr0), "r" (xmm_save) \
64 : "memory"); \
65 preempt_enable(); \
66} while(0)
67
68#define OFFS(x) "16*("#x")"
69#define PF_OFFS(x) "256+16*("#x")"
70#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
71#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
72#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
73#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
74#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
75#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
76#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
77#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
78#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
79#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
80#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
81#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
82#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
83
84
85static void
86xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
87{
88 unsigned int lines = bytes >> 8;
89 unsigned long cr0;
90 xmm_store_t xmm_save[4];
91
92 XMMS_SAVE;
93
94 asm volatile (
95#undef BLOCK
96#define BLOCK(i) \
97 LD(i,0) \
98 LD(i+1,1) \
99 PF1(i) \
100 PF1(i+2) \
101 LD(i+2,2) \
102 LD(i+3,3) \
103 PF0(i+4) \
104 PF0(i+6) \
105 XO1(i,0) \
106 XO1(i+1,1) \
107 XO1(i+2,2) \
108 XO1(i+3,3) \
109 ST(i,0) \
110 ST(i+1,1) \
111 ST(i+2,2) \
112 ST(i+3,3) \
113
114
115 PF0(0)
116 PF0(2)
117
118 " .align 32 ;\n"
119 " 1: ;\n"
120
121 BLOCK(0)
122 BLOCK(4)
123 BLOCK(8)
124 BLOCK(12)
125
126 " addq %[inc], %[p1] ;\n"
127 " addq %[inc], %[p2] ;\n"
128 " decl %[cnt] ; jnz 1b"
129 : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
130 : [inc] "r" (256UL)
131 : "memory");
132
133 XMMS_RESTORE;
134}
135
136static void
137xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
138 unsigned long *p3)
139{
140 unsigned int lines = bytes >> 8;
141 xmm_store_t xmm_save[4];
142 unsigned long cr0;
143
144 XMMS_SAVE;
145
146 __asm__ __volatile__ (
147#undef BLOCK
148#define BLOCK(i) \
149 PF1(i) \
150 PF1(i+2) \
151 LD(i,0) \
152 LD(i+1,1) \
153 LD(i+2,2) \
154 LD(i+3,3) \
155 PF2(i) \
156 PF2(i+2) \
157 PF0(i+4) \
158 PF0(i+6) \
159 XO1(i,0) \
160 XO1(i+1,1) \
161 XO1(i+2,2) \
162 XO1(i+3,3) \
163 XO2(i,0) \
164 XO2(i+1,1) \
165 XO2(i+2,2) \
166 XO2(i+3,3) \
167 ST(i,0) \
168 ST(i+1,1) \
169 ST(i+2,2) \
170 ST(i+3,3) \
171
172
173 PF0(0)
174 PF0(2)
175
176 " .align 32 ;\n"
177 " 1: ;\n"
178
179 BLOCK(0)
180 BLOCK(4)
181 BLOCK(8)
182 BLOCK(12)
183
184 " addq %[inc], %[p1] ;\n"
185 " addq %[inc], %[p2] ;\n"
186 " addq %[inc], %[p3] ;\n"
187 " decl %[cnt] ; jnz 1b"
188 : [cnt] "+r" (lines),
189 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
190 : [inc] "r" (256UL)
191 : "memory");
192 XMMS_RESTORE;
193}
194
195static void
196xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
197 unsigned long *p3, unsigned long *p4)
198{
199 unsigned int lines = bytes >> 8;
200 xmm_store_t xmm_save[4];
201 unsigned long cr0;
202
203 XMMS_SAVE;
204
205 __asm__ __volatile__ (
206#undef BLOCK
207#define BLOCK(i) \
208 PF1(i) \
209 PF1(i+2) \
210 LD(i,0) \
211 LD(i+1,1) \
212 LD(i+2,2) \
213 LD(i+3,3) \
214 PF2(i) \
215 PF2(i+2) \
216 XO1(i,0) \
217 XO1(i+1,1) \
218 XO1(i+2,2) \
219 XO1(i+3,3) \
220 PF3(i) \
221 PF3(i+2) \
222 PF0(i+4) \
223 PF0(i+6) \
224 XO2(i,0) \
225 XO2(i+1,1) \
226 XO2(i+2,2) \
227 XO2(i+3,3) \
228 XO3(i,0) \
229 XO3(i+1,1) \
230 XO3(i+2,2) \
231 XO3(i+3,3) \
232 ST(i,0) \
233 ST(i+1,1) \
234 ST(i+2,2) \
235 ST(i+3,3) \
236
237
238 PF0(0)
239 PF0(2)
240
241 " .align 32 ;\n"
242 " 1: ;\n"
243
244 BLOCK(0)
245 BLOCK(4)
246 BLOCK(8)
247 BLOCK(12)
248
249 " addq %[inc], %[p1] ;\n"
250 " addq %[inc], %[p2] ;\n"
251 " addq %[inc], %[p3] ;\n"
252 " addq %[inc], %[p4] ;\n"
253 " decl %[cnt] ; jnz 1b"
254 : [cnt] "+c" (lines),
255 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
256 : [inc] "r" (256UL)
257 : "memory" );
258
259 XMMS_RESTORE;
260}
261
262static void
263xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
264 unsigned long *p3, unsigned long *p4, unsigned long *p5)
265{
266 unsigned int lines = bytes >> 8;
267 xmm_store_t xmm_save[4];
268 unsigned long cr0;
269
270 XMMS_SAVE;
271
272 __asm__ __volatile__ (
273#undef BLOCK
274#define BLOCK(i) \
275 PF1(i) \
276 PF1(i+2) \
277 LD(i,0) \
278 LD(i+1,1) \
279 LD(i+2,2) \
280 LD(i+3,3) \
281 PF2(i) \
282 PF2(i+2) \
283 XO1(i,0) \
284 XO1(i+1,1) \
285 XO1(i+2,2) \
286 XO1(i+3,3) \
287 PF3(i) \
288 PF3(i+2) \
289 XO2(i,0) \
290 XO2(i+1,1) \
291 XO2(i+2,2) \
292 XO2(i+3,3) \
293 PF4(i) \
294 PF4(i+2) \
295 PF0(i+4) \
296 PF0(i+6) \
297 XO3(i,0) \
298 XO3(i+1,1) \
299 XO3(i+2,2) \
300 XO3(i+3,3) \
301 XO4(i,0) \
302 XO4(i+1,1) \
303 XO4(i+2,2) \
304 XO4(i+3,3) \
305 ST(i,0) \
306 ST(i+1,1) \
307 ST(i+2,2) \
308 ST(i+3,3) \
309
310
311 PF0(0)
312 PF0(2)
313
314 " .align 32 ;\n"
315 " 1: ;\n"
316
317 BLOCK(0)
318 BLOCK(4)
319 BLOCK(8)
320 BLOCK(12)
321
322 " addq %[inc], %[p1] ;\n"
323 " addq %[inc], %[p2] ;\n"
324 " addq %[inc], %[p3] ;\n"
325 " addq %[inc], %[p4] ;\n"
326 " addq %[inc], %[p5] ;\n"
327 " decl %[cnt] ; jnz 1b"
328 : [cnt] "+c" (lines),
329 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
330 [p5] "+r" (p5)
331 : [inc] "r" (256UL)
332 : "memory");
333
334 XMMS_RESTORE;
335}
336
337static struct xor_block_template xor_block_sse = {
338 .name = "generic_sse",
339 .do_2 = xor_sse_2,
340 .do_3 = xor_sse_3,
341 .do_4 = xor_sse_4,
342 .do_5 = xor_sse_5,
343};
344
345#undef XOR_TRY_TEMPLATES
346#define XOR_TRY_TEMPLATES \
347 do { \
348 xor_speed(&xor_block_sse); \
349 } while (0)
350
351/* We force the use of the SSE xor block because it can write around L2.
352 We may also be able to load into only the L1, depending on how the cpu
353 deals with a load to a line that is being prefetched. */
354#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
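/*
 * Usage sketch (illustration only): driving the template above.  Each
 * loop iteration of xor_sse_N consumes 256 bytes per buffer (four
 * BLOCKs of four 16-byte registers), which is why lines = bytes >> 8;
 * buffers must be 16-byte aligned and sized in 256-byte multiples for
 * the movaps loads.
 */
static void sample_xor_pages(unsigned long *dst, unsigned long *src)
{
	xor_block_sse.do_2(PAGE_SIZE, dst, src);	/* dst ^= src */
}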