author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-x86_64
subject   Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/8253pit.h        10
-rw-r--r--  include/asm-x86_64/a.out.h          27
-rw-r--r--  include/asm-x86_64/acpi.h          176
-rw-r--r--  include/asm-x86_64/agp.h            22
-rw-r--r--  include/asm-x86_64/apic.h          118
-rw-r--r--  include/asm-x86_64/apicdef.h       391
-rw-r--r--  include/asm-x86_64/atomic.h        381
-rw-r--r--  include/asm-x86_64/bitops.h        418
-rw-r--r--  include/asm-x86_64/boot.h           15
-rw-r--r--  include/asm-x86_64/bootsetup.h      39
-rw-r--r--  include/asm-x86_64/bug.h            25
-rw-r--r--  include/asm-x86_64/bugs.h           29
-rw-r--r--  include/asm-x86_64/byteorder.h      33
-rw-r--r--  include/asm-x86_64/cache.h          14
-rw-r--r--  include/asm-x86_64/cacheflush.h     30
-rw-r--r--  include/asm-x86_64/calling.h       156
-rw-r--r--  include/asm-x86_64/checksum.h      193
-rw-r--r--  include/asm-x86_64/compat.h        205
-rw-r--r--  include/asm-x86_64/cpu.h             1
-rw-r--r--  include/asm-x86_64/cpufeature.h    104
-rw-r--r--  include/asm-x86_64/cputime.h         6
-rw-r--r--  include/asm-x86_64/current.h        27
-rw-r--r--  include/asm-x86_64/debugreg.h       65
-rw-r--r--  include/asm-x86_64/delay.h          27
-rw-r--r--  include/asm-x86_64/desc.h          217
-rw-r--r--  include/asm-x86_64/div64.h           1
-rw-r--r--  include/asm-x86_64/dma-mapping.h   138
-rw-r--r--  include/asm-x86_64/dma.h           298
-rw-r--r--  include/asm-x86_64/dwarf2.h         42
-rw-r--r--  include/asm-x86_64/e820.h           59
-rw-r--r--  include/asm-x86_64/elf.h           160
-rw-r--r--  include/asm-x86_64/errno.h           6
-rw-r--r--  include/asm-x86_64/fcntl.h          76
-rw-r--r--  include/asm-x86_64/fixmap.h         96
-rw-r--r--  include/asm-x86_64/floppy.h        285
-rw-r--r--  include/asm-x86_64/fpu32.h          10
-rw-r--r--  include/asm-x86_64/genapic.h        35
-rw-r--r--  include/asm-x86_64/hardirq.h        37
-rw-r--r--  include/asm-x86_64/hdreg.h           1
-rw-r--r--  include/asm-x86_64/hpet.h           60
-rw-r--r--  include/asm-x86_64/hw_irq.h        144
-rw-r--r--  include/asm-x86_64/i387.h          150
-rw-r--r--  include/asm-x86_64/ia32.h          172
-rw-r--r--  include/asm-x86_64/ia32_unistd.h   300
-rw-r--r--  include/asm-x86_64/ide.h             1
-rw-r--r--  include/asm-x86_64/io.h            341
-rw-r--r--  include/asm-x86_64/io_apic.h       221
-rw-r--r--  include/asm-x86_64/ioctl.h          75
-rw-r--r--  include/asm-x86_64/ioctl32.h         1
-rw-r--r--  include/asm-x86_64/ioctls.h         82
-rw-r--r--  include/asm-x86_64/ipcbuf.h         29
-rw-r--r--  include/asm-x86_64/ipi.h           113
-rw-r--r--  include/asm-x86_64/irq.h            55
-rw-r--r--  include/asm-x86_64/kdebug.h         53
-rw-r--r--  include/asm-x86_64/kmap_types.h     19
-rw-r--r--  include/asm-x86_64/kprobes.h        63
-rw-r--r--  include/asm-x86_64/ldt.h            36
-rw-r--r--  include/asm-x86_64/linkage.h         6
-rw-r--r--  include/asm-x86_64/local.h          73
-rw-r--r--  include/asm-x86_64/mach_apic.h      29
-rw-r--r--  include/asm-x86_64/mc146818rtc.h    29
-rw-r--r--  include/asm-x86_64/mce.h            80
-rw-r--r--  include/asm-x86_64/mman.h           44
-rw-r--r--  include/asm-x86_64/mmsegment.h       8
-rw-r--r--  include/asm-x86_64/mmu.h            20
-rw-r--r--  include/asm-x86_64/mmu_context.h    79
-rw-r--r--  include/asm-x86_64/mmx.h            14
-rw-r--r--  include/asm-x86_64/mmzone.h         63
-rw-r--r--  include/asm-x86_64/module.h         10
-rw-r--r--  include/asm-x86_64/mpspec.h        241
-rw-r--r--  include/asm-x86_64/msgbuf.h         27
-rw-r--r--  include/asm-x86_64/msi.h            18
-rw-r--r--  include/asm-x86_64/msr.h           387
-rw-r--r--  include/asm-x86_64/mtrr.h          108
-rw-r--r--  include/asm-x86_64/namei.h          11
-rw-r--r--  include/asm-x86_64/nmi.h            57
-rw-r--r--  include/asm-x86_64/node.h            1
-rw-r--r--  include/asm-x86_64/numa.h           21
-rw-r--r--  include/asm-x86_64/numnodes.h       12
-rw-r--r--  include/asm-x86_64/page.h          139
-rw-r--r--  include/asm-x86_64/param.h          22
-rw-r--r--  include/asm-x86_64/parport.h        18
-rw-r--r--  include/asm-x86_64/pci-direct.h     48
-rw-r--r--  include/asm-x86_64/pci.h           141
-rw-r--r--  include/asm-x86_64/pda.h            83
-rw-r--r--  include/asm-x86_64/percpu.h         52
-rw-r--r--  include/asm-x86_64/pgalloc.h       105
-rw-r--r--  include/asm-x86_64/pgtable.h       437
-rw-r--r--  include/asm-x86_64/poll.h           26
-rw-r--r--  include/asm-x86_64/posix_types.h   119
-rw-r--r--  include/asm-x86_64/prctl.h          10
-rw-r--r--  include/asm-x86_64/processor.h     462
-rw-r--r--  include/asm-x86_64/proto.h         118
-rw-r--r--  include/asm-x86_64/ptrace.h        114
-rw-r--r--  include/asm-x86_64/resource.h        6
-rw-r--r--  include/asm-x86_64/rtc.h            10
-rw-r--r--  include/asm-x86_64/rwlock.h         86
-rw-r--r--  include/asm-x86_64/rwsem.h         278
-rw-r--r--  include/asm-x86_64/scatterlist.h    22
-rw-r--r--  include/asm-x86_64/seccomp.h        24
-rw-r--r--  include/asm-x86_64/sections.h        7
-rw-r--r--  include/asm-x86_64/segment.h        46
-rw-r--r--  include/asm-x86_64/semaphore.h     196
-rw-r--r--  include/asm-x86_64/sembuf.h         25
-rw-r--r--  include/asm-x86_64/serial.h        130
-rw-r--r--  include/asm-x86_64/setup.h           6
-rw-r--r--  include/asm-x86_64/shmbuf.h         38
-rw-r--r--  include/asm-x86_64/shmparam.h        6
-rw-r--r--  include/asm-x86_64/sigcontext.h     55
-rw-r--r--  include/asm-x86_64/sigcontext32.h   71
-rw-r--r--  include/asm-x86_64/siginfo.h        10
-rw-r--r--  include/asm-x86_64/signal.h        213
-rw-r--r--  include/asm-x86_64/smp.h           149
-rw-r--r--  include/asm-x86_64/socket.h         50
-rw-r--r--  include/asm-x86_64/sockios.h        12
-rw-r--r--  include/asm-x86_64/spinlock.h      214
-rw-r--r--  include/asm-x86_64/stat.h           44
-rw-r--r--  include/asm-x86_64/statfs.h         58
-rw-r--r--  include/asm-x86_64/string.h         67
-rw-r--r--  include/asm-x86_64/suspend.h        58
-rw-r--r--  include/asm-x86_64/swiotlb.h        40
-rw-r--r--  include/asm-x86_64/system.h        343
-rw-r--r--  include/asm-x86_64/termbits.h      173
-rw-r--r--  include/asm-x86_64/termios.h       106
-rw-r--r--  include/asm-x86_64/thread_info.h   144
-rw-r--r--  include/asm-x86_64/timex.h          31
-rw-r--r--  include/asm-x86_64/tlb.h            13
-rw-r--r--  include/asm-x86_64/tlbflush.h      119
-rw-r--r--  include/asm-x86_64/topology.h       68
-rw-r--r--  include/asm-x86_64/types.h          60
-rw-r--r--  include/asm-x86_64/uaccess.h       365
-rw-r--r--  include/asm-x86_64/ucontext.h       12
-rw-r--r--  include/asm-x86_64/unaligned.h      37
-rw-r--r--  include/asm-x86_64/unistd.h        797
-rw-r--r--  include/asm-x86_64/user.h          114
-rw-r--r--  include/asm-x86_64/user32.h         69
-rw-r--r--  include/asm-x86_64/vga.h            20
-rw-r--r--  include/asm-x86_64/vsyscall.h       61
-rw-r--r--  include/asm-x86_64/vsyscall32.h     20
-rw-r--r--  include/asm-x86_64/xor.h           354
140 files changed, 13947 insertions, 0 deletions
diff --git a/include/asm-x86_64/8253pit.h b/include/asm-x86_64/8253pit.h
new file mode 100644
index 000000000000..285f78488ccb
--- /dev/null
+++ b/include/asm-x86_64/8253pit.h
@@ -0,0 +1,10 @@
/*
 * 8253/8254 Programmable Interval Timer
 */

#ifndef _8253PIT_H
#define _8253PIT_H

#define PIT_TICK_RATE 1193182UL

#endif
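
PIT_TICK_RATE is the 8253/8254's input clock in Hz; timer setup divides it by the desired interrupt frequency to get the 16-bit reload value programmed into the counter. A minimal user-space sketch of that arithmetic (the HZ value and the rounding are assumptions for illustration, not part of this header):

#include <stdio.h>

#define PIT_TICK_RATE 1193182UL	/* PIT input clock in Hz, as above */
#define HZ 1000UL		/* assumed kernel tick frequency */

int main(void)
{
	/* Reload value: PIT ticks per kernel tick, rounded to nearest. */
	unsigned long latch = (PIT_TICK_RATE + HZ / 2) / HZ;

	printf("latch = %lu (fits in 16 bits: %s)\n",
	       latch, latch <= 0xffff ? "yes" : "no");
	return 0;
}

At HZ=1000 this yields a latch of 1193, comfortably inside the counter's 16-bit range.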
diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h
new file mode 100644
index 000000000000..5952914f4121
--- /dev/null
+++ b/include/asm-x86_64/a.out.h
@@ -0,0 +1,27 @@
#ifndef __X8664_A_OUT_H__
#define __X8664_A_OUT_H__

/* 32bit a.out */

struct exec
{
  unsigned int a_info;	/* Use macros N_MAGIC, etc for access */
  unsigned a_text;	/* length of text, in bytes */
  unsigned a_data;	/* length of data, in bytes */
  unsigned a_bss;	/* length of uninitialized data area for file, in bytes */
  unsigned a_syms;	/* length of symbol table data in file, in bytes */
  unsigned a_entry;	/* start address */
  unsigned a_trsize;	/* length of relocation info for text, in bytes */
  unsigned a_drsize;	/* length of relocation info for data, in bytes */
};

#define N_TRSIZE(a)	((a).a_trsize)
#define N_DRSIZE(a)	((a).a_drsize)
#define N_SYMSIZE(a)	((a).a_syms)

#ifdef __KERNEL__
#include <linux/thread_info.h>
#define STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
#endif

#endif /* __X8664_A_OUT_H__ */
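
struct exec is the on-disk header of a 32-bit a.out binary, and the N_*SIZE macros simply pick fields out of it. A sketch of reading such a header from a file and printing its segment sizes; note the N_MAGIC definition here is borrowed from the generic a.out headers, not from this file:

#include <stdio.h>

struct exec {			/* mirrors the header above */
	unsigned int a_info;
	unsigned a_text, a_data, a_bss;
	unsigned a_syms, a_entry, a_trsize, a_drsize;
};

/* Assumption: the magic lives in the low 16 bits of a_info, as in
 * the generic <a.out.h>. */
#define N_MAGIC(a) ((a).a_info & 0xffff)

int main(int argc, char **argv)
{
	struct exec ex;
	FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

	if (!f || fread(&ex, sizeof(ex), 1, f) != 1) {
		fprintf(stderr, "usage: %s <a.out-file>\n", argv[0]);
		return 1;
	}
	printf("magic=%o text=%u data=%u bss=%u entry=%#x\n",
	       N_MAGIC(ex), ex.a_text, ex.a_data, ex.a_bss, ex.a_entry);
	fclose(f);
	return 0;
}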
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
new file mode 100644
index 000000000000..a6b41b892062
--- /dev/null
+++ b/include/asm-x86_64/acpi.h
@@ -0,0 +1,176 @@
/*
 *  asm-x86_64/acpi.h
 *
 *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#define COMPILER_DEPENDENT_INT64	long long
#define COMPILER_DEPENDENT_UINT64	unsigned long long

/*
 * Calling conventions:
 *
 * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
 * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
 * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
 * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
 */
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE

/* Asm macros */

#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS()  local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()	wbinvd()


static inline int
__acpi_acquire_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}

static inline int
__acpi_release_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return old & 0x1;
}

#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
	((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))

#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
	((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))

/*
 * Math helper asm macros
 */
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	asm("divl %2;"		      \
	    :"=a"(q32), "=d"(r32)     \
	    :"r"(d32),		      \
	     "0"(n_lo), "1"(n_hi))


#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	asm("shrl $1,%2;"	      \
	    "rcrl $1,%3;"	      \
	    :"=r"(n_hi), "=r"(n_lo)   \
	    :"0"(n_hi), "1"(n_lo))

/*
 * Refer to the Intel ACPI _PDC support document for bit definitions
 */
#define ACPI_PDC_EST_CAPABILITY_SMP	0xa
#define ACPI_PDC_EST_CAPABILITY_MSR	0x1

#ifdef CONFIG_ACPI_BOOT
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_ht;
static inline void disable_acpi(void)
{
	acpi_disabled = 1;
	acpi_ht = 0;
	acpi_pci_disabled = 1;
	acpi_noirq = 1;
}

/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4

extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);

#else	/* !CONFIG_ACPI_BOOT */
#define acpi_lapic 0
#define acpi_ioapic 0
#endif	/* !CONFIG_ACPI_BOOT */

extern int acpi_numa;
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)

#ifdef CONFIG_ACPI_PCI
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
	acpi_pci_disabled = 1;
	acpi_noirq_set();
}
extern int acpi_irq_balance_set(char *str);
#else
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline int acpi_irq_balance_set(char *str) { return 0; }
#endif

#ifdef CONFIG_ACPI_SLEEP

/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);

extern unsigned long acpi_wakeup_address;

/* early initialization routine */
extern void acpi_reserve_bootmem(void);

#endif /* CONFIG_ACPI_SLEEP */

#define boot_cpu_physical_apicid boot_cpu_id

extern int acpi_disabled;
extern int acpi_pci_disabled;

extern u8 x86_acpiid_to_apicid[];

extern int acpi_skip_timer_override;

#endif /* __KERNEL__ */

#endif /* _ASM_ACPI_H */
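
The global-lock helpers implement the ACPI FACS protocol: bit 0 of the lock word is "pending", bit 1 is "owned". The acquire path sets owned when the lock is free and otherwise sets pending, with the cmpxchg loop retrying if another CPU raced in; the return value is -1 (nonzero, lock acquired, new value 2) or 0 (already owned, pending set, caller must wait for the release event, new value 3). A user-space model of the same arithmetic, with GCC's __sync_val_compare_and_swap standing in for the kernel's cmpxchg() (an assumption of this sketch):

#include <stdio.h>

/* Model of the acquire path above: bit 0 = pending, bit 1 = owned. */
static int acquire_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;

	do {
		old = *lock;
		/* Set owned if free; otherwise also set pending. */
		new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
		val = __sync_val_compare_and_swap(lock, old, new);
	} while (val != old);
	return (new < 3) ? -1 : 0;	/* -1 = acquired, 0 = must wait */
}

int main(void)
{
	unsigned int lock = 0;

	/* First caller gets the lock outright: returns -1, lock = 0x2. */
	printf("first:  %d (lock=%#x)\n", acquire_global_lock(&lock), lock);
	/* Second caller finds it owned: returns 0, pending bit set (0x3). */
	printf("second: %d (lock=%#x)\n", acquire_global_lock(&lock), lock);
	return 0;
}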
diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h
new file mode 100644
index 000000000000..0bb9019d58aa
--- /dev/null
+++ b/include/asm-x86_64/agp.h
@@ -0,0 +1,22 @@
#ifndef AGP_H
#define AGP_H 1

#include <asm/cacheflush.h>

/*
 * Functions to keep the agpgart mappings coherent.
 * The GART gives the CPU a physical alias of memory. The alias is
 * mapped uncacheable. Make sure there are no conflicting mappings
 * with different cachability attributes for the same page.
 */

int map_page_into_agp(struct page *page);
int unmap_page_from_agp(struct page *page);
#define flush_agp_mappings() global_flush_tlb()

/* Could use CLFLUSH here if the cpu supports it. But then it would
   need to be called for each cacheline of the whole page so it may not be
   worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")

#endif
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
new file mode 100644
index 000000000000..c025cc3ef789
--- /dev/null
+++ b/include/asm-x86_64/apic.h
@@ -0,0 +1,118 @@
#ifndef __ASM_APIC_H
#define __ASM_APIC_H

#include <linux/config.h>
#include <linux/pm.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/system.h>

#define Dprintk(x...)

/*
 * Debugging macros
 */
#define APIC_QUIET   0
#define APIC_VERBOSE 1
#define APIC_DEBUG   2

extern int apic_verbosity;

/*
 * Define the default level of output to be very little
 * This can be turned up by using apic=verbose for more
 * information and apic=debug for _lots_ of information.
 * apic_verbosity is defined in apic.c
 */
#define apic_printk(v, s, a...) do {       \
		if ((v) <= apic_verbosity) \
			printk(s, ##a);    \
	} while (0)

#ifdef CONFIG_X86_LOCAL_APIC

struct pt_regs;

/*
 * Basic functions accessing APICs.
 */

static __inline void apic_write(unsigned long reg, unsigned int v)
{
	*((volatile unsigned int *)(APIC_BASE+reg)) = v;
}

static __inline void apic_write_atomic(unsigned long reg, unsigned int v)
{
	xchg((volatile unsigned int *)(APIC_BASE+reg), v);
}

static __inline unsigned int apic_read(unsigned long reg)
{
	return *((volatile unsigned int *)(APIC_BASE+reg));
}

static __inline__ void apic_wait_icr_idle(void)
{
	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
}

#define FORCE_READ_AROUND_WRITE 0
#define apic_read_around(x)
#define apic_write_around(x,y) apic_write((x),(y))

static inline void ack_APIC_irq(void)
{
	/*
	 * ack_APIC_irq() actually gets compiled as a single instruction:
	 * - a single rmw on Pentium/82489DX
	 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
	 * ... yummie.
	 */

	/* Docs say use 0 for future compatibility */
	apic_write_around(APIC_EOI, 0);
}

extern int get_maxlvt (void);
extern void clear_local_APIC (void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (void);
extern void disable_local_APIC (void);
extern int verify_local_APIC (void);
extern void cache_APIC_registers (void);
extern void sync_Arb_IDs (void);
extern void init_bsp_APIC (void);
extern void setup_local_APIC (void);
extern void init_apic_mappings (void);
extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern void setup_apic_nmi_watchdog (void);
extern int reserve_lapic_nmi(void);
extern void release_lapic_nmi(void);
extern void disable_timer_nmi_watchdog(void);
extern void enable_timer_nmi_watchdog(void);
extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
extern int APIC_init_uniprocessor (void);
extern void disable_APIC_timer(void);
extern void enable_APIC_timer(void);
extern void clustered_apic_check(void);

extern int check_nmi_watchdog(void);
extern void nmi_watchdog_default(void);
extern int setup_nmi_watchdog(char *);

extern unsigned int nmi_watchdog;
#define NMI_DEFAULT	-1
#define NMI_NONE	0
#define NMI_IO_APIC	1
#define NMI_LOCAL_APIC	2
#define NMI_INVALID	3

#endif /* CONFIG_X86_LOCAL_APIC */

#define esr_disable 0
extern unsigned boot_cpu_id;

#endif /* __ASM_APIC_H */
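
apic_read() and apic_write() are plain volatile 32-bit loads and stores into the CPU-local APIC page mapped at APIC_BASE; each register sits at a fixed byte offset. A user-space model of the accessors, with an ordinary array standing in for the fixmapped page (an assumption; the APIC_ID offset 0x20 follows apicdef.h below):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the 4K fixmapped APIC page; registers are 32-bit
 * values at 16-byte strides within it. */
static uint32_t apic_base[4096 / 4];

static inline void apic_write(unsigned long reg, uint32_t v)
{
	*(volatile uint32_t *)((char *)apic_base + reg) = v;
}

static inline uint32_t apic_read(unsigned long reg)
{
	return *(volatile uint32_t *)((char *)apic_base + reg);
}

int main(void)
{
	apic_write(0x20, 3u << 24);	/* APIC_ID: physical ID in bits 31:24 */
	printf("APIC ID = %u\n", (apic_read(0x20) >> 24) & 0xff);
	return 0;
}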
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
new file mode 100644
index 000000000000..3d7627ffe67d
--- /dev/null
+++ b/include/asm-x86_64/apicdef.h
@@ -0,0 +1,391 @@
#ifndef __ASM_APICDEF_H
#define __ASM_APICDEF_H

/*
 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
 *
 * Alan Cox <Alan.Cox@linux.org>, 1995.
 * Ingo Molnar <mingo@redhat.com>, 1999, 2000
 */

#define APIC_DEFAULT_PHYS_BASE		0xfee00000

#define APIC_ID				0x20
#define APIC_ID_MASK			(0xFFu<<24)
#define GET_APIC_ID(x)			(((x)>>24)&0xFFu)
#define APIC_LVR			0x30
#define APIC_LVR_MASK			0xFF00FF
#define GET_APIC_VERSION(x)		((x)&0xFFu)
#define GET_APIC_MAXLVT(x)		(((x)>>16)&0xFFu)
#define APIC_INTEGRATED(x)		((x)&0xF0u)
#define APIC_TASKPRI			0x80
#define APIC_TPRI_MASK			0xFFu
#define APIC_ARBPRI			0x90
#define APIC_ARBPRI_MASK		0xFFu
#define APIC_PROCPRI			0xA0
#define APIC_EOI			0xB0
#define APIC_EIO_ACK			0x0	/* Write this to the EOI register */
#define APIC_RRR			0xC0
#define APIC_LDR			0xD0
#define APIC_LDR_MASK			(0xFFu<<24)
#define GET_APIC_LOGICAL_ID(x)		(((x)>>24)&0xFFu)
#define SET_APIC_LOGICAL_ID(x)		(((x)<<24))
#define APIC_ALL_CPUS			0xFFu
#define APIC_DFR			0xE0
#define APIC_DFR_CLUSTER		0x0FFFFFFFul
#define APIC_DFR_FLAT			0xFFFFFFFFul
#define APIC_SPIV			0xF0
#define APIC_SPIV_FOCUS_DISABLED	(1<<9)
#define APIC_SPIV_APIC_ENABLED		(1<<8)
#define APIC_ISR			0x100
#define APIC_TMR			0x180
#define APIC_IRR			0x200
#define APIC_ESR			0x280
#define APIC_ESR_SEND_CS		0x00001
#define APIC_ESR_RECV_CS		0x00002
#define APIC_ESR_SEND_ACC		0x00004
#define APIC_ESR_RECV_ACC		0x00008
#define APIC_ESR_SENDILL		0x00020
#define APIC_ESR_RECVILL		0x00040
#define APIC_ESR_ILLREGA		0x00080
#define APIC_ICR			0x300
#define APIC_DEST_SELF			0x40000
#define APIC_DEST_ALLINC		0x80000
#define APIC_DEST_ALLBUT		0xC0000
#define APIC_ICR_RR_MASK		0x30000
#define APIC_ICR_RR_INVALID		0x00000
#define APIC_ICR_RR_INPROG		0x10000
#define APIC_ICR_RR_VALID		0x20000
#define APIC_INT_LEVELTRIG		0x08000
#define APIC_INT_ASSERT			0x04000
#define APIC_ICR_BUSY			0x01000
#define APIC_DEST_LOGICAL		0x00800
#define APIC_DEST_PHYSICAL		0x00000
#define APIC_DM_FIXED			0x00000
#define APIC_DM_LOWEST			0x00100
#define APIC_DM_SMI			0x00200
#define APIC_DM_REMRD			0x00300
#define APIC_DM_NMI			0x00400
#define APIC_DM_INIT			0x00500
#define APIC_DM_STARTUP			0x00600
#define APIC_DM_EXTINT			0x00700
#define APIC_VECTOR_MASK		0x000FF
#define APIC_ICR2			0x310
#define GET_APIC_DEST_FIELD(x)		(((x)>>24)&0xFF)
#define SET_APIC_DEST_FIELD(x)		((x)<<24)
#define APIC_LVTT			0x320
#define APIC_LVTTHMR			0x330
#define APIC_LVTPC			0x340
#define APIC_LVT0			0x350
#define APIC_LVT_TIMER_BASE_MASK	(0x3<<18)
#define GET_APIC_TIMER_BASE(x)		(((x)>>18)&0x3)
#define SET_APIC_TIMER_BASE(x)		(((x)<<18))
#define APIC_TIMER_BASE_CLKIN		0x0
#define APIC_TIMER_BASE_TMBASE		0x1
#define APIC_TIMER_BASE_DIV		0x2
#define APIC_LVT_TIMER_PERIODIC		(1<<17)
#define APIC_LVT_MASKED			(1<<16)
#define APIC_LVT_LEVEL_TRIGGER		(1<<15)
#define APIC_LVT_REMOTE_IRR		(1<<14)
#define APIC_INPUT_POLARITY		(1<<13)
#define APIC_SEND_PENDING		(1<<12)
#define APIC_MODE_MASK			0x700
#define GET_APIC_DELIVERY_MODE(x)	(((x)>>8)&0x7)
#define SET_APIC_DELIVERY_MODE(x,y)	(((x)&~0x700)|((y)<<8))
#define APIC_MODE_FIXED			0x0
#define APIC_MODE_NMI			0x4
#define APIC_MODE_EXINT			0x7
#define APIC_LVT1			0x360
#define APIC_LVTERR			0x370
#define APIC_TMICT			0x380
#define APIC_TMCCT			0x390
#define APIC_TDCR			0x3E0
#define APIC_TDR_DIV_TMBASE		(1<<2)
#define APIC_TDR_DIV_1			0xB
#define APIC_TDR_DIV_2			0x0
#define APIC_TDR_DIV_4			0x1
#define APIC_TDR_DIV_8			0x2
#define APIC_TDR_DIV_16			0x3
#define APIC_TDR_DIV_32			0x8
#define APIC_TDR_DIV_64			0x9
#define APIC_TDR_DIV_128		0xA

#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))

#define MAX_IO_APICS 32

/*
 * All x86-64 systems are xAPIC compatible.
 * In the following, "apicid" is a physical APIC ID.
 */
#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid)	((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid)	(APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid)	((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS	((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)

/*
 * the local APIC register structure, memory mapped. Not terribly well
 * tested, but we might eventually use this one in the future - the
 * problem why we cannot use it right now is the P5 APIC, it has an
 * errata which cannot take 8-bit reads and writes, only 32-bit ones ...
 */
#define u32 unsigned int

#define lapic ((volatile struct local_apic *)APIC_BASE)

struct local_apic {

/*000*/	struct { u32 __reserved[4]; } __reserved_01;

/*010*/	struct { u32 __reserved[4]; } __reserved_02;

/*020*/	struct { /* APIC ID Register */
		u32 __reserved_1	: 24,
		    phys_apic_id	:  4,
		    __reserved_2	:  4;
		u32 __reserved[3];
	} id;

/*030*/	const
	struct { /* APIC Version Register */
		u32 version		:  8,
		    __reserved_1	:  8,
		    max_lvt		:  8,
		    __reserved_2	:  8;
		u32 __reserved[3];
	} version;

/*040*/	struct { u32 __reserved[4]; } __reserved_03;

/*050*/	struct { u32 __reserved[4]; } __reserved_04;

/*060*/	struct { u32 __reserved[4]; } __reserved_05;

/*070*/	struct { u32 __reserved[4]; } __reserved_06;

/*080*/	struct { /* Task Priority Register */
		u32 priority		:  8,
		    __reserved_1	: 24;
		u32 __reserved_2[3];
	} tpr;

/*090*/	const
	struct { /* Arbitration Priority Register */
		u32 priority		:  8,
		    __reserved_1	: 24;
		u32 __reserved_2[3];
	} apr;

/*0A0*/	const
	struct { /* Processor Priority Register */
		u32 priority		:  8,
		    __reserved_1	: 24;
		u32 __reserved_2[3];
	} ppr;

/*0B0*/	struct { /* End Of Interrupt Register */
		u32 eoi;
		u32 __reserved[3];
	} eoi;

/*0C0*/	struct { u32 __reserved[4]; } __reserved_07;

/*0D0*/	struct { /* Logical Destination Register */
		u32 __reserved_1	: 24,
		    logical_dest	:  8;
		u32 __reserved_2[3];
	} ldr;

/*0E0*/	struct { /* Destination Format Register */
		u32 __reserved_1	: 28,
		    model		:  4;
		u32 __reserved_2[3];
	} dfr;

/*0F0*/	struct { /* Spurious Interrupt Vector Register */
		u32 spurious_vector	:  8,
		    apic_enabled	:  1,
		    focus_cpu		:  1,
		    __reserved_2	: 22;
		u32 __reserved_3[3];
	} svr;

/*100*/	struct { /* In Service Register */
/*170*/		u32 bitfield;
		u32 __reserved[3];
	} isr [8];

/*180*/	struct { /* Trigger Mode Register */
/*1F0*/		u32 bitfield;
		u32 __reserved[3];
	} tmr [8];

/*200*/	struct { /* Interrupt Request Register */
/*270*/		u32 bitfield;
		u32 __reserved[3];
	} irr [8];

/*280*/	union { /* Error Status Register */
		struct {
			u32 send_cs_error		:  1,
			    receive_cs_error		:  1,
			    send_accept_error		:  1,
			    receive_accept_error	:  1,
			    __reserved_1		:  1,
			    send_illegal_vector		:  1,
			    receive_illegal_vector	:  1,
			    illegal_register_address	:  1,
			    __reserved_2		: 24;
			u32 __reserved_3[3];
		} error_bits;
		struct {
			u32 errors;
			u32 __reserved_3[3];
		} all_errors;
	} esr;

/*290*/	struct { u32 __reserved[4]; } __reserved_08;

/*2A0*/	struct { u32 __reserved[4]; } __reserved_09;

/*2B0*/	struct { u32 __reserved[4]; } __reserved_10;

/*2C0*/	struct { u32 __reserved[4]; } __reserved_11;

/*2D0*/	struct { u32 __reserved[4]; } __reserved_12;

/*2E0*/	struct { u32 __reserved[4]; } __reserved_13;

/*2F0*/	struct { u32 __reserved[4]; } __reserved_14;

/*300*/	struct { /* Interrupt Command Register 1 */
		u32 vector		:  8,
		    delivery_mode	:  3,
		    destination_mode	:  1,
		    delivery_status	:  1,
		    __reserved_1	:  1,
		    level		:  1,
		    trigger		:  1,
		    __reserved_2	:  2,
		    shorthand		:  2,
		    __reserved_3	: 12;
		u32 __reserved_4[3];
	} icr1;

/*310*/	struct { /* Interrupt Command Register 2 */
		union {
			u32 __reserved_1	: 24,
			    phys_dest		:  4,
			    __reserved_2	:  4;
			u32 __reserved_3	: 24,
			    logical_dest	:  8;
		} dest;
		u32 __reserved_4[3];
	} icr2;

/*320*/	struct { /* LVT - Timer */
		u32 vector		:  8,
		    __reserved_1	:  4,
		    delivery_status	:  1,
		    __reserved_2	:  3,
		    mask		:  1,
		    timer_mode		:  1,
		    __reserved_3	: 14;
		u32 __reserved_4[3];
	} lvt_timer;

/*330*/	struct { /* LVT - Thermal Sensor */
		u32 vector		:  8,
		    delivery_mode	:  3,
		    __reserved_1	:  1,
		    delivery_status	:  1,
		    __reserved_2	:  3,
		    mask		:  1,
		    __reserved_3	: 15;
		u32 __reserved_4[3];
	} lvt_thermal;

/*340*/	struct { /* LVT - Performance Counter */
		u32 vector		:  8,
		    delivery_mode	:  3,
		    __reserved_1	:  1,
		    delivery_status	:  1,
		    __reserved_2	:  3,
		    mask		:  1,
		    __reserved_3	: 15;
		u32 __reserved_4[3];
	} lvt_pc;

/*350*/	struct { /* LVT - LINT0 */
		u32 vector		:  8,
		    delivery_mode	:  3,
		    __reserved_1	:  1,
		    delivery_status	:  1,
		    polarity		:  1,
		    remote_irr		:  1,
		    trigger		:  1,
		    mask		:  1,
		    __reserved_2	: 15;
		u32 __reserved_3[3];
	} lvt_lint0;

/*360*/	struct { /* LVT - LINT1 */
		u32 vector		:  8,
		    delivery_mode	:  3,
		    __reserved_1	:  1,
		    delivery_status	:  1,
		    polarity		:  1,
		    remote_irr		:  1,
		    trigger		:  1,
		    mask		:  1,
		    __reserved_2	: 15;
		u32 __reserved_3[3];
	} lvt_lint1;

/*370*/	struct { /* LVT - Error */
		u32 vector		:  8,
		    __reserved_1	:  4,
		    delivery_status	:  1,
		    __reserved_2	:  3,
		    mask		:  1,
		    __reserved_3	: 15;
		u32 __reserved_4[3];
	} lvt_error;

/*380*/	struct { /* Timer Initial Count Register */
		u32 initial_count;
		u32 __reserved_2[3];
	} timer_icr;

/*390*/	const
	struct { /* Timer Current Count Register */
		u32 curr_count;
		u32 __reserved_2[3];
	} timer_ccr;

/*3A0*/	struct { u32 __reserved[4]; } __reserved_16;

/*3B0*/	struct { u32 __reserved[4]; } __reserved_17;

/*3C0*/	struct { u32 __reserved[4]; } __reserved_18;

/*3D0*/	struct { u32 __reserved[4]; } __reserved_19;

/*3E0*/	struct { /* Timer Divide Configuration Register */
		u32 divisor		:  4,
		    __reserved_1	: 28;
		u32 __reserved_2[3];
	} timer_dcr;

/*3F0*/	struct { u32 __reserved[4]; } __reserved_20;

} __attribute__ ((packed));

#undef u32

#define BAD_APICID 0xFFu

#endif
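
The flat register #defines and the bitfield struct describe the same layout; for instance, an IPI is composed by OR-ing a delivery mode and flag bits into ICR and placing the destination APIC ID in bits 31:24 of ICR2. A sketch assembling an INIT IPI from the constants above (the printf harness is the only addition; a real sender would also poll APIC_ICR_BUSY before and after):

#include <stdio.h>

/* Constants copied from the header above. */
#define APIC_DM_INIT		0x00500
#define APIC_INT_ASSERT		0x04000
#define APIC_DEST_PHYSICAL	0x00000
#define SET_APIC_DEST_FIELD(x)	((x) << 24)

int main(void)
{
	unsigned int icr  = APIC_DM_INIT | APIC_INT_ASSERT | APIC_DEST_PHYSICAL;
	unsigned int icr2 = SET_APIC_DEST_FIELD(1u);	/* target APIC ID 1 */

	/* The hardware requires ICR2 (destination) to be written first;
	 * the write to ICR then triggers the IPI. */
	printf("ICR2 = %#010x, ICR = %#010x\n", icr2, icr);
	return 0;
}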
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
new file mode 100644
index 000000000000..fc4c5956e1ea
--- /dev/null
+++ b/include/asm-x86_64/atomic.h
@@ -0,0 +1,381 @@
#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__

#include <linux/config.h>

/* atomic_t should be 32 bit signed type */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/* A 64bit atomic type */

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
#define atomic64_read(v)	((v)->counter)

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v,i)	(((v)->counter) = (i))

/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
	__asm__ __volatile__(
		LOCK "addq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
	__asm__ __volatile__(
		LOCK "subq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "subq %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic64_inc(atomic64_t *v)
{
	__asm__ __volatile__(
		LOCK "incq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic64_dec(atomic64_t *v)
{
	__asm__ __volatile__(
		LOCK "decq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "decq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "incq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic64_add_negative - add and test if negative
 * @v: pointer to atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ long atomic64_add_negative(long i, atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "addq %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	__asm__ __volatile__(
		LOCK "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	return i + __i;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i,v);
}

#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif
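
The LOCK prefix makes each read-modify-write a single atomic bus transaction, which is what makes atomic_dec_and_test() usable for reference counting: exactly one CPU observes the count hitting zero. A user-space sketch of that pattern, with GCC __sync builtins standing in for the LOCK-prefixed asm (an assumption of this sketch):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }

/* __sync builtins model the lock-prefixed incl/decl above. */
static inline void atomic_inc(atomic_t *v)
{
	__sync_add_and_fetch(&v->counter, 1);
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	return __sync_sub_and_fetch(&v->counter, 1) == 0;
}

int main(void)
{
	atomic_t refcount = ATOMIC_INIT(1);

	atomic_inc(&refcount);			/* a second user takes a reference */
	if (atomic_dec_and_test(&refcount))
		printf("freed too early!\n");	/* not reached: count is now 1 */
	if (atomic_dec_and_test(&refcount))
		printf("last reference dropped, free the object\n");
	return 0;
}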
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
new file mode 100644
index 000000000000..5dd7727c756b
--- /dev/null
+++ b/include/asm-x86_64/bitops.h
@@ -0,0 +1,418 @@
#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr) : "memory");
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__ volatile(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"dIr" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

#undef ADDR

extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);

/* return index of first bit set in val or max when no bit is set */
static inline unsigned long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}

#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))

/*
 * Find string of zero bits in a bitmap. -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}

#ifdef __KERNEL__

static inline int sched_find_first_bit(const unsigned long *b)
{
	if (b[0])
		return __ffs(b[0]);
	if (b[1])
		return __ffs(b[1]) + 64;
	/* callers guarantee at least one bit is set in b[0..2] */
	return __ffs(b[2]) + 128;
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=r" (r) : "rm" (x), "r" (-1));
	return r+1;
}

/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit(nr,addr) \
	__test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr)      test_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_bit((unsigned long*)addr, size, off)

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit((void*)addr,size)

/* find last set bit */
#define fls(x) generic_fls(x)

#define ARCH_HAS_ATOMIC_UNSIGNED 1

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */
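
The usual allocation pattern built from these primitives is: scan for a zero bit, then set it. A portable sketch of that pattern, with __builtin_ctzl playing the role of the bsfq-based __scanbit() above (an assumption of this sketch; and like the __ variants, it does no locking):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/* Non-atomic stand-in for __set_bit(). */
static void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Word-at-a-time scan, as the macros above do for small bitmaps;
 * returns size when no zero bit is found. */
static long find_first_zero_bit(const unsigned long *addr, long size)
{
	for (long i = 0; i < size; i += BITS_PER_LONG) {
		unsigned long inverted = ~addr[i / BITS_PER_LONG];
		if (inverted)
			return i + __builtin_ctzl(inverted);
	}
	return size;
}

int main(void)
{
	unsigned long map[2] = { ~0UL, 0 };	/* first word fully allocated */
	long slot = find_first_zero_bit(map, 128);

	printf("first free slot: %ld\n", slot);	/* 64 */
	set_bit(slot, map);
	printf("next free slot:  %ld\n", find_first_zero_bit(map, 128));
	return 0;
}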
diff --git a/include/asm-x86_64/boot.h b/include/asm-x86_64/boot.h
new file mode 100644
index 000000000000..96b228e6e79c
--- /dev/null
+++ b/include/asm-x86_64/boot.h
@@ -0,0 +1,15 @@
#ifndef _LINUX_BOOT_H
#define _LINUX_BOOT_H

/* Don't touch these, unless you really know what you're doing. */
#define DEF_INITSEG	0x9000
#define DEF_SYSSEG	0x1000
#define DEF_SETUPSEG	0x9020
#define DEF_SYSSIZE	0x7F00

/* Internal svga startup constants */
#define NORMAL_VGA	0xffff		/* 80x25 mode */
#define EXTENDED_VGA	0xfffe		/* 80x50 mode */
#define ASK_VGA		0xfffd		/* ask for it at bootup */

#endif
diff --git a/include/asm-x86_64/bootsetup.h b/include/asm-x86_64/bootsetup.h
new file mode 100644
index 000000000000..b570a484dc50
--- /dev/null
+++ b/include/asm-x86_64/bootsetup.h
@@ -0,0 +1,39 @@
#ifndef _X86_64_BOOTSETUP_H
#define _X86_64_BOOTSETUP_H 1

extern char x86_boot_params[2048];

/*
 * This is set up by the setup-routine at boot-time
 */
#define PARAM	((unsigned char *)x86_boot_params)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
#define E820_MAP_NR (*(char*) (PARAM+E820NR))
#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
#define INITRD_START (*(unsigned int *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned int *) (PARAM+0x21c))
#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
#define COMMAND_LINE saved_command_line

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

#endif
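
The PARAM macros overlay typed reads on the raw 2048-byte boot-parameter block at fixed offsets, the same type-punning the setup code relies on. A user-space sketch of the technique, using the INITRD_START/INITRD_SIZE offsets from the header (the stored values are invented for illustration):

#include <stdio.h>

static unsigned char x86_boot_params[2048];

/* Offset overlays as in the header above. */
#define PARAM		((unsigned char *)x86_boot_params)
#define INITRD_START	(*(unsigned int *)(PARAM + 0x218))
#define INITRD_SIZE	(*(unsigned int *)(PARAM + 0x21c))

int main(void)
{
	/* Pretend the boot loader stored an initrd descriptor here. */
	INITRD_START = 0x1000000;
	INITRD_SIZE  = 0x400000;

	printf("initrd at %#x, %u bytes\n", INITRD_START, INITRD_SIZE);
	return 0;
}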
diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h
new file mode 100644
index 000000000000..d2032f56e87c
--- /dev/null
+++ b/include/asm-x86_64/bug.h
@@ -0,0 +1,25 @@
#ifndef __ASM_X8664_BUG_H
#define __ASM_X8664_BUG_H 1

#include <linux/stringify.h>

/*
 * Tell the user there is some problem.  The exception handler decodes
 * this frame.
 */
struct bug_frame {
	unsigned char ud2[2];
	/* should use 32bit offset instead, but the assembler doesn't
	   like it */
	char *filename;
	unsigned short line;
} __attribute__((packed));

#define HAVE_ARCH_BUG
#define BUG() \
	asm volatile("ud2 ; .quad %c1 ; .short %c0" :: \
		     "i"(__LINE__), "i" (__stringify(KBUILD_BASENAME)))
void out_of_line_bug(void);
#include <asm-generic/bug.h>

#endif
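
BUG() plants a ud2 instruction followed by the file-name pointer and line number directly in the instruction stream; the invalid-opcode handler overlays struct bug_frame on the faulting address to recover them. A sketch of that encode/decode round trip in plain memory (the memcpy stands in for the trap handler reading at the faulting RIP, which is an assumption of this model):

#include <stdio.h>
#include <string.h>

struct bug_frame {
	unsigned char ud2[2];	/* 0x0f 0x0b */
	char *filename;		/* the .quad emitted by BUG() above */
	unsigned short line;	/* the .short emitted by BUG() above */
} __attribute__((packed));

int main(void)
{
	static char file[] = "drivers/example.c";	/* hypothetical source file */
	struct bug_frame f = { { 0x0f, 0x0b }, file, 42 };
	unsigned char bytes[sizeof(f)];

	memcpy(bytes, &f, sizeof(f));		/* what the text section would hold */

	struct bug_frame decoded;
	memcpy(&decoded, bytes, sizeof(decoded));	/* what the handler recovers */
	printf("kernel BUG at %s:%u!\n", decoded.filename, decoded.line);
	return 0;
}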
diff --git a/include/asm-x86_64/bugs.h b/include/asm-x86_64/bugs.h
new file mode 100644
index 000000000000..59bc68925d0f
--- /dev/null
+++ b/include/asm-x86_64/bugs.h
@@ -0,0 +1,29 @@
/*
 *  include/asm-x86_64/bugs.h
 *
 *  Copyright (C) 1994  Linus Torvalds
 *  Copyright (C) 2000  SuSE
 *
 * This is included by init/main.c to check for architecture-dependent bugs.
 *
 * Needs:
 *	void check_bugs(void);
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/pda.h>

extern void alternative_instructions(void);

static void __init check_bugs(void)
{
	identify_cpu(&boot_cpu_data);
#if !defined(CONFIG_SMP)
	printk("CPU: ");
	print_cpu_info(&boot_cpu_data);
#endif
	alternative_instructions();
}
diff --git a/include/asm-x86_64/byteorder.h b/include/asm-x86_64/byteorder.h
new file mode 100644
index 000000000000..5e86c868c75e
--- /dev/null
+++ b/include/asm-x86_64/byteorder.h
@@ -0,0 +1,33 @@
#ifndef _X86_64_BYTEORDER_H
#define _X86_64_BYTEORDER_H

#include <asm/types.h>
#include <linux/compiler.h>

#ifdef __GNUC__

static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
	__asm__("bswapq %0" : "=r" (x) : "0" (x));
	return x;
}

static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
	__asm__("bswapl %0" : "=r" (x) : "0" (x));
	return x;
}

/* Do not define swab16.  Gcc is smart enough to recognize the "C" version
   and convert it into a rotation or exchange. */

#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab64(x) ___arch__swab64(x)

#endif /* __GNUC__ */

#define __BYTEORDER_HAS_U64__

#include <linux/byteorder/little_endian.h>

#endif /* _X86_64_BYTEORDER_H */
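
___arch__swab32() compiles to a single bswapl instruction; a sketch checking its semantics against a portable shift-and-mask version, with __builtin_bswap32 standing in for the inline asm (an assumption: a gcc/clang builtin, not part of this header):

#include <stdio.h>
#include <stdint.h>

/* Portable reference for what "bswapl" computes. */
static uint32_t swab32_ref(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	uint32_t x = 0x12345678;

	/* Expect 0x78563412 from both. */
	printf("%#x -> %#x (ref %#x)\n", x, __builtin_bswap32(x), swab32_ref(x));
	return 0;
}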
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
new file mode 100644
index 000000000000..eda62bae1240
--- /dev/null
+++ b/include/asm-x86_64/cache.h
@@ -0,0 +1,14 @@
/*
 * include/asm-x86_64/cache.h
 */
#ifndef __ARCH_X8664_CACHE_H
#define __ARCH_X8664_CACHE_H

#include <linux/config.h>

/* L1 cache line size */
#define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 6	/* largest L1 which this arch supports */

#endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
new file mode 100644
index 000000000000..b3189fb229d1
--- /dev/null
+++ b/include/asm-x86_64/cacheflush.h
@@ -0,0 +1,30 @@
#ifndef _X8664_CACHEFLUSH_H
#define _X8664_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

void global_flush_tlb(void);
int change_page_attr(struct page *page, int numpages, pgprot_t prot);
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);

#endif /* _X8664_CACHEFLUSH_H */
diff --git a/include/asm-x86_64/calling.h b/include/asm-x86_64/calling.h
new file mode 100644
index 000000000000..0bc12655fa5b
--- /dev/null
+++ b/include/asm-x86_64/calling.h
@@ -0,0 +1,156 @@
1/*
2 * Some macros to handle stack frames in assembly.
3 */
4
5#include <linux/config.h>
6
7#define R15 0
8#define R14 8
9#define R13 16
10#define R12 24
11#define RBP 32
12#define RBX 40
 13/* arguments: interrupts/non tracing syscalls only save up to here */
14#define R11 48
15#define R10 56
16#define R9 64
17#define R8 72
18#define RAX 80
19#define RCX 88
20#define RDX 96
21#define RSI 104
22#define RDI 112
23#define ORIG_RAX 120 /* + error_code */
24/* end of arguments */
25/* cpu exception frame or undefined in case of fast syscall. */
26#define RIP 128
27#define CS 136
28#define EFLAGS 144
29#define RSP 152
30#define SS 160
31#define ARGOFFSET R11
32#define SWFRAME ORIG_RAX
33
34 .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0
35 subq $9*8+\addskip,%rsp
36 CFI_ADJUST_CFA_OFFSET 9*8+\addskip
37 movq %rdi,8*8(%rsp)
38 CFI_REL_OFFSET rdi,8*8
39 movq %rsi,7*8(%rsp)
40 CFI_REL_OFFSET rsi,7*8
41 movq %rdx,6*8(%rsp)
42 CFI_REL_OFFSET rdx,6*8
43 .if \norcx
44 .else
45 movq %rcx,5*8(%rsp)
46 CFI_REL_OFFSET rcx,5*8
47 .endif
48 movq %rax,4*8(%rsp)
49 CFI_REL_OFFSET rax,4*8
50 .if \nor891011
51 .else
52 movq %r8,3*8(%rsp)
53 CFI_REL_OFFSET r8,3*8
54 movq %r9,2*8(%rsp)
55 CFI_REL_OFFSET r9,2*8
56 movq %r10,1*8(%rsp)
57 CFI_REL_OFFSET r10,1*8
58 movq %r11,(%rsp)
59 CFI_REL_OFFSET r11,0*8
60 .endif
61 .endm
62
63#define ARG_SKIP 9*8
64 .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
65 .if \skipr11
66 .else
67 movq (%rsp),%r11
68 .endif
69 .if \skipr8910
70 .else
71 movq 1*8(%rsp),%r10
72 movq 2*8(%rsp),%r9
73 movq 3*8(%rsp),%r8
74 .endif
75 .if \skiprax
76 .else
77 movq 4*8(%rsp),%rax
78 .endif
79 .if \skiprcx
80 .else
81 movq 5*8(%rsp),%rcx
82 .endif
83 .if \skiprdx
84 .else
85 movq 6*8(%rsp),%rdx
86 .endif
87 movq 7*8(%rsp),%rsi
88 movq 8*8(%rsp),%rdi
89 .if ARG_SKIP+\addskip > 0
90 addq $ARG_SKIP+\addskip,%rsp
91 CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
92 .endif
93 .endm
94
95 .macro LOAD_ARGS offset
96 movq \offset(%rsp),%r11
97 movq \offset+8(%rsp),%r10
98 movq \offset+16(%rsp),%r9
99 movq \offset+24(%rsp),%r8
100 movq \offset+40(%rsp),%rcx
101 movq \offset+48(%rsp),%rdx
102 movq \offset+56(%rsp),%rsi
103 movq \offset+64(%rsp),%rdi
104 movq \offset+72(%rsp),%rax
105 .endm
106
107#define REST_SKIP 6*8
108 .macro SAVE_REST
109 subq $REST_SKIP,%rsp
110 CFI_ADJUST_CFA_OFFSET REST_SKIP
111 movq %rbx,5*8(%rsp)
112 CFI_REL_OFFSET rbx,5*8
113 movq %rbp,4*8(%rsp)
114 CFI_REL_OFFSET rbp,4*8
115 movq %r12,3*8(%rsp)
116 CFI_REL_OFFSET r12,3*8
117 movq %r13,2*8(%rsp)
118 CFI_REL_OFFSET r13,2*8
119 movq %r14,1*8(%rsp)
120 CFI_REL_OFFSET r14,1*8
121 movq %r15,(%rsp)
122 CFI_REL_OFFSET r15,0*8
123 .endm
124
125 .macro RESTORE_REST
126 movq (%rsp),%r15
127 movq 1*8(%rsp),%r14
128 movq 2*8(%rsp),%r13
129 movq 3*8(%rsp),%r12
130 movq 4*8(%rsp),%rbp
131 movq 5*8(%rsp),%rbx
132 addq $REST_SKIP,%rsp
133 CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
134 .endm
135
136 .macro SAVE_ALL
137 SAVE_ARGS
138 SAVE_REST
139 .endm
140
141 .macro RESTORE_ALL addskip=0
142 RESTORE_REST
143 RESTORE_ARGS 0,\addskip
144 .endm
145
146 .macro icebp
147 .byte 0xf1
148 .endm
149
150#ifdef CONFIG_FRAME_POINTER
151#define ENTER enter
152#define LEAVE leave
153#else
154#define ENTER
155#define LEAVE
156#endif
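
A hedged cross-check (assumption: the frame built by SAVE_ARGS plus SAVE_REST mirrors struct pt_regs from asm/ptrace.h): the R15..SS constants above are byte offsets into that frame, which a C file could verify at compile time. The header itself is assembler-only, so a check like this would live elsewhere.

#include <linux/kernel.h>	/* BUILD_BUG_ON */
#include <linux/stddef.h>	/* offsetof */
#include <asm/ptrace.h>

static inline void example_check_frame_layout(void)
{
	/* R15 is 0 and RIP is 128 in the table above. */
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	BUILD_BUG_ON(offsetof(struct pt_regs, rip) != 128);
}
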
diff --git a/include/asm-x86_64/checksum.h b/include/asm-x86_64/checksum.h
new file mode 100644
index 000000000000..d01356f01448
--- /dev/null
+++ b/include/asm-x86_64/checksum.h
@@ -0,0 +1,193 @@
1#ifndef _X86_64_CHECKSUM_H
2#define _X86_64_CHECKSUM_H
3
4/*
5 * Checksums for x86-64
6 * Copyright 2002 by Andi Kleen, SuSE Labs
7 * with some code from asm-i386/checksum.h
8 */
9
10#include <linux/compiler.h>
11#include <asm/uaccess.h>
12#include <asm/byteorder.h>
13
14/**
15 * csum_fold - Fold and invert a 32bit checksum.
16 * sum: 32bit unfolded sum
17 *
18 * Fold a 32bit running checksum to 16bit and invert it. This is usually
19 * the last step before putting a checksum into a packet.
20 * Make sure not to mix with 64bit checksums.
21 */
22static inline unsigned int csum_fold(unsigned int sum)
23{
24 __asm__(
25 " addl %1,%0\n"
26 " adcl $0xffff,%0"
27 : "=r" (sum)
28 : "r" (sum << 16), "0" (sum & 0xffff0000)
29 );
30 return (~sum) >> 16;
31}
32
33/*
34 * This is a version of ip_compute_csum() optimized for IP headers,
35 * which always checksum on 4 octet boundaries.
36 *
37 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
38 * Arnt Gulbrandsen.
39 */
40
41/**
42 * ip_fast_csum - Compute the IPv4 header checksum efficiently.
43 * iph: ipv4 header
44 * ihl: length of header / 4
45 */
46static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
47{
48 unsigned int sum;
49
50 asm( " movl (%1), %0\n"
51 " subl $4, %2\n"
52 " jbe 2f\n"
53 " addl 4(%1), %0\n"
54 " adcl 8(%1), %0\n"
55 " adcl 12(%1), %0\n"
56 "1: adcl 16(%1), %0\n"
57 " lea 4(%1), %1\n"
58 " decl %2\n"
59 " jne 1b\n"
60 " adcl $0, %0\n"
61 " movl %0, %2\n"
62 " shrl $16, %0\n"
63 " addw %w2, %w0\n"
64 " adcl $0, %0\n"
65 " notl %0\n"
66 "2:"
 67     /* Since the input registers which are loaded with iph and ihl
 68        are modified, we must also specify them as outputs, or gcc
 69        will assume they contain their original values. */
70 : "=r" (sum), "=r" (iph), "=r" (ihl)
71 : "1" (iph), "2" (ihl)
72 : "memory");
73 return(sum);
74}
75
76/**
 77 * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
78 * @saddr: source address
79 * @daddr: destination address
80 * @len: length of packet
81 * @proto: ip protocol of packet
82 * @sum: initial sum to be added in (32bit unfolded)
83 *
 84 * Returns the pseudo header checksum of the input data. The result
 85 * is 32bit unfolded.
86 */
87static inline unsigned long
88csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len,
89 unsigned short proto, unsigned int sum)
90{
91 asm(" addl %1, %0\n"
92 " adcl %2, %0\n"
93 " adcl %3, %0\n"
94 " adcl $0, %0\n"
95 : "=r" (sum)
96 : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum));
97 return sum;
98}
99
100
101/**
102 * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
103 * @saddr: source address
104 * @daddr: destination address
105 * @len: length of packet
106 * @proto: ip protocol of packet
107 * @sum: initial sum to be added in (32bit unfolded)
108 *
109 * Returns the 16bit pseudo header checksum of the input data, already
110 * complemented and ready to be filled in.
111 */
112static inline unsigned short int
113csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
114 unsigned short len, unsigned short proto, unsigned int sum)
115{
116 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
117}
118
119/**
120 * csum_partial - Compute an internet checksum.
121 * @buff: buffer to be checksummed
122 * @len: length of buffer.
123 * @sum: initial sum to be added in (32bit unfolded)
124 *
125 * Returns the 32bit unfolded internet checksum of the buffer.
126 * Before filling it in it needs to be csum_fold()'ed.
127 * buff should be aligned to a 64bit boundary if possible.
128 */
129extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum);
130
131#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
132#define HAVE_CSUM_COPY_USER 1
133
134
135/* Do not call this directly. Use the wrappers below */
136extern unsigned long csum_partial_copy_generic(const unsigned char *src, const unsigned char *dst,
137 unsigned len,
138 unsigned sum,
139 int *src_err_ptr, int *dst_err_ptr);
140
141
142extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
143 int len, unsigned int isum, int *errp);
144extern unsigned int csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst,
145 int len, unsigned int isum, int *errp);
146extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
147 unsigned int sum);
148
149/* Old names. To be removed. */
150#define csum_and_copy_to_user csum_partial_copy_to_user
151#define csum_and_copy_from_user csum_partial_copy_from_user
152
153/**
154 * ip_compute_csum - Compute a 16bit IP checksum.
155 * @buff: buffer address.
156 * @len: length of buffer.
157 *
158 * Returns the 16bit folded/inverted checksum of the passed buffer.
159 * Ready to fill in.
160 */
161extern unsigned short ip_compute_csum(unsigned char * buff, int len);
162
163/**
164 * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
165 * @saddr: source address
166 * @daddr: destination address
167 * @len: length of packet
168 * @proto: protocol of packet
169 * @sum: initial sum (32bit unfolded) to be added in
170 *
171 * Computes an IPv6 pseudo header checksum, which is added into the
172 * checksum of UDP/TCP packets and covers the addresses, length and protocol.
173 * Returns the unfolded 32bit checksum.
174 */
175
176struct in6_addr;
177
178#define _HAVE_ARCH_IPV6_CSUM 1
179extern unsigned short
180csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
181 __u32 len, unsigned short proto, unsigned int sum);
182
183static inline unsigned add32_with_carry(unsigned a, unsigned b)
184{
185 asm("addl %2,%0\n\t"
186 "adcl $0,%0"
187 : "=r" (a)
188 : "0" (a), "r" (b));
189 return a;
190}
191
192#endif
193
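
Usage sketch (illustrative; assumes IPPROTO_UDP from linux/in.h): a protocol implementation combines csum_partial() with csum_tcpudp_magic() to produce the final 16bit checksum.

#include <linux/in.h>		/* IPPROTO_UDP */
#include <asm/checksum.h>

static inline unsigned short example_udp_csum(unsigned int saddr,
					      unsigned int daddr,
					      unsigned char *udp, int len)
{
	/* 32bit unfolded sum over the UDP header and payload ... */
	unsigned int sum = csum_partial(udp, len, 0);

	/* ... plus the pseudo header, folded and inverted to 16bit. */
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, sum);
}
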
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
new file mode 100644
index 000000000000..d0f453c5adfc
--- /dev/null
+++ b/include/asm-x86_64/compat.h
@@ -0,0 +1,205 @@
1#ifndef _ASM_X86_64_COMPAT_H
2#define _ASM_X86_64_COMPAT_H
3
4/*
5 * Architecture specific compatibility types
6 */
7#include <linux/types.h>
8#include <linux/sched.h>
9
10#define COMPAT_USER_HZ 100
11
12typedef u32 compat_size_t;
13typedef s32 compat_ssize_t;
14typedef s32 compat_time_t;
15typedef s32 compat_clock_t;
16typedef s32 compat_pid_t;
17typedef u16 compat_uid_t;
18typedef u16 compat_gid_t;
19typedef u32 compat_uid32_t;
20typedef u32 compat_gid32_t;
21typedef u16 compat_mode_t;
22typedef u32 compat_ino_t;
23typedef u16 compat_dev_t;
24typedef s32 compat_off_t;
25typedef s64 compat_loff_t;
26typedef u16 compat_nlink_t;
27typedef u16 compat_ipc_pid_t;
28typedef s32 compat_daddr_t;
29typedef u32 compat_caddr_t;
30typedef __kernel_fsid_t compat_fsid_t;
31typedef s32 compat_timer_t;
32typedef s32 compat_key_t;
33
34typedef s32 compat_int_t;
35typedef s32 compat_long_t;
36typedef u32 compat_uint_t;
37typedef u32 compat_ulong_t;
38
39struct compat_timespec {
40 compat_time_t tv_sec;
41 s32 tv_nsec;
42};
43
44struct compat_timeval {
45 compat_time_t tv_sec;
46 s32 tv_usec;
47};
48
49struct compat_stat {
50 compat_dev_t st_dev;
51 u16 __pad1;
52 compat_ino_t st_ino;
53 compat_mode_t st_mode;
54 compat_nlink_t st_nlink;
55 compat_uid_t st_uid;
56 compat_gid_t st_gid;
57 compat_dev_t st_rdev;
58 u16 __pad2;
59 u32 st_size;
60 u32 st_blksize;
61 u32 st_blocks;
62 u32 st_atime;
63 u32 st_atime_nsec;
64 u32 st_mtime;
65 u32 st_mtime_nsec;
66 u32 st_ctime;
67 u32 st_ctime_nsec;
68 u32 __unused4;
69 u32 __unused5;
70};
71
72struct compat_flock {
73 short l_type;
74 short l_whence;
75 compat_off_t l_start;
76 compat_off_t l_len;
77 compat_pid_t l_pid;
78};
79
80#define F_GETLK64 12 /* using 'struct flock64' */
81#define F_SETLK64 13
82#define F_SETLKW64 14
83
84/*
85 * IA32 uses 4 byte alignment for 64 bit quantities,
86 * so we need to pack this structure.
87 */
88struct compat_flock64 {
89 short l_type;
90 short l_whence;
91 compat_loff_t l_start;
92 compat_loff_t l_len;
93 compat_pid_t l_pid;
94} __attribute__((packed));
95
96struct compat_statfs {
97 int f_type;
98 int f_bsize;
99 int f_blocks;
100 int f_bfree;
101 int f_bavail;
102 int f_files;
103 int f_ffree;
104 compat_fsid_t f_fsid;
105 int f_namelen; /* SunOS ignores this field. */
106 int f_frsize;
107 int f_spare[5];
108};
109
110#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
111#define COMPAT_RLIM_INFINITY 0xffffffff
112
113typedef u32 compat_old_sigset_t; /* at least 32 bits */
114
115#define _COMPAT_NSIG 64
116#define _COMPAT_NSIG_BPW 32
117
118typedef u32 compat_sigset_word;
119
120#define COMPAT_OFF_T_MAX 0x7fffffff
121#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
122
123struct compat_ipc64_perm {
124 compat_key_t key;
125 compat_uid32_t uid;
126 compat_gid32_t gid;
127 compat_uid32_t cuid;
128 compat_gid32_t cgid;
129 unsigned short mode;
130 unsigned short __pad1;
131 unsigned short seq;
132 unsigned short __pad2;
133 compat_ulong_t unused1;
134 compat_ulong_t unused2;
135};
136
137struct compat_semid64_ds {
138 struct compat_ipc64_perm sem_perm;
139 compat_time_t sem_otime;
140 compat_ulong_t __unused1;
141 compat_time_t sem_ctime;
142 compat_ulong_t __unused2;
143 compat_ulong_t sem_nsems;
144 compat_ulong_t __unused3;
145 compat_ulong_t __unused4;
146};
147
148struct compat_msqid64_ds {
149 struct compat_ipc64_perm msg_perm;
150 compat_time_t msg_stime;
151 compat_ulong_t __unused1;
152 compat_time_t msg_rtime;
153 compat_ulong_t __unused2;
154 compat_time_t msg_ctime;
155 compat_ulong_t __unused3;
156 compat_ulong_t msg_cbytes;
157 compat_ulong_t msg_qnum;
158 compat_ulong_t msg_qbytes;
159 compat_pid_t msg_lspid;
160 compat_pid_t msg_lrpid;
161 compat_ulong_t __unused4;
162 compat_ulong_t __unused5;
163};
164
165struct compat_shmid64_ds {
166 struct compat_ipc64_perm shm_perm;
167 compat_size_t shm_segsz;
168 compat_time_t shm_atime;
169 compat_ulong_t __unused1;
170 compat_time_t shm_dtime;
171 compat_ulong_t __unused2;
172 compat_time_t shm_ctime;
173 compat_ulong_t __unused3;
174 compat_pid_t shm_cpid;
175 compat_pid_t shm_lpid;
176 compat_ulong_t shm_nattch;
177 compat_ulong_t __unused4;
178 compat_ulong_t __unused5;
179};
180
181/*
182 * A pointer passed in from user mode. This should not
183 * be used for syscall parameters; just declare them
184 * as pointers because the syscall entry code will have
185 * appropriately converted them already.
186 */
187typedef u32 compat_uptr_t;
188
189static inline void __user *compat_ptr(compat_uptr_t uptr)
190{
191 return (void __user *)(unsigned long)uptr;
192}
193
194static inline compat_uptr_t ptr_to_compat(void __user *uptr)
195{
196 return (u32)(unsigned long)uptr;
197}
198
199static __inline__ void __user *compat_alloc_user_space(long len)
200{
201 struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs);
202 return (void __user *)regs->rsp - len;
203}
204
205#endif /* _ASM_X86_64_COMPAT_H */
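
Usage sketch (illustrative; example_compat_fetch() is hypothetical): an ia32 emulation path widens the zero-extended 32bit user pointer with compat_ptr() before touching user memory.

#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/compat.h>

static int example_compat_fetch(compat_uptr_t uptr)
{
	struct compat_timeval ctv;

	if (copy_from_user(&ctv, compat_ptr(uptr), sizeof(ctv)))
		return -EFAULT;
	/* ctv.tv_sec/tv_usec are s32 here; widen before use. */
	return 0;
}
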
diff --git a/include/asm-x86_64/cpu.h b/include/asm-x86_64/cpu.h
new file mode 100644
index 000000000000..8eea076525a4
--- /dev/null
+++ b/include/asm-x86_64/cpu.h
@@ -0,0 +1 @@
#include <asm-i386/cpu.h>
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
new file mode 100644
index 000000000000..0e47a6d53726
--- /dev/null
+++ b/include/asm-x86_64/cpufeature.h
@@ -0,0 +1,104 @@
1/*
2 * cpufeature.h
3 *
4 * Defines x86 CPU feature bits
5 */
6
7#ifndef __ASM_X8664_CPUFEATURE_H
8#define __ASM_X8664_CPUFEATURE_H
9
10#define NCAPINTS 6
11
12/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
13#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
14#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
15#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
16#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
17#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
18#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
19#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
20#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
21#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
22#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
23#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
24#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
25#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
26#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
27#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
28#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
29#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
30#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
31#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
32#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */
33#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
34#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
35#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
36 /* of FPU context), and CR4.OSFXSR available */
37#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
38#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
39#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
40#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
41#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
42#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
43
44/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
45/* Don't duplicate feature flags which are redundant with Intel! */
46#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
47#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
48#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */
49#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
50#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
51#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
52
53/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
54#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
55#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
56#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
57
58/* Other features, Linux-defined mapping, word 3 */
59/* This range is used for feature bits which conflict or are synthesized */
60#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
61#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
62#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
63#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
64#define X86_FEATURE_K8_C (3*32+ 4) /* C stepping K8 */
65
66/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
67#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
68#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
69#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
70#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
71#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
72#define X86_FEATURE_CID (4*32+10) /* Context ID */
73#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
74#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
75
76/* More extended AMD flags: CPUID level 0x80000001, ecx, word 5 */
77#define X86_FEATURE_LAHF_LM (5*32+ 0) /* LAHF/SAHF in long mode */
 78#define X86_FEATURE_CMP_LEGACY (5*32+ 1) /* If set, HyperThreading is not valid */
79
80#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
81#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
82
83#define cpu_has_fpu 1
84#define cpu_has_vme 0
85#define cpu_has_de 1
86#define cpu_has_pse 1
87#define cpu_has_tsc 1
88#define cpu_has_pae ___BUG___
89#define cpu_has_pge 1
90#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
91#define cpu_has_mtrr 1
92#define cpu_has_mmx 1
93#define cpu_has_fxsr 1
94#define cpu_has_xmm 1
95#define cpu_has_xmm2 1
96#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
97#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
98#define cpu_has_mp 1 /* XXX */
99#define cpu_has_k6_mtrr 0
100#define cpu_has_cyrix_arr 0
101#define cpu_has_centaur_mcr 0
102#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
103
104#endif /* __ASM_X8664_CPUFEATURE_H */
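
Usage sketch (illustrative): features that every x86-64 CPU guarantees are the constant 1 above and cost nothing to test, while the rest compile to a test_bit() on the capability bitmap.

#include <linux/kernel.h>
#include <asm/processor.h>	/* boot_cpu_data */

static void example_report_features(void)
{
	if (cpu_has_xmm3)	/* test_bit(X86_FEATURE_XMM3, ...) */
		printk(KERN_INFO "SSE3 supported\n");
	if (cpu_has(&boot_cpu_data, X86_FEATURE_LAHF_LM))
		printk(KERN_INFO "LAHF/SAHF usable in long mode\n");
}
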
diff --git a/include/asm-x86_64/cputime.h b/include/asm-x86_64/cputime.h
new file mode 100644
index 000000000000..a07012dc5a3c
--- /dev/null
+++ b/include/asm-x86_64/cputime.h
@@ -0,0 +1,6 @@
1#ifndef __X86_64_CPUTIME_H
2#define __X86_64_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __X86_64_CPUTIME_H */
diff --git a/include/asm-x86_64/current.h b/include/asm-x86_64/current.h
new file mode 100644
index 000000000000..7db560ee6f70
--- /dev/null
+++ b/include/asm-x86_64/current.h
@@ -0,0 +1,27 @@
1#ifndef _X86_64_CURRENT_H
2#define _X86_64_CURRENT_H
3
4#if !defined(__ASSEMBLY__)
5struct task_struct;
6
7#include <asm/pda.h>
8
9static inline struct task_struct *get_current(void)
10{
11 struct task_struct *t = read_pda(pcurrent);
12 return t;
13}
14
15#define current get_current()
16
17#else
18
19#ifndef ASM_OFFSET_H
20#include <asm/offset.h>
21#endif
22
23#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
24
25#endif
26
27#endif /* !(_X86_64_CURRENT_H) */
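
Usage sketch (illustrative): in C code `current` reads like a plain pointer, even though it expands to a %gs-relative read of the PDA's pcurrent slot.

#include <linux/sched.h>	/* struct task_struct */
#include <asm/current.h>

static inline int example_running_as_init(void)
{
	/* current == read_pda(pcurrent) */
	return current->pid == 1;
}
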
diff --git a/include/asm-x86_64/debugreg.h b/include/asm-x86_64/debugreg.h
new file mode 100644
index 000000000000..bd1aab1d8c4a
--- /dev/null
+++ b/include/asm-x86_64/debugreg.h
@@ -0,0 +1,65 @@
1#ifndef _X86_64_DEBUGREG_H
2#define _X86_64_DEBUGREG_H
3
4
5/* Indicate the register numbers for a number of the specific
6 debug registers. Registers 0-3 contain the addresses we wish to trap on */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */
20#define DR_TRAP3 (0x8) /* db3 */
21
22#define DR_STEP (0x4000) /* single-step */
23#define DR_SWITCH (0x8000) /* task switch */
24
25/* Now define a bunch of things for manipulating the control register.
26 The top two bytes of the control register consist of 4 fields of 4
27 bits - each field corresponds to one of the four debug registers,
28 and indicates what types of access we trap on, and how large the data
29 field is that we are looking at */
30
31#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
32#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
33
34#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
35#define DR_RW_WRITE (0x1)
36#define DR_RW_READ (0x3)
37
38#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
39#define DR_LEN_2 (0x4)
40#define DR_LEN_4 (0xC)
41#define DR_LEN_8 (0x8)
42
43/* The low byte to the control register determine which registers are
44 enabled. There are 4 fields of two bits. One bit is "local", meaning
45 that the processor will reset the bit after a task switch and the other
46 is global meaning that we have to explicitly reset the bit. With linux,
47 you can use either one, since we explicitly zero the register when we enter
48 kernel mode. */
49
50#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
51#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
52#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
53
54#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
55#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
56
57/* The second byte to the control register has a few special things.
58 We can slow the instruction pipeline for instructions coming via the
59 gdt or the ldt if we want to. I am not sure why this is an advantage */
60
61#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
62#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
63#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
64
65#endif
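
Worked example (illustrative): a DR7 value arming debug register 0 as a 4-byte write watchpoint with only its local enable bit set, built from the field definitions above.

#include <asm/debugreg.h>

/* The RW/LEN field for register 0 sits at bit 16; its local enable
 * bit is bit 0. The resulting value is 0x000d0001. */
#define EXAMPLE_DR7_WRITE4_DR0 \
	((1UL << (DR_LOCAL_ENABLE_SHIFT + 0 * DR_ENABLE_SIZE)) | \
	 ((unsigned long)(DR_RW_WRITE | DR_LEN_4) << \
	  (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE)))
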
diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h
new file mode 100644
index 000000000000..65f64acc5319
--- /dev/null
+++ b/include/asm-x86_64/delay.h
@@ -0,0 +1,27 @@
1#ifndef _X8664_DELAY_H
2#define _X8664_DELAY_H
3
4/*
5 * Copyright (C) 1993 Linus Torvalds
6 *
7 * Delay routines calling functions in arch/x86_64/lib/delay.c
8 */
9
10extern void __bad_udelay(void);
11extern void __bad_ndelay(void);
12
13extern void __udelay(unsigned long usecs);
 14extern void __ndelay(unsigned long nsecs);
15extern void __const_udelay(unsigned long usecs);
16extern void __delay(unsigned long loops);
17
18#define udelay(n) (__builtin_constant_p(n) ? \
19 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
20 __udelay(n))
21
22#define ndelay(n) (__builtin_constant_p(n) ? \
23 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
24 __ndelay(n))
25
26
27#endif /* defined(_X8664_DELAY_H) */
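
Note on the magic constants (a hedged reading, based on the matching i386 code): 0x10c6 is roughly 2^32/10^6 and 5 is roughly 2^32/10^9, so the constant-argument paths hand __const_udelay() the delay expressed as a 32.32 fixed-point fraction of a second:

	udelay(10)  -> __const_udelay(10 * 0x10c6)	/* 10 microseconds */
	ndelay(100) -> __const_udelay(100 * 5)		/* 100 nanoseconds */
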
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
new file mode 100644
index 000000000000..6aefb9c0280d
--- /dev/null
+++ b/include/asm-x86_64/desc.h
@@ -0,0 +1,217 @@
1/* Written 2000 by Andi Kleen */
2#ifndef __ARCH_DESC_H
3#define __ARCH_DESC_H
4
5#include <linux/threads.h>
6#include <asm/ldt.h>
7
8#ifndef __ASSEMBLY__
9
10#include <linux/string.h>
11#include <asm/segment.h>
12#include <asm/mmu.h>
13
14// 8 byte segment descriptor
15struct desc_struct {
16 u16 limit0;
17 u16 base0;
18 unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
19 unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
20} __attribute__((packed));
21
22struct n_desc_struct {
23 unsigned int a,b;
24};
25
26extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
27
28enum {
29 GATE_INTERRUPT = 0xE,
30 GATE_TRAP = 0xF,
31 GATE_CALL = 0xC,
32};
33
34// 16byte gate
35struct gate_struct {
36 u16 offset_low;
37 u16 segment;
38 unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
39 u16 offset_middle;
40 u32 offset_high;
41 u32 zero1;
42} __attribute__((packed));
43
44#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
45#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
46#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
47
48enum {
49 DESC_TSS = 0x9,
50 DESC_LDT = 0x2,
51};
52
53// LDT or TSS descriptor in the GDT. 16 bytes.
54struct ldttss_desc {
55 u16 limit0;
56 u16 base0;
57 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
58 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
59 u32 base3;
60 u32 zero1;
61} __attribute__((packed));
62
63struct desc_ptr {
64 unsigned short size;
65 unsigned long address;
66} __attribute__((packed)) ;
67
68#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
69#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
70#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
71
72/*
73 * This is the ldt that every process will get unless we need
74 * something other than this.
75 */
76extern struct desc_struct default_ldt[];
77extern struct gate_struct idt_table[];
78
79static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
80{
81 struct gate_struct s;
82 s.offset_low = PTR_LOW(func);
83 s.segment = __KERNEL_CS;
84 s.ist = ist;
85 s.p = 1;
86 s.dpl = dpl;
87 s.zero0 = 0;
88 s.zero1 = 0;
89 s.type = type;
90 s.offset_middle = PTR_MIDDLE(func);
91 s.offset_high = PTR_HIGH(func);
92 /* does not need to be atomic because it is only done once at setup time */
93 memcpy(adr, &s, 16);
94}
95
96static inline void set_intr_gate(int nr, void *func)
97{
98 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
99}
100
101static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
102{
103 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
104}
105
106static inline void set_system_gate(int nr, void *func)
107{
108 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
109}
110
111static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
112 unsigned size)
113{
114 struct ldttss_desc d;
115 memset(&d,0,sizeof(d));
116 d.limit0 = size & 0xFFFF;
117 d.base0 = PTR_LOW(tss);
118 d.base1 = PTR_MIDDLE(tss) & 0xFF;
119 d.type = type;
120 d.p = 1;
121 d.limit1 = (size >> 16) & 0xF;
122 d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
123 d.base3 = PTR_HIGH(tss);
124 memcpy(ptr, &d, 16);
125}
126
127static inline void set_tss_desc(unsigned cpu, void *addr)
128{
129 set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS], (unsigned long)addr,
130 DESC_TSS,
131 sizeof(struct tss_struct) - 1);
132}
133
134static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
135{
136 set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (unsigned long)addr,
137 DESC_LDT, size * 8 - 1);
138}
139
140static inline void set_seg_base(unsigned cpu, int entry, void *base)
141{
142 struct desc_struct *d = &cpu_gdt_table[cpu][entry];
143 u32 addr = (u32)(u64)base;
144 BUG_ON((u64)base >> 32);
145 d->base0 = addr & 0xffff;
146 d->base1 = (addr >> 16) & 0xff;
147 d->base2 = (addr >> 24) & 0xff;
148}
149
150#define LDT_entry_a(info) \
151 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
152/* Don't allow setting of the lm bit. It is useless anyway because
153   64bit system calls require __USER_CS. */
154#define LDT_entry_b(info) \
155 (((info)->base_addr & 0xff000000) | \
156 (((info)->base_addr & 0x00ff0000) >> 16) | \
157 ((info)->limit & 0xf0000) | \
158 (((info)->read_exec_only ^ 1) << 9) | \
159 ((info)->contents << 10) | \
160 (((info)->seg_not_present ^ 1) << 15) | \
161 ((info)->seg_32bit << 22) | \
162 ((info)->limit_in_pages << 23) | \
163 ((info)->useable << 20) | \
164 /* ((info)->lm << 21) | */ \
165 0x7000)
166
167#define LDT_empty(info) (\
168 (info)->base_addr == 0 && \
169 (info)->limit == 0 && \
170 (info)->contents == 0 && \
171 (info)->read_exec_only == 1 && \
172 (info)->seg_32bit == 0 && \
173 (info)->limit_in_pages == 0 && \
174 (info)->seg_not_present == 1 && \
175 (info)->useable == 0 && \
176 (info)->lm == 0)
177
178#if TLS_SIZE != 24
179# error update this code.
180#endif
181
182static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
183{
184 u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
185 gdt[0] = t->tls_array[0];
186 gdt[1] = t->tls_array[1];
187 gdt[2] = t->tls_array[2];
188}
189
190/*
191 * load one particular LDT into the current CPU
192 */
193extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
194{
195 int count = pc->size;
196
197 if (likely(!count)) {
198 clear_LDT();
199 return;
200 }
201
202 set_ldt_desc(cpu, pc->ldt, count);
203 load_LDT_desc();
204}
205
206static inline void load_LDT(mm_context_t *pc)
207{
208 int cpu = get_cpu();
209 load_LDT_nolock(pc, cpu);
210 put_cpu();
211}
212
213extern struct desc_ptr idt_descr;
214
215#endif /* !__ASSEMBLY__ */
216
217#endif
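
Usage sketch (illustrative; divide_error() and int3() stand in for the real entry stubs in arch/x86_64/kernel/entry.S): trap setup installs IDT entries through the gate helpers above.

#include <linux/init.h>
#include <asm/desc.h>

extern void divide_error(void);	/* assumed entry stubs */
extern void int3(void);

static void __init example_install_gates(void)
{
	set_intr_gate(0, &divide_error);	/* #DE, DPL 0, no IST */
	set_system_gate(3, &int3);	/* #BP, DPL 3 so a user int3 traps */
}
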
diff --git a/include/asm-x86_64/div64.h b/include/asm-x86_64/div64.h
new file mode 100644
index 000000000000..6cd978cefb28
--- /dev/null
+++ b/include/asm-x86_64/div64.h
@@ -0,0 +1 @@
#include <asm-generic/div64.h>
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
new file mode 100644
index 000000000000..a416dc31634a
--- /dev/null
+++ b/include/asm-x86_64/dma-mapping.h
@@ -0,0 +1,138 @@
1#ifndef _X8664_DMA_MAPPING_H
2#define _X8664_DMA_MAPPING_H 1
3
4/*
5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
6 * documentation.
7 */
8
9#include <linux/config.h>
10
11#include <asm/scatterlist.h>
12#include <asm/io.h>
13#include <asm/swiotlb.h>
14
15extern dma_addr_t bad_dma_address;
16#define dma_mapping_error(x) \
17 (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
18
19void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
20 unsigned gfp);
21void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
22 dma_addr_t dma_handle);
23
24#ifdef CONFIG_GART_IOMMU
25
26extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
27 int direction);
28extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
29 int direction);
30
31#else
32
33/* No IOMMU */
34
35static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
36 size_t size, int direction)
37{
38 dma_addr_t addr;
39
40 if (direction == DMA_NONE)
41 out_of_line_bug();
42 addr = virt_to_bus(ptr);
43
44 if ((addr+size) & ~*hwdev->dma_mask)
45 out_of_line_bug();
46 return addr;
47}
48
49static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
50 size_t size, int direction)
51{
52 if (direction == DMA_NONE)
53 out_of_line_bug();
54 /* Nothing to do */
55}
56
57#endif
58
59#define dma_map_page(dev,page,offset,size,dir) \
60 dma_map_single((dev), page_address(page)+(offset), (size), (dir))
61
62static inline void dma_sync_single_for_cpu(struct device *hwdev,
63 dma_addr_t dma_handle,
64 size_t size, int direction)
65{
66 if (direction == DMA_NONE)
67 out_of_line_bug();
68
69 if (swiotlb)
70 return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
71
72 flush_write_buffers();
73}
74
75static inline void dma_sync_single_for_device(struct device *hwdev,
76 dma_addr_t dma_handle,
77 size_t size, int direction)
78{
79 if (direction == DMA_NONE)
80 out_of_line_bug();
81
82 if (swiotlb)
83 return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
84
85 flush_write_buffers();
86}
87
88static inline void dma_sync_sg_for_cpu(struct device *hwdev,
89 struct scatterlist *sg,
90 int nelems, int direction)
91{
92 if (direction == DMA_NONE)
93 out_of_line_bug();
94
95 if (swiotlb)
96 return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction);
97
98 flush_write_buffers();
99}
100
101static inline void dma_sync_sg_for_device(struct device *hwdev,
102 struct scatterlist *sg,
103 int nelems, int direction)
104{
105 if (direction == DMA_NONE)
106 out_of_line_bug();
107
108 if (swiotlb)
109 return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction);
110
111 flush_write_buffers();
112}
113
114extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
115 int nents, int direction);
116extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
117 int nents, int direction);
118
119#define dma_unmap_page dma_unmap_single
120
121extern int dma_supported(struct device *hwdev, u64 mask);
122extern int dma_get_cache_alignment(void);
123#define dma_is_consistent(h) 1
124
125static inline int dma_set_mask(struct device *dev, u64 mask)
126{
127 if (!dev->dma_mask || !dma_supported(dev, mask))
128 return -EIO;
129 *dev->dma_mask = mask;
130 return 0;
131}
132
133static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
134{
135 flush_write_buffers();
136}
137
138#endif
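
Usage sketch (illustrative; example_tx_map() is hypothetical): the canonical streaming-DMA pattern for a driver transmit path.

#include <linux/dma-mapping.h>	/* DMA_TO_DEVICE */
#include <linux/errno.h>

static int example_tx_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -EIO;
	/* ... point the hardware at `handle` and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
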
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
new file mode 100644
index 000000000000..16fa3a064d0c
--- /dev/null
+++ b/include/asm-x86_64/dma.h
@@ -0,0 +1,298 @@
1/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992.
6 */
7
8#ifndef _ASM_DMA_H
9#define _ASM_DMA_H
10
11#include <linux/config.h>
12#include <linux/spinlock.h> /* And spinlocks */
13#include <asm/io.h> /* need byte IO */
14#include <linux/delay.h>
15
16
17#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
18#define dma_outb outb_p
19#else
20#define dma_outb outb
21#endif
22
23#define dma_inb inb
24
25/*
26 * NOTES about DMA transfers:
27 *
28 * controller 1: channels 0-3, byte operations, ports 00-1F
29 * controller 2: channels 4-7, word operations, ports C0-DF
30 *
31 * - ALL registers are 8 bits only, regardless of transfer size
32 * - channel 4 is not used - cascades 1 into 2.
33 * - channels 0-3 are byte - addresses/counts are for physical bytes
34 * - channels 5-7 are word - addresses/counts are for physical words
35 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
36 * - transfer count loaded to registers is 1 less than actual count
37 * - controller 2 offsets are all even (2x offsets for controller 1)
38 * - page registers for 5-7 don't use data bit 0, represent 128K pages
39 * - page registers for 0-3 use bit 0, represent 64K pages
40 *
41 * DMA transfers are limited to the lower 16MB of _physical_ memory.
42 * Note that addresses loaded into registers must be _physical_ addresses,
43 * not logical addresses (which may differ if paging is active).
44 *
45 * Address mapping for channels 0-3:
46 *
47 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
48 * | ... | | ... | | ... |
49 * | ... | | ... | | ... |
50 * | ... | | ... | | ... |
51 * P7 ... P0 A7 ... A0 A7 ... A0
52 * | Page | Addr MSB | Addr LSB | (DMA registers)
53 *
54 * Address mapping for channels 5-7:
55 *
56 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
57 * | ... | \ \ ... \ \ \ ... \ \
58 * | ... | \ \ ... \ \ \ ... \ (not used)
59 * | ... | \ \ ... \ \ \ ... \
60 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
61 * | Page | Addr MSB | Addr LSB | (DMA registers)
62 *
63 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
64 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
65 * the hardware level, so odd-byte transfers aren't possible).
66 *
67 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
68 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
69 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
70 *
71 */
72
73#define MAX_DMA_CHANNELS 8
74
75/* The maximum address that we can perform a DMA transfer to on this platform */
76#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
77
78/* 8237 DMA controllers */
79#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
80#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
81
82/* DMA controller registers */
83#define DMA1_CMD_REG 0x08 /* command register (w) */
84#define DMA1_STAT_REG 0x08 /* status register (r) */
85#define DMA1_REQ_REG 0x09 /* request register (w) */
86#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
87#define DMA1_MODE_REG 0x0B /* mode register (w) */
88#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
89#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
90#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
91#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
92#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
93
94#define DMA2_CMD_REG 0xD0 /* command register (w) */
95#define DMA2_STAT_REG 0xD0 /* status register (r) */
96#define DMA2_REQ_REG 0xD2 /* request register (w) */
97#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
98#define DMA2_MODE_REG 0xD6 /* mode register (w) */
99#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
100#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
101#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
102#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
103#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
104
105#define DMA_ADDR_0 0x00 /* DMA address registers */
106#define DMA_ADDR_1 0x02
107#define DMA_ADDR_2 0x04
108#define DMA_ADDR_3 0x06
109#define DMA_ADDR_4 0xC0
110#define DMA_ADDR_5 0xC4
111#define DMA_ADDR_6 0xC8
112#define DMA_ADDR_7 0xCC
113
114#define DMA_CNT_0 0x01 /* DMA count registers */
115#define DMA_CNT_1 0x03
116#define DMA_CNT_2 0x05
117#define DMA_CNT_3 0x07
118#define DMA_CNT_4 0xC2
119#define DMA_CNT_5 0xC6
120#define DMA_CNT_6 0xCA
121#define DMA_CNT_7 0xCE
122
123#define DMA_PAGE_0 0x87 /* DMA page registers */
124#define DMA_PAGE_1 0x83
125#define DMA_PAGE_2 0x81
126#define DMA_PAGE_3 0x82
127#define DMA_PAGE_5 0x8B
128#define DMA_PAGE_6 0x89
129#define DMA_PAGE_7 0x8A
130
131#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
132#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
133#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
134
135#define DMA_AUTOINIT 0x10
136
137
138extern spinlock_t dma_spin_lock;
139
140static __inline__ unsigned long claim_dma_lock(void)
141{
142 unsigned long flags;
143 spin_lock_irqsave(&dma_spin_lock, flags);
144 return flags;
145}
146
147static __inline__ void release_dma_lock(unsigned long flags)
148{
149 spin_unlock_irqrestore(&dma_spin_lock, flags);
150}
151
152/* enable/disable a specific DMA channel */
153static __inline__ void enable_dma(unsigned int dmanr)
154{
155 if (dmanr<=3)
156 dma_outb(dmanr, DMA1_MASK_REG);
157 else
158 dma_outb(dmanr & 3, DMA2_MASK_REG);
159}
160
161static __inline__ void disable_dma(unsigned int dmanr)
162{
163 if (dmanr<=3)
164 dma_outb(dmanr | 4, DMA1_MASK_REG);
165 else
166 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
167}
168
169/* Clear the 'DMA Pointer Flip Flop'.
170 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
171 * Use this once to initialize the FF to a known state.
172 * After that, keep track of it. :-)
173 * --- In order to do that, the DMA routines below should ---
174 * --- only be used while holding the DMA lock ! ---
175 */
176static __inline__ void clear_dma_ff(unsigned int dmanr)
177{
178 if (dmanr<=3)
179 dma_outb(0, DMA1_CLEAR_FF_REG);
180 else
181 dma_outb(0, DMA2_CLEAR_FF_REG);
182}
183
184/* set mode (above) for a specific DMA channel */
185static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
186{
187 if (dmanr<=3)
188 dma_outb(mode | dmanr, DMA1_MODE_REG);
189 else
190 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
191}
192
193/* Set only the page register bits of the transfer address.
194 * This is used for successive transfers when we know the contents of
195 * the lower 16 bits of the DMA current address register, but a 64k boundary
196 * may have been crossed.
197 */
198static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
199{
200 switch(dmanr) {
201 case 0:
202 dma_outb(pagenr, DMA_PAGE_0);
203 break;
204 case 1:
205 dma_outb(pagenr, DMA_PAGE_1);
206 break;
207 case 2:
208 dma_outb(pagenr, DMA_PAGE_2);
209 break;
210 case 3:
211 dma_outb(pagenr, DMA_PAGE_3);
212 break;
213 case 5:
214 dma_outb(pagenr & 0xfe, DMA_PAGE_5);
215 break;
216 case 6:
217 dma_outb(pagenr & 0xfe, DMA_PAGE_6);
218 break;
219 case 7:
220 dma_outb(pagenr & 0xfe, DMA_PAGE_7);
221 break;
222 }
223}
224
225
226/* Set transfer address & page bits for specific DMA channel.
227 * Assumes dma flipflop is clear.
228 */
229static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
230{
231 set_dma_page(dmanr, a>>16);
232 if (dmanr <= 3) {
233 dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
234 dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
235 } else {
236 dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
237 dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
238 }
239}
240
241
242/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
243 * a specific DMA channel.
244 * You must ensure the parameters are valid.
245 * NOTE: from a manual: "the number of transfers is one more
246 * than the initial word count"! This is taken into account.
247 * Assumes dma flip-flop is clear.
248 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
249 */
250static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
251{
252 count--;
253 if (dmanr <= 3) {
254 dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
255 dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
256 } else {
257 dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
258 dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
259 }
260}
261
262
263/* Get DMA residue count. After a DMA transfer, this
264 * should return zero. Reading this while a DMA transfer is
265 * still in progress will return unpredictable results.
266 * If called before the channel has been used, it may return 1.
267 * Otherwise, it returns the number of _bytes_ left to transfer.
268 *
269 * Assumes DMA flip-flop is clear.
270 */
271static __inline__ int get_dma_residue(unsigned int dmanr)
272{
273 unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
274 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
275
276 /* using short to get 16-bit wrap around */
277 unsigned short count;
278
279 count = 1 + dma_inb(io_port);
280 count += dma_inb(io_port) << 8;
281
282 return (dmanr<=3)? count : (count<<1);
283}
284
285
286/* These are in kernel/dma.c: */
287extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
288extern void free_dma(unsigned int dmanr); /* release it again */
289
290/* From PCI */
291
292#ifdef CONFIG_PCI
293extern int isa_dma_bridge_buggy;
294#else
295#define isa_dma_bridge_buggy (0)
296#endif
297
298#endif /* _ASM_DMA_H */
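
Usage sketch (illustrative): the classic ISA DMA programming sequence built from the helpers above. The caller supplies the channel and a physical buffer below MAX_DMA_ADDRESS that does not cross a 64K boundary.

#include <asm/dma.h>

static void example_start_isa_dma(unsigned int chan, unsigned int phys,
				  unsigned int count)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);			/* known flip-flop state */
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(chan, phys);
	set_dma_count(chan, count);
	enable_dma(chan);
	release_dma_lock(flags);
}
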
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
new file mode 100644
index 000000000000..afd4212e860b
--- /dev/null
+++ b/include/asm-x86_64/dwarf2.h
@@ -0,0 +1,42 @@
1#ifndef _DWARF2_H
2#define _DWARF2_H 1
3
4#include <linux/config.h>
5
6#ifndef __ASSEMBLY__
 7#warning "asm/dwarf2.h should only be included in pure assembly files"
8#endif
9
10/*
11 Macros for dwarf2 CFI unwind table entries.
12 See "as.info" for details on these pseudo ops. Unfortunately
13 they are only supported in very new binutils, so define them
 14   away for older versions.
15 */
16
17#ifdef CONFIG_DEBUG_INFO
18
19#define CFI_STARTPROC .cfi_startproc
20#define CFI_ENDPROC .cfi_endproc
21#define CFI_DEF_CFA .cfi_def_cfa
22#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
23#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
24#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
25#define CFI_OFFSET .cfi_offset
26#define CFI_REL_OFFSET .cfi_rel_offset
27
28#else
29
30/* use assembler line comment character # to ignore the arguments. */
31#define CFI_STARTPROC #
32#define CFI_ENDPROC #
33#define CFI_DEF_CFA #
34#define CFI_DEF_CFA_REGISTER #
35#define CFI_DEF_CFA_OFFSET #
36#define CFI_ADJUST_CFA_OFFSET #
37#define CFI_OFFSET #
38#define CFI_REL_OFFSET #
39
40#endif
41
42#endif
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
new file mode 100644
index 000000000000..5b376e42b153
--- /dev/null
+++ b/include/asm-x86_64/e820.h
@@ -0,0 +1,59 @@
1/*
2 * structures and definitions for the int 15, ax=e820 memory map
3 * scheme.
4 *
5 * In a nutshell, setup.S populates a scratch table in the
6 * empty_zero_block that contains a list of usable address/size
 7 * duples. In setup.c, this information is transferred into the e820map,
8 * and in init.c/numa.c, that new information is used to mark pages
9 * reserved or not.
10 */
11#ifndef __E820_HEADER
12#define __E820_HEADER
13
14#include <linux/mmzone.h>
15
16#define E820MAP 0x2d0 /* our map */
17#define E820MAX 32 /* number of entries in E820MAP */
18#define E820NR 0x1e8 /* # entries in E820MAP */
19
20#define E820_RAM 1
21#define E820_RESERVED 2
22#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
23#define E820_NVS 4
24
25#define HIGH_MEMORY (1024*1024)
26
27#define LOWMEMSIZE() (0x9f000)
28
29#ifndef __ASSEMBLY__
30struct e820entry {
31 u64 addr; /* start of memory segment */
32 u64 size; /* size of memory segment */
33 u32 type; /* type of memory segment */
34} __attribute__((packed));
35
36struct e820map {
37 int nr_map;
38 struct e820entry map[E820MAX];
39};
40
41extern unsigned long find_e820_area(unsigned long start, unsigned long end,
42 unsigned size);
43extern void add_memory_region(unsigned long start, unsigned long size,
44 int type);
45extern void setup_memory_region(void);
46extern void contig_e820_setup(void);
47extern unsigned long e820_end_of_ram(void);
48extern void e820_reserve_resources(void);
49extern void e820_print_map(char *who);
50extern int e820_mapped(unsigned long start, unsigned long end, unsigned type);
51
52extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
53
54extern void __init parse_memopt(char *p, char **end);
55
56extern struct e820map e820;
57#endif/*!__ASSEMBLY__*/
58
59#endif/*__E820_HEADER*/
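
Usage sketch (illustrative): walking the firmware-provided map the way the setup code does, here summing usable RAM.

#include <asm/e820.h>

static unsigned long example_count_ram(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820.map[i].type == E820_RAM)
			total += e820.map[i].size;
	return total;
}
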
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
new file mode 100644
index 000000000000..a60a35e79222
--- /dev/null
+++ b/include/asm-x86_64/elf.h
@@ -0,0 +1,160 @@
1#ifndef __ASM_X86_64_ELF_H
2#define __ASM_X86_64_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10#include <asm/processor.h>
11
12/* x86-64 relocation types */
13#define R_X86_64_NONE 0 /* No reloc */
14#define R_X86_64_64 1 /* Direct 64 bit */
15#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
16#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
17#define R_X86_64_PLT32 4 /* 32 bit PLT address */
18#define R_X86_64_COPY 5 /* Copy symbol at runtime */
19#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
20#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
21#define R_X86_64_RELATIVE 8 /* Adjust by program base */
22#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
23 offset to GOT */
24#define R_X86_64_32 10 /* Direct 32 bit zero extended */
25#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
26#define R_X86_64_16 12 /* Direct 16 bit zero extended */
27#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
28#define R_X86_64_8 14 /* Direct 8 bit sign extended */
29#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
30
31#define R_X86_64_NUM 16
32
33typedef unsigned long elf_greg_t;
34
35#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
36typedef elf_greg_t elf_gregset_t[ELF_NGREG];
37
38typedef struct user_i387_struct elf_fpregset_t;
39
40/*
41 * This is used to ensure we don't load something for the wrong architecture.
42 */
43#define elf_check_arch(x) \
44 ((x)->e_machine == EM_X86_64)
45
46/*
47 * These are used to set parameters in the core dumps.
48 */
49#define ELF_CLASS ELFCLASS64
50#define ELF_DATA ELFDATA2LSB
51#define ELF_ARCH EM_X86_64
52
53/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
54 contains a pointer to a function which might be registered using `atexit'.
 55   This provides a means for the dynamic linker to call DT_FINI functions for
56 shared libraries that have been loaded before the code runs.
57
 58   A value of 0 tells us we have no such handler.
59
60 We might as well make sure everything else is cleared too (except for %esp),
61 just to make things more deterministic.
62 */
63#define ELF_PLAT_INIT(_r, load_addr) do { \
64 struct task_struct *cur = current; \
65 (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
66 (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
67 (_r)->rax = 0; \
68 (_r)->r8 = 0; \
69 (_r)->r9 = 0; \
70 (_r)->r10 = 0; \
71 (_r)->r11 = 0; \
72 (_r)->r12 = 0; \
73 (_r)->r13 = 0; \
74 (_r)->r14 = 0; \
75 (_r)->r15 = 0; \
76 cur->thread.fs = 0; cur->thread.gs = 0; \
77 cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
78 cur->thread.ds = 0; cur->thread.es = 0; \
79 clear_thread_flag(TIF_IA32); \
80} while (0)
81
82#define USE_ELF_CORE_DUMP
83#define ELF_EXEC_PAGESIZE 4096
84
85/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
86 use of this is to invoke "./ld.so someprog" to test out a new version of
87 the loader. We need to make sure that it is out of the way of the program
88 that it will "exec", and that there is sufficient room for the brk. */
89
90#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
91
92/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
93 now struct_user_regs, they are different). Assumes current is the process
94 getting dumped. */
95
96#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
97 unsigned v; \
98 (pr_reg)[0] = (regs)->r15; \
99 (pr_reg)[1] = (regs)->r14; \
100 (pr_reg)[2] = (regs)->r13; \
101 (pr_reg)[3] = (regs)->r12; \
102 (pr_reg)[4] = (regs)->rbp; \
103 (pr_reg)[5] = (regs)->rbx; \
104 (pr_reg)[6] = (regs)->r11; \
105 (pr_reg)[7] = (regs)->r10; \
106 (pr_reg)[8] = (regs)->r9; \
107 (pr_reg)[9] = (regs)->r8; \
108 (pr_reg)[10] = (regs)->rax; \
109 (pr_reg)[11] = (regs)->rcx; \
110 (pr_reg)[12] = (regs)->rdx; \
111 (pr_reg)[13] = (regs)->rsi; \
112 (pr_reg)[14] = (regs)->rdi; \
113 (pr_reg)[15] = (regs)->orig_rax; \
114 (pr_reg)[16] = (regs)->rip; \
115 (pr_reg)[17] = (regs)->cs; \
116 (pr_reg)[18] = (regs)->eflags; \
117 (pr_reg)[19] = (regs)->rsp; \
118 (pr_reg)[20] = (regs)->ss; \
119 (pr_reg)[21] = current->thread.fs; \
120 (pr_reg)[22] = current->thread.gs; \
121 asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
122 asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
123 asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
124 asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
125} while (0)
126
127/* This yields a mask that user programs can use to figure out what
128 instruction set this CPU supports. This could be done in user space,
129 but it's not easy, and we've already done it here. */
130
131#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
132
133/* This yields a string that ld.so will use to load implementation
134 specific libraries for optimization. This is more specific in
135 intent than poking at uname or /proc/cpuinfo.
136
137 For the moment, we have only optimizations for the Intel generations,
138 but that could change... */
139
140/* I'm not sure if we can use '-' here */
141#define ELF_PLATFORM ("x86_64")
142
143#ifdef __KERNEL__
144extern void set_personality_64bit(void);
145#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
146/*
147 * An executable for which elf_read_implies_exec() returns TRUE will
148 * have the READ_IMPLIES_EXEC personality flag set automatically.
149 */
150#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
151
152extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
153extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
154
155#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
156#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
157
158#endif
159
160#endif
diff --git a/include/asm-x86_64/errno.h b/include/asm-x86_64/errno.h
new file mode 100644
index 000000000000..311182129e32
--- /dev/null
+++ b/include/asm-x86_64/errno.h
@@ -0,0 +1,6 @@
1#ifndef _X8664_ERRNO_H
2#define _X8664_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif
diff --git a/include/asm-x86_64/fcntl.h b/include/asm-x86_64/fcntl.h
new file mode 100644
index 000000000000..4411f221c037
--- /dev/null
+++ b/include/asm-x86_64/fcntl.h
@@ -0,0 +1,76 @@
1#ifndef _X86_64_FCNTL_H
2#define _X86_64_FCNTL_H
3
 4/* open/fcntl - O_SYNC is only implemented on block devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECT 040000 /* direct disk access hint */
20#define O_LARGEFILE 0100000
21#define O_DIRECTORY 0200000 /* must be a directory */
22#define O_NOFOLLOW 0400000 /* don't follow links */
23#define O_NOATIME 01000000
24
25#define F_DUPFD 0 /* dup */
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39/* for F_[GET|SET]FL */
40#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
41
42/* for posix fcntl() and lockf() */
43#define F_RDLCK 0
44#define F_WRLCK 1
45#define F_UNLCK 2
46
47/* for old implementation of bsd flock () */
48#define F_EXLCK 4 /* or 3 */
49#define F_SHLCK 8 /* or 4 */
50
51/* for leases */
52#define F_INPROGRESS 16
53
54/* operations for bsd flock(), also used by the kernel implementation */
55#define LOCK_SH 1 /* shared lock */
56#define LOCK_EX 2 /* exclusive lock */
57#define LOCK_NB 4 /* or'd with one of the above to prevent
58 blocking */
59#define LOCK_UN 8 /* remove lock */
60
61#define LOCK_MAND 32 /* This is a mandatory flock */
62#define LOCK_READ 64 /* ... Which allows concurrent read operations */
63#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
64#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
65
66struct flock {
67 short l_type;
68 short l_whence;
69 off_t l_start;
70 off_t l_len;
71 pid_t l_pid;
72};
73
74#define F_LINUX_SPECIFIC_BASE 1024
75
76#endif /* !_X86_64_FCNTL_H */
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
new file mode 100644
index 000000000000..cf8b16cbe8db
--- /dev/null
+++ b/include/asm-x86_64/fixmap.h
@@ -0,0 +1,96 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 */
10
11#ifndef _ASM_FIXMAP_H
12#define _ASM_FIXMAP_H
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <asm/apicdef.h>
17#include <asm/page.h>
18#include <asm/vsyscall.h>
19#include <asm/vsyscall32.h>
20
21/*
22 * Here we define all the compile-time 'special' virtual
23 * addresses. The point is to have a constant address at
24 * compile time, but to set the physical address only
25 * in the boot process.
26 *
27 * These 'compile-time allocated' memory buffers are
28 * fixed-size 4k pages (or larger if used with an increment
29 * higher than 1). Use fixmap_set(idx, phys) to associate
30 * physical memory with fixmap indices.
31 *
32 * TLB entries of such buffers will not be flushed across
33 * task switches.
34 */
35
36enum fixed_addresses {
37 VSYSCALL_LAST_PAGE,
38 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
39 VSYSCALL_HPET,
40 FIX_HPET_BASE,
41#ifdef CONFIG_X86_LOCAL_APIC
42 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
43#endif
44#ifdef CONFIG_X86_IO_APIC
45 FIX_IO_APIC_BASE_0,
46 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
47#endif
48 __end_of_fixed_addresses
49};
50
51extern void __set_fixmap (enum fixed_addresses idx,
52 unsigned long phys, pgprot_t flags);
53
54#define set_fixmap(idx, phys) \
55 __set_fixmap(idx, phys, PAGE_KERNEL)
56/*
57 * Some hardware wants to get fixmapped without caching.
58 */
59#define set_fixmap_nocache(idx, phys) \
60 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
61
62#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
63#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
64#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
65
66/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
67#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
68#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
69
70#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
71
72extern void __this_fixmap_does_not_exist(void);
73
74/*
75 * 'index to address' translation. If anyone tries to use the idx
76 * directly without translation, we catch the bug with a NULL-dereference
77 * kernel oops. Illegal ranges of incoming indices are caught too.
78 */
79extern inline unsigned long fix_to_virt(const unsigned int idx)
80{
81 /*
82 * this branch gets completely eliminated after inlining,
83 * except when someone tries to use fixaddr indices in an
84 * illegal way. (such as mixing up address types or using
85 * out-of-range indices).
86 *
87 * If it doesn't get removed, the linker will complain
88 * loudly with a reasonably clear error message..
89 */
90 if (idx >= __end_of_fixed_addresses)
91 __this_fixmap_does_not_exist();
92
93 return __fix_to_virt(idx);
94}
95
96#endif
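The translation above hands out pages downward from FIXADDR_TOP, one page
per index step. A sketch of how a caller resolves a fixmap slot to its
virtual address (FIX_APIC_BASE assumes CONFIG_X86_LOCAL_APIC):

	/* Collapses to a compile-time constant after inlining; the
	 * out-of-range branch in fix_to_virt() is optimized away for
	 * any valid enum index. */
	static unsigned long apic_mmio_vaddr(void)
	{
		/* == FIXADDR_TOP - (FIX_APIC_BASE << PAGE_SHIFT) */
		return fix_to_virt(FIX_APIC_BASE);
	}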
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
new file mode 100644
index 000000000000..bca9b28a1a0a
--- /dev/null
+++ b/include/asm-x86_64/floppy.h
@@ -0,0 +1,285 @@
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef __ASM_X86_64_FLOPPY_H
11#define __ASM_X86_64_FLOPPY_H
12
13#include <linux/vmalloc.h>
14
15
16/*
17 * The DMA channel used by the floppy controller cannot access data at
18 * addresses >= 16MB
19 *
20 * Went back to the 1MB limit, as some people had problems with the floppy
21 * driver otherwise. It doesn't matter much for performance anyway, as most
22 * floppy accesses go through the track buffer.
23 */
24#define _CROSS_64KB(a,s,vdma) \
25(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
26
27#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
28
29
30#define SW fd_routine[use_virtual_dma&1]
31#define CSW fd_routine[can_use_virtual_dma & 1]
32
33
34#define fd_inb(port) inb_p(port)
35#define fd_outb(value,port) outb_p(value,port)
36
37#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
38#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
39#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
40#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
41#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
42#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
43#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
44#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
45
46#define FLOPPY_CAN_FALLBACK_ON_NODMA
47
48static int virtual_dma_count;
49static int virtual_dma_residue;
50static char *virtual_dma_addr;
51static int virtual_dma_mode;
52static int doing_pdma;
53
54static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
55{
56 register unsigned char st;
57
58#undef TRACE_FLPY_INT
59
60#ifdef TRACE_FLPY_INT
61 static int calls=0;
62 static int bytes=0;
63 static int dma_wait=0;
64#endif
65 if (!doing_pdma)
66 return floppy_interrupt(irq, dev_id, regs);
67
68#ifdef TRACE_FLPY_INT
69 if(!calls)
70 bytes = virtual_dma_count;
71#endif
72
73 {
74 register int lcount;
75 register char *lptr;
76
77 st = 1;
78 for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
79 lcount; lcount--, lptr++) {
80 st=inb(virtual_dma_port+4) & 0xa0 ;
81 if(st != 0xa0)
82 break;
83 if(virtual_dma_mode)
84 outb_p(*lptr, virtual_dma_port+5);
85 else
86 *lptr = inb_p(virtual_dma_port+5);
87 }
88 virtual_dma_count = lcount;
89 virtual_dma_addr = lptr;
90 st = inb(virtual_dma_port+4);
91 }
92
93#ifdef TRACE_FLPY_INT
94 calls++;
95#endif
96 if(st == 0x20)
97 return IRQ_HANDLED;
98 if(!(st & 0x20)) {
99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count=0;
101#ifdef TRACE_FLPY_INT
102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait);
105 calls = 0;
106 dma_wait=0;
107#endif
108 doing_pdma = 0;
109 floppy_interrupt(irq, dev_id, regs);
110 return IRQ_HANDLED;
111 }
112#ifdef TRACE_FLPY_INT
113 if(!virtual_dma_count)
114 dma_wait++;
115#endif
116 return IRQ_HANDLED;
117}
118
119static void fd_disable_dma(void)
120{
121 if(! (can_use_virtual_dma & 1))
122 disable_dma(FLOPPY_DMA);
123 doing_pdma = 0;
124 virtual_dma_residue += virtual_dma_count;
125 virtual_dma_count=0;
126}
127
128static int vdma_request_dma(unsigned int dmanr, const char * device_id)
129{
130 return 0;
131}
132
133static void vdma_nop(unsigned int dummy)
134{
135}
136
137
138static int vdma_get_dma_residue(unsigned int dummy)
139{
140 return virtual_dma_count + virtual_dma_residue;
141}
142
143
144static int fd_request_irq(void)
145{
146 if(can_use_virtual_dma)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
148 "floppy", NULL);
149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt,
151 SA_INTERRUPT|SA_SAMPLE_RANDOM,
152 "floppy", NULL);
153
154}
155
156static unsigned long dma_mem_alloc(unsigned long size)
157{
158 return __get_dma_pages(GFP_KERNEL,get_order(size));
159}
160
161
162static unsigned long vdma_mem_alloc(unsigned long size)
163{
164 return (unsigned long) vmalloc(size);
165
166}
167
168#define nodma_mem_alloc(size) vdma_mem_alloc(size)
169
170static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
171{
172 if((unsigned long) addr >= (unsigned long) high_memory)
173 vfree((void *)addr);
174 else
175 free_pages(addr, get_order(size));
176}
177
178#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
179
180static void _fd_chose_dma_mode(char *addr, unsigned long size)
181{
182 if(can_use_virtual_dma == 2) {
183 if((unsigned long) addr >= (unsigned long) high_memory ||
184 isa_virt_to_bus(addr) >= 0x1000000 ||
185 _CROSS_64KB(addr, size, 0))
186 use_virtual_dma = 1;
187 else
188 use_virtual_dma = 0;
189 } else {
190 use_virtual_dma = can_use_virtual_dma & 1;
191 }
192}
193
194#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
195
196
197static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
198{
199 doing_pdma = 1;
200 virtual_dma_port = io;
201 virtual_dma_mode = (mode == DMA_MODE_WRITE);
202 virtual_dma_addr = addr;
203 virtual_dma_count = size;
204 virtual_dma_residue = 0;
205 return 0;
206}
207
208static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
209{
210#ifdef FLOPPY_SANITY_CHECK
211 if (CROSS_64KB(addr, size)) {
212 printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
213 return -1;
214 }
215#endif
216 /* actual, physical DMA */
217 doing_pdma = 0;
218 clear_dma_ff(FLOPPY_DMA);
219 set_dma_mode(FLOPPY_DMA,mode);
220 set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
221 set_dma_count(FLOPPY_DMA,size);
222 enable_dma(FLOPPY_DMA);
223 return 0;
224}
225
226struct fd_routine_l {
227 int (*_request_dma)(unsigned int dmanr, const char * device_id);
228 void (*_free_dma)(unsigned int dmanr);
229 int (*_get_dma_residue)(unsigned int dummy);
230 unsigned long (*_dma_mem_alloc) (unsigned long size);
231 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
232} fd_routine[] = {
233 {
234 request_dma,
235 free_dma,
236 get_dma_residue,
237 dma_mem_alloc,
238 hard_dma_setup
239 },
240 {
241 vdma_request_dma,
242 vdma_nop,
243 vdma_get_dma_residue,
244 vdma_mem_alloc,
245 vdma_dma_setup
246 }
247};
248
249
250static int FDC1 = 0x3f0;
251static int FDC2 = -1;
252
253/*
254 * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
255 * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
256 * coincides with another rtc CMOS user. Paul G.
257 */
258#define FLOPPY0_TYPE ({ \
259 unsigned long flags; \
260 unsigned char val; \
261 spin_lock_irqsave(&rtc_lock, flags); \
262 val = (CMOS_READ(0x10) >> 4) & 15; \
263 spin_unlock_irqrestore(&rtc_lock, flags); \
264 val; \
265})
266
267#define FLOPPY1_TYPE ({ \
268 unsigned long flags; \
269 unsigned char val; \
270 spin_lock_irqsave(&rtc_lock, flags); \
271 val = CMOS_READ(0x10) & 15; \
272 spin_unlock_irqrestore(&rtc_lock, flags); \
273 val; \
274})
275
276#define N_FDC 2
277#define N_DRIVE 8
278
279#define FLOPPY_MOTOR_MASK 0xf0
280
281#define AUTO_DMA
282
283#define EXTRA_FLOPPY_PARAMS
284
285#endif /* __ASM_X86_64_FLOPPY_H */
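The _CROSS_64KB() test above exists because ISA DMA transfers must not
straddle a 64KB physical boundary; it compares the 64KB "page" of the first
and last byte. An equivalent open-coded sketch (K_64 == 0x10000):

	/* A 2-byte buffer at 0xffff crosses: 0xffff/0x10000 == 0 but
	 * 0x10000/0x10000 == 1, so hard_dma_setup() rejects it when
	 * FLOPPY_SANITY_CHECK is defined. */
	static int crosses_64kb(unsigned long a, unsigned long s)
	{
		return (a / 0x10000) != ((a + s - 1) / 0x10000);
	}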
diff --git a/include/asm-x86_64/fpu32.h b/include/asm-x86_64/fpu32.h
new file mode 100644
index 000000000000..4153db5c0c31
--- /dev/null
+++ b/include/asm-x86_64/fpu32.h
@@ -0,0 +1,10 @@
1#ifndef _FPU32_H
2#define _FPU32_H 1
3
4struct _fpstate_ia32;
5
6int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave);
7int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf,
8 struct pt_regs *regs, int fsave);
9
10#endif
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
new file mode 100644
index 000000000000..50b38e7c58e4
--- /dev/null
+++ b/include/asm-x86_64/genapic.h
@@ -0,0 +1,35 @@
1#ifndef _ASM_GENAPIC_H
2#define _ASM_GENAPIC_H 1
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC sub-arch data struct.
9 *
10 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
11 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
12 * James Cleverdon.
13 */
14
15struct genapic {
16 char *name;
17 u32 int_delivery_mode;
18 u32 int_dest_mode;
19 u32 int_delivery_dest; /* for quick IPIs */
20 int (*apic_id_registered)(void);
21 cpumask_t (*target_cpus)(void);
22 void (*init_apic_ldr)(void);
23 /* ipi */
24 void (*send_IPI_mask)(cpumask_t mask, int vector);
25 void (*send_IPI_allbutself)(int vector);
26 void (*send_IPI_all)(int vector);
27 /* */
28 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
29 unsigned int (*phys_pkg_id)(int index_msb);
30};
31
32
33extern struct genapic *genapic;
34
35#endif
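Each sub-arch fills in one struct genapic and points the global 'genapic'
at it. A hypothetical instance under that assumption -- the handlers below
are placeholders, not real kernel symbols:

	static int example_apic_id_registered(void) { return 1; }
	static cpumask_t example_target_cpus(void) { return cpu_online_map; }
	static void example_send_IPI_all(int vector) { /* broadcast IPI here */ }

	static struct genapic apic_example = {
		.name			= "example",
		.int_delivery_mode	= 0,	/* say, fixed delivery */
		.apic_id_registered	= example_apic_id_registered,
		.target_cpus		= example_target_cpus,
		.send_IPI_all		= example_send_IPI_all,
		/* ...remaining members filled in the same way... */
	};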
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
new file mode 100644
index 000000000000..27c381fa1c9d
--- /dev/null
+++ b/include/asm-x86_64/hardirq.h
@@ -0,0 +1,37 @@
1#ifndef __ASM_HARDIRQ_H
2#define __ASM_HARDIRQ_H
3
4#include <linux/config.h>
5#include <linux/threads.h>
6#include <linux/irq.h>
7#include <asm/pda.h>
8#include <asm/apic.h>
9
10#define __ARCH_IRQ_STAT 1
11
12/* Generate an lvalue for a pda member. Should fix softirq.c instead to use
13 special access macros. This would generate better code. */
14#define __IRQ_STAT(cpu,member) (read_pda(me)->member)
15
16#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
17
18/*
19 * 'What should we do if we get a hw irq event on an illegal vector?'
20 * Each architecture has to answer this itself.
21 */
22static inline void ack_bad_irq(unsigned int irq)
23{
24 printk("unexpected IRQ trap at vector %02x\n", irq);
25#ifdef CONFIG_X86_LOCAL_APIC
26 /*
27 * Currently unexpected vectors happen only on SMP and APIC.
28 * We _must_ ack these because every local APIC has only N
29 * irq slots per priority level, and a 'hanging, unacked' IRQ
30 * holds up an irq slot - in excessive cases (when multiple
31 * unexpected vectors occur) that might lock up the APIC
32 * completely.
33 */
34 ack_APIC_irq();
35#endif
36}
37#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-x86_64/hdreg.h b/include/asm-x86_64/hdreg.h
new file mode 100644
index 000000000000..5989bbc97cbf
--- /dev/null
+++ b/include/asm-x86_64/hdreg.h
@@ -0,0 +1 @@
#warning this file is obsolete, please do not use it
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
new file mode 100644
index 000000000000..a3877f570998
--- /dev/null
+++ b/include/asm-x86_64/hpet.h
@@ -0,0 +1,60 @@
1#ifndef _ASM_X8664_HPET_H
2#define _ASM_X8664_HPET_H 1
3
4/*
5 * Documentation on HPET can be found at:
6 * http://www.intel.com/ial/home/sp/pcmmspec.htm
7 * ftp://download.intel.com/ial/home/sp/mmts098.pdf
8 */
9
10#define HPET_MMAP_SIZE 1024
11
12#define HPET_ID 0x000
13#define HPET_PERIOD 0x004
14#define HPET_CFG 0x010
15#define HPET_STATUS 0x020
16#define HPET_COUNTER 0x0f0
17#define HPET_T0_CFG 0x100
18#define HPET_T0_CMP 0x108
19#define HPET_T0_ROUTE 0x110
20#define HPET_T1_CFG 0x120
21#define HPET_T1_CMP 0x128
22#define HPET_T1_ROUTE 0x130
23#define HPET_T2_CFG 0x140
24#define HPET_T2_CMP 0x148
25#define HPET_T2_ROUTE 0x150
26
27#define HPET_ID_VENDOR 0xffff0000
28#define HPET_ID_LEGSUP 0x00008000
29#define HPET_ID_NUMBER 0x00001f00
30#define HPET_ID_REV 0x000000ff
31#define HPET_ID_NUMBER_SHIFT 8
32
33#define HPET_ID_VENDOR_SHIFT 16
34#define HPET_ID_VENDOR_8086 0x8086
35
36#define HPET_CFG_ENABLE 0x001
37#define HPET_CFG_LEGACY 0x002
38#define HPET_LEGACY_8254 2
39#define HPET_LEGACY_RTC 8
40
41#define HPET_TN_ENABLE 0x004
42#define HPET_TN_PERIODIC 0x008
43#define HPET_TN_PERIODIC_CAP 0x010
44#define HPET_TN_SETVAL 0x040
45#define HPET_TN_32BIT 0x100
46
47extern int is_hpet_enabled(void);
48extern int hpet_rtc_timer_init(void);
49extern int oem_force_hpet_timer(void);
50
51#ifdef CONFIG_HPET_EMULATE_RTC
52extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
53extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
54extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec);
55extern int hpet_set_periodic_freq(unsigned long freq);
56extern int hpet_rtc_dropped_irq(void);
57extern int hpet_rtc_timer_init(void);
58#endif /* CONFIG_HPET_EMULATE_RTC */
59
60#endif
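HPET_ID packs several capability fields; the *_SHIFT constants pair with
the masks above to extract them. A sketch, assuming 'id' came from a 32-bit
MMIO read of the HPET_ID register:

	static unsigned int hpet_vendor(unsigned int id)
	{
		return (id & HPET_ID_VENDOR) >> HPET_ID_VENDOR_SHIFT; /* e.g. 0x8086 */
	}

	static unsigned int hpet_timers(unsigned int id)
	{
		/* the field holds the index of the last timer, i.e. count - 1 */
		return ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	}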
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
new file mode 100644
index 000000000000..2b5cb2865d21
--- /dev/null
+++ b/include/asm-x86_64/hw_irq.h
@@ -0,0 +1,144 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * linux/include/asm/hw_irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * moved some of the old arch/i386/kernel/irq.h to here. VY
10 *
11 * IRQ/IPI changes taken from work by Thomas Radke
12 * <tomsoft@informatik.tu-chemnitz.de>
13 *
14 * hacked by Andi Kleen for x86-64.
15 *
16 * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $
17 */
18
19#ifndef __ASSEMBLY__
20#include <linux/config.h>
21#include <asm/atomic.h>
22#include <asm/irq.h>
23#include <linux/profile.h>
24#include <linux/smp.h>
25
26struct hw_interrupt_type;
27#endif
28
29/*
30 * IDT vectors usable for external interrupt sources start
31 * at 0x20:
32 */
33#define FIRST_EXTERNAL_VECTOR 0x20
34
35#define IA32_SYSCALL_VECTOR 0x80
36
37
38/*
39 * Vectors 0x20-0x2f are used for ISA interrupts.
40 */
41
42/*
43 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
44 *
45 * Some of the following vectors are 'rare'; they are merged
46 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
47 * TLB, reschedule and local APIC vectors are performance-critical.
48 *
49 * Vectors 0xf0-0xf9 are free (reserved for future Linux use).
50 */
51#define SPURIOUS_APIC_VECTOR 0xff
52#define ERROR_APIC_VECTOR 0xfe
53#define INVALIDATE_TLB_VECTOR 0xfd
54#define RESCHEDULE_VECTOR 0xfc
55#define TASK_MIGRATION_VECTOR 0xfb
56#define CALL_FUNCTION_VECTOR 0xfa
57#define KDB_VECTOR 0xf9
58
59#define THERMAL_APIC_VECTOR 0xf0
60
61
62/*
63 * Local APIC timer IRQ vector is on a different priority level,
64 * to work around the 'lost local interrupt if more than 2 IRQ
65 * sources per level' errata.
66 */
67#define LOCAL_TIMER_VECTOR 0xef
68
69/*
70 * First APIC vector available to drivers: (vectors 0x30-0xee)
71 * we start at 0x31 to spread out vectors evenly between priority
72 * levels. (0x80 is the syscall vector)
73 */
74#define FIRST_DEVICE_VECTOR 0x31
75#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
76
77
78#ifndef __ASSEMBLY__
79extern u8 irq_vector[NR_IRQ_VECTORS];
80#define IO_APIC_VECTOR(irq) (irq_vector[irq])
81#define AUTO_ASSIGN -1
82
83/*
84 * Various low-level irq details needed by irq.c, process.c,
85 * time.c, io_apic.c and smp.c
86 *
87 * Interrupt entry/exit code at both C and assembly level
88 */
89
90extern void disable_8259A_irq(unsigned int irq);
91extern void enable_8259A_irq(unsigned int irq);
92extern int i8259A_irq_pending(unsigned int irq);
93extern void make_8259A_irq(unsigned int irq);
94extern void init_8259A(int aeoi);
95extern void FASTCALL(send_IPI_self(int vector));
96extern void init_VISWS_APIC_irqs(void);
97extern void setup_IO_APIC(void);
98extern void disable_IO_APIC(void);
99extern void print_IO_APIC(void);
100extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
101extern void send_IPI(int dest, int vector);
102extern void setup_ioapic_dest(void);
103
104extern unsigned long io_apic_irqs;
105
106extern atomic_t irq_err_count;
107extern atomic_t irq_mis_count;
108
109#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
110
111#define __STR(x) #x
112#define STR(x) __STR(x)
113
114#include <asm/ptrace.h>
115
116#define IRQ_NAME2(nr) nr##_interrupt(void)
117#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
118
119/*
120 * SMP has a few special interrupts for IPI messages
121 */
122
123#define BUILD_IRQ(nr) \
124asmlinkage void IRQ_NAME(nr); \
125__asm__( \
126"\n.p2align\n" \
127"IRQ" #nr "_interrupt:\n\t" \
128 "push $" #nr "-256 ; " \
129 "jmp common_interrupt");
130
131#if defined(CONFIG_X86_IO_APIC)
132static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
133 if (IO_APIC_IRQ(i))
134 send_IPI_self(IO_APIC_VECTOR(i));
135}
136#else
137static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
138#endif
139
140#define platform_legacy_irq(irq) ((irq) < 16)
141
142#endif
143
144#endif /* _ASM_HW_IRQ_H */
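BUILD_IRQ(nr) emits one tiny entry stub per vector. Expanded by hand for
nr == 13 it is equivalent to the following; the vector is pushed biased by
-256 so the saved value is negative and cannot be confused with a syscall
number:

	asmlinkage void IRQ13_interrupt(void);
	__asm__(
	"\n.p2align\n"
	"IRQ13_interrupt:\n\t"
		"push $13-256 ; "
		"jmp common_interrupt");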
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
new file mode 100644
index 000000000000..aa39cfd0e001
--- /dev/null
+++ b/include/asm-x86_64/i387.h
@@ -0,0 +1,150 @@
1/*
2 * include/asm-x86_64/i387.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 *
6 * Pentium III FXSR, SSE support
7 * General FPU state handling cleanups
8 * Gareth Hughes <gareth@valinux.com>, May 2000
9 * x86-64 work by Andi Kleen 2002
10 */
11
12#ifndef __ASM_X86_64_I387_H
13#define __ASM_X86_64_I387_H
14
15#include <linux/sched.h>
16#include <asm/processor.h>
17#include <asm/sigcontext.h>
18#include <asm/user.h>
19#include <asm/thread_info.h>
20#include <asm/uaccess.h>
21
22extern void fpu_init(void);
23extern unsigned int mxcsr_feature_mask;
24extern void mxcsr_feature_mask_init(void);
25extern void init_fpu(struct task_struct *child);
26extern int save_i387(struct _fpstate __user *buf);
27
28/*
29 * FPU lazy state save handling...
30 */
31
32#define unlazy_fpu(tsk) do { \
33 if ((tsk)->thread_info->status & TS_USEDFPU) \
34 save_init_fpu(tsk); \
35} while (0)
36
37/* Ignore delayed exceptions from user space */
38static inline void tolerant_fwait(void)
39{
40 asm volatile("1: fwait\n"
41 "2:\n"
42 " .section __ex_table,\"a\"\n"
43 " .align 8\n"
44 " .quad 1b,2b\n"
45 " .previous\n");
46}
47
48#define clear_fpu(tsk) do { \
49 if ((tsk)->thread_info->status & TS_USEDFPU) { \
50 tolerant_fwait(); \
51 (tsk)->thread_info->status &= ~TS_USEDFPU; \
52 stts(); \
53 } \
54} while (0)
55
56/*
57 * ptrace request handlers...
58 */
59extern int get_fpregs(struct user_i387_struct __user *buf,
60 struct task_struct *tsk);
61extern int set_fpregs(struct task_struct *tsk,
62 struct user_i387_struct __user *buf);
63
64/*
65 * i387 state interaction
66 */
67#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
68#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
69#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
70#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
71#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
72#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
73#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
74
75static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
76{
77 int err;
78 asm volatile("1: rex64 ; fxrstor (%[fx])\n\t"
79 "2:\n"
80 ".section .fixup,\"ax\"\n"
81 "3: movl $-1,%[err]\n"
82 " jmp 2b\n"
83 ".previous\n"
84 ".section __ex_table,\"a\"\n"
85 " .align 8\n"
86 " .quad 1b,3b\n"
87 ".previous"
88 : [err] "=r" (err)
89 : [fx] "r" (fx), "0" (0));
90 if (unlikely(err))
91 init_fpu(current);
92 return err;
93}
94
95static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
96{
97 int err;
98 asm volatile("1: rex64 ; fxsave (%[fx])\n\t"
99 "2:\n"
100 ".section .fixup,\"ax\"\n"
101 "3: movl $-1,%[err]\n"
102 " jmp 2b\n"
103 ".previous\n"
104 ".section __ex_table,\"a\"\n"
105 " .align 8\n"
106 " .quad 1b,3b\n"
107 ".previous"
108 : [err] "=r" (err)
109 : [fx] "r" (fx), "0" (0));
110 if (unlikely(err))
111 __clear_user(fx, sizeof(struct i387_fxsave_struct));
112 return err;
113}
114
115static inline void kernel_fpu_begin(void)
116{
117 struct thread_info *me = current_thread_info();
118 preempt_disable();
119 if (me->status & TS_USEDFPU) {
120 asm volatile("rex64 ; fxsave %0 ; fnclex"
121 : "=m" (me->task->thread.i387.fxsave));
122 me->status &= ~TS_USEDFPU;
123 return;
124 }
125 clts();
126}
127
128static inline void kernel_fpu_end(void)
129{
130 stts();
131 preempt_enable();
132}
133
134static inline void save_init_fpu( struct task_struct *tsk )
135{
136 asm volatile( "rex64 ; fxsave %0 ; fnclex"
137 : "=m" (tsk->thread.i387.fxsave));
138 tsk->thread_info->status &= ~TS_USEDFPU;
139 stts();
140}
141
142/*
143 * This restores directly out of user space. Exceptions are handled.
144 */
145static inline int restore_i387(struct _fpstate __user *buf)
146{
147 return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
148}
149
150#endif /* __ASM_X86_64_I387_H */
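Kernel code that touches FP/SSE state must bracket it with the
kernel_fpu_begin()/kernel_fpu_end() pair above, so the user's context is
saved first and TS is raised again afterwards. A usage sketch
(do_sse_work() is a placeholder):

	static void do_sse_work(void);

	static void fpu_section(void)
	{
		kernel_fpu_begin();	/* disables preemption, saves user FPU state */
		do_sse_work();		/* FP/SSE instructions are safe in here */
		kernel_fpu_end();	/* sets TS again, re-enables preemption */
	}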
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
new file mode 100644
index 000000000000..c0a7717923ed
--- /dev/null
+++ b/include/asm-x86_64/ia32.h
@@ -0,0 +1,172 @@
1#ifndef _ASM_X86_64_IA32_H
2#define _ASM_X86_64_IA32_H
3
4#include <linux/config.h>
5
6#ifdef CONFIG_IA32_EMULATION
7
8#include <linux/compat.h>
9
10/*
11 * 32 bit structures for IA32 support.
12 */
13
14#include <asm/sigcontext32.h>
15
16/* signal.h */
17struct sigaction32 {
18 unsigned int sa_handler; /* Really a pointer, but need to deal
19 with 32 bits */
20 unsigned int sa_flags;
21 unsigned int sa_restorer; /* Another 32 bit pointer */
22 compat_sigset_t sa_mask; /* A 32 bit mask */
23};
24
25struct old_sigaction32 {
26 unsigned int sa_handler; /* Really a pointer, but need to deal
27 with 32 bits */
28 compat_old_sigset_t sa_mask; /* A 32 bit mask */
29 unsigned int sa_flags;
30 unsigned int sa_restorer; /* Another 32 bit pointer */
31};
32
33typedef struct sigaltstack_ia32 {
34 unsigned int ss_sp;
35 int ss_flags;
36 unsigned int ss_size;
37} stack_ia32_t;
38
39struct ucontext_ia32 {
40 unsigned int uc_flags;
41 unsigned int uc_link;
42 stack_ia32_t uc_stack;
43 struct sigcontext_ia32 uc_mcontext;
44 compat_sigset_t uc_sigmask; /* mask last for extensibility */
45};
46
47/* This matches struct stat64 in glibc2.2, hence the absolutely
48 * insane amounts of padding around dev_t's.
49 */
50struct stat64 {
51 unsigned long long st_dev;
52 unsigned char __pad0[4];
53
54#define STAT64_HAS_BROKEN_ST_INO 1
55 unsigned int __st_ino;
56
57 unsigned int st_mode;
58 unsigned int st_nlink;
59
60 unsigned int st_uid;
61 unsigned int st_gid;
62
63 unsigned long long st_rdev;
64 unsigned char __pad3[4];
65
66 long long st_size;
67 unsigned int st_blksize;
68
69 long long st_blocks; /* Number of 512-byte blocks allocated. */
70
71 unsigned st_atime;
72 unsigned st_atime_nsec;
73 unsigned st_mtime;
74 unsigned st_mtime_nsec;
75 unsigned st_ctime;
76 unsigned st_ctime_nsec;
77
78 unsigned long long st_ino;
79} __attribute__((packed));
80
81typedef struct compat_siginfo{
82 int si_signo;
83 int si_errno;
84 int si_code;
85
86 union {
87 int _pad[((128/sizeof(int)) - 3)];
88
89 /* kill() */
90 struct {
91 unsigned int _pid; /* sender's pid */
92 unsigned int _uid; /* sender's uid */
93 } _kill;
94
95 /* POSIX.1b timers */
96 struct {
97 int _tid; /* timer id */
98 int _overrun; /* overrun count */
99 compat_sigval_t _sigval; /* same as below */
100 int _sys_private; /* not to be passed to user */
101 int _overrun_incr; /* amount to add to overrun */
102 } _timer;
103
104 /* POSIX.1b signals */
105 struct {
106 unsigned int _pid; /* sender's pid */
107 unsigned int _uid; /* sender's uid */
108 compat_sigval_t _sigval;
109 } _rt;
110
111 /* SIGCHLD */
112 struct {
113 unsigned int _pid; /* which child */
114 unsigned int _uid; /* sender's uid */
115 int _status; /* exit code */
116 compat_clock_t _utime;
117 compat_clock_t _stime;
118 } _sigchld;
119
120 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
121 struct {
122 unsigned int _addr; /* faulting insn/memory ref. */
123 } _sigfault;
124
125 /* SIGPOLL */
126 struct {
127 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
128 int _fd;
129 } _sigpoll;
130 } _sifields;
131} compat_siginfo_t;
132
133struct sigframe32
134{
135 u32 pretcode;
136 int sig;
137 struct sigcontext_ia32 sc;
138 struct _fpstate_ia32 fpstate;
139 unsigned int extramask[_COMPAT_NSIG_WORDS-1];
140};
141
142struct rt_sigframe32
143{
144 u32 pretcode;
145 int sig;
146 u32 pinfo;
147 u32 puc;
148 compat_siginfo_t info;
149 struct ucontext_ia32 uc;
150 struct _fpstate_ia32 fpstate;
151};
152
153struct ustat32 {
154 __u32 f_tfree;
155 compat_ino_t f_tinode;
156 char f_fname[6];
157 char f_fpack[6];
158};
159
160#define IA32_STACK_TOP IA32_PAGE_OFFSET
161
162#ifdef __KERNEL__
163struct user_desc;
164struct siginfo_t;
165int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
166int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
167int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
168#endif
169
170#endif /* CONFIG_IA32_EMULATION */
171
172#endif
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
new file mode 100644
index 000000000000..f3b7111cf33d
--- /dev/null
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -0,0 +1,300 @@
1#ifndef _ASM_X86_64_IA32_UNISTD_H_
2#define _ASM_X86_64_IA32_UNISTD_H_
3
4/*
5 * This file contains the system call numbers of the ia32 port;
6 * it is for kernel use only.
7 */
8
9#define __NR_ia32_restart_syscall 0
10#define __NR_ia32_exit 1
11#define __NR_ia32_fork 2
12#define __NR_ia32_read 3
13#define __NR_ia32_write 4
14#define __NR_ia32_open 5
15#define __NR_ia32_close 6
16#define __NR_ia32_waitpid 7
17#define __NR_ia32_creat 8
18#define __NR_ia32_link 9
19#define __NR_ia32_unlink 10
20#define __NR_ia32_execve 11
21#define __NR_ia32_chdir 12
22#define __NR_ia32_time 13
23#define __NR_ia32_mknod 14
24#define __NR_ia32_chmod 15
25#define __NR_ia32_lchown 16
26#define __NR_ia32_break 17
27#define __NR_ia32_oldstat 18
28#define __NR_ia32_lseek 19
29#define __NR_ia32_getpid 20
30#define __NR_ia32_mount 21
31#define __NR_ia32_umount 22
32#define __NR_ia32_setuid 23
33#define __NR_ia32_getuid 24
34#define __NR_ia32_stime 25
35#define __NR_ia32_ptrace 26
36#define __NR_ia32_alarm 27
37#define __NR_ia32_oldfstat 28
38#define __NR_ia32_pause 29
39#define __NR_ia32_utime 30
40#define __NR_ia32_stty 31
41#define __NR_ia32_gtty 32
42#define __NR_ia32_access 33
43#define __NR_ia32_nice 34
44#define __NR_ia32_ftime 35
45#define __NR_ia32_sync 36
46#define __NR_ia32_kill 37
47#define __NR_ia32_rename 38
48#define __NR_ia32_mkdir 39
49#define __NR_ia32_rmdir 40
50#define __NR_ia32_dup 41
51#define __NR_ia32_pipe 42
52#define __NR_ia32_times 43
53#define __NR_ia32_prof 44
54#define __NR_ia32_brk 45
55#define __NR_ia32_setgid 46
56#define __NR_ia32_getgid 47
57#define __NR_ia32_signal 48
58#define __NR_ia32_geteuid 49
59#define __NR_ia32_getegid 50
60#define __NR_ia32_acct 51
61#define __NR_ia32_umount2 52
62#define __NR_ia32_lock 53
63#define __NR_ia32_ioctl 54
64#define __NR_ia32_fcntl 55
65#define __NR_ia32_mpx 56
66#define __NR_ia32_setpgid 57
67#define __NR_ia32_ulimit 58
68#define __NR_ia32_oldolduname 59
69#define __NR_ia32_umask 60
70#define __NR_ia32_chroot 61
71#define __NR_ia32_ustat 62
72#define __NR_ia32_dup2 63
73#define __NR_ia32_getppid 64
74#define __NR_ia32_getpgrp 65
75#define __NR_ia32_setsid 66
76#define __NR_ia32_sigaction 67
77#define __NR_ia32_sgetmask 68
78#define __NR_ia32_ssetmask 69
79#define __NR_ia32_setreuid 70
80#define __NR_ia32_setregid 71
81#define __NR_ia32_sigsuspend 72
82#define __NR_ia32_sigpending 73
83#define __NR_ia32_sethostname 74
84#define __NR_ia32_setrlimit 75
85#define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */
86#define __NR_ia32_getrusage 77
87#define __NR_ia32_gettimeofday 78
88#define __NR_ia32_settimeofday 79
89#define __NR_ia32_getgroups 80
90#define __NR_ia32_setgroups 81
91#define __NR_ia32_select 82
92#define __NR_ia32_symlink 83
93#define __NR_ia32_oldlstat 84
94#define __NR_ia32_readlink 85
95#define __NR_ia32_uselib 86
96#define __NR_ia32_swapon 87
97#define __NR_ia32_reboot 88
98#define __NR_ia32_readdir 89
99#define __NR_ia32_mmap 90
100#define __NR_ia32_munmap 91
101#define __NR_ia32_truncate 92
102#define __NR_ia32_ftruncate 93
103#define __NR_ia32_fchmod 94
104#define __NR_ia32_fchown 95
105#define __NR_ia32_getpriority 96
106#define __NR_ia32_setpriority 97
107#define __NR_ia32_profil 98
108#define __NR_ia32_statfs 99
109#define __NR_ia32_fstatfs 100
110#define __NR_ia32_ioperm 101
111#define __NR_ia32_socketcall 102
112#define __NR_ia32_syslog 103
113#define __NR_ia32_setitimer 104
114#define __NR_ia32_getitimer 105
115#define __NR_ia32_stat 106
116#define __NR_ia32_lstat 107
117#define __NR_ia32_fstat 108
118#define __NR_ia32_olduname 109
119#define __NR_ia32_iopl 110
120#define __NR_ia32_vhangup 111
121#define __NR_ia32_idle 112
122#define __NR_ia32_vm86old 113
123#define __NR_ia32_wait4 114
124#define __NR_ia32_swapoff 115
125#define __NR_ia32_sysinfo 116
126#define __NR_ia32_ipc 117
127#define __NR_ia32_fsync 118
128#define __NR_ia32_sigreturn 119
129#define __NR_ia32_clone 120
130#define __NR_ia32_setdomainname 121
131#define __NR_ia32_uname 122
132#define __NR_ia32_modify_ldt 123
133#define __NR_ia32_adjtimex 124
134#define __NR_ia32_mprotect 125
135#define __NR_ia32_sigprocmask 126
136#define __NR_ia32_create_module 127
137#define __NR_ia32_init_module 128
138#define __NR_ia32_delete_module 129
139#define __NR_ia32_get_kernel_syms 130
140#define __NR_ia32_quotactl 131
141#define __NR_ia32_getpgid 132
142#define __NR_ia32_fchdir 133
143#define __NR_ia32_bdflush 134
144#define __NR_ia32_sysfs 135
145#define __NR_ia32_personality 136
146#define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */
147#define __NR_ia32_setfsuid 138
148#define __NR_ia32_setfsgid 139
149#define __NR_ia32__llseek 140
150#define __NR_ia32_getdents 141
151#define __NR_ia32__newselect 142
152#define __NR_ia32_flock 143
153#define __NR_ia32_msync 144
154#define __NR_ia32_readv 145
155#define __NR_ia32_writev 146
156#define __NR_ia32_getsid 147
157#define __NR_ia32_fdatasync 148
158#define __NR_ia32__sysctl 149
159#define __NR_ia32_mlock 150
160#define __NR_ia32_munlock 151
161#define __NR_ia32_mlockall 152
162#define __NR_ia32_munlockall 153
163#define __NR_ia32_sched_setparam 154
164#define __NR_ia32_sched_getparam 155
165#define __NR_ia32_sched_setscheduler 156
166#define __NR_ia32_sched_getscheduler 157
167#define __NR_ia32_sched_yield 158
168#define __NR_ia32_sched_get_priority_max 159
169#define __NR_ia32_sched_get_priority_min 160
170#define __NR_ia32_sched_rr_get_interval 161
171#define __NR_ia32_nanosleep 162
172#define __NR_ia32_mremap 163
173#define __NR_ia32_setresuid 164
174#define __NR_ia32_getresuid 165
175#define __NR_ia32_vm86 166
176#define __NR_ia32_query_module 167
177#define __NR_ia32_poll 168
178#define __NR_ia32_nfsservctl 169
179#define __NR_ia32_setresgid 170
180#define __NR_ia32_getresgid 171
181#define __NR_ia32_prctl 172
182#define __NR_ia32_rt_sigreturn 173
183#define __NR_ia32_rt_sigaction 174
184#define __NR_ia32_rt_sigprocmask 175
185#define __NR_ia32_rt_sigpending 176
186#define __NR_ia32_rt_sigtimedwait 177
187#define __NR_ia32_rt_sigqueueinfo 178
188#define __NR_ia32_rt_sigsuspend 179
189#define __NR_ia32_pread 180
190#define __NR_ia32_pwrite 181
191#define __NR_ia32_chown 182
192#define __NR_ia32_getcwd 183
193#define __NR_ia32_capget 184
194#define __NR_ia32_capset 185
195#define __NR_ia32_sigaltstack 186
196#define __NR_ia32_sendfile 187
197#define __NR_ia32_getpmsg 188 /* some people actually want streams */
198#define __NR_ia32_putpmsg 189 /* some people actually want streams */
199#define __NR_ia32_vfork 190
200#define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */
201#define __NR_ia32_mmap2 192
202#define __NR_ia32_truncate64 193
203#define __NR_ia32_ftruncate64 194
204#define __NR_ia32_stat64 195
205#define __NR_ia32_lstat64 196
206#define __NR_ia32_fstat64 197
207#define __NR_ia32_lchown32 198
208#define __NR_ia32_getuid32 199
209#define __NR_ia32_getgid32 200
210#define __NR_ia32_geteuid32 201
211#define __NR_ia32_getegid32 202
212#define __NR_ia32_setreuid32 203
213#define __NR_ia32_setregid32 204
214#define __NR_ia32_getgroups32 205
215#define __NR_ia32_setgroups32 206
216#define __NR_ia32_fchown32 207
217#define __NR_ia32_setresuid32 208
218#define __NR_ia32_getresuid32 209
219#define __NR_ia32_setresgid32 210
220#define __NR_ia32_getresgid32 211
221#define __NR_ia32_chown32 212
222#define __NR_ia32_setuid32 213
223#define __NR_ia32_setgid32 214
224#define __NR_ia32_setfsuid32 215
225#define __NR_ia32_setfsgid32 216
226#define __NR_ia32_pivot_root 217
227#define __NR_ia32_mincore 218
228#define __NR_ia32_madvise 219
229#define __NR_ia32_madvise1 219 /* delete when C lib stub is removed */
230#define __NR_ia32_getdents64 220
231#define __NR_ia32_fcntl64 221
232#define __NR_ia32_tuxcall 222
233#define __NR_ia32_security 223
234#define __NR_ia32_gettid 224
235#define __NR_ia32_readahead 225
236#define __NR_ia32_setxattr 226
237#define __NR_ia32_lsetxattr 227
238#define __NR_ia32_fsetxattr 228
239#define __NR_ia32_getxattr 229
240#define __NR_ia32_lgetxattr 230
241#define __NR_ia32_fgetxattr 231
242#define __NR_ia32_listxattr 232
243#define __NR_ia32_llistxattr 233
244#define __NR_ia32_flistxattr 234
245#define __NR_ia32_removexattr 235
246#define __NR_ia32_lremovexattr 236
247#define __NR_ia32_fremovexattr 237
248#define __NR_ia32_tkill 238
249#define __NR_ia32_sendfile64 239
250#define __NR_ia32_futex 240
251#define __NR_ia32_sched_setaffinity 241
252#define __NR_ia32_sched_getaffinity 242
253#define __NR_ia32_set_thread_area 243
254#define __NR_ia32_get_thread_area 244
255#define __NR_ia32_io_setup 245
256#define __NR_ia32_io_destroy 246
257#define __NR_ia32_io_getevents 247
258#define __NR_ia32_io_submit 248
259#define __NR_ia32_io_cancel 249
260#define __NR_ia32_exit_group 252
261#define __NR_ia32_lookup_dcookie 253
262#define __NR_ia32_sys_epoll_create 254
263#define __NR_ia32_sys_epoll_ctl 255
264#define __NR_ia32_sys_epoll_wait 256
265#define __NR_ia32_remap_file_pages 257
266#define __NR_ia32_set_tid_address 258
267#define __NR_ia32_timer_create 259
268#define __NR_ia32_timer_settime (__NR_ia32_timer_create+1)
269#define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2)
270#define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3)
271#define __NR_ia32_timer_delete (__NR_ia32_timer_create+4)
272#define __NR_ia32_clock_settime (__NR_ia32_timer_create+5)
273#define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6)
274#define __NR_ia32_clock_getres (__NR_ia32_timer_create+7)
275#define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8)
276#define __NR_ia32_statfs64 268
277#define __NR_ia32_fstatfs64 269
278#define __NR_ia32_tgkill 270
279#define __NR_ia32_utimes 271
280#define __NR_ia32_fadvise64_64 272
281#define __NR_ia32_vserver 273
282#define __NR_ia32_mbind 274
283#define __NR_ia32_get_mempolicy 275
284#define __NR_ia32_set_mempolicy 276
285#define __NR_ia32_mq_open 277
286#define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1)
287#define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2)
288#define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3)
289#define __NR_ia32_mq_notify (__NR_ia32_mq_open+4)
290#define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5)
291#define __NR_ia32_kexec 283
292#define __NR_ia32_waitid 284
293/* #define __NR_sys_setaltroot 285 */
294#define __NR_ia32_add_key 286
295#define __NR_ia32_request_key 287
296#define __NR_ia32_keyctl 288
297
298#define IA32_NR_syscalls 290 /* must be greater than the biggest syscall number! */
299
300#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/ide.h b/include/asm-x86_64/ide.h
new file mode 100644
index 000000000000..4cef0ef61878
--- /dev/null
+++ b/include/asm-x86_64/ide.h
@@ -0,0 +1 @@
#include <asm-i386/ide.h>
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
new file mode 100644
index 000000000000..94202703fae2
--- /dev/null
+++ b/include/asm-x86_64/io.h
@@ -0,0 +1,341 @@
1#ifndef _ASM_IO_H
2#define _ASM_IO_H
3
4#include <linux/config.h>
5
6/*
7 * This file contains the definitions for the x86 IO instructions
8 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
9 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
10 * versions of the single-IO instructions (inb_p/inw_p/..).
11 *
12 * This file is not meant to be obfuscating: it's just complicated
13 * to (a) handle it all in a way that makes gcc able to optimize it
14 * as well as possible and (b) avoid writing the same thing
15 * over and over again with slight variations and possibly making a
16 * mistake somewhere.
17 */
18
19/*
20 * Thanks to James van Artsdalen for a better timing-fix than
21 * the two short jumps: using outb's to a nonexistent port seems
22 * to guarantee better timings even on fast machines.
23 *
24 * On the other hand, I'd like to be sure of a non-existent port:
25 * I feel a bit unsafe about using 0x80 (should be safe, though)
26 *
27 * Linus
28 */
29
30 /*
31 * A bit simplified and optimized by Jan Hubicka
32 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
33 *
34 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
35 * isa_read[wl] and isa_write[wl] fixed
36 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
37 */
38
39#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
40
41#ifdef REALLY_SLOW_IO
42#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
43#else
44#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
45#endif
46
47/*
48 * Talk about misusing macros..
49 */
50#define __OUT1(s,x) \
51extern inline void out##s(unsigned x value, unsigned short port) {
52
53#define __OUT2(s,s1,s2) \
54__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
55
56#define __OUT(s,s1,x) \
57__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
58__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
59
60#define __IN1(s) \
61extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
62
63#define __IN2(s,s1,s2) \
64__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
65
66#define __IN(s,s1,i...) \
67__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
68__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
69
70#define __INS(s) \
71extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
72{ __asm__ __volatile__ ("rep ; ins" #s \
73: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
74
75#define __OUTS(s) \
76extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
77{ __asm__ __volatile__ ("rep ; outs" #s \
78: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
79
80#define RETURN_TYPE unsigned char
81__IN(b,"")
82#undef RETURN_TYPE
83#define RETURN_TYPE unsigned short
84__IN(w,"")
85#undef RETURN_TYPE
86#define RETURN_TYPE unsigned int
87__IN(l,"")
88#undef RETURN_TYPE
89
90__OUT(b,"b",char)
91__OUT(w,"w",short)
92__OUT(l,,int)
93
94__INS(b)
95__INS(w)
96__INS(l)
97
98__OUTS(b)
99__OUTS(w)
100__OUTS(l)
101
102#define IO_SPACE_LIMIT 0xffff
103
104#if defined(__KERNEL__) && __x86_64__
105
106#include <linux/vmalloc.h>
107
108#ifndef __i386__
109/*
110 * Change virtual addresses to physical addresses and vv.
111 * These are pretty trivial
112 */
113extern inline unsigned long virt_to_phys(volatile void * address)
114{
115 return __pa(address);
116}
117
118extern inline void * phys_to_virt(unsigned long address)
119{
120 return __va(address);
121}
122#endif
123
124/*
125 * Change "struct page" to physical address.
126 */
127#ifdef CONFIG_DISCONTIGMEM
128#include <asm/mmzone.h>
129#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
130#else
131#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
132#endif
133
134#include <asm-generic/iomap.h>
135
136extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
137
138extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
139{
140 return __ioremap(offset, size, 0);
141}
142
143/*
144 * This one maps high address device memory and turns off caching for that area.
145 * It's useful if some control registers are in such an area and write combining
146 * or read caching is not desirable:
147 */
148extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
149extern void iounmap(volatile void __iomem *addr);
150
151/*
152 * ISA I/O bus memory addresses are 1:1 with the physical address.
153 */
154#define isa_virt_to_bus virt_to_phys
155#define isa_page_to_bus page_to_phys
156#define isa_bus_to_virt phys_to_virt
157
158/*
159 * However PCI ones are not necessarily 1:1 and therefore these interfaces
160 * are forbidden in portable PCI drivers.
161 *
162 * Allow them on x86 for legacy drivers, though.
163 */
164#define virt_to_bus virt_to_phys
165#define bus_to_virt phys_to_virt
166
167/*
168 * readX/writeX() are used to access memory mapped devices. On some
169 * architectures the memory mapped IO stuff needs to be accessed
170 * differently. On the x86 architecture, we just read/write the
171 * memory location directly.
172 */
173
174static inline __u8 __readb(const volatile void __iomem *addr)
175{
176 return *(__force volatile __u8 *)addr;
177}
178static inline __u16 __readw(const volatile void __iomem *addr)
179{
180 return *(__force volatile __u16 *)addr;
181}
182static inline __u32 __readl(const volatile void __iomem *addr)
183{
184 return *(__force volatile __u32 *)addr;
185}
186static inline __u64 __readq(const volatile void __iomem *addr)
187{
188 return *(__force volatile __u64 *)addr;
189}
190#define readb(x) __readb(x)
191#define readw(x) __readw(x)
192#define readl(x) __readl(x)
193#define readq(x) __readq(x)
194#define readb_relaxed(a) readb(a)
195#define readw_relaxed(a) readw(a)
196#define readl_relaxed(a) readl(a)
197#define readq_relaxed(a) readq(a)
198#define __raw_readb readb
199#define __raw_readw readw
200#define __raw_readl readl
201#define __raw_readq readq
202
203#define mmiowb()
204
205#ifdef CONFIG_UNORDERED_IO
206static inline void __writel(__u32 val, volatile void __iomem *addr)
207{
208 volatile __u32 __iomem *target = addr;
209 asm volatile("movnti %1,%0"
210 : "=m" (*target)
211 : "r" (val) : "memory");
212}
213
214static inline void __writeq(__u64 val, volatile void __iomem *addr)
215{
216 volatile __u64 __iomem *target = addr;
217 asm volatile("movnti %1,%0"
218 : "=m" (*target)
219 : "r" (val) : "memory");
220}
221#else
222static inline void __writel(__u32 b, volatile void __iomem *addr)
223{
224 *(__force volatile __u32 *)addr = b;
225}
226static inline void __writeq(__u64 b, volatile void __iomem *addr)
227{
228 *(__force volatile __u64 *)addr = b;
229}
230#endif
231static inline void __writeb(__u8 b, volatile void __iomem *addr)
232{
233 *(__force volatile __u8 *)addr = b;
234}
235static inline void __writew(__u16 b, volatile void __iomem *addr)
236{
237 *(__force volatile __u16 *)addr = b;
238}
239#define writeq(val,addr) __writeq((val),(addr))
240#define writel(val,addr) __writel((val),(addr))
241#define writew(val,addr) __writew((val),(addr))
242#define writeb(val,addr) __writeb((val),(addr))
243#define __raw_writeb writeb
244#define __raw_writew writew
245#define __raw_writel writel
246#define __raw_writeq writeq
247
248void __memcpy_fromio(void*,unsigned long,unsigned);
249void __memcpy_toio(unsigned long,const void*,unsigned);
250
251static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
252{
253 __memcpy_fromio(to,(unsigned long)from,len);
254}
255static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
256{
257 __memcpy_toio((unsigned long)to,from,len);
258}
259
260void memset_io(volatile void __iomem *a, int b, size_t c);
261
262/*
263 * ISA space is 'always mapped' on a typical x86 system, no need to
264 * explicitly ioremap() it. The fact that the ISA IO space is mapped
265 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
266 * are physical addresses. The following constant pointer can be
267 * used as the IO-area pointer (it can be iounmapped as well, so the
268 * analogy with PCI is quite large):
269 */
270#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
271
272#define isa_readb(a) readb(__ISA_IO_base + (a))
273#define isa_readw(a) readw(__ISA_IO_base + (a))
274#define isa_readl(a) readl(__ISA_IO_base + (a))
275#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
276#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
277#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
278#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
279#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
280#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
281
282
283/*
284 * Again, x86-64 does not require mem-IO-specific functions.
285 */
286
287#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
288#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
289
290/**
291 * check_signature - find BIOS signatures
292 * @io_addr: mmio address to check
293 * @signature: signature block
294 * @length: length of signature
295 *
296 * Perform a signature comparison with the mmio address io_addr. This
297 * address should have been obtained by ioremap.
298 * Returns 1 on a match.
299 */
300
301static inline int check_signature(void __iomem *io_addr,
302 const unsigned char *signature, int length)
303{
304 int retval = 0;
305 do {
306 if (readb(io_addr) != *signature)
307 goto out;
308 io_addr++;
309 signature++;
310 length--;
311 } while (length);
312 retval = 1;
313out:
314 return retval;
315}
316
317/* Nothing to do */
318
319#define dma_cache_inv(_start,_size) do { } while (0)
320#define dma_cache_wback(_start,_size) do { } while (0)
321#define dma_cache_wback_inv(_start,_size) do { } while (0)
322
323#define flush_write_buffers()
324
325extern int iommu_bio_merge;
326#define BIO_VMERGE_BOUNDARY iommu_bio_merge
327
328/*
329 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
330 * access
331 */
332#define xlate_dev_mem_ptr(p) __va(p)
333
334/*
335 * Convert a virtual cached pointer to an uncached pointer
336 */
337#define xlate_dev_kmem_ptr(p) p
338
339#endif /* __KERNEL__ */
340
341#endif
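A typical driver pairs ioremap() with the readX/writeX accessors above and
iounmap() on teardown. A sketch with a made-up base address and register
offset:

	static u32 read_device_status(unsigned long phys_base)
	{
		void __iomem *regs = ioremap(phys_base, 0x100);
		u32 status;

		if (!regs)
			return ~0u;
		status = readl(regs + 0x10);	/* hypothetical status register */
		iounmap(regs);
		return status;
	}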
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
new file mode 100644
index 000000000000..7efc932e8f0b
--- /dev/null
+++ b/include/asm-x86_64/io_apic.h
@@ -0,0 +1,221 @@
1#ifndef __ASM_IO_APIC_H
2#define __ASM_IO_APIC_H
3
4#include <linux/config.h>
5#include <asm/types.h>
6#include <asm/mpspec.h>
7
8/*
9 * Intel IO-APIC support for SMP and UP systems.
10 *
11 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
12 */
13
14#ifdef CONFIG_X86_IO_APIC
15
16#ifdef CONFIG_PCI_MSI
17static inline int use_pci_vector(void) {return 1;}
18static inline void disable_edge_ioapic_vector(unsigned int vector) { }
19static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
20static inline void end_edge_ioapic_vector (unsigned int vector) { }
21#define startup_level_ioapic startup_level_ioapic_vector
22#define shutdown_level_ioapic mask_IO_APIC_vector
23#define enable_level_ioapic unmask_IO_APIC_vector
24#define disable_level_ioapic mask_IO_APIC_vector
25#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector
26#define end_level_ioapic end_level_ioapic_vector
27#define set_ioapic_affinity set_ioapic_affinity_vector
28
29#define startup_edge_ioapic startup_edge_ioapic_vector
30#define shutdown_edge_ioapic disable_edge_ioapic_vector
31#define enable_edge_ioapic unmask_IO_APIC_vector
32#define disable_edge_ioapic disable_edge_ioapic_vector
33#define ack_edge_ioapic ack_edge_ioapic_vector
34#define end_edge_ioapic end_edge_ioapic_vector
35#else
36static inline int use_pci_vector(void) {return 0;}
37static inline void disable_edge_ioapic_irq(unsigned int irq) { }
38static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { }
39static inline void end_edge_ioapic_irq (unsigned int irq) { }
40#define startup_level_ioapic startup_level_ioapic_irq
41#define shutdown_level_ioapic mask_IO_APIC_irq
42#define enable_level_ioapic unmask_IO_APIC_irq
43#define disable_level_ioapic mask_IO_APIC_irq
44#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq
45#define end_level_ioapic end_level_ioapic_irq
46#define set_ioapic_affinity set_ioapic_affinity_irq
47
48#define startup_edge_ioapic startup_edge_ioapic_irq
49#define shutdown_edge_ioapic disable_edge_ioapic_irq
50#define enable_edge_ioapic unmask_IO_APIC_irq
51#define disable_edge_ioapic disable_edge_ioapic_irq
52#define ack_edge_ioapic ack_edge_ioapic_irq
53#define end_edge_ioapic end_edge_ioapic_irq
54#endif
55
56#define APIC_MISMATCH_DEBUG
57
58#define IO_APIC_BASE(idx) \
59 ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
60 + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
61
62/*
63 * The structure of the IO-APIC:
64 */
65union IO_APIC_reg_00 {
66 u32 raw;
67 struct {
68 u32 __reserved_2 : 14,
69 LTS : 1,
70 delivery_type : 1,
71 __reserved_1 : 8,
72 ID : 8;
73 } __attribute__ ((packed)) bits;
74};
75
76union IO_APIC_reg_01 {
77 u32 raw;
78 struct {
79 u32 version : 8,
80 __reserved_2 : 7,
81 PRQ : 1,
82 entries : 8,
83 __reserved_1 : 8;
84 } __attribute__ ((packed)) bits;
85};
86
87union IO_APIC_reg_02 {
88 u32 raw;
89 struct {
90 u32 __reserved_2 : 24,
91 arbitration : 4,
92 __reserved_1 : 4;
93 } __attribute__ ((packed)) bits;
94};
95
96union IO_APIC_reg_03 {
97 u32 raw;
98 struct {
99 u32 boot_DT : 1,
100 __reserved_1 : 31;
101 } __attribute__ ((packed)) bits;
102};
103
104/*
105 * # of IO-APICs and # of IRQ routing registers
106 */
107extern int nr_ioapics;
108extern int nr_ioapic_registers[MAX_IO_APICS];
109
110enum ioapic_irq_destination_types {
111 dest_Fixed = 0,
112 dest_LowestPrio = 1,
113 dest_SMI = 2,
114 dest__reserved_1 = 3,
115 dest_NMI = 4,
116 dest_INIT = 5,
117 dest__reserved_2 = 6,
118 dest_ExtINT = 7
119};
120
121struct IO_APIC_route_entry {
122 __u32 vector : 8,
123 delivery_mode : 3, /* 000: FIXED
124 * 001: lowest prio
125 * 111: ExtINT
126 */
127 dest_mode : 1, /* 0: physical, 1: logical */
128 delivery_status : 1,
129 polarity : 1,
130 irr : 1,
131 trigger : 1, /* 0: edge, 1: level */
132 mask : 1, /* 0: enabled, 1: disabled */
133 __reserved_2 : 15;
134
135 union { struct { __u32
136 __reserved_1 : 24,
137 physical_dest : 4,
138 __reserved_2 : 4;
139 } physical;
140
141 struct { __u32
142 __reserved_1 : 24,
143 logical_dest : 8;
144 } logical;
145 } dest;
146
147} __attribute__ ((packed));
148
149/*
150 * MP-BIOS irq configuration table structures:
151 */
152
153/* I/O APIC entries */
154extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
155
156/* # of MP IRQ source entries */
157extern int mp_irq_entries;
158
159/* MP IRQ source entries */
160extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
161
162/* non-0 if default (table-less) MP configuration */
163extern int mpc_default_type;
164
165static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
166{
167 *IO_APIC_BASE(apic) = reg;
168 return *(IO_APIC_BASE(apic)+4);
169}
170
171static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
172{
173 *IO_APIC_BASE(apic) = reg;
174 *(IO_APIC_BASE(apic)+4) = value;
175}
176
177/*
178 * Re-write a value: to be used for read-modify-write
179 * cycles where the read already set up the index register.
180 */
181static inline void io_apic_modify(unsigned int apic, unsigned int value)
182{
183 *(IO_APIC_BASE(apic)+4) = value;
184}
185
186/*
187 * Synchronize the IO-APIC and the CPU by doing
188 * a dummy read from the IO-APIC
189 */
190static inline void io_apic_sync(unsigned int apic)
191{
192 (void) *(IO_APIC_BASE(apic)+4);
193}
194
195/* 1 if "noapic" boot option passed */
196extern int skip_ioapic_setup;
197
198/*
199 * If we use the IO-APIC for IRQ routing, disable automatic
200 * assignment of PCI IRQ's.
201 */
202#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
203
204#ifdef CONFIG_ACPI_BOOT
205extern int io_apic_get_unique_id (int ioapic, int apic_id);
206extern int io_apic_get_version (int ioapic);
207extern int io_apic_get_redir_entries (int ioapic);
208extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
209#endif
210
211extern int sis_apic_bug; /* dummy */
212
213#else /* !CONFIG_X86_IO_APIC */
214#define io_apic_assign_pci_irqs 0
215#endif
216
217extern int assign_irq_vector(int irq);
218
219void enable_NMI_through_LVT0 (void * dummy);
220
221#endif
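The io_apic_read()/io_apic_modify() pair above implements the indirect
register window: the read programs the index register, the modify reuses
it. A read-modify-write sketch that sets the mask bit (bit 16 of a
redirection entry's low word; 0x10 + 2*pin indexing per the IO-APIC
layout):

	static void mask_ioapic_pin(unsigned int apic, unsigned int pin)
	{
		unsigned int reg = io_apic_read(apic, 0x10 + 2 * pin);
		reg |= 0x00010000;		/* 'mask' bit from the route entry */
		io_apic_modify(apic, reg);	/* index register already set up */
	}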
diff --git a/include/asm-x86_64/ioctl.h b/include/asm-x86_64/ioctl.h
new file mode 100644
index 000000000000..609b663b6bf4
--- /dev/null
+++ b/include/asm-x86_64/ioctl.h
@@ -0,0 +1,75 @@
1/* $Id: ioctl.h,v 1.2 2001/07/04 09:08:13 ak Exp $
2 *
3 * linux/ioctl.h for Linux by H.H. Bergman.
4 */
5
6#ifndef _ASMX8664_IOCTL_H
7#define _ASMX8664_IOCTL_H
8
9/* ioctl command encoding: 32 bits total, command in lower 16 bits,
10 * size of the parameter structure in the lower 14 bits of the
11 * upper 16 bits.
12 * Encoding the size of the parameter structure in the ioctl request
13 * is useful for catching programs compiled with old versions
14 * and to avoid overwriting user space outside the user buffer area.
15 * The highest 2 bits are reserved for indicating the ``access mode''.
16 * NOTE: This limits the max parameter size to 16kB -1 !
17 */
18
19/*
20 * The following is for compatibility across the various Linux
21 * platforms. The i386 ioctl numbering scheme doesn't really enforce
22 * a type field. De facto, however, the top 8 bits of the lower 16
23 * bits are indeed used as a type field, so we might just as well make
24 * this explicit here. Please be sure to use the decoding macros
25 * below from now on.
26 */
27#define _IOC_NRBITS 8
28#define _IOC_TYPEBITS 8
29#define _IOC_SIZEBITS 14
30#define _IOC_DIRBITS 2
31
32#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
33#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
34#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
35#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
36
37#define _IOC_NRSHIFT 0
38#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
39#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
40#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
41
42/*
43 * Direction bits.
44 */
45#define _IOC_NONE 0U
46#define _IOC_WRITE 1U
47#define _IOC_READ 2U
48
49#define _IOC(dir,type,nr,size) \
50 (((dir) << _IOC_DIRSHIFT) | \
51 ((type) << _IOC_TYPESHIFT) | \
52 ((nr) << _IOC_NRSHIFT) | \
53 ((size) << _IOC_SIZESHIFT))
54
55/* used to create numbers */
56#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
57#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
58#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
59#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
60
61/* used to decode ioctl numbers.. */
62#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
63#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
64#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
65#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
66
67/* ...and for the drivers/sound files... */
68
69#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
70#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
71#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
72#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
73#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
74
75#endif /* _ASMX8664_IOCTL_H */
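
To see the encoding at work, here is a hedged sketch of a made-up device
command; the 'W' magic, the command numbers, and the structure are purely
illustrative.

#include <asm/ioctl.h>

struct widget_cfg { int speed; int mode; };	/* hypothetical */

#define WIDGET_GET_CFG	_IOR('W', 0x01, struct widget_cfg)
#define WIDGET_SET_CFG	_IOW('W', 0x02, struct widget_cfg)

/* The decoding macros recover each field:
 *	_IOC_DIR(WIDGET_GET_CFG)  == _IOC_READ
 *	_IOC_TYPE(WIDGET_GET_CFG) == 'W'
 *	_IOC_NR(WIDGET_GET_CFG)   == 0x01
 *	_IOC_SIZE(WIDGET_GET_CFG) == sizeof(struct widget_cfg)
 */
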
diff --git a/include/asm-x86_64/ioctl32.h b/include/asm-x86_64/ioctl32.h
new file mode 100644
index 000000000000..d0d227f45e05
--- /dev/null
+++ b/include/asm-x86_64/ioctl32.h
@@ -0,0 +1 @@
#include <linux/ioctl32.h>
diff --git a/include/asm-x86_64/ioctls.h b/include/asm-x86_64/ioctls.h
new file mode 100644
index 000000000000..62caf8b6e4e1
--- /dev/null
+++ b/include/asm-x86_64/ioctls.h
@@ -0,0 +1,82 @@
1#ifndef __ARCH_X8664_IOCTLS_H__
2#define __ARCH_X8664_IOCTLS_H__
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCSBRK 0x5427 /* BSD compatibility */
47#define TIOCCBRK 0x5428 /* BSD compatibility */
48#define TIOCGSID 0x5429 /* Return the session ID of FD */
49#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
50#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
51
52#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
53#define FIOCLEX 0x5451
54#define FIOASYNC 0x5452
55#define TIOCSERCONFIG 0x5453
56#define TIOCSERGWILD 0x5454
57#define TIOCSERSWILD 0x5455
58#define TIOCGLCKTRMIOS 0x5456
59#define TIOCSLCKTRMIOS 0x5457
60#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
61#define TIOCSERGETLSR 0x5459 /* Get line status register */
62#define TIOCSERGETMULTI 0x545A /* Get multiport config */
63#define TIOCSERSETMULTI 0x545B /* Set multiport config */
64
65#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
66#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
67#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
68#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
69#define FIOQSIZE 0x5460
70
71/* Used for packet mode */
72#define TIOCPKT_DATA 0
73#define TIOCPKT_FLUSHREAD 1
74#define TIOCPKT_FLUSHWRITE 2
75#define TIOCPKT_STOP 4
76#define TIOCPKT_START 8
77#define TIOCPKT_NOSTOP 16
78#define TIOCPKT_DOSTOP 32
79
80#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
81
82#endif
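
As a quick user-space illustration of one of these codes, the following
hedged sketch queries the terminal size on stdin; struct winsize comes from
the standard headers.

#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(0, TIOCGWINSZ, &ws) == 0)	/* 0x5413 above */
		printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
	return 0;
}
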
diff --git a/include/asm-x86_64/ipcbuf.h b/include/asm-x86_64/ipcbuf.h
new file mode 100644
index 000000000000..470cf85e3ba8
--- /dev/null
+++ b/include/asm-x86_64/ipcbuf.h
@@ -0,0 +1,29 @@
1#ifndef __x86_64_IPCBUF_H__
2#define __x86_64_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for x86_64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid32_t uid;
18 __kernel_gid32_t gid;
19 __kernel_uid32_t cuid;
20 __kernel_gid32_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned short __pad2;
25 unsigned long __unused1;
26 unsigned long __unused2;
27};
28
29#endif /* __x86_64_IPCBUF_H__ */
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
new file mode 100644
index 000000000000..d1841847ed89
--- /dev/null
+++ b/include/asm-x86_64/ipi.h
@@ -0,0 +1,113 @@
1#ifndef __ASM_IPI_H
2#define __ASM_IPI_H
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC InterProcessor Interrupt code.
9 *
10 * Moved to include file by James Cleverdon from
11 * arch/x86-64/kernel/smp.c
12 *
13 * Copyrights from kernel/smp.c:
14 *
15 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
16 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
17 * (c) 2002,2003 Andi Kleen, SuSE Labs.
18 * Subject to the GNU Public License, v.2
19 */
20
21#include <asm/fixmap.h>
22#include <asm/hw_irq.h>
23#include <asm/apicdef.h>
24#include <asm/genapic.h>
25
26/*
27 * the following functions deal with sending IPIs between CPUs.
28 *
29 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
30 */
31
32static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
33{
34 unsigned int icr = APIC_DM_FIXED | shortcut | vector | dest;
35 if (vector == KDB_VECTOR)
36 icr = (icr & (~APIC_VECTOR_MASK)) | APIC_DM_NMI;
37 return icr;
38}
39
40static inline int __prepare_ICR2 (unsigned int mask)
41{
42 return SET_APIC_DEST_FIELD(mask);
43}
44
45static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
46{
47 /*
48 * Subtle. In the case of the 'never do double writes' workaround
49 * we have to lock out interrupts to be safe. As we don't care
50 * about the value read, we use an atomic rmw access to avoid costly
51 * cli/sti. Otherwise we use an even cheaper single atomic write
52 * to the APIC.
53 */
54 unsigned int cfg;
55
56 /*
57 * Wait for idle.
58 */
59 apic_wait_icr_idle();
60
61 /*
62 * No need to touch the target chip field
63 */
64 cfg = __prepare_ICR(shortcut, vector, dest);
65
66 /*
67 * Send the IPI. The write to APIC_ICR fires this off.
68 */
69 apic_write_around(APIC_ICR, cfg);
70}
71
72
73static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
74{
75 unsigned long cfg, flags;
76 unsigned long query_cpu;
77
78 /*
79 * Hack. The clustered APIC addressing mode doesn't allow us to send
80 * to an arbitrary mask, so I do a unicast to each CPU instead.
81 * - mbligh
82 */
83 local_irq_save(flags);
84
85 for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
86 if (cpu_isset(query_cpu, mask)) {
87
88 /*
89 * Wait for idle.
90 */
91 apic_wait_icr_idle();
92
93 /*
94 * prepare target chip field
95 */
96 cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
97 apic_write_around(APIC_ICR2, cfg);
98
99 /*
100 * program the ICR
101 */
102 cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL);
103
104 /*
105 * Send the IPI. The write to APIC_ICR fires this off.
106 */
107 apic_write_around(APIC_ICR, cfg);
108 }
109 }
110 local_irq_restore(flags);
111}
112
113#endif /* __ASM_IPI_H */
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
new file mode 100644
index 000000000000..3af50b3c3b05
--- /dev/null
+++ b/include/asm-x86_64/irq.h
@@ -0,0 +1,55 @@
1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H
3
4/*
5 * linux/include/asm/irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 */
12
13#define TIMER_IRQ 0
14
15/*
16 * 16 8259A IRQs, 208 potential APIC interrupt sources.
17 * Right now the APIC is mostly only used for SMP.
18 * 256 vectors is an architectural limit. (we can have
19 * more than 256 devices theoretically, but they will
20 * have to use shared interrupts)
21 * Since vectors 0x00-0x1f are used/reserved for the CPU,
22 * the usable vector space is 0x20-0xff (224 vectors)
23 */
24
25/*
26 * The maximum number of vectors supported by x86_64 processors
27 * is limited to 256. For processors other than x86_64, NR_VECTORS
28 * should be changed accordingly.
29 */
30#define NR_VECTORS 256
31
32#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
33
34#ifdef CONFIG_PCI_MSI
35#define NR_IRQS FIRST_SYSTEM_VECTOR
36#define NR_IRQ_VECTORS NR_IRQS
37#else
38#define NR_IRQS 224
39#define NR_IRQ_VECTORS 1024
40#endif
41
42static __inline__ int irq_canonicalize(int irq)
43{
44 return ((irq == 2) ? 9 : irq);
45}
46
47#ifdef CONFIG_X86_LOCAL_APIC
48#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
49#endif
50
51struct irqaction;
52struct pt_regs;
53int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
54
55#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
new file mode 100644
index 000000000000..adb6f918d3ad
--- /dev/null
+++ b/include/asm-x86_64/kdebug.h
@@ -0,0 +1,53 @@
1#ifndef _X86_64_KDEBUG_H
2#define _X86_64_KDEBUG_H 1
3
4#include <linux/notifier.h>
5
6struct pt_regs;
7
8struct die_args {
9 struct pt_regs *regs;
10 const char *str;
11 long err;
12 int trapnr;
13 int signr;
14};
15
16/* Note - you should never unregister, because that can race with NMIs.
17   If you really want to do it: first unregister, then synchronize_kernel, then free.
18 */
19int register_die_notifier(struct notifier_block *nb);
20extern struct notifier_block *die_chain;
21/* Grossly misnamed. */
22enum die_val {
23 DIE_OOPS = 1,
24 DIE_INT3,
25 DIE_DEBUG,
26 DIE_DEBUGSTEP,
27 DIE_PANIC,
28 DIE_NMI,
29 DIE_DIE,
30 DIE_NMIWATCHDOG,
31 DIE_KERNELDEBUG,
32 DIE_TRAP,
33 DIE_GPF,
34 DIE_CALL,
35 DIE_NMI_IPI,
36 DIE_PAGE_FAULT,
37};
38
39static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, long err, int trap, int sig)
40{
41	struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig };
42 return notifier_call_chain(&die_chain, val, &args);
43}
44
45extern int printk_address(unsigned long address);
46extern void die(const char *,struct pt_regs *,long);
47extern void __die(const char *,struct pt_regs *,long);
48extern void show_registers(struct pt_regs *regs);
49extern void dump_pagetable(unsigned long);
50extern void oops_begin(void);
51extern void oops_end(void);
52
53#endif
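
A hedged sketch of the registration pattern follows; the handler and its
message are illustrative, and per the note above the block is simply never
unregistered.

/* Hedged sketch: log a line on every oops via the die chain. */
static int my_die_handler(struct notifier_block *nb,
			  unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk("oops: %s (trap %d)\n", args->str, args->trapnr);
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_handler,
};

/* in some init path:  register_die_notifier(&my_die_nb);  */
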
diff --git a/include/asm-x86_64/kmap_types.h b/include/asm-x86_64/kmap_types.h
new file mode 100644
index 000000000000..7486338c6cea
--- /dev/null
+++ b/include/asm-x86_64/kmap_types.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H
3
4enum km_type {
5 KM_BOUNCE_READ,
6 KM_SKB_SUNRPC_DATA,
7 KM_SKB_DATA_SOFTIRQ,
8 KM_USER0,
9 KM_USER1,
10 KM_BIO_SRC_IRQ,
11 KM_BIO_DST_IRQ,
12 KM_IRQ0,
13 KM_IRQ1,
14 KM_SOFTIRQ0,
15 KM_SOFTIRQ1,
16 KM_TYPE_NR
17};
18
19#endif
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
new file mode 100644
index 000000000000..bfea52d516f8
--- /dev/null
+++ b/include/asm-x86_64/kprobes.h
@@ -0,0 +1,63 @@
1#ifndef _ASM_KPROBES_H
2#define _ASM_KPROBES_H
3/*
4 * Kernel Probes (KProbes)
5 * include/asm-x86_64/kprobes.h
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * Copyright (C) IBM Corporation, 2002, 2004
22 *
23 * 2004-Oct Prasanna S Panchamukhi <prasanna@in.ibm.com> and Jim Keniston
24 * kenistoj@us.ibm.com adopted from i386.
25 */
26#include <linux/types.h>
27#include <linux/ptrace.h>
28
29struct pt_regs;
30
31typedef u8 kprobe_opcode_t;
32#define BREAKPOINT_INSTRUCTION 0xcc
33#define MAX_INSN_SIZE 15
34#define MAX_STACK_SIZE 64
35#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
36 (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
37 ? (MAX_STACK_SIZE) \
38 : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
39
40#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
41
42/* Architecture specific copy of the original instruction */
43struct arch_specific_insn {
44 /* copy of the original instruction */
45 kprobe_opcode_t *insn;
46};
47
48/* int3/1 are interrupt gates for kprobes. So, restore the status of IF,
49 * if necessary, before executing the original int3/1 (trap) handler.
50 */
51static inline void restore_interrupts(struct pt_regs *regs)
52{
53 if (regs->eflags & IF_MASK)
54 local_irq_enable();
55}
56
57extern int post_kprobe_handler(struct pt_regs *regs);
58extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
59extern int kprobe_handler(struct pt_regs *regs);
60
61extern int kprobe_exceptions_notify(struct notifier_block *self,
62 unsigned long val, void *data);
63#endif /* _ASM_KPROBES_H */
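
MIN_STACK_SIZE(ADDR) evaluates to the smaller of MAX_STACK_SIZE and the
number of bytes between ADDR and the top of the current thread's stack
(current_thread_info() + THREAD_SIZE). A hedged sketch of how a handler
might use it to snapshot the probed stack; the buffer and helper are
illustrative.

/* Hedged sketch: copy at most MAX_STACK_SIZE bytes of stack. */
static char stack_copy[MAX_STACK_SIZE];

static void snapshot_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->rsp;

	memcpy(stack_copy, (void *)sp, MIN_STACK_SIZE(sp));
}
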
diff --git a/include/asm-x86_64/ldt.h b/include/asm-x86_64/ldt.h
new file mode 100644
index 000000000000..9ef647b890d2
--- /dev/null
+++ b/include/asm-x86_64/ldt.h
@@ -0,0 +1,36 @@
1/*
2 * ldt.h
3 *
4 * Definitions of structures used with the modify_ldt system call.
5 */
6#ifndef _LINUX_LDT_H
7#define _LINUX_LDT_H
8
9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192
11/* The size of each LDT entry. */
12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15/* Note: on 64bit, base and limit are ignored and you cannot set
16   DS/ES/CS to anything other than the default values if you still want to
17   do syscalls. This call is therefore mainly useful for 32bit mode. */
18struct user_desc {
19 unsigned int entry_number;
20 unsigned int base_addr;
21 unsigned int limit;
22 unsigned int seg_32bit:1;
23 unsigned int contents:2;
24 unsigned int read_exec_only:1;
25 unsigned int limit_in_pages:1;
26 unsigned int seg_not_present:1;
27 unsigned int useable:1;
28 unsigned int lm:1;
29};
30
31#define MODIFY_LDT_CONTENTS_DATA 0
32#define MODIFY_LDT_CONTENTS_STACK 1
33#define MODIFY_LDT_CONTENTS_CODE 2
34
35#endif /* !__ASSEMBLY__ */
36#endif
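
A hedged user-space sketch of filling in a user_desc and installing it with
modify_ldt; the wrapper, slot number, and base are illustrative, and per the
note above this is mainly useful for 32bit code.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

/* Hedged sketch: write one 32bit data segment into the LDT. */
static int install_data_segment(void *base, unsigned int limit_pages)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number   = 0;			/* hypothetical slot */
	d.base_addr      = (unsigned int)(unsigned long)base;
	d.limit          = limit_pages;
	d.seg_32bit      = 1;
	d.contents       = MODIFY_LDT_CONTENTS_DATA;
	d.limit_in_pages = 1;
	d.useable        = 1;

	return syscall(SYS_modify_ldt, 1, &d, sizeof(d));	/* 1 = write */
}
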
diff --git a/include/asm-x86_64/linkage.h b/include/asm-x86_64/linkage.h
new file mode 100644
index 000000000000..291c2d01c44f
--- /dev/null
+++ b/include/asm-x86_64/linkage.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H
3
4/* Nothing to see here... */
5
6#endif
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
new file mode 100644
index 000000000000..169c223a8452
--- /dev/null
+++ b/include/asm-x86_64/local.h
@@ -0,0 +1,73 @@
1#ifndef _ARCH_X8664_LOCAL_H
2#define _ARCH_X8664_LOCAL_H
3
4#include <linux/percpu.h>
5
6typedef struct
7{
8 volatile unsigned int counter;
9} local_t;
10
11#define LOCAL_INIT(i) { (i) }
12
13#define local_read(v) ((v)->counter)
14#define local_set(v,i) (((v)->counter) = (i))
15
16static __inline__ void local_inc(local_t *v)
17{
18 __asm__ __volatile__(
19 "incl %0"
20 :"=m" (v->counter)
21 :"m" (v->counter));
22}
23
24static __inline__ void local_dec(local_t *v)
25{
26 __asm__ __volatile__(
27 "decl %0"
28 :"=m" (v->counter)
29 :"m" (v->counter));
30}
31
32static __inline__ void local_add(unsigned long i, local_t *v)
33{
34 __asm__ __volatile__(
35 "addl %1,%0"
36 :"=m" (v->counter)
37 :"ir" (i), "m" (v->counter));
38}
39
40static __inline__ void local_sub(unsigned long i, local_t *v)
41{
42 __asm__ __volatile__(
43 "subl %1,%0"
44 :"=m" (v->counter)
45 :"ir" (i), "m" (v->counter));
46}
47
48/* On x86, these are no better than the atomic variants. */
49#define __local_inc(l) local_inc(l)
50#define __local_dec(l) local_dec(l)
51#define __local_add(i,l) local_add((i),(l))
52#define __local_sub(i,l) local_sub((i),(l))
53
54/* Use these for per-cpu local_t variables: on some archs they are
55 * much more efficient than these naive implementations. Note they take
56 * a variable, not an address.
57 *
58 * This could be done better if we moved the per cpu data directly
59 * after GS.
60 */
61#define cpu_local_read(v) local_read(&__get_cpu_var(v))
62#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
63#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
64#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
65#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
66#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
67
68#define __cpu_local_inc(v) cpu_local_inc(v)
69#define __cpu_local_dec(v) cpu_local_dec(v)
70#define __cpu_local_add(i, v) cpu_local_add((i), (v))
71#define __cpu_local_sub(i, v) cpu_local_sub((i), (v))
72
73#endif /* _ARCH_X8664_LOCAL_H */
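
A hedged sketch of the per-cpu pattern the comment above describes; the
counter name is made up.

#include <asm/local.h>

static DEFINE_PER_CPU(local_t, pkt_count) = LOCAL_INIT(0);

static inline void count_packet(void)
{
	/* the cpu_local_* helpers take the variable, not its address */
	cpu_local_inc(pkt_count);
}
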
diff --git a/include/asm-x86_64/mach_apic.h b/include/asm-x86_64/mach_apic.h
new file mode 100644
index 000000000000..0acea44c9377
--- /dev/null
+++ b/include/asm-x86_64/mach_apic.h
@@ -0,0 +1,29 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC sub-arch defines.
9 *
10 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
11 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
12 * James Cleverdon.
13 */
14
15#include <asm/genapic.h>
16
17#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
18#define INT_DEST_MODE (genapic->int_dest_mode)
19#define INT_DELIVERY_DEST (genapic->int_delivery_dest)
20#define TARGET_CPUS (genapic->target_cpus())
21#define apic_id_registered (genapic->apic_id_registered)
22#define init_apic_ldr (genapic->init_apic_ldr)
23#define send_IPI_mask (genapic->send_IPI_mask)
24#define send_IPI_allbutself (genapic->send_IPI_allbutself)
25#define send_IPI_all (genapic->send_IPI_all)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define phys_pkg_id (genapic->phys_pkg_id)
28
29#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86_64/mc146818rtc.h b/include/asm-x86_64/mc146818rtc.h
new file mode 100644
index 000000000000..d6e3009430c1
--- /dev/null
+++ b/include/asm-x86_64/mc146818rtc.h
@@ -0,0 +1,29 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef _ASM_MC146818RTC_H
5#define _ASM_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#define RTC_PORT(x) (0x70 + (x))
11#define RTC_ALWAYS_BCD 1 /* RTC operates in BCD mode */
12#endif
13
14/*
15 * The machines supported so far all access the RTC index register via
16 * an ISA port access, but the way to access the data register differs ...
17 */
18#define CMOS_READ(addr) ({ \
19outb_p((addr),RTC_PORT(0)); \
20inb_p(RTC_PORT(1)); \
21})
22#define CMOS_WRITE(val, addr) ({ \
23outb_p((addr),RTC_PORT(0)); \
24outb_p((val),RTC_PORT(1)); \
25})
26
27#define RTC_IRQ 8
28
29#endif /* _ASM_MC146818RTC_H */
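
A hedged sketch using the accessors above to read the seconds register
(index 0 on the MC146818) and convert it from BCD; synchronization with the
chip's update cycle is omitted.

#include <asm/mc146818rtc.h>

static unsigned int rtc_seconds(void)
{
	unsigned char s = CMOS_READ(0x00);	/* seconds register */

	return (s & 0x0f) + ((s >> 4) * 10);	/* BCD -> binary */
}
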
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
new file mode 100644
index 000000000000..869249db6795
--- /dev/null
+++ b/include/asm-x86_64/mce.h
@@ -0,0 +1,80 @@
1#ifndef _ASM_MCE_H
2#define _ASM_MCE_H 1
3
4#include <asm/ioctls.h>
5#include <asm/types.h>
6
7/*
8 * Machine Check support for x86
9 */
10
11#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
12
13#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
14#define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */
15#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
16
17#define MCI_STATUS_VAL (1UL<<63) /* valid error */
18#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
19#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
20#define MCI_STATUS_EN (1UL<<60) /* error enabled */
21#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
22#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
23#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
24
25/* Fields are zero when not available */
26struct mce {
27 __u64 status;
28 __u64 misc;
29 __u64 addr;
30 __u64 mcgstatus;
31 __u64 rip;
32 __u64 tsc; /* cpu time stamp counter */
33 __u64 res1; /* for future extension */
34 __u64 res2; /* ditto. */
35 __u8 cs; /* code segment */
36 __u8 bank; /* machine check bank */
37 __u8 cpu; /* cpu that raised the error */
38 __u8 finished; /* entry is valid */
39 __u32 pad;
40};
41
42/*
43 * This structure contains all data related to the MCE log.
44 * Also carries a signature to make it easier to find from external debugging tools.
45 * Each entry is only valid when its finished flag is set.
46 */
47
48#define MCE_LOG_LEN 32
49
50struct mce_log {
51 char signature[12]; /* "MACHINECHECK" */
52 unsigned len; /* = MCE_LOG_LEN */
53 unsigned next;
54 unsigned flags;
55 unsigned pad0;
56 struct mce entry[MCE_LOG_LEN];
57};
58
59#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
60
61#define MCE_LOG_SIGNATURE "MACHINECHECK"
62
63#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
64#define MCE_GET_LOG_LEN _IOR('M', 2, int)
65#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
66
67/* Software defined banks */
68#define MCE_EXTENDED_BANK 128
69#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
70
71void mce_log(struct mce *m);
72#ifdef CONFIG_X86_MCE_INTEL
73void mce_intel_feature_init(struct cpuinfo_x86 *c);
74#else
75static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
76{
77}
78#endif
79
80#endif
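
User space typically consumes this log through a character device. The
following hedged sketch assumes the conventional /dev/mcelog node and only
queries the record length.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mce.h>

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);	/* assumed device node */
	int reclen = 0;

	if (fd < 0)
		return 1;
	if (ioctl(fd, MCE_GET_RECORD_LEN, &reclen) == 0)
		printf("record length: %d bytes\n", reclen);
	close(fd);
	return 0;
}
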
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
new file mode 100644
index 000000000000..78e60a4fd4ee
--- /dev/null
+++ b/include/asm-x86_64/mman.h
@@ -0,0 +1,44 @@
1#ifndef __X8664_MMAN_H__
2#define __X8664_MMAN_H__
3
4#define PROT_READ 0x1 /* page can be read */
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_NONE 0x0 /* page can not be accessed */
8#define PROT_SEM 0x8
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17#define MAP_32BIT 0x40 /* only give out 32bit addresses */
18
19#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
20#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
21#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
22#define MAP_LOCKED 0x2000 /* pages are locked */
23#define MAP_NORESERVE 0x4000 /* don't check for reservations */
24#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
25#define MAP_NONBLOCK 0x10000 /* do not block on IO */
26
27#define MS_ASYNC 1 /* sync memory asynchronously */
28#define MS_INVALIDATE 2 /* invalidate the caches */
29#define MS_SYNC 4 /* synchronous memory sync */
30
31#define MCL_CURRENT 1 /* lock all current mappings */
32#define MCL_FUTURE 2 /* lock all future mappings */
33
34#define MADV_NORMAL 0x0 /* default page-in behavior */
35#define MADV_RANDOM 0x1 /* page-in minimum required */
36#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
37#define MADV_WILLNEED 0x3 /* pre-fault pages */
38#define MADV_DONTNEED 0x4 /* discard these pages */
39
40/* compatibility flags */
41#define MAP_ANON MAP_ANONYMOUS
42#define MAP_FILE 0
43
44#endif
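
MAP_32BIT is the one x86-64-specific flag in this file. A hedged user-space
sketch requesting an anonymous mapping below 4GB:

#define _GNU_SOURCE	/* for MAP_32BIT in the libc headers */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	printf("mapped at %p\n", p);	/* address fits in 32 bits */
	munmap(p, 4096);
	return 0;
}
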
diff --git a/include/asm-x86_64/mmsegment.h b/include/asm-x86_64/mmsegment.h
new file mode 100644
index 000000000000..d3f80c996330
--- /dev/null
+++ b/include/asm-x86_64/mmsegment.h
@@ -0,0 +1,8 @@
1#ifndef _ASM_MMSEGMENT_H
2#define _ASM_MMSEGMENT_H 1
3
4typedef struct {
5 unsigned long seg;
6} mm_segment_t;
7
8#endif
diff --git a/include/asm-x86_64/mmu.h b/include/asm-x86_64/mmu.h
new file mode 100644
index 000000000000..5dc6ed79859a
--- /dev/null
+++ b/include/asm-x86_64/mmu.h
@@ -0,0 +1,20 @@
1#ifndef __x86_64_MMU_H
2#define __x86_64_MMU_H
3
4#include <linux/spinlock.h>
5#include <asm/semaphore.h>
6
7/*
8 * The x86_64 doesn't have an mmu context, but
9 * we put the segment information here.
10 *
11 * cpu_vm_mask is used to optimize ldt flushing.
12 */
13typedef struct {
14 void *ldt;
15 rwlock_t ldtlock;
16 int size;
17 struct semaphore sem;
18} mm_context_t;
19
20#endif
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
new file mode 100644
index 000000000000..b630d52bdfb1
--- /dev/null
+++ b/include/asm-x86_64/mmu_context.h
@@ -0,0 +1,79 @@
1#ifndef __X86_64_MMU_CONTEXT_H
2#define __X86_64_MMU_CONTEXT_H
3
4#include <linux/config.h>
5#include <asm/desc.h>
6#include <asm/atomic.h>
7#include <asm/pgalloc.h>
8#include <asm/pda.h>
9#include <asm/pgtable.h>
10#include <asm/tlbflush.h>
11
12/*
13 * possibly do the LDT unload here?
14 */
15int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
16void destroy_context(struct mm_struct *mm);
17
18#ifdef CONFIG_SMP
19
20static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
21{
22 if (read_pda(mmu_state) == TLBSTATE_OK)
23 write_pda(mmu_state, TLBSTATE_LAZY);
24}
25#else
26static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
27{
28}
29#endif
30
31static inline void load_cr3(pgd_t *pgd)
32{
33 asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
34}
35
36static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
37 struct task_struct *tsk)
38{
39 unsigned cpu = smp_processor_id();
40 if (likely(prev != next)) {
41 /* stop flush ipis for the previous mm */
42 clear_bit(cpu, &prev->cpu_vm_mask);
43#ifdef CONFIG_SMP
44 write_pda(mmu_state, TLBSTATE_OK);
45 write_pda(active_mm, next);
46#endif
47 set_bit(cpu, &next->cpu_vm_mask);
48 load_cr3(next->pgd);
49
50 if (unlikely(next->context.ldt != prev->context.ldt))
51 load_LDT_nolock(&next->context, cpu);
52 }
53#ifdef CONFIG_SMP
54 else {
55 write_pda(mmu_state, TLBSTATE_OK);
56 if (read_pda(active_mm) != next)
57 out_of_line_bug();
58 if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
59 /* We were in lazy tlb mode and leave_mm disabled
60 * tlb flush IPI delivery. We must reload CR3
61 * to make sure we are not using freed page tables.
62 */
63 load_cr3(next->pgd);
64 load_LDT_nolock(&next->context, cpu);
65 }
66 }
67#endif
68}
69
70#define deactivate_mm(tsk,mm) do { \
71 load_gs_index(0); \
72 asm volatile("movl %0,%%fs"::"r"(0)); \
73} while(0)
74
75#define activate_mm(prev, next) \
76 switch_mm((prev),(next),NULL)
77
78
79#endif
diff --git a/include/asm-x86_64/mmx.h b/include/asm-x86_64/mmx.h
new file mode 100644
index 000000000000..46b71da99869
--- /dev/null
+++ b/include/asm-x86_64/mmx.h
@@ -0,0 +1,14 @@
1#ifndef _ASM_MMX_H
2#define _ASM_MMX_H
3
4/*
5 * MMX 3Dnow! helper operations
6 */
7
8#include <linux/types.h>
9
10extern void *_mmx_memcpy(void *to, const void *from, size_t size);
11extern void mmx_clear_page(void *page);
12extern void mmx_copy_page(void *to, void *from);
13
14#endif
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
new file mode 100644
index 000000000000..d95b7c240831
--- /dev/null
+++ b/include/asm-x86_64/mmzone.h
@@ -0,0 +1,63 @@
1/* K8 NUMA support */
2/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
3/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
4#ifndef _ASM_X86_64_MMZONE_H
5#define _ASM_X86_64_MMZONE_H 1
6
7#include <linux/config.h>
8
9#ifdef CONFIG_DISCONTIGMEM
10
11#define VIRTUAL_BUG_ON(x)
12
13#include <asm/smp.h>
14
15#define NODEMAPSIZE 0xff
16
17/* Simple perfect hash to map physical addresses to node numbers */
18extern int memnode_shift;
19extern u8 memnodemap[NODEMAPSIZE];
20extern int maxnode;
21
22extern struct pglist_data *node_data[];
23
24static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
25{
26 int nid;
27 VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE);
28 nid = memnodemap[addr >> memnode_shift];
29 VIRTUAL_BUG_ON(nid > maxnode);
30 return nid;
31}
32
33#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
34
35#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
36#define NODE_DATA(nid) (node_data[nid])
37
38#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
39
41#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
42#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
43 NODE_DATA(nid)->node_spanned_pages)
44
45#define local_mapnr(kvaddr) \
46 ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
47
48/* AK: this currently doesn't deal with invalid addresses. We'll see
49   whether the 2.5 kernel avoids passing them
50   (2.4 used to pass them). */
51#define pfn_to_page(pfn) ({ \
52 int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \
53 ((pfn) - node_start_pfn(nid)) + node_mem_map(nid); \
54})
55
56#define page_to_pfn(page) \
57 (long)(((page) - page_zone(page)->zone_mem_map) + page_zone(page)->zone_start_pfn)
58
59#define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \
60 ({ u8 nid__ = pfn_to_nid(pfn); \
61 nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); }))
62#endif
63#endif
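
As a worked example of the hash above, assuming a hypothetical
memnode_shift of 24 (one map slot per 16MB): physical address 0x42000000
indexes slot 0x42, so

	phys_to_nid(0x42000000UL) == memnodemap[0x42000000UL >> 24]

and the byte stored there is the owning node.
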
diff --git a/include/asm-x86_64/module.h b/include/asm-x86_64/module.h
new file mode 100644
index 000000000000..67f8f69fa7b1
--- /dev/null
+++ b/include/asm-x86_64/module.h
@@ -0,0 +1,10 @@
1#ifndef _ASM_X8664_MODULE_H
2#define _ASM_X8664_MODULE_H
3
4struct mod_arch_specific {};
5
6#define Elf_Shdr Elf64_Shdr
7#define Elf_Sym Elf64_Sym
8#define Elf_Ehdr Elf64_Ehdr
9
10#endif
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
new file mode 100644
index 000000000000..331f6a3c72a2
--- /dev/null
+++ b/include/asm-x86_64/mpspec.h
@@ -0,0 +1,241 @@
1#ifndef __ASM_MPSPEC_H
2#define __ASM_MPSPEC_H
3
4/*
5 * Structure definitions for SMP machines following the
6 * Intel Multiprocessing Specification 1.1 and 1.4.
7 */
8
9/*
10 * This tag identifies where the SMP configuration
11 * information is.
12 */
13
14#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
15
16/*
17 * A maximum of 255 APICs with the current APIC ID architecture.
18 */
19#define MAX_APICS 128
20
21struct intel_mp_floating
22{
23 char mpf_signature[4]; /* "_MP_" */
24 unsigned int mpf_physptr; /* Configuration table address */
25 unsigned char mpf_length; /* Our length (paragraphs) */
26 unsigned char mpf_specification;/* Specification version */
27 unsigned char mpf_checksum; /* Checksum (makes sum 0) */
28 unsigned char mpf_feature1; /* Standard or configuration ? */
29 unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
30 unsigned char mpf_feature3; /* Unused (0) */
31 unsigned char mpf_feature4; /* Unused (0) */
32 unsigned char mpf_feature5; /* Unused (0) */
33};
34
35struct mp_config_table
36{
37 char mpc_signature[4];
38#define MPC_SIGNATURE "PCMP"
39 unsigned short mpc_length; /* Size of table */
40 char mpc_spec; /* 0x01 */
41 char mpc_checksum;
42 char mpc_oem[8];
43 char mpc_productid[12];
44 unsigned int mpc_oemptr; /* 0 if not present */
45 unsigned short mpc_oemsize; /* 0 if not present */
46 unsigned short mpc_oemcount;
47 unsigned int mpc_lapic; /* APIC address */
48 unsigned int reserved;
49};
50
51/* Followed by entries */
52
53#define MP_PROCESSOR 0
54#define MP_BUS 1
55#define MP_IOAPIC 2
56#define MP_INTSRC 3
57#define MP_LINTSRC 4
58
59struct mpc_config_processor
60{
61 unsigned char mpc_type;
62 unsigned char mpc_apicid; /* Local APIC number */
63 unsigned char mpc_apicver; /* Its version */
64 unsigned char mpc_cpuflag;
65#define CPU_ENABLED 1 /* Processor is available */
66#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
67 unsigned int mpc_cpufeature;
68#define CPU_STEPPING_MASK 0x0F
69#define CPU_MODEL_MASK 0xF0
70#define CPU_FAMILY_MASK 0xF00
71 unsigned int mpc_featureflag; /* CPUID feature value */
72 unsigned int mpc_reserved[2];
73};
74
75struct mpc_config_bus
76{
77 unsigned char mpc_type;
78 unsigned char mpc_busid;
79 unsigned char mpc_bustype[6] __attribute__((packed));
80};
81
82/* List of Bus Type string values, Intel MP Spec. */
83#define BUSTYPE_EISA "EISA"
84#define BUSTYPE_ISA "ISA"
85#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
86#define BUSTYPE_MCA "MCA"
87#define BUSTYPE_VL "VL" /* Local bus */
88#define BUSTYPE_PCI "PCI"
89#define BUSTYPE_PCMCIA "PCMCIA"
90#define BUSTYPE_CBUS "CBUS"
91#define BUSTYPE_CBUSII "CBUSII"
92#define BUSTYPE_FUTURE "FUTURE"
93#define BUSTYPE_MBI "MBI"
94#define BUSTYPE_MBII "MBII"
95#define BUSTYPE_MPI "MPI"
96#define BUSTYPE_MPSA "MPSA"
97#define BUSTYPE_NUBUS "NUBUS"
98#define BUSTYPE_TC "TC"
99#define BUSTYPE_VME "VME"
100#define BUSTYPE_XPRESS "XPRESS"
101
102struct mpc_config_ioapic
103{
104 unsigned char mpc_type;
105 unsigned char mpc_apicid;
106 unsigned char mpc_apicver;
107 unsigned char mpc_flags;
108#define MPC_APIC_USABLE 0x01
109 unsigned int mpc_apicaddr;
110};
111
112struct mpc_config_intsrc
113{
114 unsigned char mpc_type;
115 unsigned char mpc_irqtype;
116 unsigned short mpc_irqflag;
117 unsigned char mpc_srcbus;
118 unsigned char mpc_srcbusirq;
119 unsigned char mpc_dstapic;
120 unsigned char mpc_dstirq;
121};
122
123enum mp_irq_source_types {
124 mp_INT = 0,
125 mp_NMI = 1,
126 mp_SMI = 2,
127 mp_ExtINT = 3
128};
129
130#define MP_IRQDIR_DEFAULT 0
131#define MP_IRQDIR_HIGH 1
132#define MP_IRQDIR_LOW 3
133
134
135struct mpc_config_lintsrc
136{
137 unsigned char mpc_type;
138 unsigned char mpc_irqtype;
139 unsigned short mpc_irqflag;
140 unsigned char mpc_srcbusid;
141 unsigned char mpc_srcbusirq;
142 unsigned char mpc_destapic;
143#define MP_APIC_ALL 0xFF
144 unsigned char mpc_destapiclint;
145};
146
147/*
148 * Default configurations
149 *
150 * 1 2 CPU ISA 82489DX
151 * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
152 * 3 2 CPU EISA 82489DX
153 * 4 2 CPU MCA 82489DX
154 * 5 2 CPU ISA+PCI
155 * 6 2 CPU EISA+PCI
156 * 7 2 CPU MCA+PCI
157 */
158
159#define MAX_MP_BUSSES 256
160#define MAX_IRQ_SOURCES 256
161enum mp_bustype {
162 MP_BUS_ISA = 1,
163 MP_BUS_EISA,
164 MP_BUS_PCI,
165 MP_BUS_MCA
166};
167extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
168extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
169
170extern unsigned int boot_cpu_physical_apicid;
171extern int smp_found_config;
172extern void find_smp_config (void);
173extern void get_smp_config (void);
174extern int nr_ioapics;
175extern int apic_version [MAX_APICS];
176extern int mp_irq_entries;
177extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
178extern int mpc_default_type;
179extern unsigned long mp_lapic_addr;
180extern int pic_mode;
181
182#ifdef CONFIG_ACPI_BOOT
183extern void mp_register_lapic (u8 id, u8 enabled);
184extern void mp_register_lapic_address (u64 address);
185
186#ifdef CONFIG_X86_IO_APIC
187extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
188extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
189extern void mp_config_acpi_legacy_irqs (void);
190extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
191#endif /*CONFIG_X86_IO_APIC*/
192#endif
193
194extern int using_apic_timer;
195
196#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
197
198struct physid_mask
199{
200 unsigned long mask[PHYSID_ARRAY_SIZE];
201};
202
203typedef struct physid_mask physid_mask_t;
204
205#define physid_set(physid, map) set_bit(physid, (map).mask)
206#define physid_clear(physid, map) clear_bit(physid, (map).mask)
207#define physid_isset(physid, map) test_bit(physid, (map).mask)
208#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
209
210#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
211#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
212#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
213#define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS)
214#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
215#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
216#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
217#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
218#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
219#define physids_coerce(map) ((map).mask[0])
220
221#define physids_promote(physids) \
222 ({ \
223 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
224 __physid_mask.mask[0] = physids; \
225 __physid_mask; \
226 })
227
228#define physid_mask_of_physid(physid) \
229 ({ \
230 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
231 physid_set(physid, __physid_mask); \
232 __physid_mask; \
233 })
234
235#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
236#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
237
238extern physid_mask_t phys_cpu_present_map;
239
240#endif
241
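
A hedged sketch of the physid_mask helpers above; the APIC IDs are
illustrative.

/* Hedged sketch: mark two APIC IDs present and count them. */
static int count_example_apics(void)
{
	physid_mask_t present = PHYSID_MASK_NONE;

	physid_set(3, present);
	physid_set(5, present);
	return physids_weight(present);		/* == 2 */
}
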
diff --git a/include/asm-x86_64/msgbuf.h b/include/asm-x86_64/msgbuf.h
new file mode 100644
index 000000000000..cd6f95dd54da
--- /dev/null
+++ b/include/asm-x86_64/msgbuf.h
@@ -0,0 +1,27 @@
1#ifndef _X8664_MSGBUF_H
2#define _X8664_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for x86-64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct msqid64_ds {
14 struct ipc64_perm msg_perm;
15 __kernel_time_t msg_stime; /* last msgsnd time */
16 __kernel_time_t msg_rtime; /* last msgrcv time */
17 __kernel_time_t msg_ctime; /* last change time */
18 unsigned long msg_cbytes; /* current number of bytes on queue */
19 unsigned long msg_qnum; /* number of messages in queue */
20 unsigned long msg_qbytes; /* max number of bytes on queue */
21 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
22 __kernel_pid_t msg_lrpid; /* last receive pid */
23 unsigned long __unused4;
24 unsigned long __unused5;
25};
26
27#endif
diff --git a/include/asm-x86_64/msi.h b/include/asm-x86_64/msi.h
new file mode 100644
index 000000000000..85c427e472bf
--- /dev/null
+++ b/include/asm-x86_64/msi.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2003-2004 Intel
3 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
4 */
5
6#ifndef ASM_MSI_H
7#define ASM_MSI_H
8
9#include <asm/desc.h>
10#include <asm/mach_apic.h>
11#include <asm/smp.h>
12
13#define LAST_DEVICE_VECTOR 232
14#define MSI_DEST_MODE MSI_LOGICAL_MODE
15#define MSI_TARGET_CPU_SHIFT 12
16#define MSI_TARGET_CPU logical_smp_processor_id()
17
18#endif /* ASM_MSI_H */
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
new file mode 100644
index 000000000000..66f0be191ab4
--- /dev/null
+++ b/include/asm-x86_64/msr.h
@@ -0,0 +1,387 @@
1#ifndef X86_64_MSR_H
2#define X86_64_MSR_H 1
3
4#ifndef __ASSEMBLY__
5/*
6 * Access to machine-specific registers (available on 586 and better only)
7 * Note: the rd* operations modify the parameters directly (without using
8 * pointer indirection); this allows gcc to optimize better
9 */
10
11#define rdmsr(msr,val1,val2) \
12 __asm__ __volatile__("rdmsr" \
13 : "=a" (val1), "=d" (val2) \
14 : "c" (msr))
15
16
17#define rdmsrl(msr,val) do { unsigned long a__,b__; \
18 __asm__ __volatile__("rdmsr" \
19 : "=a" (a__), "=d" (b__) \
20 : "c" (msr)); \
21 val = a__ | (b__<<32); \
22} while(0)
23
24#define wrmsr(msr,val1,val2) \
25 __asm__ __volatile__("wrmsr" \
26 : /* no outputs */ \
27 : "c" (msr), "a" (val1), "d" (val2))
28
29#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
30
31/* wrmsr with exception handling */
32#define wrmsr_safe(msr,a,b) ({ int ret__; \
33 asm volatile("2: wrmsr ; xorl %0,%0\n" \
34 "1:\n\t" \
35 ".section .fixup,\"ax\"\n\t" \
36 "3: movl %4,%0 ; jmp 1b\n\t" \
37 ".previous\n\t" \
38 ".section __ex_table,\"a\"\n" \
39 " .align 8\n\t" \
40 " .quad 2b,3b\n\t" \
41 ".previous" \
42 : "=a" (ret__) \
43 : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
44 ret__; })
45
46#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
47
48#define rdtsc(low,high) \
49 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
50
51#define rdtscl(low) \
52 __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
53
54#define rdtscll(val) do { \
55 unsigned int __a,__d; \
56 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
57 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
58} while(0)
59
60#define rdpmc(counter,low,high) \
61 __asm__ __volatile__("rdpmc" \
62 : "=a" (low), "=d" (high) \
63 : "c" (counter))
64
65#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
66
72extern inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
73 unsigned int *ecx, unsigned int *edx)
74{
75 __asm__("cpuid"
76 : "=a" (*eax),
77 "=b" (*ebx),
78 "=c" (*ecx),
79 "=d" (*edx)
80 : "0" (op));
81}
82
83/* Some CPUID calls want 'count' to be placed in ecx */
84static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
85 int *edx)
86{
87 __asm__("cpuid"
88 : "=a" (*eax),
89 "=b" (*ebx),
90 "=c" (*ecx),
91 "=d" (*edx)
92 : "0" (op), "c" (count));
93}
94
95/*
96 * CPUID functions returning a single datum
97 */
98extern inline unsigned int cpuid_eax(unsigned int op)
99{
100 unsigned int eax;
101
102 __asm__("cpuid"
103 : "=a" (eax)
104 : "0" (op)
105 : "bx", "cx", "dx");
106 return eax;
107}
108extern inline unsigned int cpuid_ebx(unsigned int op)
109{
110 unsigned int eax, ebx;
111
112 __asm__("cpuid"
113 : "=a" (eax), "=b" (ebx)
114 : "0" (op)
115 : "cx", "dx" );
116 return ebx;
117}
118extern inline unsigned int cpuid_ecx(unsigned int op)
119{
120 unsigned int eax, ecx;
121
122 __asm__("cpuid"
123 : "=a" (eax), "=c" (ecx)
124 : "0" (op)
125 : "bx", "dx" );
126 return ecx;
127}
128extern inline unsigned int cpuid_edx(unsigned int op)
129{
130 unsigned int eax, edx;
131
132 __asm__("cpuid"
133 : "=a" (eax), "=d" (edx)
134 : "0" (op)
135 : "bx", "cx");
136 return edx;
137}
138
139#define MSR_IA32_UCODE_WRITE 0x79
140#define MSR_IA32_UCODE_REV 0x8b
141
142
143#endif
144
145/* AMD/K8 specific MSRs */
146#define MSR_EFER 0xc0000080 /* extended feature register */
147#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
148#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
149#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
150#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
151#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
152#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
153#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
154/* EFER bits: */
155#define _EFER_SCE 0 /* SYSCALL/SYSRET */
156#define _EFER_LME 8 /* Long mode enable */
157#define _EFER_LMA 10 /* Long mode active (read-only) */
158#define _EFER_NX 11 /* No execute enable */
159
160#define EFER_SCE (1<<_EFER_SCE)
161#define EFER_LME (1<<_EFER_LME)
162#define EFER_LMA (1<<_EFER_LMA)
163#define EFER_NX (1<<_EFER_NX)
164
165/* Intel MSRs. Some also available on other CPUs */
166#define MSR_IA32_PLATFORM_ID 0x17
167
168#define MSR_IA32_PERFCTR0 0xc1
169#define MSR_IA32_PERFCTR1 0xc2
170
171#define MSR_MTRRcap 0x0fe
172#define MSR_IA32_BBL_CR_CTL 0x119
173
174#define MSR_IA32_SYSENTER_CS 0x174
175#define MSR_IA32_SYSENTER_ESP 0x175
176#define MSR_IA32_SYSENTER_EIP 0x176
177
178#define MSR_IA32_MCG_CAP 0x179
179#define MSR_IA32_MCG_STATUS 0x17a
180#define MSR_IA32_MCG_CTL 0x17b
181
182#define MSR_IA32_EVNTSEL0 0x186
183#define MSR_IA32_EVNTSEL1 0x187
184
185#define MSR_IA32_DEBUGCTLMSR 0x1d9
186#define MSR_IA32_LASTBRANCHFROMIP 0x1db
187#define MSR_IA32_LASTBRANCHTOIP 0x1dc
188#define MSR_IA32_LASTINTFROMIP 0x1dd
189#define MSR_IA32_LASTINTTOIP 0x1de
190
191#define MSR_MTRRfix64K_00000 0x250
192#define MSR_MTRRfix16K_80000 0x258
193#define MSR_MTRRfix16K_A0000 0x259
194#define MSR_MTRRfix4K_C0000 0x268
195#define MSR_MTRRfix4K_C8000 0x269
196#define MSR_MTRRfix4K_D0000 0x26a
197#define MSR_MTRRfix4K_D8000 0x26b
198#define MSR_MTRRfix4K_E0000 0x26c
199#define MSR_MTRRfix4K_E8000 0x26d
200#define MSR_MTRRfix4K_F0000 0x26e
201#define MSR_MTRRfix4K_F8000 0x26f
202#define MSR_MTRRdefType 0x2ff
203
204#define MSR_IA32_MC0_CTL 0x400
205#define MSR_IA32_MC0_STATUS 0x401
206#define MSR_IA32_MC0_ADDR 0x402
207#define MSR_IA32_MC0_MISC 0x403
208
209#define MSR_P6_PERFCTR0 0xc1
210#define MSR_P6_PERFCTR1 0xc2
211#define MSR_P6_EVNTSEL0 0x186
212#define MSR_P6_EVNTSEL1 0x187
213
214/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
215#define MSR_K7_EVNTSEL0 0xC0010000
216#define MSR_K7_PERFCTR0 0xC0010004
217#define MSR_K7_EVNTSEL1 0xC0010001
218#define MSR_K7_PERFCTR1 0xC0010005
219#define MSR_K7_EVNTSEL2 0xC0010002
220#define MSR_K7_PERFCTR2 0xC0010006
221#define MSR_K7_EVNTSEL3 0xC0010003
222#define MSR_K7_PERFCTR3 0xC0010007
223#define MSR_K8_TOP_MEM1 0xC001001A
224#define MSR_K8_TOP_MEM2 0xC001001D
225#define MSR_K8_SYSCFG 0xC0000010
226
227/* K6 MSRs */
228#define MSR_K6_EFER 0xC0000080
229#define MSR_K6_STAR 0xC0000081
230#define MSR_K6_WHCR 0xC0000082
231#define MSR_K6_UWCCR 0xC0000085
232#define MSR_K6_PSOR 0xC0000087
233#define MSR_K6_PFIR 0xC0000088
234
235/* Centaur-Hauls/IDT defined MSRs. */
236#define MSR_IDT_FCR1 0x107
237#define MSR_IDT_FCR2 0x108
238#define MSR_IDT_FCR3 0x109
239#define MSR_IDT_FCR4 0x10a
240
241#define MSR_IDT_MCR0 0x110
242#define MSR_IDT_MCR1 0x111
243#define MSR_IDT_MCR2 0x112
244#define MSR_IDT_MCR3 0x113
245#define MSR_IDT_MCR4 0x114
246#define MSR_IDT_MCR5 0x115
247#define MSR_IDT_MCR6 0x116
248#define MSR_IDT_MCR7 0x117
249#define MSR_IDT_MCR_CTRL 0x120
250
251/* VIA Cyrix defined MSRs*/
252#define MSR_VIA_FCR 0x1107
253#define MSR_VIA_LONGHAUL 0x110a
254#define MSR_VIA_RNG 0x110b
255#define MSR_VIA_BCR2 0x1147
256
257/* Intel defined MSRs. */
258#define MSR_IA32_P5_MC_ADDR 0
259#define MSR_IA32_P5_MC_TYPE 1
260#define MSR_IA32_PLATFORM_ID 0x17
261#define MSR_IA32_EBL_CR_POWERON 0x2a
262
263#define MSR_IA32_APICBASE 0x1b
264#define MSR_IA32_APICBASE_BSP (1<<8)
265#define MSR_IA32_APICBASE_ENABLE (1<<11)
266#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
267
268/* P4/Xeon+ specific */
269#define MSR_IA32_MCG_EAX 0x180
270#define MSR_IA32_MCG_EBX 0x181
271#define MSR_IA32_MCG_ECX 0x182
272#define MSR_IA32_MCG_EDX 0x183
273#define MSR_IA32_MCG_ESI 0x184
274#define MSR_IA32_MCG_EDI 0x185
275#define MSR_IA32_MCG_EBP 0x186
276#define MSR_IA32_MCG_ESP 0x187
277#define MSR_IA32_MCG_EFLAGS 0x188
278#define MSR_IA32_MCG_EIP 0x189
279#define MSR_IA32_MCG_RESERVED 0x18A
280
281#define MSR_P6_EVNTSEL0 0x186
282#define MSR_P6_EVNTSEL1 0x187
283
284#define MSR_IA32_PERF_STATUS 0x198
285#define MSR_IA32_PERF_CTL 0x199
286
287#define MSR_IA32_THERM_CONTROL 0x19a
288#define MSR_IA32_THERM_INTERRUPT 0x19b
289#define MSR_IA32_THERM_STATUS 0x19c
290#define MSR_IA32_MISC_ENABLE 0x1a0
291
292#define MSR_IA32_DEBUGCTLMSR 0x1d9
293#define MSR_IA32_LASTBRANCHFROMIP 0x1db
294#define MSR_IA32_LASTBRANCHTOIP 0x1dc
295#define MSR_IA32_LASTINTFROMIP 0x1dd
296#define MSR_IA32_LASTINTTOIP 0x1de
297
298#define MSR_IA32_MC0_CTL 0x400
299#define MSR_IA32_MC0_STATUS 0x401
300#define MSR_IA32_MC0_ADDR 0x402
301#define MSR_IA32_MC0_MISC 0x403
302
303/* Pentium IV performance counter MSRs */
304#define MSR_P4_BPU_PERFCTR0 0x300
305#define MSR_P4_BPU_PERFCTR1 0x301
306#define MSR_P4_BPU_PERFCTR2 0x302
307#define MSR_P4_BPU_PERFCTR3 0x303
308#define MSR_P4_MS_PERFCTR0 0x304
309#define MSR_P4_MS_PERFCTR1 0x305
310#define MSR_P4_MS_PERFCTR2 0x306
311#define MSR_P4_MS_PERFCTR3 0x307
312#define MSR_P4_FLAME_PERFCTR0 0x308
313#define MSR_P4_FLAME_PERFCTR1 0x309
314#define MSR_P4_FLAME_PERFCTR2 0x30a
315#define MSR_P4_FLAME_PERFCTR3 0x30b
316#define MSR_P4_IQ_PERFCTR0 0x30c
317#define MSR_P4_IQ_PERFCTR1 0x30d
318#define MSR_P4_IQ_PERFCTR2 0x30e
319#define MSR_P4_IQ_PERFCTR3 0x30f
320#define MSR_P4_IQ_PERFCTR4 0x310
321#define MSR_P4_IQ_PERFCTR5 0x311
322#define MSR_P4_BPU_CCCR0 0x360
323#define MSR_P4_BPU_CCCR1 0x361
324#define MSR_P4_BPU_CCCR2 0x362
325#define MSR_P4_BPU_CCCR3 0x363
326#define MSR_P4_MS_CCCR0 0x364
327#define MSR_P4_MS_CCCR1 0x365
328#define MSR_P4_MS_CCCR2 0x366
329#define MSR_P4_MS_CCCR3 0x367
330#define MSR_P4_FLAME_CCCR0 0x368
331#define MSR_P4_FLAME_CCCR1 0x369
332#define MSR_P4_FLAME_CCCR2 0x36a
333#define MSR_P4_FLAME_CCCR3 0x36b
334#define MSR_P4_IQ_CCCR0 0x36c
335#define MSR_P4_IQ_CCCR1 0x36d
336#define MSR_P4_IQ_CCCR2 0x36e
337#define MSR_P4_IQ_CCCR3 0x36f
338#define MSR_P4_IQ_CCCR4 0x370
339#define MSR_P4_IQ_CCCR5 0x371
340#define MSR_P4_ALF_ESCR0 0x3ca
341#define MSR_P4_ALF_ESCR1 0x3cb
342#define MSR_P4_BPU_ESCR0 0x3b2
343#define MSR_P4_BPU_ESCR1 0x3b3
344#define MSR_P4_BSU_ESCR0 0x3a0
345#define MSR_P4_BSU_ESCR1 0x3a1
346#define MSR_P4_CRU_ESCR0 0x3b8
347#define MSR_P4_CRU_ESCR1 0x3b9
348#define MSR_P4_CRU_ESCR2 0x3cc
349#define MSR_P4_CRU_ESCR3 0x3cd
350#define MSR_P4_CRU_ESCR4 0x3e0
351#define MSR_P4_CRU_ESCR5 0x3e1
352#define MSR_P4_DAC_ESCR0 0x3a8
353#define MSR_P4_DAC_ESCR1 0x3a9
354#define MSR_P4_FIRM_ESCR0 0x3a4
355#define MSR_P4_FIRM_ESCR1 0x3a5
356#define MSR_P4_FLAME_ESCR0 0x3a6
357#define MSR_P4_FLAME_ESCR1 0x3a7
358#define MSR_P4_FSB_ESCR0 0x3a2
359#define MSR_P4_FSB_ESCR1 0x3a3
360#define MSR_P4_IQ_ESCR0 0x3ba
361#define MSR_P4_IQ_ESCR1 0x3bb
362#define MSR_P4_IS_ESCR0 0x3b4
363#define MSR_P4_IS_ESCR1 0x3b5
364#define MSR_P4_ITLB_ESCR0 0x3b6
365#define MSR_P4_ITLB_ESCR1 0x3b7
366#define MSR_P4_IX_ESCR0 0x3c8
367#define MSR_P4_IX_ESCR1 0x3c9
368#define MSR_P4_MOB_ESCR0 0x3aa
369#define MSR_P4_MOB_ESCR1 0x3ab
370#define MSR_P4_MS_ESCR0 0x3c0
371#define MSR_P4_MS_ESCR1 0x3c1
372#define MSR_P4_PMH_ESCR0 0x3ac
373#define MSR_P4_PMH_ESCR1 0x3ad
374#define MSR_P4_RAT_ESCR0 0x3bc
375#define MSR_P4_RAT_ESCR1 0x3bd
376#define MSR_P4_SAAT_ESCR0 0x3ae
377#define MSR_P4_SAAT_ESCR1 0x3af
378#define MSR_P4_SSU_ESCR0 0x3be
379#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
380#define MSR_P4_TBPU_ESCR0 0x3c2
381#define MSR_P4_TBPU_ESCR1 0x3c3
382#define MSR_P4_TC_ESCR0 0x3c4
383#define MSR_P4_TC_ESCR1 0x3c5
384#define MSR_P4_U2L_ESCR0 0x3b0
385#define MSR_P4_U2L_ESCR1 0x3b1
386
387#endif
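
A hedged sketch of the 64bit accessors above; ring 0 only, and the check is
illustrative.

/* Hedged sketch: read EFER and test the NX bit. */
static int nx_is_enabled(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	return (efer & EFER_NX) != 0;
}
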
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
new file mode 100644
index 000000000000..c5959d6418bb
--- /dev/null
+++ b/include/asm-x86_64/mtrr.h
@@ -0,0 +1,108 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef _LINUX_MTRR_H
24#define _LINUX_MTRR_H
25
26#include <linux/config.h>
27#include <linux/ioctl.h>
28
29#define MTRR_IOCTL_BASE 'M'
30
31struct mtrr_sentry
32{
33 unsigned long base; /* Base address */
34 unsigned int size; /* Size of region */
35 unsigned int type; /* Type of region */
36};
37
38/* Warning: on x86-64 this structure has a different field order than
39   on i386. The 32bit emulation code takes care of that,
40   but you need to use this layout for 64bit, otherwise your X server
41   will break. */
42struct mtrr_gentry
43{
44 unsigned long base; /* Base address */
45 unsigned int size; /* Size of region */
46 unsigned int regnum; /* Register number */
47 unsigned int type; /* Type of region */
48};
49
50/* These are the various ioctls */
51#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
52#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
53#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
54#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
55#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
56#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
57#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
58#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
59#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
60#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
61
62/* These are the region types */
63#define MTRR_TYPE_UNCACHABLE 0
64#define MTRR_TYPE_WRCOMB 1
65/*#define MTRR_TYPE_ 2*/
66/*#define MTRR_TYPE_ 3*/
67#define MTRR_TYPE_WRTHROUGH 4
68#define MTRR_TYPE_WRPROT 5
69#define MTRR_TYPE_WRBACK 6
70#define MTRR_NUM_TYPES 7
71
72#ifdef __KERNEL__
73
74/* The following functions are for use by other drivers */
75# ifdef CONFIG_MTRR
76extern int mtrr_add (unsigned long base, unsigned long size,
77 unsigned int type, char increment);
78extern int mtrr_add_page (unsigned long base, unsigned long size,
79 unsigned int type, char increment);
80extern int mtrr_del (int reg, unsigned long base, unsigned long size);
81extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
82# else
83static __inline__ int mtrr_add (unsigned long base, unsigned long size,
84 unsigned int type, char increment)
85{
86 return -ENODEV;
87}
88static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
89 unsigned int type, char increment)
90{
91 return -ENODEV;
92}
93static __inline__ int mtrr_del (int reg, unsigned long base,
94 unsigned long size)
95{
96 return -ENODEV;
97}
98static __inline__ int mtrr_del_page (int reg, unsigned long base,
99 unsigned long size)
100{
101 return -ENODEV;
102}
103
104# endif
105
106#endif
107
108#endif /* _LINUX_MTRR_H */
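
User space drives these ioctls through a control file. A hedged sketch,
assuming the conventional /proc/mtrr interface, that requests
write-combining for a hypothetical frame buffer:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mtrr.h>

int main(void)
{
	struct mtrr_sentry s;
	int fd = open("/proc/mtrr", O_WRONLY);	/* assumed interface */

	if (fd < 0)
		return 1;
	s.base = 0xf0000000UL;		/* hypothetical fb base */
	s.size = 0x400000;		/* 4MB */
	s.type = MTRR_TYPE_WRCOMB;
	if (ioctl(fd, MTRRIOC_ADD_ENTRY, &s) < 0)
		perror("MTRRIOC_ADD_ENTRY");
	close(fd);
	return 0;
}
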
diff --git a/include/asm-x86_64/namei.h b/include/asm-x86_64/namei.h
new file mode 100644
index 000000000000..bef239f5318f
--- /dev/null
+++ b/include/asm-x86_64/namei.h
@@ -0,0 +1,11 @@
1#ifndef __X8664_NAMEI_H
2#define __X8664_NAMEI_H
3
4/* This dummy routine may be changed to something useful
5 * for /usr/gnemul/ emulation stuff.
6 * Look at asm-sparc/namei.h for details.
7 */
8
9#define __emul_prefix() NULL
10
11#endif
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
new file mode 100644
index 000000000000..21d56b086b9d
--- /dev/null
+++ b/include/asm-x86_64/nmi.h
@@ -0,0 +1,57 @@
1/*
2 * linux/include/asm-x86_64/nmi.h
3 */
4#ifndef ASM_NMI_H
5#define ASM_NMI_H
6
7#include <linux/pm.h>
8
9struct pt_regs;
10
11typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
12
13/**
14 * set_nmi_callback
15 *
16 * Set a handler for an NMI. Only one handler may be
17 * set; the handler should return 1 if it handled the NMI.
18 */
19void set_nmi_callback(nmi_callback_t callback);
20
21/**
22 * unset_nmi_callback
23 *
24 * Remove the handler previously set.
25 */
26void unset_nmi_callback(void);
27
28#ifdef CONFIG_PM
29
30/** Replace the PM callback routine for NMI. */
31struct pm_dev * set_nmi_pm_callback(pm_callback callback);
32
33/** Unset the PM callback routine back to the default. */
34void unset_nmi_pm_callback(struct pm_dev * dev);
35
36#else
37
38static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
39{
40 return 0;
41}
42
43static inline void unset_nmi_pm_callback(struct pm_dev * dev)
44{
45}
46
47#endif /* CONFIG_PM */
48
49extern void default_do_nmi(struct pt_regs *);
50extern void die_nmi(char *str, struct pt_regs *regs);
51
52#define get_nmi_reason() inb(0x61)
53
54extern int panic_on_timeout;
55extern int unknown_nmi_panic;
56
57#endif /* ASM_NMI_H */
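
A minimal sketch of the callback interface documented above; the handler body
and function names are illustrative assumptions.

#include <asm/nmi.h>

/* Claim every NMI on this CPU; returning 1 reports it as handled. */
static int example_nmi_handler(struct pt_regs *regs, int cpu)
{
	/* inspect regs or hardware state here */
	return 1;
}

static void example_install(void)
{
	set_nmi_callback(example_nmi_handler);	/* only one handler at a time */
}

static void example_uninstall(void)
{
	unset_nmi_callback();
}
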
diff --git a/include/asm-x86_64/node.h b/include/asm-x86_64/node.h
new file mode 100644
index 000000000000..0ee6f88db048
--- /dev/null
+++ b/include/asm-x86_64/node.h
@@ -0,0 +1 @@
#include <asm-i386/node.h>
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
new file mode 100644
index 000000000000..5c363a1482e4
--- /dev/null
+++ b/include/asm-x86_64/numa.h
@@ -0,0 +1,21 @@
1#ifndef _ASM_X8664_NUMA_H
2#define _ASM_X8664_NUMA_H 1
3
4#include <linux/nodemask.h>
5#include <asm/numnodes.h>
6
7struct node {
8 u64 start,end;
9};
10
11extern int compute_hash_shift(struct node *nodes, int numnodes);
12
13#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
14
15extern void numa_add_cpu(int cpu);
16extern void numa_init_array(void);
17extern int numa_off;
18
19#define NUMA_NO_NODE 0xff
20
21#endif
diff --git a/include/asm-x86_64/numnodes.h b/include/asm-x86_64/numnodes.h
new file mode 100644
index 000000000000..32be16b8ae96
--- /dev/null
+++ b/include/asm-x86_64/numnodes.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_X8664_NUMNODES_H
2#define _ASM_X8664_NUMNODES_H 1
3
4#include <linux/config.h>
5
6#ifdef CONFIG_NUMA
7#define NODES_SHIFT 6
8#else
9#define NODES_SHIFT 0
10#endif
11
12#endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
new file mode 100644
index 000000000000..f43048035a03
--- /dev/null
+++ b/include/asm-x86_64/page.h
@@ -0,0 +1,139 @@
1#ifndef _X86_64_PAGE_H
2#define _X86_64_PAGE_H
3
4#include <linux/config.h>
5
6/* PAGE_SHIFT determines the page size */
7#define PAGE_SHIFT 12
8#ifdef __ASSEMBLY__
9#define PAGE_SIZE (0x1 << PAGE_SHIFT)
10#else
11#define PAGE_SIZE (1UL << PAGE_SHIFT)
12#endif
13#define PAGE_MASK (~(PAGE_SIZE-1))
14#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
15
16#define THREAD_ORDER 1
17#ifdef __ASSEMBLY__
18#define THREAD_SIZE (1 << (PAGE_SHIFT + THREAD_ORDER))
19#else
20#define THREAD_SIZE (1UL << (PAGE_SHIFT + THREAD_ORDER))
21#endif
22#define CURRENT_MASK (~(THREAD_SIZE-1))
23
24#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
25#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
26
27#define HPAGE_SHIFT PMD_SHIFT
28#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
29#define HPAGE_MASK (~(HPAGE_SIZE - 1))
30#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
31
32#ifdef __KERNEL__
33#ifndef __ASSEMBLY__
34
35void clear_page(void *);
36void copy_page(void *, void *);
37
38#define clear_user_page(page, vaddr, pg) clear_page(page)
39#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
40
41#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
42#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
43/*
44 * These are used to make use of C type-checking..
45 */
46typedef struct { unsigned long pte; } pte_t;
47typedef struct { unsigned long pmd; } pmd_t;
48typedef struct { unsigned long pud; } pud_t;
49typedef struct { unsigned long pgd; } pgd_t;
50#define PTE_MASK PHYSICAL_PAGE_MASK
51
52typedef struct { unsigned long pgprot; } pgprot_t;
53
54#define pte_val(x) ((x).pte)
55#define pmd_val(x) ((x).pmd)
56#define pud_val(x) ((x).pud)
57#define pgd_val(x) ((x).pgd)
58#define pgprot_val(x) ((x).pgprot)
59
60#define __pte(x) ((pte_t) { (x) } )
61#define __pmd(x) ((pmd_t) { (x) } )
62#define __pud(x) ((pud_t) { (x) } )
63#define __pgd(x) ((pgd_t) { (x) } )
64#define __pgprot(x) ((pgprot_t) { (x) } )
65
66#define __START_KERNEL 0xffffffff80100000UL
67#define __START_KERNEL_map 0xffffffff80000000UL
68#define __PAGE_OFFSET 0xffff810000000000UL
69
70#else
71#define __START_KERNEL 0xffffffff80100000
72#define __START_KERNEL_map 0xffffffff80000000
73#define __PAGE_OFFSET 0xffff810000000000
74#endif /* !__ASSEMBLY__ */
75
76/* to align the pointer to the (next) page boundary */
77#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
78
79/* See Documentation/x86_64/mm.txt for a description of the memory map. */
80#define __PHYSICAL_MASK_SHIFT 46
81#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
82#define __VIRTUAL_MASK_SHIFT 48
83#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
84
85#define KERNEL_TEXT_SIZE (40UL*1024*1024)
86#define KERNEL_TEXT_START 0xffffffff80000000UL
87
88#ifndef __ASSEMBLY__
89
90#include <asm/bug.h>
91
92/* Pure 2^n version of get_order */
93extern __inline__ int get_order(unsigned long size)
94{
95 int order;
96
97 size = (size-1) >> (PAGE_SHIFT-1);
98 order = -1;
99 do {
100 size >>= 1;
101 order++;
102 } while (size);
103 return order;
104}
105
106#endif /* __ASSEMBLY__ */
107
108#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
109
110/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
111 Otherwise you risk miscompilation. */
112#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
113/* __pa_symbol should be used for C visible symbols.
114 This seems to be the official gcc blessed way to do such arithmetic. */
115#define __pa_symbol(x) \
116 ({unsigned long v; \
117 asm("" : "=r" (v) : "0" (x)); \
118 __pa(v); })
119
120#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
121#ifndef CONFIG_DISCONTIGMEM
122#define pfn_to_page(pfn) (mem_map + (pfn))
123#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
124#define pfn_valid(pfn) ((pfn) < max_mapnr)
125#endif
126
127#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
128#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
129#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
130
131#define VM_DATA_DEFAULT_FLAGS \
132 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
133 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
134
135#define __HAVE_ARCH_GATE_AREA 1
136
137#endif /* __KERNEL__ */
138
139#endif /* _X86_64_PAGE_H */
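
As a worked example of the helpers above: get_order() returns the smallest
power-of-two page order that covers a size, and __pa()/__va() translate
through the direct mapping. The allocation call and the 20 KB size are
assumptions for illustration.

#include <linux/gfp.h>
#include <asm/page.h>

static void *example_alloc(void)
{
	int order = get_order(20 * 1024);	/* == 3: eight 4 KB pages */
	unsigned long virt = __get_free_pages(GFP_KERNEL, order);

	if (!virt)
		return NULL;
	/* __pa()/__va() round-trip through the kernel direct mapping */
	BUG_ON(__va(__pa((void *)virt)) != (void *)virt);
	return (void *)virt;
}
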
diff --git a/include/asm-x86_64/param.h b/include/asm-x86_64/param.h
new file mode 100644
index 000000000000..b707f0568c9e
--- /dev/null
+++ b/include/asm-x86_64/param.h
@@ -0,0 +1,22 @@
1#ifndef _ASMx86_64_PARAM_H
2#define _ASMx86_64_PARAM_H
3
4#ifdef __KERNEL__
5# define HZ 1000 /* Internal kernel timer frequency */
6# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
7#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif
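
Since HZ (1000) and USER_HZ (100) differ, interfaces such as times() must
scale internal jiffies to user-visible ticks. A one-line sketch, assuming
HZ is an exact multiple of USER_HZ as it is here:

/* 1000 / 100 = 10 jiffies per user tick */
static inline unsigned long jiffies_to_user_ticks(unsigned long j)
{
	return j / (HZ / USER_HZ);
}
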
diff --git a/include/asm-x86_64/parport.h b/include/asm-x86_64/parport.h
new file mode 100644
index 000000000000..7135ef977c96
--- /dev/null
+++ b/include/asm-x86_64/parport.h
@@ -0,0 +1,18 @@
1/*
2 * parport.h: x86-64-specific parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_X8664_PARPORT_H
10#define _ASM_X8664_PARPORT_H 1
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
14{
15 return parport_pc_find_isa_ports (autoirq, autodma);
16}
17
18#endif
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h
new file mode 100644
index 000000000000..036b6ca5b53b
--- /dev/null
+++ b/include/asm-x86_64/pci-direct.h
@@ -0,0 +1,48 @@
1#ifndef ASM_PCI_DIRECT_H
2#define ASM_PCI_DIRECT_H 1
3
4#include <linux/types.h>
5#include <asm/io.h>
6
7/* Direct PCI access. This is used for PCI accesses in early boot before
8 the PCI subsystem works. */
9
10#define PDprintk(x...)
11
12static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
13{
14 u32 v;
15 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
16 v = inl(0xcfc);
17 if (v != 0xffffffff)
18 PDprintk("%x reading 4 from %x: %x\n", slot, offset, v);
19 return v;
20}
21
22static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
23{
24 u8 v;
25 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
26 v = inb(0xcfc + (offset&3));
27 PDprintk("%x reading 1 from %x: %x\n", slot, offset, v);
28 return v;
29}
30
31static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
32{
33 u16 v;
34 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
35 v = inw(0xcfc + (offset&2));
36 PDprintk("%x reading 2 from %x: %x\n", slot, offset, v);
37 return v;
38}
39
40static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
41 u32 val)
42{
43 PDprintk("%x writing to %x: %x\n", slot, offset, val);
44 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
45 outl(val, 0xcfc);
46}
47
48#endif
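
A short sketch of using the early config accessors above to probe a device
before the PCI subsystem is up; the bus/slot choice is an arbitrary example.

#include <asm/pci-direct.h>

static void example_early_probe(void)
{
	/* dword 0 of config space: vendor ID (low 16), device ID (high 16) */
	u32 id = read_pci_config(0, 0, 0, 0);

	if (id == 0xffffffff)
		return;		/* no device responded at 0:0.0 */
	/* ... match against known IDs ... */
}
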
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
new file mode 100644
index 000000000000..8712520ca47f
--- /dev/null
+++ b/include/asm-x86_64/pci.h
@@ -0,0 +1,141 @@
1#ifndef __x8664_PCI_H
2#define __x8664_PCI_H
3
4#include <linux/config.h>
5#include <asm/io.h>
6
7#ifdef __KERNEL__
8
9#include <linux/mm.h> /* for struct page */
10
11/* Can be used to override the logic in pci_scan_bus for skipping
12 already-configured bus numbers - to be used for buggy BIOSes
13 or architectures with incomplete PCI setup by the loader */
14
15#ifdef CONFIG_PCI
16extern unsigned int pcibios_assign_all_busses(void);
17#else
18#define pcibios_assign_all_busses() 0
19#endif
20#define pcibios_scan_all_fns(a, b) 0
21
22extern int no_iommu, force_iommu;
23
24extern unsigned long pci_mem_start;
25#define PCIBIOS_MIN_IO 0x1000
26#define PCIBIOS_MIN_MEM (pci_mem_start)
27
28#define PCIBIOS_MIN_CARDBUS_IO 0x4000
29
30void pcibios_config_init(void);
31struct pci_bus * pcibios_scan_root(int bus);
32extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
33extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
34
35void pcibios_set_master(struct pci_dev *dev);
36void pcibios_penalize_isa_irq(int irq);
37struct irq_routing_table *pcibios_get_irq_routing_table(void);
38int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
39
40#include <linux/types.h>
41#include <linux/slab.h>
42#include <asm/scatterlist.h>
43#include <linux/string.h>
44#include <asm/page.h>
45
46extern int iommu_setup(char *opt);
47
48#ifdef CONFIG_GART_IOMMU
49/* Whether the PCI address space equals the physical memory
50 * address space. The networking and block device layers use
51 * this boolean for bounce buffer decisions.
52 *
53 * On AMD64 the two mostly coincide, but we report zero when an
54 * IOMMU is in use, to tell subsystems that one is available.
55 */
56#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0)
57
58/*
59 * x86-64 always supports DAC, but sometimes it is useful to force
60 * devices through the IOMMU to get automatic sg list merging.
61 * Optional right now.
62 */
63extern int iommu_sac_force;
64#define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force)
65
66#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
67 dma_addr_t ADDR_NAME;
68#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
69 __u32 LEN_NAME;
70#define pci_unmap_addr(PTR, ADDR_NAME) \
71 ((PTR)->ADDR_NAME)
72#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
73 (((PTR)->ADDR_NAME) = (VAL))
74#define pci_unmap_len(PTR, LEN_NAME) \
75 ((PTR)->LEN_NAME)
76#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
77 (((PTR)->LEN_NAME) = (VAL))
78
79#else
80/* No IOMMU */
81
82#define PCI_DMA_BUS_IS_PHYS 1
83#define pci_dac_dma_supported(pci_dev, mask) 1
84
85#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
86#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
87#define pci_unmap_addr(PTR, ADDR_NAME) (0)
88#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
89#define pci_unmap_len(PTR, LEN_NAME) (0)
90#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
91
92#endif
93
94#include <asm-generic/pci-dma-compat.h>
95
96static inline dma64_addr_t
97pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
98{
99 return ((dma64_addr_t) page_to_phys(page) +
100 (dma64_addr_t) offset);
101}
102
103static inline struct page *
104pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
105{
106 return virt_to_page(__va(dma_addr));
107}
108
109static inline unsigned long
110pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
111{
112 return (dma_addr & ~PAGE_MASK);
113}
114
115static inline void
116pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
117{
118}
119
120static inline void
121pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
122{
123 flush_write_buffers();
124}
125
126#define HAVE_PCI_MMAP
127extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
128 enum pci_mmap_state mmap_state, int write_combine);
129
130static inline void pcibios_add_platform_entries(struct pci_dev *dev)
131{
132}
133
134#endif /* __KERNEL__ */
135
136/* generic pci stuff */
137#ifdef CONFIG_PCI
138#include <asm-generic/pci.h>
139#endif
140
141#endif /* __x8664_PCI_H */
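
The DECLARE_PCI_UNMAP_* macros above let a driver keep unmap bookkeeping
that compiles away entirely in the no-IOMMU case. A hedged sketch of the
usual pattern; the struct and field names are illustrative, and
pci_map_single()/pci_unmap_single() come from the dma-compat layer
included above.

#include <linux/pci.h>

struct example_rx_desc {
	void *buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* a dma_addr_t, or nothing */
	DECLARE_PCI_UNMAP_LEN(len)	/* a __u32, or nothing */
};

static void example_map(struct pci_dev *pdev, struct example_rx_desc *d,
			size_t size)
{
	dma_addr_t dma = pci_map_single(pdev, d->buf, size, PCI_DMA_FROMDEVICE);

	pci_unmap_addr_set(d, mapping, dma);
	pci_unmap_len_set(d, len, size);
}

static void example_unmap(struct pci_dev *pdev, struct example_rx_desc *d)
{
	pci_unmap_single(pdev, pci_unmap_addr(d, mapping),
			 pci_unmap_len(d, len), PCI_DMA_FROMDEVICE);
}
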
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
new file mode 100644
index 000000000000..36b766cfc4d5
--- /dev/null
+++ b/include/asm-x86_64/pda.h
@@ -0,0 +1,83 @@
1#ifndef X86_64_PDA_H
2#define X86_64_PDA_H
3
4#ifndef __ASSEMBLY__
5#include <linux/stddef.h>
6#include <linux/types.h>
7#include <linux/cache.h>
8
9/* Per processor datastructure. %gs points to it while the kernel runs */
10struct x8664_pda {
11 struct task_struct *pcurrent; /* Current process */
12 unsigned long data_offset; /* Per cpu data offset from linker address */
13 struct x8664_pda *me; /* Pointer to itself */
14 unsigned long kernelstack; /* top of kernel stack for current */
15 unsigned long oldrsp; /* user rsp for system call */
16 unsigned long irqrsp; /* Old rsp for interrupts. */
17 int irqcount; /* Irq nesting counter. Starts with -1 */
18 int cpunumber; /* Logical CPU number */
19 char *irqstackptr; /* top of irqstack */
20 unsigned int __softirq_pending;
21 unsigned int __nmi_count; /* number of NMI on this CPUs */
22 struct mm_struct *active_mm;
23 int mmu_state;
24 unsigned apic_timer_irqs;
25} ____cacheline_aligned;
26
27
28#define IRQSTACK_ORDER 2
29#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
30
31extern struct x8664_pda cpu_pda[];
32
33/*
34 * There is no fast way to get the base address of the PDA; all
35 * accesses have to mention %gs. So it needs to be done this Torvaldian way.
36 */
37#define sizeof_field(type,field) (sizeof(((type *)0)->field))
38#define typeof_field(type,field) typeof(((type *)0)->field)
39
40extern void __bad_pda_field(void);
41
42#define pda_offset(field) offsetof(struct x8664_pda, field)
43
44#define pda_to_op(op,field,val) do { \
45 switch (sizeof_field(struct x8664_pda, field)) { \
46case 2: \
47asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
48case 4: \
49asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
50case 8: \
51asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
52 default: __bad_pda_field(); \
53 } \
54 } while (0)
55
56/*
57 * AK: PDA read accesses should be neither volatile nor have a memory clobber.
58 * Unfortunately removing them causes all hell to break loose currently.
59 */
60#define pda_from_op(op,field) ({ \
61 typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
62 switch (sizeof_field(struct x8664_pda, field)) { \
63case 2: \
64asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
65case 4: \
66asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
67case 8: \
68asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
69 default: __bad_pda_field(); \
70 } \
71 ret__; })
72
73
74#define read_pda(field) pda_from_op("mov",field)
75#define write_pda(field,val) pda_to_op("mov",field,val)
76#define add_pda(field,val) pda_to_op("add",field,val)
77#define sub_pda(field,val) pda_to_op("sub",field,val)
78
79#endif
80
81#define PDA_STACKOFFSET (5*8)
82
83#endif
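
A minimal sketch of the accessors defined above; each compiles down to a
single %gs-relative instruction. The field names are real fields of
struct x8664_pda, while the function itself is illustrative.

#include <asm/pda.h>

static void example_pda_use(void)
{
	int cpu = read_pda(cpunumber);	/* one movl %gs:offset,%reg */

	add_pda(__nmi_count, 1);	/* read-modify-write on %gs data */
	(void)cpu;
}
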
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
new file mode 100644
index 000000000000..415d73f3c8ef
--- /dev/null
+++ b/include/asm-x86_64/percpu.h
@@ -0,0 +1,52 @@
1#ifndef _ASM_X8664_PERCPU_H_
2#define _ASM_X8664_PERCPU_H_
3#include <linux/compiler.h>
4
5/* Same as asm-generic/percpu.h, except that we store the per cpu offset
6 in the PDA. Longer term the PDA and every per cpu variable
7 should just be put into a single section and referenced directly
8 from %gs. */
9
10#ifdef CONFIG_SMP
11
12#include <asm/pda.h>
13
14#define __per_cpu_offset(cpu) (cpu_pda[cpu].data_offset)
15#define __my_cpu_offset() read_pda(data_offset)
16
17/* Separate out the type, so (int[3], foo) works. */
18#define DEFINE_PER_CPU(type, name) \
19 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
20
21/* var is in discarded region: offset to particular copy we want */
22#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
23#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
24
25/* A macro to avoid #include hell... */
26#define percpu_modcopy(pcpudst, src, size) \
27do { \
28 unsigned int __i; \
29 for (__i = 0; __i < NR_CPUS; __i++) \
30 if (cpu_possible(__i)) \
31 memcpy((pcpudst)+__per_cpu_offset(__i), \
32 (src), (size)); \
33} while (0)
34
35extern void setup_per_cpu_areas(void);
36
37#else /* ! SMP */
38
39#define DEFINE_PER_CPU(type, name) \
40 __typeof__(type) per_cpu__##name
41
42#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
43#define __get_cpu_var(var) per_cpu__##var
44
45#endif /* SMP */
46
47#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
48
49#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
50#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
51
52#endif /* _ASM_X8664_PERCPU_H_ */
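
A sketch of the per-cpu API above; 'example_counter' is a made-up variable,
and callers of __get_cpu_var() are assumed to run with preemption disabled.

#include <linux/percpu.h>
#include <linux/cpumask.h>

DEFINE_PER_CPU(unsigned long, example_counter);

static void example_bump(void)
{
	__get_cpu_var(example_counter)++;	/* this CPU's copy, via the PDA offset */
}

static unsigned long example_total(void)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_possible(cpu))
			sum += per_cpu(example_counter, cpu);
	return sum;
}
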
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
new file mode 100644
index 000000000000..deadd146978b
--- /dev/null
+++ b/include/asm-x86_64/pgalloc.h
@@ -0,0 +1,105 @@
1#ifndef _X86_64_PGALLOC_H
2#define _X86_64_PGALLOC_H
3
4#include <asm/fixmap.h>
5#include <asm/pda.h>
6#include <linux/threads.h>
7#include <linux/mm.h>
8
9#define pmd_populate_kernel(mm, pmd, pte) \
10 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
11#define pud_populate(mm, pud, pmd) \
12 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
13#define pgd_populate(mm, pgd, pud) \
14 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
15
16static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
17{
18 set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
19}
20
21extern __inline__ pmd_t *get_pmd(void)
22{
23 return (pmd_t *)get_zeroed_page(GFP_KERNEL);
24}
25
26extern __inline__ void pmd_free(pmd_t *pmd)
27{
28 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
29 free_page((unsigned long)pmd);
30}
31
32static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
33{
34 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
35}
36
37static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
38{
39 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
40}
41
42static inline void pud_free (pud_t *pud)
43{
44 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
45 free_page((unsigned long)pud);
46}
47
48static inline pgd_t *pgd_alloc(struct mm_struct *mm)
49{
50 unsigned boundary;
51 pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
52 if (!pgd)
53 return NULL;
54 /*
55 * Copy kernel pointers in from init.
56 * Could keep a freelist or slab cache of those because the kernel
57 * part never changes.
58 */
59 boundary = pgd_index(__PAGE_OFFSET);
60 memset(pgd, 0, boundary * sizeof(pgd_t));
61 memcpy(pgd + boundary,
62 init_level4_pgt + boundary,
63 (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
64 return pgd;
65}
66
67static inline void pgd_free(pgd_t *pgd)
68{
69 BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
70 free_page((unsigned long)pgd);
71}
72
73static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
74{
75 return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
76}
77
78static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
79{
80 void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
81 if (!p)
82 return NULL;
83 return virt_to_page(p);
84}
85
86/* We should really implement garbage collection for free page-table pages.
87 This could be done with a reference count in struct page. */
88
89extern __inline__ void pte_free_kernel(pte_t *pte)
90{
91 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
92 free_page((unsigned long)pte);
93}
94
95extern inline void pte_free(struct page *pte)
96{
97 __free_page(pte);
98}
99
100#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
101
102#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
103#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
104
105#endif /* _X86_64_PGALLOC_H */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
new file mode 100644
index 000000000000..dc6b6f2604e8
--- /dev/null
+++ b/include/asm-x86_64/pgtable.h
@@ -0,0 +1,437 @@
1#ifndef _X86_64_PGTABLE_H
2#define _X86_64_PGTABLE_H
3
4/*
5 * This file contains the functions and defines necessary to modify and use
6 * the x86-64 page table tree.
7 */
8#include <asm/processor.h>
9#include <asm/fixmap.h>
10#include <asm/bitops.h>
11#include <linux/threads.h>
12#include <asm/pda.h>
13
14extern pud_t level3_kernel_pgt[512];
15extern pud_t level3_physmem_pgt[512];
16extern pud_t level3_ident_pgt[512];
17extern pmd_t level2_kernel_pgt[512];
18extern pgd_t init_level4_pgt[];
19extern unsigned long __supported_pte_mask;
20
21#define swapper_pg_dir init_level4_pgt
22
23extern int nonx_setup(char *str);
24extern void paging_init(void);
25extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
26
27extern unsigned long pgkern_mask;
28
29/*
30 * ZERO_PAGE is a global shared page that is always zero: used
31 * for zero-mapped memory areas etc..
32 */
33extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
34#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
35
36/*
37 * PGDIR_SHIFT determines what a top-level page table entry can map
38 */
39#define PGDIR_SHIFT 39
40#define PTRS_PER_PGD 512
41
42/*
43 * 3rd level page
44 */
45#define PUD_SHIFT 30
46#define PTRS_PER_PUD 512
47
48/*
49 * PMD_SHIFT determines the size of the area a middle-level
50 * page table can map
51 */
52#define PMD_SHIFT 21
53#define PTRS_PER_PMD 512
54
55/*
56 * entries per page directory level
57 */
58#define PTRS_PER_PTE 512
59
60#define pte_ERROR(e) \
61 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
62#define pmd_ERROR(e) \
63 printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
64#define pud_ERROR(e) \
65 printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
66#define pgd_ERROR(e) \
67 printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
68
69#define pgd_none(x) (!pgd_val(x))
70#define pud_none(x) (!pud_val(x))
71
72static inline void set_pte(pte_t *dst, pte_t val)
73{
74 pte_val(*dst) = pte_val(val);
75}
76#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
77
78static inline void set_pmd(pmd_t *dst, pmd_t val)
79{
80 pmd_val(*dst) = pmd_val(val);
81}
82
83static inline void set_pud(pud_t *dst, pud_t val)
84{
85 pud_val(*dst) = pud_val(val);
86}
87
88extern inline void pud_clear (pud_t *pud)
89{
90 set_pud(pud, __pud(0));
91}
92
93static inline void set_pgd(pgd_t *dst, pgd_t val)
94{
95 pgd_val(*dst) = pgd_val(val);
96}
97
98extern inline void pgd_clear (pgd_t * pgd)
99{
100 set_pgd(pgd, __pgd(0));
101}
102
103#define pud_page(pud) \
104((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
105
106#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0))
107#define pte_same(a, b) ((a).pte == (b).pte)
108
109#define PMD_SIZE (1UL << PMD_SHIFT)
110#define PMD_MASK (~(PMD_SIZE-1))
111#define PUD_SIZE (1UL << PUD_SHIFT)
112#define PUD_MASK (~(PUD_SIZE-1))
113#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
114#define PGDIR_MASK (~(PGDIR_SIZE-1))
115
116#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
117#define FIRST_USER_PGD_NR 0
118
119#ifndef __ASSEMBLY__
120#define MAXMEM 0x3fffffffffffUL
121#define VMALLOC_START 0xffffc20000000000UL
122#define VMALLOC_END 0xffffe1ffffffffffUL
123#define MODULES_VADDR 0xffffffff88000000UL
124#define MODULES_END 0xfffffffffff00000UL
125#define MODULES_LEN (MODULES_END - MODULES_VADDR)
126
127#define _PAGE_BIT_PRESENT 0
128#define _PAGE_BIT_RW 1
129#define _PAGE_BIT_USER 2
130#define _PAGE_BIT_PWT 3
131#define _PAGE_BIT_PCD 4
132#define _PAGE_BIT_ACCESSED 5
133#define _PAGE_BIT_DIRTY 6
134#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
135#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
136#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
137
138#define _PAGE_PRESENT 0x001
139#define _PAGE_RW 0x002
140#define _PAGE_USER 0x004
141#define _PAGE_PWT 0x008
142#define _PAGE_PCD 0x010
143#define _PAGE_ACCESSED 0x020
144#define _PAGE_DIRTY 0x040
145#define _PAGE_PSE 0x080 /* 2MB page */
146#define _PAGE_FILE 0x040 /* set:pagecache, unset:swap */
147#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
148
149#define _PAGE_PROTNONE 0x080 /* If not present */
150#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
151
152#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
153#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
154
155#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
156
157#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
158#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
159#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
160#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
161#define PAGE_COPY PAGE_COPY_NOEXEC
162#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
163#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
164#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
165#define __PAGE_KERNEL \
166 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
167#define __PAGE_KERNEL_EXEC \
168 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
169#define __PAGE_KERNEL_NOCACHE \
170 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
171#define __PAGE_KERNEL_RO \
172 (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
173#define __PAGE_KERNEL_VSYSCALL \
174 (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
175#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
176 (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
177#define __PAGE_KERNEL_LARGE \
178 (__PAGE_KERNEL | _PAGE_PSE)
179
180#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
181
182#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
183#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
184#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
185#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
186#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
187#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
188#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
189#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
190
191/* xwr */
192#define __P000 PAGE_NONE
193#define __P001 PAGE_READONLY
194#define __P010 PAGE_COPY
195#define __P011 PAGE_COPY
196#define __P100 PAGE_READONLY_EXEC
197#define __P101 PAGE_READONLY_EXEC
198#define __P110 PAGE_COPY_EXEC
199#define __P111 PAGE_COPY_EXEC
200
201#define __S000 PAGE_NONE
202#define __S001 PAGE_READONLY
203#define __S010 PAGE_SHARED
204#define __S011 PAGE_SHARED
205#define __S100 PAGE_READONLY_EXEC
206#define __S101 PAGE_READONLY_EXEC
207#define __S110 PAGE_SHARED_EXEC
208#define __S111 PAGE_SHARED_EXEC
209
210static inline unsigned long pgd_bad(pgd_t pgd)
211{
212 unsigned long val = pgd_val(pgd);
213 val &= ~PTE_MASK;
214 val &= ~(_PAGE_USER | _PAGE_DIRTY);
215 return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
216}
217
218static inline unsigned long pud_bad(pud_t pud)
219{
220 unsigned long val = pud_val(pud);
221 val &= ~PTE_MASK;
222 val &= ~(_PAGE_USER | _PAGE_DIRTY);
223 return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
224}
225
226#define pte_none(x) (!pte_val(x))
227#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
228#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
229
230#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
231 right? */
232#define pte_page(x) pfn_to_page(pte_pfn(x))
233#define pte_pfn(x) ((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
234
235static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
236{
237 pte_t pte;
238 pte_val(pte) = (page_nr << PAGE_SHIFT);
239 pte_val(pte) |= pgprot_val(pgprot);
240 pte_val(pte) &= __supported_pte_mask;
241 return pte;
242}
243
244/*
245 * The following only work if pte_present() is true.
246 * Undefined behaviour if not..
247 */
248static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
249extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
250extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
251extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
252extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
253extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
254static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
255
256extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
257extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
258extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
259extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
260extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
261extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
262extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
263extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
264extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
265extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
266
267struct vm_area_struct;
268
269static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
270{
271 if (!pte_dirty(*ptep))
272 return 0;
273 return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
274}
275
276static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
277{
278 if (!pte_young(*ptep))
279 return 0;
280 return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
281}
282
283static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
284{
285 clear_bit(_PAGE_BIT_RW, ptep);
286}
287
288/*
289 * Macro to mark a page protection value as "uncacheable".
290 */
291#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
292
293#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
294static inline int pmd_large(pmd_t pte) {
295 return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
296}
297
298
299/*
300 * Conversion functions: convert a page and protection to a page entry,
301 * and a page entry and page directory to the page they refer to.
302 */
303
304#define page_pte(page) page_pte_prot(page, __pgprot(0))
305
306/*
307 * Level 4 access.
308 */
309#define pgd_page(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
310#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
311#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
312#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
313#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
314#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
315
316/* PUD - Level3 access */
317/* to find an entry in a page-table-directory. */
318#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
319#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
320#define pud_offset_k(pgd, addr) pud_offset(pgd, addr)
321#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
322
323static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
324{
325 return pud + pud_index(address);
326}
327
328/* PMD - Level 2 access */
329#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
330#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
331
332#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
333#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
334 pmd_index(address))
335#define pmd_none(x) (!pmd_val(x))
336#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
337#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
338#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
339#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
340#define pmd_pfn(x) ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
341
342#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
343#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
344#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
345
346/* PTE - Level 1 access. */
347
348/* page, protection -> pte */
349#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
350#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
351
352/* physical address -> PTE */
353static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
354{
355 pte_t pte;
356 pte_val(pte) = physpage | pgprot_val(pgprot);
357 return pte;
358}
359
360/* Change flags of a PTE */
361extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
362{
363 pte_val(pte) &= _PAGE_CHG_MASK;
364 pte_val(pte) |= pgprot_val(newprot);
365 pte_val(pte) &= __supported_pte_mask;
366 return pte;
367}
368
369#define pte_index(address) \
370 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
371#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
372 pte_index(address))
373
374/* x86-64 always has all page tables mapped. */
375#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
376#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
377#define pte_unmap(pte) /* NOP */
378#define pte_unmap_nested(pte) /* NOP */
379
380#define update_mmu_cache(vma,address,pte) do { } while (0)
381
382/* We only update the dirty/accessed state if we set
383 * the dirty bit by hand in the kernel, since the hardware
384 * will do the accessed bit for us, and we don't want to
385 * race with other CPUs that might be updating the dirty
386 * bit at the same time. */
387#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
388#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
389 do { \
390 if (__dirty) { \
391 set_pte(__ptep, __entry); \
392 flush_tlb_page(__vma, __address); \
393 } \
394 } while (0)
395
396/* Encode and de-code a swap entry */
397#define __swp_type(x) (((x).val >> 1) & 0x3f)
398#define __swp_offset(x) ((x).val >> 8)
399#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
400#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
401#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
402
403#endif /* !__ASSEMBLY__ */
404
405extern int kern_addr_valid(unsigned long addr);
406
407#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
408 remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
409
410#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
411 remap_pfn_range(vma, vaddr, pfn, size, prot)
412
413#define MK_IOSPACE_PFN(space, pfn) (pfn)
414#define GET_IOSPACE(pfn) 0
415#define GET_PFN(pfn) (pfn)
416
417#define HAVE_ARCH_UNMAPPED_AREA
418
419#define pgtable_cache_init() do { } while (0)
420#define check_pgt_cache() do { } while (0)
421
422#define PAGE_AGP PAGE_KERNEL_NOCACHE
423#define HAVE_PAGE_AGP 1
424
425/* fs/proc/kcore.c */
426#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
427#define kc_offset_to_vaddr(o) \
428 (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
429
430#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
431#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
432#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
433#define __HAVE_ARCH_PTEP_SET_WRPROTECT
434#define __HAVE_ARCH_PTE_SAME
435#include <asm-generic/pgtable.h>
436
437#endif /* _X86_64_PGTABLE_H */
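
To make the four-level layout above concrete, here is a lock-free sketch of
walking from a kernel virtual address down to its pte; illustration only.

#include <asm/pgtable.h>

static pte_t *example_lookup(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;	/* unmapped, or a 2 MB page with no pte level */
	return pte_offset_kernel(pmd, address);
}
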
diff --git a/include/asm-x86_64/poll.h b/include/asm-x86_64/poll.h
new file mode 100644
index 000000000000..c43cbba31913
--- /dev/null
+++ b/include/asm-x86_64/poll.h
@@ -0,0 +1,26 @@
1#ifndef __x86_64_POLL_H
2#define __x86_64_POLL_H
3
4/* These are specified by iBCS2 */
5#define POLLIN 0x0001
6#define POLLPRI 0x0002
7#define POLLOUT 0x0004
8#define POLLERR 0x0008
9#define POLLHUP 0x0010
10#define POLLNVAL 0x0020
11
12/* The rest seem to be more-or-less nonstandard. Check them! */
13#define POLLRDNORM 0x0040
14#define POLLRDBAND 0x0080
15#define POLLWRNORM 0x0100
16#define POLLWRBAND 0x0200
17#define POLLMSG 0x0400
18#define POLLREMOVE 0x1000
19
20struct pollfd {
21 int fd;
22 short events;
23 short revents;
24};
25
26#endif
diff --git a/include/asm-x86_64/posix_types.h b/include/asm-x86_64/posix_types.h
new file mode 100644
index 000000000000..9926aa43775b
--- /dev/null
+++ b/include/asm-x86_64/posix_types.h
@@ -0,0 +1,119 @@
1#ifndef _ASM_X86_64_POSIX_TYPES_H
2#define _ASM_X86_64_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef unsigned long __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef int __kernel_ipc_pid_t;
16typedef unsigned int __kernel_uid_t;
17typedef unsigned int __kernel_gid_t;
18typedef unsigned long __kernel_size_t;
19typedef long __kernel_ssize_t;
20typedef long __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30
31#ifdef __GNUC__
32typedef long long __kernel_loff_t;
33#endif
34
35typedef struct {
36 int val[2];
37} __kernel_fsid_t;
38
39typedef unsigned short __kernel_old_uid_t;
40typedef unsigned short __kernel_old_gid_t;
41typedef __kernel_uid_t __kernel_uid32_t;
42typedef __kernel_gid_t __kernel_gid32_t;
43
44typedef unsigned long __kernel_old_dev_t;
45
46#ifdef __KERNEL__
47
48#undef __FD_SET
49static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
50{
51 unsigned long _tmp = fd / __NFDBITS;
52 unsigned long _rem = fd % __NFDBITS;
53 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
54}
55
56#undef __FD_CLR
57static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
58{
59 unsigned long _tmp = fd / __NFDBITS;
60 unsigned long _rem = fd % __NFDBITS;
61 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
62}
63
64#undef __FD_ISSET
65static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
66{
67 unsigned long _tmp = fd / __NFDBITS;
68 unsigned long _rem = fd % __NFDBITS;
69 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
70}
71
72/*
73 * This will unroll the loop for the normal constant cases (8 or 32 longs,
74 * for 256 and 1024-bit fd_sets respectively)
75 */
76#undef __FD_ZERO
77static __inline__ void __FD_ZERO(__kernel_fd_set *p)
78{
79 unsigned long *tmp = p->fds_bits;
80 int i;
81
82 if (__builtin_constant_p(__FDSET_LONGS)) {
83 switch (__FDSET_LONGS) {
84 case 32:
85 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
86 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
87 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
88 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
89 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
90 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
91 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
92 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
93 return;
94 case 16:
95 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
96 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
97 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
98 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
99 return;
100 case 8:
101 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
102 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
103 return;
104 case 4:
105 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
106 return;
107 }
108 }
109 i = __FDSET_LONGS;
110 while (i) {
111 i--;
112 *tmp = 0;
113 tmp++;
114 }
115}
116
117#endif /* defined(__KERNEL__) */
118
119#endif
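
A short sketch exercising the fd_set helpers above; fd 42 is an arbitrary
example (word 42/64 == 0, bit 42%64 == 42 with x86-64's 64-bit longs).

#include <linux/posix_types.h>

static int example_fd_ops(void)
{
	__kernel_fd_set set;

	__FD_ZERO(&set);	/* unrolled for constant __FDSET_LONGS */
	__FD_SET(42, &set);
	if (!__FD_ISSET(42, &set))
		return -1;
	__FD_CLR(42, &set);
	return 0;
}
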
diff --git a/include/asm-x86_64/prctl.h b/include/asm-x86_64/prctl.h
new file mode 100644
index 000000000000..52952adef1ca
--- /dev/null
+++ b/include/asm-x86_64/prctl.h
@@ -0,0 +1,10 @@
1#ifndef X86_64_PRCTL_H
2#define X86_64_PRCTL_H 1
3
4#define ARCH_SET_GS 0x1001
5#define ARCH_SET_FS 0x1002
6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004
8
9
10#endif
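
These codes are consumed by the arch_prctl(2) system call. A hedged
userspace sketch, assuming a glibc environment where <sys/syscall.h>
defines SYS_arch_prctl:

#include <asm/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Read the current %fs base; ARCH_GET_FS stores it through the pointer. */
static unsigned long example_get_fs(void)
{
	unsigned long base = 0;

	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
	return base;
}
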
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
new file mode 100644
index 000000000000..45ab74e8f141
--- /dev/null
+++ b/include/asm-x86_64/processor.h
@@ -0,0 +1,462 @@
1/*
2 * include/asm-x86_64/processor.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 */
6
7#ifndef __ASM_X86_64_PROCESSOR_H
8#define __ASM_X86_64_PROCESSOR_H
9
10#include <asm/segment.h>
11#include <asm/page.h>
12#include <asm/types.h>
13#include <asm/sigcontext.h>
14#include <asm/cpufeature.h>
15#include <linux/config.h>
16#include <linux/threads.h>
17#include <asm/msr.h>
18#include <asm/current.h>
19#include <asm/system.h>
20#include <asm/mmsegment.h>
21#include <asm/percpu.h>
22#include <linux/personality.h>
23
24#define TF_MASK 0x00000100
25#define IF_MASK 0x00000200
26#define IOPL_MASK 0x00003000
27#define NT_MASK 0x00004000
28#define VM_MASK 0x00020000
29#define AC_MASK 0x00040000
30#define VIF_MASK 0x00080000 /* virtual interrupt flag */
31#define VIP_MASK 0x00100000 /* virtual interrupt pending */
32#define ID_MASK 0x00200000
33
34#define desc_empty(desc) \
35 (!((desc)->a + (desc)->b))
36
37#define desc_equal(desc1, desc2) \
38 (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
39
40/*
41 * Default implementation of macro that returns current
42 * instruction pointer ("program counter").
43 */
44#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
45
46/*
47 * CPU type and hardware bug flags. Kept separately for each CPU.
48 */
49
50struct cpuinfo_x86 {
51 __u8 x86; /* CPU family */
52 __u8 x86_vendor; /* CPU vendor */
53 __u8 x86_model;
54 __u8 x86_mask;
55 int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
56 __u32 x86_capability[NCAPINTS];
57 char x86_vendor_id[16];
58 char x86_model_id[64];
59 int x86_cache_size; /* in KB */
60 int x86_clflush_size;
61 int x86_cache_alignment;
62	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
63 __u8 x86_virt_bits, x86_phys_bits;
64 __u8 x86_num_cores;
65 __u8 x86_apicid;
66 __u32 x86_power;
67 __u32 x86_cpuid_level; /* Max CPUID function supported */
68 unsigned long loops_per_jiffy;
69} ____cacheline_aligned;
70
71#define X86_VENDOR_INTEL 0
72#define X86_VENDOR_CYRIX 1
73#define X86_VENDOR_AMD 2
74#define X86_VENDOR_UMC 3
75#define X86_VENDOR_NEXGEN 4
76#define X86_VENDOR_CENTAUR 5
77#define X86_VENDOR_RISE 6
78#define X86_VENDOR_TRANSMETA 7
79#define X86_VENDOR_NUM 8
80#define X86_VENDOR_UNKNOWN 0xff
81
82#ifdef CONFIG_SMP
83extern struct cpuinfo_x86 cpu_data[];
84#define current_cpu_data cpu_data[smp_processor_id()]
85#else
86#define cpu_data (&boot_cpu_data)
87#define current_cpu_data boot_cpu_data
88#endif
89
90extern char ignore_irq13;
91
92extern void identify_cpu(struct cpuinfo_x86 *);
93extern void print_cpu_info(struct cpuinfo_x86 *);
94extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
95
96/*
97 * EFLAGS bits
98 */
99#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
100#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
101#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
102#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
103#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
104#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
105#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
106#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
107#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
108#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
109#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
110#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
111#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
112#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
113#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
114#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
115#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
116
117/*
118 * Intel CPU features in CR4
119 */
120#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
121#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
122#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
123#define X86_CR4_DE 0x0008 /* enable debugging extensions */
124#define X86_CR4_PSE 0x0010 /* enable page size extensions */
125#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
126#define X86_CR4_MCE 0x0040 /* Machine check enable */
127#define X86_CR4_PGE 0x0080 /* enable global pages */
128#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
129#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
130#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
131
132/*
133 * Save the cr4 feature set we're using (i.e.
134 * Pentium 4MB enable and PPro Global page
135 * enable), so that any CPUs that boot up
136 * after us can get the correct flags.
137 */
138extern unsigned long mmu_cr4_features;
139
140static inline void set_in_cr4 (unsigned long mask)
141{
142 mmu_cr4_features |= mask;
143 __asm__("movq %%cr4,%%rax\n\t"
144 "orq %0,%%rax\n\t"
145 "movq %%rax,%%cr4\n"
146 : : "irg" (mask)
147 :"ax");
148}
149
150static inline void clear_in_cr4 (unsigned long mask)
151{
152 mmu_cr4_features &= ~mask;
153 __asm__("movq %%cr4,%%rax\n\t"
154 "andq %0,%%rax\n\t"
155 "movq %%rax,%%cr4\n"
156 : : "irg" (~mask)
157 :"ax");
158}
159
160
161/*
162 * User space process size: 47 bits.
163 */
164#define TASK_SIZE (0x800000000000UL)
165
166/* This decides where the kernel will search for a free chunk of vm
167 * space during mmap().
168 */
169#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
170#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
171#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
172#define TASK_UNMAPPED_BASE \
173 (test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
174
175/*
176 * Size of io_bitmap.
177 */
178#define IO_BITMAP_BITS 65536
179#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
180#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
181#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
182#define INVALID_IO_BITMAP_OFFSET 0x8000
183
184struct i387_fxsave_struct {
185 u16 cwd;
186 u16 swd;
187 u16 twd;
188 u16 fop;
189 u64 rip;
190 u64 rdp;
191 u32 mxcsr;
192 u32 mxcsr_mask;
193 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
194 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
195 u32 padding[24];
196} __attribute__ ((aligned (16)));
197
198union i387_union {
199 struct i387_fxsave_struct fxsave;
200};
201
202struct tss_struct {
203 u32 reserved1;
204 u64 rsp0;
205 u64 rsp1;
206 u64 rsp2;
207 u64 reserved2;
208 u64 ist[7];
209 u32 reserved3;
210 u32 reserved4;
211 u16 reserved5;
212 u16 io_bitmap_base;
213 /*
214 * The extra 1 is there because the CPU will access an
215 * additional byte beyond the end of the IO permission
216 * bitmap. The extra byte must be all 1 bits, and must
217 * be within the limit. Thus we have:
218 *
219 * 128 bytes, the bitmap itself, for ports 0..0x3ff
220 * 8 bytes, for an extra "long" of ~0UL
221 */
222 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
223} __attribute__((packed)) ____cacheline_aligned;
224
225extern struct cpuinfo_x86 boot_cpu_data;
226DECLARE_PER_CPU(struct tss_struct,init_tss);
227
228#define ARCH_MIN_TASKALIGN 16
229
230struct thread_struct {
231 unsigned long rsp0;
232 unsigned long rsp;
233 unsigned long userrsp; /* Copy from PDA */
234 unsigned long fs;
235 unsigned long gs;
236 unsigned short es, ds, fsindex, gsindex;
237/* Hardware debugging registers */
238 unsigned long debugreg0;
239 unsigned long debugreg1;
240 unsigned long debugreg2;
241 unsigned long debugreg3;
242 unsigned long debugreg6;
243 unsigned long debugreg7;
244/* fault info */
245 unsigned long cr2, trap_no, error_code;
246/* floating point info */
247 union i387_union i387 __attribute__((aligned(16)));
248/* IO permissions. The bitmap could be moved into the GDT, which would make
249   context switch faster for a limited number of ioperm-using tasks. -AK */
250 int ioperm;
251 unsigned long *io_bitmap_ptr;
252 unsigned io_bitmap_max;
253/* cached TLS descriptors. */
254 u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
255} __attribute__((aligned(16)));
256
257#define INIT_THREAD {}
258
259#define INIT_MMAP \
260{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
261
262#define STACKFAULT_STACK 1
263#define DOUBLEFAULT_STACK 2
264#define NMI_STACK 3
265#define DEBUG_STACK 4
266#define MCE_STACK 5
267#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
268#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
269#define EXCEPTION_STACK_ORDER 0
270
271#define start_thread(regs,new_rip,new_rsp) do { \
272 asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
273 load_gs_index(0); \
274 (regs)->rip = (new_rip); \
275 (regs)->rsp = (new_rsp); \
276 write_pda(oldrsp, (new_rsp)); \
277 (regs)->cs = __USER_CS; \
278 (regs)->ss = __USER_DS; \
279 (regs)->eflags = 0x200; \
280 set_fs(USER_DS); \
281} while(0)
282
283struct task_struct;
284struct mm_struct;
285
286/* Free all resources held by a thread. */
287extern void release_thread(struct task_struct *);
288
289/* Prepare to copy thread state - unlazy all lazy status */
290extern void prepare_to_copy(struct task_struct *tsk);
291
292/*
293 * create a kernel thread without removing it from tasklists
294 */
295extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
296
297/*
298 * Return saved PC of a blocked thread.
299 * What is this good for? It will always be the scheduler or ret_from_fork.
300 */
301#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
302
303extern unsigned long get_wchan(struct task_struct *p);
304#define KSTK_EIP(tsk) \
305 (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
306#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
307
308
309struct microcode_header {
310 unsigned int hdrver;
311 unsigned int rev;
312 unsigned int date;
313 unsigned int sig;
314 unsigned int cksum;
315 unsigned int ldrver;
316 unsigned int pf;
317 unsigned int datasize;
318 unsigned int totalsize;
319 unsigned int reserved[3];
320};
321
322struct microcode {
323 struct microcode_header hdr;
324 unsigned int bits[0];
325};
326
327typedef struct microcode microcode_t;
328typedef struct microcode_header microcode_header_t;
329
330/* microcode format is extended from Prescott processors */
331struct extended_signature {
332 unsigned int sig;
333 unsigned int pf;
334 unsigned int cksum;
335};
336
337struct extended_sigtable {
338 unsigned int count;
339 unsigned int cksum;
340 unsigned int reserved[3];
341 struct extended_signature sigs[0];
342};
343
344/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
345#define MICROCODE_IOCFREE _IO('6',0)
346
347
348#define ASM_NOP1 K8_NOP1
349#define ASM_NOP2 K8_NOP2
350#define ASM_NOP3 K8_NOP3
351#define ASM_NOP4 K8_NOP4
352#define ASM_NOP5 K8_NOP5
353#define ASM_NOP6 K8_NOP6
354#define ASM_NOP7 K8_NOP7
355#define ASM_NOP8 K8_NOP8
356
357/* Opteron nops */
358#define K8_NOP1 ".byte 0x90\n"
359#define K8_NOP2 ".byte 0x66,0x90\n"
360#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
361#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
362#define K8_NOP5 K8_NOP3 K8_NOP2
363#define K8_NOP6 K8_NOP3 K8_NOP3
364#define K8_NOP7 K8_NOP4 K8_NOP3
365#define K8_NOP8 K8_NOP4 K8_NOP4
366
367#define ASM_NOP_MAX 8
368
369/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
370extern inline void rep_nop(void)
371{
372 __asm__ __volatile__("rep;nop": : :"memory");
373}
374
375/* Stop speculative execution */
376extern inline void sync_core(void)
377{
378 int tmp;
379 asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
380}
381
382#define cpu_has_fpu 1
383
384#define ARCH_HAS_PREFETCH
385static inline void prefetch(void *x)
386{
387 asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
388}
389
390#define ARCH_HAS_PREFETCHW 1
391static inline void prefetchw(void *x)
392{
393 alternative_input(ASM_NOP5,
394 "prefetchw (%1)",
395 X86_FEATURE_3DNOW,
396 "r" (x));
397}
398
399#define ARCH_HAS_SPINLOCK_PREFETCH 1
400
401#define spin_lock_prefetch(x) prefetchw(x)
402
403#define cpu_relax() rep_nop()
404
405/*
406 * NSC/Cyrix CPU configuration register indexes
407 */
408#define CX86_CCR0 0xc0
409#define CX86_CCR1 0xc1
410#define CX86_CCR2 0xc2
411#define CX86_CCR3 0xc3
412#define CX86_CCR4 0xe8
413#define CX86_CCR5 0xe9
414#define CX86_CCR6 0xea
415#define CX86_CCR7 0xeb
416#define CX86_DIR0 0xfe
417#define CX86_DIR1 0xff
418#define CX86_ARR_BASE 0xc4
419#define CX86_RCR_BASE 0xdc
420
421/*
422 * NSC/Cyrix CPU indexed register access macros
423 */
424
425#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
426
427#define setCx86(reg, data) do { \
428 outb((reg), 0x22); \
429 outb((data), 0x23); \
430} while (0)
431
432static inline void __monitor(const void *eax, unsigned long ecx,
433 unsigned long edx)
434{
435 /* "monitor %eax,%ecx,%edx;" */
436 asm volatile(
437 ".byte 0x0f,0x01,0xc8;"
438 : :"a" (eax), "c" (ecx), "d"(edx));
439}
440
441static inline void __mwait(unsigned long eax, unsigned long ecx)
442{
443 /* "mwait %eax,%ecx;" */
444 asm volatile(
445 ".byte 0x0f,0x01,0xc9;"
446 : :"a" (eax), "c" (ecx));
447}
448
449#define stack_current() \
450({ \
451 struct thread_info *ti; \
452 asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
453 ti->task; \
454})
455
456#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
457
458extern unsigned long boot_option_idle_override;
459/* Boot loader type from the setup header */
460extern int bootloader_type;
461
462#endif /* __ASM_X86_64_PROCESSOR_H */
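
A brief sketch of the busy-wait idiom the rep_nop() comment above describes;
the flag and its producer are assumptions.

#include <asm/processor.h>

static void example_wait_ready(volatile int *ready_flag)
{
	while (!*ready_flag)
		cpu_relax();	/* expands to the rep;nop (PAUSE) hint */
}
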
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
new file mode 100644
index 000000000000..c59526ee54da
--- /dev/null
+++ b/include/asm-x86_64/proto.h
@@ -0,0 +1,118 @@
1#ifndef _ASM_X8664_PROTO_H
2#define _ASM_X8664_PROTO_H 1
3
4#include <asm/ldt.h>
5
6/* misc architecture specific prototypes */
7
8struct cpuinfo_x86;
9struct pt_regs;
10
11extern void get_cpu_vendor(struct cpuinfo_x86*);
12extern void start_kernel(void);
13extern void pda_init(int);
14
15extern void early_idt_handler(void);
16
17extern void mcheck_init(struct cpuinfo_x86 *c);
18extern void init_memory_mapping(unsigned long start, unsigned long end);
19
20extern void system_call(void);
21extern int kernel_syscall(void);
22extern void syscall_init(void);
23
24extern void ia32_syscall(void);
25extern void ia32_cstar_target(void);
26extern void ia32_sysenter_target(void);
27
28extern void config_acpi_tables(void);
30extern void iommu_hole_init(void);
31
32extern void time_init_smp(void);
33
34extern void do_softirq_thunk(void);
35
36extern int numa_setup(char *opt);
37
38extern int setup_early_printk(char *);
39extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
40
41extern void early_identify_cpu(struct cpuinfo_x86 *c);
42
43extern int k8_scan_nodes(unsigned long start, unsigned long end);
44
45extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
46extern unsigned long numa_free_all_bootmem(void);
47
48extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
49extern void free_bootmem_generic(unsigned long phys, unsigned len);
50
51extern void load_gs_index(unsigned gs);
52
53extern unsigned long end_pfn_map;
54
55extern cpumask_t cpu_initialized;
56
57extern void show_trace(unsigned long * rsp);
58extern void show_registers(struct pt_regs *regs);
59
60extern void exception_table_check(void);
61
62extern void acpi_reserve_bootmem(void);
63
64extern void swap_low_mappings(void);
65
66extern void oops_begin(void);
67extern void die(const char *,struct pt_regs *,long);
68extern void __die(const char * str, struct pt_regs * regs, long err);
69extern void __show_regs(struct pt_regs * regs);
70extern void show_regs(struct pt_regs * regs);
71
72extern int map_syscall32(struct mm_struct *mm, unsigned long address);
73extern int __map_syscall32(struct mm_struct *mm, unsigned long address);
74extern char *syscall32_page;
75extern void syscall32_cpu_init(void);
76
77extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
78
79extern void check_ioapic(void);
80extern void check_efer(void);
81
82extern int unhandled_signal(struct task_struct *tsk, int sig);
83
84extern void select_idle_routine(const struct cpuinfo_x86 *c);
85extern void swiotlb_init(void);
86
87extern unsigned long max_mapnr;
88extern unsigned long end_pfn;
89extern unsigned long table_start, table_end;
90
91extern int exception_trace;
92extern int force_iommu, no_iommu;
93extern int using_apic_timer;
94extern int disable_apic;
95extern unsigned cpu_khz;
96extern int ioapic_force;
97extern int skip_ioapic_setup;
98extern int acpi_ht;
99extern int acpi_disabled;
100
101extern int fallback_aper_order;
102extern int fallback_aper_force;
103extern int iommu_aperture;
104extern int iommu_aperture_disabled;
105extern int iommu_aperture_allowed;
106extern int fix_aperture;
108
109extern int reboot_force;
110
111extern void smp_local_timer_interrupt(struct pt_regs * regs);
112
113long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
114
115#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
116#define round_down(x,y) ((x) & ~((y)-1))
117
118#endif
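
The two rounding macros above require y to be a power of two; a quick worked
example:

	/* round_up(0x1234, 0x1000)   == (0x1234 + 0xfff) & ~0xfff == 0x2000
	 * round_down(0x1234, 0x1000) ==  0x1234           & ~0xfff == 0x1000
	 * With a non-power-of-two y the mask arithmetic silently misbehaves. */
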
diff --git a/include/asm-x86_64/ptrace.h b/include/asm-x86_64/ptrace.h
new file mode 100644
index 000000000000..c7865cfedc1f
--- /dev/null
+++ b/include/asm-x86_64/ptrace.h
@@ -0,0 +1,114 @@
1#ifndef _X86_64_PTRACE_H
2#define _X86_64_PTRACE_H
3
4#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
5#define R15 0
6#define R14 8
7#define R13 16
8#define R12 24
9#define RBP 32
10#define RBX 40
11/* arguments: interrupts/non-tracing syscalls only save up to here */
12#define R11 48
13#define R10 56
14#define R9 64
15#define R8 72
16#define RAX 80
17#define RCX 88
18#define RDX 96
19#define RSI 104
20#define RDI 112
21#define ORIG_RAX 120 /* = ERROR */
22/* end of arguments */
23/* cpu exception frame or undefined in case of fast syscall. */
24#define RIP 128
25#define CS 136
26#define EFLAGS 144
27#define RSP 152
28#define SS 160
29#define ARGOFFSET R11
30#endif /* __ASSEMBLY__ || __FRAME_OFFSETS */
31
32/* top of stack page */
33#define FRAME_SIZE 168
34
35#define PTRACE_OLDSETOPTIONS 21
36
37#ifndef __ASSEMBLY__
38
39struct pt_regs {
40 unsigned long r15;
41 unsigned long r14;
42 unsigned long r13;
43 unsigned long r12;
44 unsigned long rbp;
45 unsigned long rbx;
46/* arguments: non-interrupt/non-tracing syscalls only save up to here */
47 unsigned long r11;
48 unsigned long r10;
49 unsigned long r9;
50 unsigned long r8;
51 unsigned long rax;
52 unsigned long rcx;
53 unsigned long rdx;
54 unsigned long rsi;
55 unsigned long rdi;
56 unsigned long orig_rax;
57/* end of arguments */
58/* cpu exception frame or undefined */
59 unsigned long rip;
60 unsigned long cs;
61 unsigned long eflags;
62 unsigned long rsp;
63 unsigned long ss;
64/* top of stack page */
65};
66
67#endif
68
69/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
70#define PTRACE_GETREGS 12
71#define PTRACE_SETREGS 13
72#define PTRACE_GETFPREGS 14
73#define PTRACE_SETFPREGS 15
74#define PTRACE_GETFPXREGS 18
75#define PTRACE_SETFPXREGS 19
76
77/* only useful for accessing 32-bit programs */
78#define PTRACE_GET_THREAD_AREA 25
79#define PTRACE_SET_THREAD_AREA 26
80
81#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
82
83#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
84#define user_mode(regs) (!!((regs)->cs & 3))
85#define instruction_pointer(regs) ((regs)->rip)
86extern unsigned long profile_pc(struct pt_regs *regs);
87void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
88
89enum {
90 EF_CF = 0x00000001,
91 EF_PF = 0x00000004,
92 EF_AF = 0x00000010,
93 EF_ZF = 0x00000040,
94 EF_SF = 0x00000080,
95 EF_TF = 0x00000100,
96 EF_IE = 0x00000200,
97 EF_DF = 0x00000400,
98 EF_OF = 0x00000800,
99 EF_IOPL = 0x00003000,
100 EF_IOPL_RING0 = 0x00000000,
101 EF_IOPL_RING1 = 0x00001000,
102 EF_IOPL_RING2 = 0x00002000,
103 EF_NT = 0x00004000, /* nested task */
104 EF_RF = 0x00010000, /* resume */
105 EF_VM = 0x00020000, /* virtual mode */
106 EF_AC = 0x00040000, /* alignment */
107 EF_VIF = 0x00080000, /* virtual interrupt */
108 EF_VIP = 0x00100000, /* virtual interrupt pending */
109 EF_ID = 0x00200000, /* id */
110};
111
112#endif
113
114#endif
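
A hedged sketch of how the accessors above are typically used in a fault
path (do_example_fault is an illustrative name, not a real entry point):

	static void do_example_fault(struct pt_regs *regs)
	{
		if (user_mode(regs))	/* CPL lives in the low bits of cs */
			printk("userspace fault at rip %lx\n",
			       instruction_pointer(regs));
	}
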
diff --git a/include/asm-x86_64/resource.h b/include/asm-x86_64/resource.h
new file mode 100644
index 000000000000..f40b40623234
--- /dev/null
+++ b/include/asm-x86_64/resource.h
@@ -0,0 +1,6 @@
1#ifndef _X8664_RESOURCE_H
2#define _X8664_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif
diff --git a/include/asm-x86_64/rtc.h b/include/asm-x86_64/rtc.h
new file mode 100644
index 000000000000..18ed713ac7de
--- /dev/null
+++ b/include/asm-x86_64/rtc.h
@@ -0,0 +1,10 @@
1#ifndef _X86_64_RTC_H
2#define _X86_64_RTC_H
3
4/*
5 * x86 uses the default access methods for the RTC.
6 */
7
8#include <asm-generic/rtc.h>
9
10#endif
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
new file mode 100644
index 000000000000..8a78a4ace53c
--- /dev/null
+++ b/include/asm-x86_64/rwlock.h
@@ -0,0 +1,86 @@
1/* include/asm-x86_64/rwlock.h
2 *
3 * Helpers used by both rw spinlocks and rw semaphores.
4 *
5 * Based in part on code from semaphore.h and
6 * spinlock.h Copyright 1996 Linus Torvalds.
7 *
8 * Copyright 1999 Red Hat, Inc.
9 * Copyright 2001,2002 SuSE labs
10 *
11 * Written by Benjamin LaHaise.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifndef _ASM_X86_64_RWLOCK_H
19#define _ASM_X86_64_RWLOCK_H
20
21#include <linux/stringify.h>
22
23#define RW_LOCK_BIAS 0x01000000
24#define RW_LOCK_BIAS_STR "0x01000000"
25
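
To make the bias scheme concrete, a worked example of the counter values
(annotation, not code from this header):

	/* count == RW_LOCK_BIAS (0x01000000): lock is free.
	 * read_lock:  subl $1    -> 0x00ffffff, sign clear, granted.
	 * write_lock: subl $BIAS -> 0x00000000, ZF set, granted ("jnz" not taken).
	 * read_lock while a writer holds it: 0 - 1 = 0xffffffff, sign set,
	 * so "js" diverts to the out-of-line contention helper. */
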
26#define __build_read_lock_ptr(rw, helper) \
27 asm volatile(LOCK "subl $1,(%0)\n\t" \
28 "js 2f\n" \
29 "1:\n" \
30 LOCK_SECTION_START("") \
31 "2:\tcall " helper "\n\t" \
32 "jmp 1b\n" \
33 LOCK_SECTION_END \
34 ::"a" (rw) : "memory")
35
36#define __build_read_lock_const(rw, helper) \
37 asm volatile(LOCK "subl $1,%0\n\t" \
38 "js 2f\n" \
39 "1:\n" \
40 LOCK_SECTION_START("") \
41 "2:\tpushq %%rax\n\t" \
42 "leaq %0,%%rax\n\t" \
43 "call " helper "\n\t" \
44 "popq %%rax\n\t" \
45 "jmp 1b\n" \
46 LOCK_SECTION_END \
47 :"=m" (*((volatile int *)rw))::"memory")
48
49#define __build_read_lock(rw, helper) do { \
50 if (__builtin_constant_p(rw)) \
51 __build_read_lock_const(rw, helper); \
52 else \
53 __build_read_lock_ptr(rw, helper); \
54 } while (0)
55
56#define __build_write_lock_ptr(rw, helper) \
57 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
58 "jnz 2f\n" \
59 "1:\n" \
60 LOCK_SECTION_START("") \
61 "2:\tcall " helper "\n\t" \
62 "jmp 1b\n" \
63 LOCK_SECTION_END \
64 ::"a" (rw) : "memory")
65
66#define __build_write_lock_const(rw, helper) \
67 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
68 "jnz 2f\n" \
69 "1:\n" \
70 LOCK_SECTION_START("") \
71 "2:\tpushq %%rax\n\t" \
72 "leaq %0,%%rax\n\t" \
73 "call " helper "\n\t" \
74 "popq %%rax\n\t" \
75 "jmp 1b\n" \
76 LOCK_SECTION_END \
77 :"=m" (*((volatile long *)rw))::"memory")
78
79#define __build_write_lock(rw, helper) do { \
80 if (__builtin_constant_p(rw)) \
81 __build_write_lock_const(rw, helper); \
82 else \
83 __build_write_lock_ptr(rw, helper); \
84 } while (0)
85
86#endif
diff --git a/include/asm-x86_64/rwsem.h b/include/asm-x86_64/rwsem.h
new file mode 100644
index 000000000000..c002175b6e82
--- /dev/null
+++ b/include/asm-x86_64/rwsem.h
@@ -0,0 +1,278 @@
1/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for x86_64+
2 *
3 * Written by David Howells (dhowells@redhat.com).
4 * Ported by Andi Kleen <ak@suse.de> to x86-64.
5 *
6 * Derived from asm-i386/semaphore.h and asm-i386/rwsem.h
7 *
8 *
9 * The MSW of the count is the negated number of active writers and waiting
10 * lockers, and the LSW is the total number of active locks
11 *
12 * The lock count is initialized to 0 (no active and no waiting lockers).
13 *
14 * When a writer takes the lock, the count becomes 0xffff0001 (WRITE_BIAS
15 * added to 0) in the uncontended case; contention is detectable because
16 * XADD returns the old value (zero when uncontended). Readers increment the
17 * count by 1 and see a positive value when uncontended, or a negative value
18 * if there are writers (and possibly readers) waiting; they then go to sleep.
19 *
20 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
21 * be extended to 65534 by manually checking the whole MSW rather than relying
22 * on the S flag.
23 *
24 * The value of ACTIVE_BIAS supports up to 65535 active processes.
25 *
26 * This should be totally fair - if anything is waiting, a process that wants a
27 * lock will go to the back of the queue. When the currently active lock is
28 * released, if there's a writer at the front of the queue, then that and only
29 * that will be woken up; if there's a bunch of consecutive readers at the
30 * front, then they'll all be woken up, but no other readers will be.
31 */
32
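
A few concrete count values, derived from the bias definitions below (the
waiter line assumes a queued waiter adds WAITING_BIAS, as in the i386
implementation this was ported from):

	/* 0x00000000  unlocked
	 * 0x00000001  one active reader
	 * 0x00000003  three active readers
	 * 0xffff0001  one active writer (0 + RWSEM_ACTIVE_WRITE_BIAS)
	 * 0xfffe0001  one active writer plus one queued waiter
	 *             (MSW = -(writers + waiters) = -2, LSW = 1 active) */
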
33#ifndef _X8664_RWSEM_H
34#define _X8664_RWSEM_H
35
36#ifndef _LINUX_RWSEM_H
37#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
38#endif
39
40#ifdef __KERNEL__
41
42#include <linux/list.h>
43#include <linux/spinlock.h>
44
45struct rwsem_waiter;
46
47extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
48extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
49extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
50extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
51
52/*
53 * the semaphore definition
54 */
55struct rw_semaphore {
56 signed int count;
57#define RWSEM_UNLOCKED_VALUE 0x00000000
58#define RWSEM_ACTIVE_BIAS 0x00000001
59#define RWSEM_ACTIVE_MASK 0x0000ffff
60#define RWSEM_WAITING_BIAS (-0x00010000)
61#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
62#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
63 spinlock_t wait_lock;
64 struct list_head wait_list;
65#if RWSEM_DEBUG
66 int debug;
67#endif
68};
69
70/*
71 * initialisation
72 */
73#if RWSEM_DEBUG
74#define __RWSEM_DEBUG_INIT , 0
75#else
76#define __RWSEM_DEBUG_INIT /* */
77#endif
78
79#define __RWSEM_INITIALIZER(name) \
80{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
81 __RWSEM_DEBUG_INIT }
82
83#define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
85
86static inline void init_rwsem(struct rw_semaphore *sem)
87{
88 sem->count = RWSEM_UNLOCKED_VALUE;
89 spin_lock_init(&sem->wait_lock);
90 INIT_LIST_HEAD(&sem->wait_list);
91#if RWSEM_DEBUG
92 sem->debug = 0;
93#endif
94}
95
96/*
97 * lock for reading
98 */
99static inline void __down_read(struct rw_semaphore *sem)
100{
101 __asm__ __volatile__(
102 "# beginning down_read\n\t"
103LOCK_PREFIX " incl (%%rdi)\n\t" /* adds 0x00000001, returns the old value */
104 " js 2f\n\t" /* jump if we weren't granted the lock */
105 "1:\n\t"
106 LOCK_SECTION_START("") \
107 "2:\n\t"
108 " call rwsem_down_read_failed_thunk\n\t"
109 " jmp 1b\n"
110 LOCK_SECTION_END \
111 "# ending down_read\n\t"
112 : "+m"(sem->count)
113 : "D"(sem)
114 : "memory", "cc");
115}
116
117
118/*
119 * trylock for reading -- returns 1 if successful, 0 if contention
120 */
121static inline int __down_read_trylock(struct rw_semaphore *sem)
122{
123 __s32 result, tmp;
124 __asm__ __volatile__(
125 "# beginning __down_read_trylock\n\t"
126 " movl %0,%1\n\t"
127 "1:\n\t"
128 " movl %1,%2\n\t"
129 " addl %3,%2\n\t"
130 " jle 2f\n\t"
131LOCK_PREFIX " cmpxchgl %2,%0\n\t"
132 " jnz 1b\n\t"
133 "2:\n\t"
134 "# ending __down_read_trylock\n\t"
135 : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
136 : "i"(RWSEM_ACTIVE_READ_BIAS)
137 : "memory", "cc");
138 return result>=0 ? 1 : 0;
139}
140
141
142/*
143 * lock for writing
144 */
145static inline void __down_write(struct rw_semaphore *sem)
146{
147 int tmp;
148
149 tmp = RWSEM_ACTIVE_WRITE_BIAS;
150 __asm__ __volatile__(
151 "# beginning down_write\n\t"
152LOCK_PREFIX " xaddl %0,(%%rdi)\n\t" /* subtract 0x0000ffff, returns the old value */
153 " testl %0,%0\n\t" /* was the count 0 before? */
154 " jnz 2f\n\t" /* jump if we weren't granted the lock */
155 "1:\n\t"
156 LOCK_SECTION_START("")
157 "2:\n\t"
158 " call rwsem_down_write_failed_thunk\n\t"
159 " jmp 1b\n"
160 LOCK_SECTION_END
161 "# ending down_write"
162 : "=&r" (tmp)
163 : "0"(tmp), "D"(sem)
164 : "memory", "cc");
165}
166
167/*
168 * trylock for writing -- returns 1 if successful, 0 if contention
169 */
170static inline int __down_write_trylock(struct rw_semaphore *sem)
171{
172 signed long ret = cmpxchg(&sem->count,
173 RWSEM_UNLOCKED_VALUE,
174 RWSEM_ACTIVE_WRITE_BIAS);
175 if (ret == RWSEM_UNLOCKED_VALUE)
176 return 1;
177 return 0;
178}
179
180/*
181 * unlock after reading
182 */
183static inline void __up_read(struct rw_semaphore *sem)
184{
185 __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
186 __asm__ __volatile__(
187 "# beginning __up_read\n\t"
188LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* subtracts 1, returns the old value */
189 " js 2f\n\t" /* jump if the lock is being waited upon */
190 "1:\n\t"
191 LOCK_SECTION_START("")
192 "2:\n\t"
193 " decw %w[tmp]\n\t" /* do nothing if still outstanding active readers */
194 " jnz 1b\n\t"
195 " call rwsem_wake_thunk\n\t"
196 " jmp 1b\n"
197 LOCK_SECTION_END
198 "# ending __up_read\n"
199 : "+m"(sem->count), [tmp] "+r" (tmp)
200 : "D"(sem)
201 : "memory", "cc");
202}
203
204/*
205 * unlock after writing
206 */
207static inline void __up_write(struct rw_semaphore *sem)
208{
209 unsigned tmp;
210 __asm__ __volatile__(
211 "# beginning __up_write\n\t"
212 " movl %[bias],%[tmp]\n\t"
213LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
214 " jnz 2f\n\t" /* jump if the lock is being waited upon */
215 "1:\n\t"
216 LOCK_SECTION_START("")
217 "2:\n\t"
218 " decw %w[tmp]\n\t" /* did the active count reduce to 0? */
219 " jnz 1b\n\t" /* jump back if not */
220 " call rwsem_wake_thunk\n\t"
221 " jmp 1b\n"
222 LOCK_SECTION_END
223 "# ending __up_write\n"
224 : "+m"(sem->count), [tmp] "=r" (tmp)
225 : "D"(sem), [bias] "i"(-RWSEM_ACTIVE_WRITE_BIAS)
226 : "memory", "cc");
227}
228
229/*
230 * downgrade write lock to read lock
231 */
232static inline void __downgrade_write(struct rw_semaphore *sem)
233{
234 __asm__ __volatile__(
235 "# beginning __downgrade_write\n\t"
236LOCK_PREFIX " addl %[bias],(%%rdi)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
237 " js 2f\n\t" /* jump if the lock is being waited upon */
238 "1:\n\t"
239 LOCK_SECTION_START("")
240 "2:\n\t"
241 " call rwsem_downgrade_thunk\n"
242 " jmp 1b\n"
243 LOCK_SECTION_END
244 "# ending __downgrade_write\n"
245 : "=m"(sem->count)
246 : "D"(sem), [bias] "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
247 : "memory", "cc");
248}
249
250/*
251 * implement atomic add functionality
252 */
253static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
254{
255 __asm__ __volatile__(
256LOCK_PREFIX "addl %1,%0"
257 :"=m"(sem->count)
258 :"ir"(delta), "m"(sem->count));
259}
260
261/*
262 * implement exchange and add functionality
263 */
264static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
265{
266 int tmp = delta;
267
268 __asm__ __volatile__(
269LOCK_PREFIX "xaddl %0,(%2)"
270 : "=r"(tmp), "=m"(sem->count)
271 : "r"(sem), "m"(sem->count), "0" (tmp)
272 : "memory");
273
274 return tmp+delta;
275}
276
277#endif /* __KERNEL__ */
278#endif /* _X8664_RWSEM_H */
diff --git a/include/asm-x86_64/scatterlist.h b/include/asm-x86_64/scatterlist.h
new file mode 100644
index 000000000000..49d89f8129cd
--- /dev/null
+++ b/include/asm-x86_64/scatterlist.h
@@ -0,0 +1,22 @@
1#ifndef _X8664_SCATTERLIST_H
2#define _X8664_SCATTERLIST_H
3
4struct scatterlist {
5 struct page *page;
6 unsigned int offset;
7 unsigned int length;
8 dma_addr_t dma_address;
9 unsigned int dma_length;
10};
11
12#define ISA_DMA_THRESHOLD (0x00ffffff)
13
14/* These macros should be used after a pci_map_sg call has been done
15 * to get bus addresses of each of the SG entries and their lengths.
16 * You should only work with the number of sg entries pci_map_sg
17 * returns.
18 */
19#define sg_dma_address(sg) ((sg)->dma_address)
20#define sg_dma_len(sg) ((sg)->dma_length)
21
22#endif
diff --git a/include/asm-x86_64/seccomp.h b/include/asm-x86_64/seccomp.h
new file mode 100644
index 000000000000..553af65a2287
--- /dev/null
+++ b/include/asm-x86_64/seccomp.h
@@ -0,0 +1,24 @@
1#ifndef _ASM_SECCOMP_H
#define _ASM_SECCOMP_H
2
3#include <linux/thread_info.h>
4
5#ifdef TIF_32BIT
6#error "unexpected TIF_32BIT on x86_64"
7#else
8#define TIF_32BIT TIF_IA32
9#endif
10
11#include <linux/unistd.h>
12#include <asm/ia32_unistd.h>
13
14#define __NR_seccomp_read __NR_read
15#define __NR_seccomp_write __NR_write
16#define __NR_seccomp_exit __NR_exit
17#define __NR_seccomp_sigreturn __NR_rt_sigreturn
18
19#define __NR_seccomp_read_32 __NR_ia32_read
20#define __NR_seccomp_write_32 __NR_ia32_write
21#define __NR_seccomp_exit_32 __NR_ia32_exit
22#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
23
24#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-x86_64/sections.h b/include/asm-x86_64/sections.h
new file mode 100644
index 000000000000..c746d9f1e70c
--- /dev/null
+++ b/include/asm-x86_64/sections.h
@@ -0,0 +1,7 @@
1#ifndef _X8664_SECTIONS_H
2#define _X8664_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7#endif
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
new file mode 100644
index 000000000000..44adaf18c11e
--- /dev/null
+++ b/include/asm-x86_64/segment.h
@@ -0,0 +1,46 @@
1#ifndef _ASM_SEGMENT_H
2#define _ASM_SEGMENT_H
3
4#include <asm/cache.h>
5
6#define __KERNEL_CS 0x10
7#define __KERNEL_DS 0x18
8
9#define __KERNEL32_CS 0x38
10
11/*
12 * we cannot use the same code segment descriptor for user and kernel
13 * -- not even in the long flat mode, because of different DPL /kkeil
14 * The segment offset needs to contain an RPL. Grr. -AK
15 * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
16 */
17
18#define __USER32_CS 0x23 /* 4*8+3 */
19#define __USER_DS 0x2b /* 5*8+3 */
20#define __USER_CS 0x33 /* 6*8+3 */
21#define __USER32_DS __USER_DS
22#define __KERNEL16_CS (GDT_ENTRY_KERNELCS16 * 8)
23#define __KERNEL_COMPAT32_CS 0x8
24
25#define GDT_ENTRY_TLS 1
26#define GDT_ENTRY_TSS 8 /* needs two entries */
27#define GDT_ENTRY_LDT 10 /* needs two entries */
28#define GDT_ENTRY_TLS_MIN 12
29#define GDT_ENTRY_TLS_MAX 14
30#define GDT_ENTRY_KERNELCS16 15
31
32#define GDT_ENTRY_TLS_ENTRIES 3
33
34/* TLS indexes for 64bit - hardcoded in arch_prctl */
35#define FS_TLS 0
36#define GS_TLS 1
37
38#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
39#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
40
41#define IDT_ENTRIES 256
42#define GDT_ENTRIES 16
43#define GDT_SIZE (GDT_ENTRIES * 8)
44#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
45
46#endif
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
new file mode 100644
index 000000000000..f325e39bf3b9
--- /dev/null
+++ b/include/asm-x86_64/semaphore.h
@@ -0,0 +1,196 @@
1#ifndef _X86_64_SEMAPHORE_H
2#define _X86_64_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * (C) Copyright 1996 Linus Torvalds
12 *
13 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
14 * the original code and to make semaphore waits
15 * interruptible so that processes waiting on
16 * semaphores can be killed.
17 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
18 * functions in asm/semaphore-helper.h while fixing a
19 * potential and subtle race discovered by Ulrich Schmid
20 * in down_interruptible(). Since I started to play here I
21 * also implemented the `trylock' semaphore operation.
22 * 1999-07-02 Artur Skawina <skawina@geocities.com>
23 * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
24 * do this). Changed calling sequences from push/jmp to
25 * traditional call/ret.
26 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
27 * Some hacks to ensure compatibility with recent
28 * GCC snapshots, to avoid stack corruption when compiling
29 * with -fomit-frame-pointer. It's not clear whether this will
30 * be fixed in GCC, as our previous implementation was a
31 * bit dubious.
32 *
33 * If you would like to see an analysis of this implementation, please
34 * ftp to gcom.com and download the file
35 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
36 *
37 */
38
39#include <asm/system.h>
40#include <asm/atomic.h>
41#include <asm/rwlock.h>
42#include <linux/wait.h>
43#include <linux/rwsem.h>
44#include <linux/stringify.h>
45
46struct semaphore {
47 atomic_t count;
48 int sleepers;
49 wait_queue_head_t wait;
50};
51
52#define __SEMAPHORE_INITIALIZER(name, n) \
53{ \
54 .count = ATOMIC_INIT(n), \
55 .sleepers = 0, \
56 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
57}
58
59#define __MUTEX_INITIALIZER(name) \
60 __SEMAPHORE_INITIALIZER(name,1)
61
62#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
63 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
64
65#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
66#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
67
68static inline void sema_init (struct semaphore *sem, int val)
69{
70/*
71 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
72 *
73 * i'd rather use the more flexible initialization above, but sadly
74 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
75 */
76 atomic_set(&sem->count, val);
77 sem->sleepers = 0;
78 init_waitqueue_head(&sem->wait);
79}
80
81static inline void init_MUTEX (struct semaphore *sem)
82{
83 sema_init(sem, 1);
84}
85
86static inline void init_MUTEX_LOCKED (struct semaphore *sem)
87{
88 sema_init(sem, 0);
89}
90
91asmlinkage void __down_failed(void /* special register calling convention */);
92asmlinkage int __down_failed_interruptible(void /* params in registers */);
93asmlinkage int __down_failed_trylock(void /* params in registers */);
94asmlinkage void __up_wakeup(void /* special register calling convention */);
95
96asmlinkage void __down(struct semaphore * sem);
97asmlinkage int __down_interruptible(struct semaphore * sem);
98asmlinkage int __down_trylock(struct semaphore * sem);
99asmlinkage void __up(struct semaphore * sem);
100
101/*
102 * This is ugly, but we want the default case to fall through.
103 * "__down_failed" is a special asm handler that calls the C
104 * routine that actually waits. See arch/x86_64/kernel/semaphore.c
105 */
106static inline void down(struct semaphore * sem)
107{
108 might_sleep();
109
110 __asm__ __volatile__(
111 "# atomic down operation\n\t"
112 LOCK "decl %0\n\t" /* --sem->count */
113 "js 2f\n"
114 "1:\n"
115 LOCK_SECTION_START("")
116 "2:\tcall __down_failed\n\t"
117 "jmp 1b\n"
118 LOCK_SECTION_END
119 :"=m" (sem->count)
120 :"D" (sem)
121 :"memory");
122}
123
124/*
125 * Interruptibly try to acquire a semaphore. If we obtain
126 * it, return zero; if we are interrupted, return -EINTR.
127 */
128static inline int down_interruptible(struct semaphore * sem)
129{
130 int result;
131
132 might_sleep();
133
134 __asm__ __volatile__(
135 "# atomic interruptible down operation\n\t"
136 LOCK "decl %1\n\t" /* --sem->count */
137 "js 2f\n\t"
138 "xorl %0,%0\n"
139 "1:\n"
140 LOCK_SECTION_START("")
141 "2:\tcall __down_failed_interruptible\n\t"
142 "jmp 1b\n"
143 LOCK_SECTION_END
144 :"=a" (result), "=m" (sem->count)
145 :"D" (sem)
146 :"memory");
147 return result;
148}
149
150/*
151 * Non-blockingly attempt to down() a semaphore.
152 * Returns zero if we acquired it.
153 */
154static inline int down_trylock(struct semaphore * sem)
155{
156 int result;
157
158 __asm__ __volatile__(
159 "# atomic trylock down operation\n\t"
160 LOCK "decl %1\n\t" /* --sem->count */
161 "js 2f\n\t"
162 "xorl %0,%0\n"
163 "1:\n"
164 LOCK_SECTION_START("")
165 "2:\tcall __down_failed_trylock\n\t"
166 "jmp 1b\n"
167 LOCK_SECTION_END
168 :"=a" (result), "=m" (sem->count)
169 :"D" (sem)
170 :"memory","cc");
171 return result;
172}
173
174/*
175 * Note! This is subtle. We jump to wake people up only if
176 * the semaphore was negative (== somebody was waiting on it).
177 * The default case (no contention) will result in NO
178 * jumps for both down() and up().
179 */
180static inline void up(struct semaphore * sem)
181{
182 __asm__ __volatile__(
183 "# atomic up operation\n\t"
184 LOCK "incl %0\n\t" /* ++sem->count */
185 "jle 2f\n"
186 "1:\n"
187 LOCK_SECTION_START("")
188 "2:\tcall __up_wakeup\n\t"
189 "jmp 1b\n"
190 LOCK_SECTION_END
191 :"=m" (sem->count)
192 :"D" (sem)
193 :"memory");
194}
195#endif /* __KERNEL__ */
196#endif
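
A hedged usage sketch (my_sem and do_work are illustrative names): the mutex
case pairs one down() with one up(), and the interruptible variant must have
its return value checked.

	static DECLARE_MUTEX(my_sem);		/* count starts at 1 */

	static int guarded_work(void)
	{
		if (down_interruptible(&my_sem))
			return -EINTR;		/* interrupted while sleeping */
		do_work();			/* critical section, count == 0 */
		up(&my_sem);			/* back to 1, wakes a sleeper */
		return 0;
	}
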
diff --git a/include/asm-x86_64/sembuf.h b/include/asm-x86_64/sembuf.h
new file mode 100644
index 000000000000..63b52925ae2a
--- /dev/null
+++ b/include/asm-x86_64/sembuf.h
@@ -0,0 +1,25 @@
1#ifndef _X86_64_SEMBUF_H
2#define _X86_64_SEMBUF_H
3
4/*
5 * The semid64_ds structure for the x86_64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16 __kernel_time_t sem_otime; /* last semop time */
17 unsigned long __unused1;
18 __kernel_time_t sem_ctime; /* last change time */
19 unsigned long __unused2;
20 unsigned long sem_nsems; /* no. of semaphores in array */
21 unsigned long __unused3;
22 unsigned long __unused4;
23};
24
25#endif /* _X86_64_SEMBUF_H */
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
new file mode 100644
index 000000000000..dbab232044cd
--- /dev/null
+++ b/include/asm-x86_64/serial.h
@@ -0,0 +1,130 @@
1/*
2 * include/asm-x86_64/serial.h
3 */
4
5#include <linux/config.h>
6
7/*
8 * This assumes you have a 1.8432 MHz clock for your UART.
9 *
10 * It'd be nice if someone built a serial card with a 24.576 MHz
11 * clock, since the 16550A is capable of handling a top speed of 1.5
12 * megabits/second; but this requires the faster clock.
13 */
14#define BASE_BAUD ( 1843200 / 16 )
15
16/* Standard COM flags (except for COM4, because of the 8514 problem) */
17#ifdef CONFIG_SERIAL_DETECT_IRQ
18#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
19#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
20#else
21#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
22#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
23#endif
24
25#ifdef CONFIG_SERIAL_MANY_PORTS
26#define FOURPORT_FLAGS ASYNC_FOURPORT
27#define ACCENT_FLAGS 0
28#define BOCA_FLAGS 0
29#define HUB6_FLAGS 0
30#endif
31
32#define MCA_COM_FLAGS (STD_COM_FLAGS|ASYNC_BOOT_ONLYMCA)
33
34/*
35 * The following define the access methods for the HUB6 card. All
36 * access is through two ports for all 24 possible chips. The card is
37 * selected through the high 2 bits, the port on that card with the
38 * "middle" 3 bits, and the register on that port with the bottom
39 * 3 bits.
40 *
41 * While the access port and interrupt are configurable, the default
42 * port locations are 0x302 for the port control register, and 0x303
43 * for the data read/write register. Normally, the interrupt is at irq3
44 * but can be anything from 3 to 7 inclusive. Note that using 3 will
45 * require disabling com2.
46 */
47
48#define C_P(card,port) (((card)<<6|(port)<<3) + 1)
49
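
Decoding the C_P() packing with a worked example:

	/* C_P(1,2) == ((1<<6) | (2<<3)) + 1 == 0x40 + 0x10 + 1 == 0x51:
	 * card 1 in the top two bits, port 2 in the middle three bits,
	 * with the low +1 presumably acting as an enable bit. */
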
50#define STD_SERIAL_PORT_DEFNS \
51 /* UART CLK PORT IRQ FLAGS */ \
52 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
53 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
54 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
55 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
56
57
58#ifdef CONFIG_SERIAL_MANY_PORTS
59#define EXTRA_SERIAL_PORT_DEFNS \
60 { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \
61 { 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS }, /* ttyS5 */ \
62 { 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS }, /* ttyS6 */ \
63 { 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS }, /* ttyS7 */ \
64 { 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS }, /* ttyS8 */ \
65 { 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS }, /* ttyS9 */ \
66 { 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS }, /* ttyS10 */ \
67 { 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS }, /* ttyS11 */ \
68 { 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS }, /* ttyS12 */ \
69 { 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS }, /* ttyS13 */ \
70 { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \
71 { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \
72 { 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS }, /* ttyS16 */ \
73 { 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS }, /* ttyS17 */ \
74 { 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS }, /* ttyS18 */ \
75 { 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS }, /* ttyS19 */ \
76 { 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS }, /* ttyS20 */ \
77 { 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS }, /* ttyS21 */ \
78 { 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS }, /* ttyS22 */ \
79 { 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS }, /* ttyS23 */ \
80 { 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS }, /* ttyS24 */ \
81 { 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS }, /* ttyS25 */ \
82 { 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS }, /* ttyS26 */ \
83 { 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS }, /* ttyS27 */ \
84 { 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS }, /* ttyS28 */ \
85 { 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS }, /* ttyS29 */ \
86 { 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS }, /* ttyS30 */ \
87 { 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS }, /* ttyS31 */
88#else
89#define EXTRA_SERIAL_PORT_DEFNS
90#endif
91
92/* You can have up to four HUB6's in the system, but I've only
93 * included two cards here for a total of twelve ports.
94 */
95#if (defined(CONFIG_HUB6) && defined(CONFIG_SERIAL_MANY_PORTS))
96#define HUB6_SERIAL_PORT_DFNS \
97 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,0) }, /* ttyS32 */ \
98 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,1) }, /* ttyS33 */ \
99 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,2) }, /* ttyS34 */ \
100 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,3) }, /* ttyS35 */ \
101 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,4) }, /* ttyS36 */ \
102 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,5) }, /* ttyS37 */ \
103 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,0) }, /* ttyS38 */ \
104 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,1) }, /* ttyS39 */ \
105 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,2) }, /* ttyS40 */ \
106 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,3) }, /* ttyS41 */ \
107 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,4) }, /* ttyS42 */ \
108 { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,5) }, /* ttyS43 */
109#else
110#define HUB6_SERIAL_PORT_DFNS
111#endif
112
113#ifdef CONFIG_MCA
114#define MCA_SERIAL_PORT_DFNS \
115 { 0, BASE_BAUD, 0x3220, 3, MCA_COM_FLAGS }, \
116 { 0, BASE_BAUD, 0x3228, 3, MCA_COM_FLAGS }, \
117 { 0, BASE_BAUD, 0x4220, 3, MCA_COM_FLAGS }, \
118 { 0, BASE_BAUD, 0x4228, 3, MCA_COM_FLAGS }, \
119 { 0, BASE_BAUD, 0x5220, 3, MCA_COM_FLAGS }, \
120 { 0, BASE_BAUD, 0x5228, 3, MCA_COM_FLAGS },
121#else
122#define MCA_SERIAL_PORT_DFNS
123#endif
124
125#define SERIAL_PORT_DFNS \
126 STD_SERIAL_PORT_DEFNS \
127 EXTRA_SERIAL_PORT_DEFNS \
128 HUB6_SERIAL_PORT_DFNS \
129 MCA_SERIAL_PORT_DFNS
130
diff --git a/include/asm-x86_64/setup.h b/include/asm-x86_64/setup.h
new file mode 100644
index 000000000000..985d4e3c79da
--- /dev/null
+++ b/include/asm-x86_64/setup.h
@@ -0,0 +1,6 @@
1#ifndef _x8664_SETUP_H
2#define _x8664_SETUP_H
3
4#define COMMAND_LINE_SIZE 256
5
6#endif
diff --git a/include/asm-x86_64/shmbuf.h b/include/asm-x86_64/shmbuf.h
new file mode 100644
index 000000000000..5a6d6dda7c48
--- /dev/null
+++ b/include/asm-x86_64/shmbuf.h
@@ -0,0 +1,38 @@
1#ifndef _X8664_SHMBUF_H
2#define _X8664_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for the x86_64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct shmid64_ds {
14 struct ipc64_perm shm_perm; /* operation perms */
15 size_t shm_segsz; /* size of segment (bytes) */
16 __kernel_time_t shm_atime; /* last attach time */
17 __kernel_time_t shm_dtime; /* last detach time */
18 __kernel_time_t shm_ctime; /* last change time */
19 __kernel_pid_t shm_cpid; /* pid of creator */
20 __kernel_pid_t shm_lpid; /* pid of last operator */
21 unsigned long shm_nattch; /* no. of current attaches */
22 unsigned long __unused4;
23 unsigned long __unused5;
24};
25
26struct shminfo64 {
27 unsigned long shmmax;
28 unsigned long shmmin;
29 unsigned long shmmni;
30 unsigned long shmseg;
31 unsigned long shmall;
32 unsigned long __unused1;
33 unsigned long __unused2;
34 unsigned long __unused3;
35 unsigned long __unused4;
36};
37
38#endif
diff --git a/include/asm-x86_64/shmparam.h b/include/asm-x86_64/shmparam.h
new file mode 100644
index 000000000000..d7021620dcb7
--- /dev/null
+++ b/include/asm-x86_64/shmparam.h
@@ -0,0 +1,6 @@
1#ifndef _ASMX8664_SHMPARAM_H
2#define _ASMX8664_SHMPARAM_H
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* _ASMX8664_SHMPARAM_H */
diff --git a/include/asm-x86_64/sigcontext.h b/include/asm-x86_64/sigcontext.h
new file mode 100644
index 000000000000..b4e40236666c
--- /dev/null
+++ b/include/asm-x86_64/sigcontext.h
@@ -0,0 +1,55 @@
1#ifndef _ASM_X86_64_SIGCONTEXT_H
2#define _ASM_X86_64_SIGCONTEXT_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7/* FXSAVE frame */
8/* Note: reserved1/2 may someday contain valuable data. Always save/restore
9 them when you change signal frames. */
10struct _fpstate {
11 __u16 cwd;
12 __u16 swd;
13 __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
14 __u16 fop;
15 __u64 rip;
16 __u64 rdp;
17 __u32 mxcsr;
18 __u32 mxcsr_mask;
19 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
20 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
21 __u32 reserved2[24];
22};
23
24struct sigcontext {
25 unsigned long r8;
26 unsigned long r9;
27 unsigned long r10;
28 unsigned long r11;
29 unsigned long r12;
30 unsigned long r13;
31 unsigned long r14;
32 unsigned long r15;
33 unsigned long rdi;
34 unsigned long rsi;
35 unsigned long rbp;
36 unsigned long rbx;
37 unsigned long rdx;
38 unsigned long rax;
39 unsigned long rcx;
40 unsigned long rsp;
41 unsigned long rip;
42 unsigned long eflags; /* RFLAGS */
43 unsigned short cs;
44 unsigned short gs;
45 unsigned short fs;
46 unsigned short __pad0;
47 unsigned long err;
48 unsigned long trapno;
49 unsigned long oldmask;
50 unsigned long cr2;
51 struct _fpstate __user *fpstate; /* zero when no FPU context */
52 unsigned long reserved1[8];
53};
54
55#endif
diff --git a/include/asm-x86_64/sigcontext32.h b/include/asm-x86_64/sigcontext32.h
new file mode 100644
index 000000000000..3d657038ab7c
--- /dev/null
+++ b/include/asm-x86_64/sigcontext32.h
@@ -0,0 +1,71 @@
1#ifndef _SIGCONTEXT32_H
2#define _SIGCONTEXT32_H 1
3
4/* signal context for 32bit programs. */
5
6#define X86_FXSR_MAGIC 0x0000
7
8struct _fpreg {
9 unsigned short significand[4];
10 unsigned short exponent;
11};
12
13struct _fpxreg {
14 unsigned short significand[4];
15 unsigned short exponent;
16 unsigned short padding[3];
17};
18
19struct _xmmreg {
20 __u32 element[4];
21};
22
23/* FSAVE frame with extensions */
24struct _fpstate_ia32 {
25 /* Regular FPU environment */
26 __u32 cw;
27 __u32 sw;
28 __u32 tag; /* not compatible with the 64-bit twd */
29 __u32 ipoff;
30 __u32 cssel;
31 __u32 dataoff;
32 __u32 datasel;
33 struct _fpreg _st[8];
34 unsigned short status;
35 unsigned short magic; /* 0xffff = regular FPU data only */
36
37 /* FXSR FPU environment */
38 __u32 _fxsr_env[6];
39 __u32 mxcsr;
40 __u32 reserved;
41 struct _fpxreg _fxsr_st[8];
42 struct _xmmreg _xmm[8]; /* It's actually 16 */
43 __u32 padding[56];
44};
45
46struct sigcontext_ia32 {
47 unsigned short gs, __gsh;
48 unsigned short fs, __fsh;
49 unsigned short es, __esh;
50 unsigned short ds, __dsh;
51 unsigned int edi;
52 unsigned int esi;
53 unsigned int ebp;
54 unsigned int esp;
55 unsigned int ebx;
56 unsigned int edx;
57 unsigned int ecx;
58 unsigned int eax;
59 unsigned int trapno;
60 unsigned int err;
61 unsigned int eip;
62 unsigned short cs, __csh;
63 unsigned int eflags;
64 unsigned int esp_at_signal;
65 unsigned short ss, __ssh;
66 unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
67 unsigned int oldmask;
68 unsigned int cr2;
69};
70
71#endif
diff --git a/include/asm-x86_64/siginfo.h b/include/asm-x86_64/siginfo.h
new file mode 100644
index 000000000000..7bc15985f124
--- /dev/null
+++ b/include/asm-x86_64/siginfo.h
@@ -0,0 +1,10 @@
1#ifndef _X8664_SIGINFO_H
2#define _X8664_SIGINFO_H
3
4#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
5
6#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4)
7
8#include <asm-generic/siginfo.h>
9
10#endif
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
new file mode 100644
index 000000000000..643a20d73765
--- /dev/null
+++ b/include/asm-x86_64/signal.h
@@ -0,0 +1,213 @@
1#ifndef _ASMx8664_SIGNAL_H
2#define _ASMx8664_SIGNAL_H
3
4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#include <linux/linkage.h>
7#include <linux/time.h>
8
9/* Avoid too many header ordering problems. */
10struct siginfo;
11
12#ifdef __KERNEL__
13/* Most things should be clean enough to redefine this at will, if care
14 is taken to make libc match. */
15
16#define _NSIG 64
17#define _NSIG_BPW 64
18#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
19
20typedef unsigned long old_sigset_t; /* at least 32 bits */
21
22typedef struct {
23 unsigned long sig[_NSIG_WORDS];
24} sigset_t;
25
26
27struct pt_regs;
28asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
29
30
31#else
32/* Here we must cater to libcs that poke about in kernel headers. */
33
34#define NSIG 32
35typedef unsigned long sigset_t;
36
37#endif /* __KERNEL__ */
38#endif
39
40#define SIGHUP 1
41#define SIGINT 2
42#define SIGQUIT 3
43#define SIGILL 4
44#define SIGTRAP 5
45#define SIGABRT 6
46#define SIGIOT 6
47#define SIGBUS 7
48#define SIGFPE 8
49#define SIGKILL 9
50#define SIGUSR1 10
51#define SIGSEGV 11
52#define SIGUSR2 12
53#define SIGPIPE 13
54#define SIGALRM 14
55#define SIGTERM 15
56#define SIGSTKFLT 16
57#define SIGCHLD 17
58#define SIGCONT 18
59#define SIGSTOP 19
60#define SIGTSTP 20
61#define SIGTTIN 21
62#define SIGTTOU 22
63#define SIGURG 23
64#define SIGXCPU 24
65#define SIGXFSZ 25
66#define SIGVTALRM 26
67#define SIGPROF 27
68#define SIGWINCH 28
69#define SIGIO 29
70#define SIGPOLL SIGIO
71/*
72#define SIGLOST 29
73*/
74#define SIGPWR 30
75#define SIGSYS 31
76#define SIGUNUSED 31
77
78/* These should not be considered constants from userland. */
79#define SIGRTMIN 32
80#define SIGRTMAX _NSIG
81
82/*
83 * SA_FLAGS values:
84 *
85 * SA_ONSTACK indicates that a registered stack_t will be used.
86 * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
87 * SA_RESTART flag to get restarting signals (which were the default long ago).
88 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
89 * SA_RESETHAND clears the handler when the signal is delivered.
90 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
91 * SA_NODEFER prevents the current signal from being masked in the handler.
92 *
93 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
94 * Unix names RESETHAND and NODEFER respectively.
95 */
96#define SA_NOCLDSTOP 0x00000001
97#define SA_NOCLDWAIT 0x00000002
98#define SA_SIGINFO 0x00000004
99#define SA_ONSTACK 0x08000000
100#define SA_RESTART 0x10000000
101#define SA_NODEFER 0x40000000
102#define SA_RESETHAND 0x80000000
103
104#define SA_NOMASK SA_NODEFER
105#define SA_ONESHOT SA_RESETHAND
106#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
107
108#define SA_RESTORER 0x04000000
109
110/*
111 * sigaltstack controls
112 */
113#define SS_ONSTACK 1
114#define SS_DISABLE 2
115
116#define MINSIGSTKSZ 2048
117#define SIGSTKSZ 8192
118
119#ifdef __KERNEL__
120
121/*
122 * These values of sa_flags are used only by the kernel as part of the
123 * irq handling routines.
124 *
125 * SA_INTERRUPT is also used by the irq handling routines.
126 * SA_SHIRQ is for shared interrupt support on PCI and EISA.
127 */
128#define SA_PROBE SA_ONESHOT
129#define SA_SAMPLE_RANDOM SA_RESTART
130#define SA_SHIRQ 0x04000000
131#endif
132
133#define SIG_BLOCK 0 /* for blocking signals */
134#define SIG_UNBLOCK 1 /* for unblocking signals */
135#define SIG_SETMASK 2 /* for setting the signal mask */
136
137#ifndef __ASSEMBLY__
138/* Type of a signal handler. */
139typedef void __signalfn_t(int);
140typedef __signalfn_t __user *__sighandler_t;
141
142typedef void __restorefn_t(void);
143typedef __restorefn_t __user *__sigrestore_t;
144
145#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
146#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
147#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
148
149struct sigaction {
150 __sighandler_t sa_handler;
151 unsigned long sa_flags;
152 __sigrestore_t sa_restorer;
153 sigset_t sa_mask; /* mask last for extensibility */
154};
155
156struct k_sigaction {
157 struct sigaction sa;
158};
159
160typedef struct sigaltstack {
161 void __user *ss_sp;
162 int ss_flags;
163 size_t ss_size;
164} stack_t;
165
166#ifdef __KERNEL__
167#include <asm/sigcontext.h>
168
169#undef __HAVE_ARCH_SIG_BITOPS
170#if 0
171
172extern __inline__ void sigaddset(sigset_t *set, int _sig)
173{
174 __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
175}
176
177extern __inline__ void sigdelset(sigset_t *set, int _sig)
178{
179 __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
180}
181
182extern __inline__ int __const_sigismember(sigset_t *set, int _sig)
183{
184 unsigned long sig = _sig - 1;
185 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
186}
187
188extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
189{
190 int ret;
191 __asm__("btq %2,%1\n\tsbbq %0,%0"
192 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
193 return ret;
194}
195
196#define sigismember(set,sig) \
197 (__builtin_constant_p(sig) ? \
198 __const_sigismember((set),(sig)) : \
199 __gen_sigismember((set),(sig)))
200
201extern __inline__ int sigfindinword(unsigned long word)
202{
203 __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
204 return word;
205}
206#endif
207#endif
208
209#define ptrace_signal_deliver(regs, cookie) do { } while (0)
210
211#endif /* __KERNEL__ */
212
213#endif
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
new file mode 100644
index 000000000000..fe523e3e2ff4
--- /dev/null
+++ b/include/asm-x86_64/smp.h
@@ -0,0 +1,149 @@
1#ifndef __ASM_SMP_H
2#define __ASM_SMP_H
3
4/*
5 * We need the APIC definitions automatically as part of 'smp.h'
6 */
7#ifndef __ASSEMBLY__
8#include <linux/config.h>
9#include <linux/threads.h>
10#include <linux/cpumask.h>
11#include <linux/bitops.h>
12extern int disable_apic;
13#endif
14
15#ifdef CONFIG_X86_LOCAL_APIC
16#ifndef __ASSEMBLY__
17#include <asm/fixmap.h>
18#include <asm/mpspec.h>
19#ifdef CONFIG_X86_IO_APIC
20#include <asm/io_apic.h>
21#endif
22#include <asm/apic.h>
23#include <asm/thread_info.h>
24#endif
25#endif
26
27#ifdef CONFIG_SMP
28#ifndef ASSEMBLY
29
30#include <asm/pda.h>
31
32struct pt_regs;
33
34/*
35 * Private routines/data
36 */
37
38extern void smp_alloc_memory(void);
39extern cpumask_t cpu_online_map;
40extern volatile unsigned long smp_invalidate_needed;
41extern int pic_mode;
42extern int smp_num_siblings;
43extern void smp_flush_tlb(void);
44extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
45extern void smp_send_reschedule(int cpu);
46extern void smp_invalidate_rcv(void); /* Process an NMI */
47extern void (*mtrr_hook) (void);
48extern void zap_low_mappings(void);
49void smp_stop_cpu(void);
50extern cpumask_t cpu_sibling_map[NR_CPUS];
51extern u8 phys_proc_id[NR_CPUS];
52
53#define SMP_TRAMPOLINE_BASE 0x6000
54
55/*
56 * On x86 all CPUs are mapped 1:1 to the APIC space.
57 * This simplifies scheduling and IPI sending and
58 * compresses data structures.
59 */
60
61extern cpumask_t cpu_callout_map;
62extern cpumask_t cpu_callin_map;
63#define cpu_possible_map cpu_callout_map
64
65static inline int num_booting_cpus(void)
66{
67 return cpus_weight(cpu_callout_map);
68}
69
70#define __smp_processor_id() read_pda(cpunumber)
71
72extern __inline int hard_smp_processor_id(void)
73{
74 /* we don't want to mark this access volatile - bad code generation */
75 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
76}
77
78#define safe_smp_processor_id() (disable_apic ? 0 : x86_apicid_to_cpu(hard_smp_processor_id()))
79
80#endif /* !ASSEMBLY */
81
82#define NO_PROC_ID 0xFF /* No processor magic marker */
83
84#endif
85
86#ifndef ASSEMBLY
87/*
88 * Some lowlevel functions might want to know about
89 * the real APIC ID <-> CPU # mapping.
90 */
91extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
92extern u8 x86_cpu_to_log_apicid[NR_CPUS];
93extern u8 bios_cpu_apicid[];
94
95static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
96{
97 return cpus_addr(cpumask)[0];
98}
99
100static inline int x86_apicid_to_cpu(u8 apicid)
101{
102 int i;
103
104 for (i = 0; i < NR_CPUS; ++i)
105 if (x86_cpu_to_apicid[i] == apicid)
106 return i;
107
108 /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
109 * or called too early. Either way, we must be CPU 0. */
110 if (x86_cpu_to_apicid[0] == BAD_APICID)
111 return 0;
112
113 return -1;
114}
115
116static inline int cpu_present_to_apicid(int mps_cpu)
117{
118 if (mps_cpu < NR_CPUS)
119 return (int)bios_cpu_apicid[mps_cpu];
120 else
121 return BAD_APICID;
122}
123
124#endif /* !ASSEMBLY */
125
126#ifndef CONFIG_SMP
127#define stack_smp_processor_id() 0
128#define safe_smp_processor_id() 0
129#define cpu_logical_map(x) (x)
130#else
131#include <asm/thread_info.h>
132#define stack_smp_processor_id() \
133({ \
134 struct thread_info *ti; \
135 __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
136 ti->cpu; \
137})
138#endif
139
140#ifndef __ASSEMBLY__
141static __inline int logical_smp_processor_id(void)
142{
143 /* we don't want to mark this access volatile - bad code generation */
144 return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
145}
146#endif
147
148#endif
149
diff --git a/include/asm-x86_64/socket.h b/include/asm-x86_64/socket.h
new file mode 100644
index 000000000000..d9a252ea8210
--- /dev/null
+++ b/include/asm-x86_64/socket.h
@@ -0,0 +1,50 @@
1#ifndef _ASM_SOCKET_H
2#define _ASM_SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_KEEPALIVE 9
18#define SO_OOBINLINE 10
19#define SO_NO_CHECK 11
20#define SO_PRIORITY 12
21#define SO_LINGER 13
22#define SO_BSDCOMPAT 14
23/* To add :#define SO_REUSEPORT 15 */
24#define SO_PASSCRED 16
25#define SO_PEERCRED 17
26#define SO_RCVLOWAT 18
27#define SO_SNDLOWAT 19
28#define SO_RCVTIMEO 20
29#define SO_SNDTIMEO 21
30
31/* Security levels - as per NRL IPv6 - don't actually do anything */
32#define SO_SECURITY_AUTHENTICATION 22
33#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
34#define SO_SECURITY_ENCRYPTION_NETWORK 24
35
36#define SO_BINDTODEVICE 25
37
38/* Socket filtering */
39#define SO_ATTACH_FILTER 26
40#define SO_DETACH_FILTER 27
41
42#define SO_PEERNAME 28
43#define SO_TIMESTAMP 29
44#define SCM_TIMESTAMP SO_TIMESTAMP
45
46#define SO_ACCEPTCONN 30
47
48#define SO_PEERSEC 31
49
50#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-x86_64/sockios.h b/include/asm-x86_64/sockios.h
new file mode 100644
index 000000000000..2eefd10d4f48
--- /dev/null
+++ b/include/asm-x86_64/sockios.h
@@ -0,0 +1,12 @@
1#ifndef __ARCH_X8664_SOCKIOS__
2#define __ARCH_X8664_SOCKIOS__
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp */
11
12#endif
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
new file mode 100644
index 000000000000..5aeb57a3baad
--- /dev/null
+++ b/include/asm-x86_64/spinlock.h
@@ -0,0 +1,214 @@
1#ifndef __ASM_SPINLOCK_H
2#define __ASM_SPINLOCK_H
3
4#include <asm/atomic.h>
5#include <asm/rwlock.h>
6#include <asm/page.h>
7#include <linux/config.h>
8
9extern int printk(const char * fmt, ...)
10 __attribute__ ((format (printf, 1, 2)));
11
12/*
13 * Your basic SMP spinlocks, allowing only a single CPU anywhere
14 */
15
16typedef struct {
17 volatile unsigned int lock;
18#ifdef CONFIG_DEBUG_SPINLOCK
19 unsigned magic;
20#endif
21#ifdef CONFIG_PREEMPT
22 unsigned int break_lock;
23#endif
24} spinlock_t;
25
26#define SPINLOCK_MAGIC 0xdead4ead
27
28#ifdef CONFIG_DEBUG_SPINLOCK
29#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
30#else
31#define SPINLOCK_MAGIC_INIT /* */
32#endif
33
34#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
35
36#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
37
38/*
39 * Simple spin lock operations. There are two variants, one clears IRQ's
40 * on the local processor, one does not.
41 *
42 * We make no fairness assumptions. They have a cost.
43 */
44
45#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
46#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
47#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
48
49#define spin_lock_string \
50 "\n1:\t" \
51 "lock ; decb %0\n\t" \
52 "js 2f\n" \
53 LOCK_SECTION_START("") \
54 "2:\t" \
55 "rep;nop\n\t" \
56 "cmpb $0,%0\n\t" \
57 "jle 2b\n\t" \
58 "jmp 1b\n" \
59 LOCK_SECTION_END
60
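
The lock word is treated as a signed byte; the protocol worked through:

	/* lock == 1: free.  "lock; decb" -> 0, sign clear, acquired.
	 * lock <= 0: held.  decb goes negative, "js" enters the wait loop,
	 * which does rep;nop and re-reads ("cmpb $0" / "jle") until the
	 * owner stores 1 back, then jumps up to retry the decb. */
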
61/*
62 * This works, despite all the confusion, except on PPro SMP
63 * or when we are using OOSTORE
64 * (PPro errata 66, 92).
65 */
66
67#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
68
69#define spin_unlock_string \
70 "movb $1,%0" \
71 :"=m" (lock->lock) : : "memory"
72
73
74static inline void _raw_spin_unlock(spinlock_t *lock)
75{
76#ifdef CONFIG_DEBUG_SPINLOCK
77 BUG_ON(lock->magic != SPINLOCK_MAGIC);
78 assert_spin_locked(lock);
79#endif
80 __asm__ __volatile__(
81 spin_unlock_string
82 );
83}
84
85#else
86
87#define spin_unlock_string \
88 "xchgb %b0, %1" \
89 :"=q" (oldval), "=m" (lock->lock) \
90 :"0" (oldval) : "memory"
91
92static inline void _raw_spin_unlock(spinlock_t *lock)
93{
94 char oldval = 1;
95#ifdef CONFIG_DEBUG_SPINLOCK
96 BUG_ON(lock->magic != SPINLOCK_MAGIC);
97 assert_spin_locked(lock);
98#endif
99 __asm__ __volatile__(
100 spin_unlock_string
101 );
102}
103
104#endif
105
106static inline int _raw_spin_trylock(spinlock_t *lock)
107{
108 char oldval;
109 __asm__ __volatile__(
110 "xchgb %b0,%1"
111 :"=q" (oldval), "=m" (lock->lock)
112 :"0" (0) : "memory");
113 return oldval > 0;
114}
115
116static inline void _raw_spin_lock(spinlock_t *lock)
117{
118#ifdef CONFIG_DEBUG_SPINLOCK
119 if (lock->magic != SPINLOCK_MAGIC) {
120 printk("eip: %p\n", __builtin_return_address(0));
121 BUG();
122 }
123#endif
124 __asm__ __volatile__(
125 spin_lock_string
126 :"=m" (lock->lock) : : "memory");
127}
128
129
130/*
131 * Read-write spinlocks, allowing multiple readers
132 * but only one writer.
133 *
134 * NOTE! it is quite common to have readers in interrupts
135 * but no interrupt writers. For those circumstances we
136 * can "mix" irq-safe locks - any writer needs to get an
137 * irq-safe write-lock, but readers can get non-irqsafe
138 * read-locks.
139 */
140typedef struct {
141 volatile unsigned int lock;
142#ifdef CONFIG_DEBUG_SPINLOCK
143 unsigned magic;
144#endif
145#ifdef CONFIG_PREEMPT
146 unsigned int break_lock;
147#endif
148} rwlock_t;
149
150#define RWLOCK_MAGIC 0xdeaf1eed
151
152#ifdef CONFIG_DEBUG_SPINLOCK
153#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
154#else
155#define RWLOCK_MAGIC_INIT /* */
156#endif
157
158#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
159
160#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
161
162#define read_can_lock(x) ((int)(x)->lock > 0)
163#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
164
165/*
166 * On x86, we implement read-write locks as a 32-bit counter
167 * with the high bit (sign) being the "contended" bit.
168 *
169 * The inline assembly is non-obvious. Think about it.
170 *
171 * Changed to use the same technique as rw semaphores. See
172 * semaphore.h for details. -ben
173 */
174/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
175
176static inline void _raw_read_lock(rwlock_t *rw)
177{
178#ifdef CONFIG_DEBUG_SPINLOCK
179 BUG_ON(rw->magic != RWLOCK_MAGIC);
180#endif
181 __build_read_lock(rw, "__read_lock_failed");
182}
183
184static inline void _raw_write_lock(rwlock_t *rw)
185{
186#ifdef CONFIG_DEBUG_SPINLOCK
187 BUG_ON(rw->magic != RWLOCK_MAGIC);
188#endif
189 __build_write_lock(rw, "__write_lock_failed");
190}
191
192#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
193#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
194
195static inline int _raw_read_trylock(rwlock_t *lock)
196{
197 atomic_t *count = (atomic_t *)lock;
198 atomic_dec(count);
199 if (atomic_read(count) >= 0)
200 return 1;
201 atomic_inc(count);
202 return 0;
203}
204
205static inline int _raw_write_trylock(rwlock_t *lock)
206{
207 atomic_t *count = (atomic_t *)lock;
208 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
209 return 1;
210 atomic_add(RW_LOCK_BIAS, count);
211 return 0;
212}
213
214#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-x86_64/stat.h b/include/asm-x86_64/stat.h
new file mode 100644
index 000000000000..fd9f00d560f8
--- /dev/null
+++ b/include/asm-x86_64/stat.h
@@ -0,0 +1,44 @@
1#ifndef _ASM_X86_64_STAT_H
2#define _ASM_X86_64_STAT_H
3
4#define STAT_HAVE_NSEC 1
5
6struct stat {
7 unsigned long st_dev;
8 unsigned long st_ino;
9 unsigned long st_nlink;
10
11 unsigned int st_mode;
12 unsigned int st_uid;
13 unsigned int st_gid;
14 unsigned int __pad0;
15 unsigned long st_rdev;
16 long st_size;
17 long st_blksize;
18 long st_blocks; /* Number 512-byte blocks allocated. */
19
20 unsigned long st_atime;
21 unsigned long st_atime_nsec;
22 unsigned long st_mtime;
23 unsigned long st_mtime_nsec;
24 unsigned long st_ctime;
25 unsigned long st_ctime_nsec;
26 long __unused[3];
27};
28
29/* For 32bit emulation */
30struct __old_kernel_stat {
31 unsigned short st_dev;
32 unsigned short st_ino;
33 unsigned short st_mode;
34 unsigned short st_nlink;
35 unsigned short st_uid;
36 unsigned short st_gid;
37 unsigned short st_rdev;
38 unsigned int st_size;
39 unsigned int st_atime;
40 unsigned int st_mtime;
41 unsigned int st_ctime;
42};
43
44#endif
diff --git a/include/asm-x86_64/statfs.h b/include/asm-x86_64/statfs.h
new file mode 100644
index 000000000000..b3f4718af30b
--- /dev/null
+++ b/include/asm-x86_64/statfs.h
@@ -0,0 +1,58 @@
1#ifndef _X86_64_STATFS_H
2#define _X86_64_STATFS_H
3
4#ifndef __KERNEL_STRICT_NAMES
5
6#include <linux/types.h>
7
8typedef __kernel_fsid_t fsid_t;
9
10#endif
11
12/*
13 * This is ugly -- we're already 64-bit clean, so just duplicate the
14 * definitions.
15 */
16struct statfs {
17 long f_type;
18 long f_bsize;
19 long f_blocks;
20 long f_bfree;
21 long f_bavail;
22 long f_files;
23 long f_ffree;
24 __kernel_fsid_t f_fsid;
25 long f_namelen;
26 long f_frsize;
27 long f_spare[5];
28};
29
30struct statfs64 {
31 long f_type;
32 long f_bsize;
33 long f_blocks;
34 long f_bfree;
35 long f_bavail;
36 long f_files;
37 long f_ffree;
38 __kernel_fsid_t f_fsid;
39 long f_namelen;
40 long f_frsize;
41 long f_spare[5];
42};
43
44struct compat_statfs64 {
45 __u32 f_type;
46 __u32 f_bsize;
47 __u64 f_blocks;
48 __u64 f_bfree;
49 __u64 f_bavail;
50 __u64 f_files;
51 __u64 f_ffree;
52 __kernel_fsid_t f_fsid;
53 __u32 f_namelen;
54 __u32 f_frsize;
55 __u32 f_spare[5];
56} __attribute__((packed));
57
58#endif
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
new file mode 100644
index 000000000000..a3493ee282bb
--- /dev/null
+++ b/include/asm-x86_64/string.h
@@ -0,0 +1,67 @@
1#ifndef _X86_64_STRING_H_
2#define _X86_64_STRING_H_
3
4#ifdef __KERNEL__
5
6/* Written 2002 by Andi Kleen */
7
8/* Only used for special circumstances. Stolen from i386/string.h */
9static inline void * __inline_memcpy(void * to, const void * from, size_t n)
10{
11unsigned long d0, d1, d2;
12__asm__ __volatile__(
13 "rep ; movsl\n\t"
14 "testb $2,%b4\n\t"
15 "je 1f\n\t"
16 "movsw\n"
17 "1:\ttestb $1,%b4\n\t"
18 "je 2f\n\t"
19 "movsb\n"
20 "2:"
21 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
22 :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
23 : "memory");
24return (to);
25}
26
27/* Even with __builtin_ the compiler may decide to use the out-of-line
28 function. */
29
30#define __HAVE_ARCH_MEMCPY 1
31extern void *__memcpy(void *to, const void *from, size_t len);
32#define memcpy(dst,src,len) \
33 ({ size_t __len = (len); \
34 void *__ret; \
35 if (__builtin_constant_p(len) && __len >= 64) \
36 __ret = __memcpy((dst),(src),__len); \
37 else \
38 __ret = __builtin_memcpy((dst),(src),__len); \
39 __ret; })
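/*
 * A minimal sketch (illustrative, not from the original header): with a
 * small constant length the macro above resolves to __builtin_memcpy,
 * which gcc usually expands inline; a constant length >= 64 is routed to
 * the out-of-line __memcpy instead.
 */
static inline void demo_copy_header(void *dst, const void *src)
{
	memcpy(dst, src, 16);	/* constant and < 64: builtin path */
}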
40
41
42#define __HAVE_ARCH_MEMSET
43#define memset __builtin_memset
44
45#define __HAVE_ARCH_MEMMOVE
46void * memmove(void * dest,const void *src,size_t count);
47
48/* Use C out of line version for memcmp */
49#define memcmp __builtin_memcmp
50int memcmp(const void * cs,const void * ct,size_t count);
51
52/* out-of-line string functions always use the C versions */
53#define strlen __builtin_strlen
54size_t strlen(const char * s);
55
56#define strcpy __builtin_strcpy
57char * strcpy(char * dest,const char *src);
58
59#define strcat __builtin_strcat
60char * strcat(char * dest, const char * src);
61
62#define strcmp __builtin_strcmp
63int strcmp(const char * cs,const char * ct);
64
65#endif /* __KERNEL__ */
66
67#endif
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
new file mode 100644
index 000000000000..ec745807feae
--- /dev/null
+++ b/include/asm-x86_64/suspend.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2001-2003 Pavel Machek <pavel@suse.cz>
3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */
6#include <asm/desc.h>
7#include <asm/i387.h>
8
9static inline int
10arch_prepare_suspend(void)
11{
12 return 0;
13}
14
15/* Image of the saved processor state. If you touch this, fix acpi_wakeup.S. */
16struct saved_context {
17 u16 ds, es, fs, gs, ss;
18 unsigned long gs_base, gs_kernel_base, fs_base;
19 unsigned long cr0, cr2, cr3, cr4;
20 u16 gdt_pad;
21 u16 gdt_limit;
22 unsigned long gdt_base;
23 u16 idt_pad;
24 u16 idt_limit;
25 unsigned long idt_base;
26 u16 ldt;
27 u16 tss;
28 unsigned long tr;
29 unsigned long safety;
30 unsigned long return_address;
31 unsigned long eflags;
32} __attribute__((packed));
33
34/* We'll access these from assembly, so we'd better have them outside the struct */
35extern unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
36extern unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
37extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
38extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
39extern unsigned long saved_context_eflags;
40
41#define loaddebug(thread,register) \
42 __asm__("movq %0,%%db" #register \
43 : /* no output */ \
44 :"r" ((thread)->debugreg##register))
45
46extern void fix_processor_context(void);
47
48#ifdef CONFIG_ACPI_SLEEP
49extern unsigned long saved_eip;
50extern unsigned long saved_esp;
51extern unsigned long saved_ebp;
52extern unsigned long saved_ebx;
53extern unsigned long saved_esi;
54extern unsigned long saved_edi;
55
56/* routines for saving/restoring kernel state */
57extern int acpi_save_state_mem(void);
58#endif
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
new file mode 100644
index 000000000000..36293061f4ed
--- /dev/null
+++ b/include/asm-x86_64/swiotlb.h
@@ -0,0 +1,40 @@
1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWIOTLB_H 1
3
4#include <linux/config.h>
5
6/* SWIOTLB interface */
7
8extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
9 int dir);
10extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
11 size_t size, int dir);
12extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
13 dma_addr_t dev_addr,
14 size_t size, int dir);
15extern void swiotlb_sync_single_for_device(struct device *hwdev,
16 dma_addr_t dev_addr,
17 size_t size, int dir);
18extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
19 struct scatterlist *sg, int nelems,
20 int dir);
21extern void swiotlb_sync_sg_for_device(struct device *hwdev,
22 struct scatterlist *sg, int nelems,
23 int dir);
24extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
25 int nents, int direction);
26extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
27 int nents, int direction);
28extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
29extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size,
30 dma_addr_t *dma_handle, int flags);
31extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
32 void *vaddr, dma_addr_t dma_handle);
33
34#ifdef CONFIG_SWIOTLB
35extern int swiotlb;
36#else
37#define swiotlb 0
38#endif
39
40#endif
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
new file mode 100644
index 000000000000..76165736e43a
--- /dev/null
+++ b/include/asm-x86_64/system.h
@@ -0,0 +1,343 @@
1#ifndef __ASM_SYSTEM_H
2#define __ASM_SYSTEM_H
3
4#include <linux/config.h>
5#include <linux/kernel.h>
6#include <asm/segment.h>
7
8#ifdef __KERNEL__
9
10#ifdef CONFIG_SMP
11#define LOCK_PREFIX "lock ; "
12#else
13#define LOCK_PREFIX ""
14#endif
15
16#define __STR(x) #x
17#define STR(x) __STR(x)
18
19#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
20#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
21
22/* frame pointer must be last for get_wchan */
23#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
24#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"
25
26#define __EXTRA_CLOBBER \
27 ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
28
29#define switch_to(prev,next,last) \
30 asm volatile(SAVE_CONTEXT \
31 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
32 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
33 "call __switch_to\n\t" \
34 ".globl thread_return\n" \
35 "thread_return:\n\t" \
36 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
37 "movq %P[thread_info](%%rsi),%%r8\n\t" \
38 LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
39 "movq %%rax,%%rdi\n\t" \
40 "jc ret_from_fork\n\t" \
41 RESTORE_CONTEXT \
42 : "=a" (last) \
43 : [next] "S" (next), [prev] "D" (prev), \
44 [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
45 [ti_flags] "i" (offsetof(struct thread_info, flags)),\
46 [tif_fork] "i" (TIF_FORK), \
47 [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
48 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
49 : "memory", "cc" __EXTRA_CLOBBER)
50
51extern void load_gs_index(unsigned);
52
53/*
54 * Load a segment. Fall back on loading the zero
55 * segment if something goes wrong..
56 */
57#define loadsegment(seg,value) \
58 asm volatile("\n" \
59 "1:\t" \
60 "movl %k0,%%" #seg "\n" \
61 "2:\n" \
62 ".section .fixup,\"ax\"\n" \
63 "3:\t" \
64 "movl %1,%%" #seg "\n\t" \
65 "jmp 2b\n" \
66 ".previous\n" \
67 ".section __ex_table,\"a\"\n\t" \
68 ".align 8\n\t" \
69 ".quad 1b,3b\n" \
70 ".previous" \
71 : :"r" (value), "r" (0))
72
73#define set_debug(value,register) \
74 __asm__("movq %0,%%db" #register \
75 : /* no output */ \
76 :"r" ((unsigned long) value))
77
78
79#ifdef __KERNEL__
80struct alt_instr {
81 __u8 *instr; /* original instruction */
82 __u8 *replacement;
83 __u8 cpuid; /* cpuid bit set for replacement */
84 __u8 instrlen; /* length of original instruction */
85 __u8 replacementlen; /* length of new instruction, <= instrlen */
86 __u8 pad[5];
87};
88#endif
89
90/*
91 * Alternative instructions for different CPU types or capabilities.
92 *
93 * This allows the use of optimized instructions even on generic binary
94 * kernels.
95 *
96 * The length of oldinstr must be greater than or equal to the length of
97 * newinstr; it can be padded with nops as needed.
98 *
99 * For non-barrier-like inlines, please define new variants
100 * without the volatile and memory clobber.
101 */
102#define alternative(oldinstr, newinstr, feature) \
103 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
104 ".section .altinstructions,\"a\"\n" \
105 " .align 8\n" \
106 " .quad 661b\n" /* label */ \
107 " .quad 663f\n" /* new instruction */ \
108 " .byte %c0\n" /* feature bit */ \
109 " .byte 662b-661b\n" /* sourcelen */ \
110 " .byte 664f-663f\n" /* replacementlen */ \
111 ".previous\n" \
112 ".section .altinstr_replacement,\"ax\"\n" \
113 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
114 ".previous" :: "i" (feature) : "memory")
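/*
 * A hedged usage sketch (the exact instruction pair is an assumption,
 * modelled on how the i386 tree uses alternative() for its barriers; the
 * feature bit comes from cpufeature.h): on SSE-capable CPUs the locked
 * add is patched into a plain sfence at boot.
 */
#define demo_store_fence() \
	alternative("lock; addl $0,0(%%rsp)", "sfence", X86_FEATURE_XMM)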
115
116/*
117 * Alternative inline assembly with input.
118 *
119 * Peculiarities:
120 * No memory clobber here.
121 * Argument numbers start with 1.
122 * Best to use fixed-size constraints (like (%1) ... "r").
123 * If you use variable-sized constraints like "m" or "g" in the
124 * replacement, make sure to pad to the worst-case length.
125 */
126#define alternative_input(oldinstr, newinstr, feature, input...) \
127 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
128 ".section .altinstructions,\"a\"\n" \
129 " .align 8\n" \
130 " .quad 661b\n" /* label */ \
131 " .quad 663f\n" /* new instruction */ \
132 " .byte %c0\n" /* feature bit */ \
133 " .byte 662b-661b\n" /* sourcelen */ \
134 " .byte 664f-663f\n" /* replacementlen */ \
135 ".previous\n" \
136 ".section .altinstr_replacement,\"ax\"\n" \
137 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
138 ".previous" :: "i" (feature), ##input)
139
140/*
141 * Clear (clts) and set (stts) the CR0 'TS' bit, respectively
142 */
143#define clts() __asm__ __volatile__ ("clts")
144
145static inline unsigned long read_cr0(void)
146{
147 unsigned long cr0;
148 asm volatile("movq %%cr0,%0" : "=r" (cr0));
149 return cr0;
150}
151
152static inline void write_cr0(unsigned long val)
153{
154 asm volatile("movq %0,%%cr0" :: "r" (val));
155}
156
157static inline unsigned long read_cr3(void)
158{
159 unsigned long cr3;
160 asm("movq %%cr3,%0" : "=r" (cr3));
161 return cr3;
162}
163
164static inline unsigned long read_cr4(void)
165{
166 unsigned long cr4;
167 asm("movq %%cr4,%0" : "=r" (cr4));
168 return cr4;
169}
170
171static inline void write_cr4(unsigned long val)
172{
173 asm volatile("movq %0,%%cr4" :: "r" (val));
174}
175
176#define stts() write_cr0(8 | read_cr0())
177
178#define wbinvd() \
179 __asm__ __volatile__ ("wbinvd": : :"memory");
180
181#endif /* __KERNEL__ */
182
183#define nop() __asm__ __volatile__ ("nop")
184
185#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
186
187#define tas(ptr) (xchg((ptr),1))
188
189#define __xg(x) ((volatile long *)(x))
190
191extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
192{
193 *ptr = val;
194}
195
196#define _set_64bit set_64bit
197
198/*
199 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
200 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
201 * strictly, the primitive is otherwise invalid, since *ptr is an output argument. --ANK
202 */
203static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
204{
205 switch (size) {
206 case 1:
207 __asm__ __volatile__("xchgb %b0,%1"
208 :"=q" (x)
209 :"m" (*__xg(ptr)), "0" (x)
210 :"memory");
211 break;
212 case 2:
213 __asm__ __volatile__("xchgw %w0,%1"
214 :"=r" (x)
215 :"m" (*__xg(ptr)), "0" (x)
216 :"memory");
217 break;
218 case 4:
219 __asm__ __volatile__("xchgl %k0,%1"
220 :"=r" (x)
221 :"m" (*__xg(ptr)), "0" (x)
222 :"memory");
223 break;
224 case 8:
225 __asm__ __volatile__("xchgq %0,%1"
226 :"=r" (x)
227 :"m" (*__xg(ptr)), "0" (x)
228 :"memory");
229 break;
230 }
231 return x;
232}
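/*
 * A minimal sketch (illustrative) of xchg() as an atomic claim flag:
 * because xchg is fully ordered and atomic, exactly one caller observes
 * the old value 0 and performs the one-time setup.
 */
static unsigned long demo_initialized;	/* hypothetical flag */

static void demo_init_once(void)
{
	if (xchg(&demo_initialized, 1UL) == 0) {
		/* ... one-time initialization runs here exactly once ... */
	}
}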
233
234/*
235 * Atomic compare and exchange. Compare OLD with MEM, if identical,
236 * store NEW in MEM. Return the initial value in MEM. Success is
237 * indicated by comparing RETURN with OLD.
238 */
239
240#define __HAVE_ARCH_CMPXCHG 1
241
242static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
243 unsigned long new, int size)
244{
245 unsigned long prev;
246 switch (size) {
247 case 1:
248 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
249 : "=a"(prev)
250 : "q"(new), "m"(*__xg(ptr)), "0"(old)
251 : "memory");
252 return prev;
253 case 2:
254 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
255 : "=a"(prev)
256 : "q"(new), "m"(*__xg(ptr)), "0"(old)
257 : "memory");
258 return prev;
259 case 4:
260 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
261 : "=a"(prev)
262 : "q"(new), "m"(*__xg(ptr)), "0"(old)
263 : "memory");
264 return prev;
265 case 8:
266 __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
267 : "=a"(prev)
268 : "q"(new), "m"(*__xg(ptr)), "0"(old)
269 : "memory");
270 return prev;
271 }
272 return old;
273}
274
275#define cmpxchg(ptr,o,n)\
276 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
277 (unsigned long)(n),sizeof(*(ptr))))
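/*
 * A minimal sketch (illustrative) of the classic cmpxchg() retry loop,
 * here a saturating counter: reread, compute, and retry until the word
 * was not modified between the read and the cmpxchg.
 */
static unsigned long demo_count;	/* hypothetical counter */

static void demo_inc_saturating(unsigned long max)
{
	unsigned long old, new;

	do {
		old = demo_count;
		if (old >= max)
			return;		/* already saturated */
		new = old + 1;
	} while (cmpxchg(&demo_count, old, new) != old);
}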
278
279#ifdef CONFIG_SMP
280#define smp_mb() mb()
281#define smp_rmb() rmb()
282#define smp_wmb() wmb()
283#define smp_read_barrier_depends() do {} while(0)
284#else
285#define smp_mb() barrier()
286#define smp_rmb() barrier()
287#define smp_wmb() barrier()
288#define smp_read_barrier_depends() do {} while(0)
289#endif
290
291
292/*
293 * Force strict CPU ordering.
294 * And yes, this is required on UP too when we're talking
295 * to devices.
296 */
297#define mb() asm volatile("mfence":::"memory")
298#define rmb() asm volatile("lfence":::"memory")
299
300#ifdef CONFIG_UNORDERED_IO
301#define wmb() asm volatile("sfence" ::: "memory")
302#else
303#define wmb() asm volatile("" ::: "memory")
304#endif
305#define read_barrier_depends() do {} while(0)
306#define set_mb(var, value) do { xchg(&var, value); } while (0)
307#define set_wmb(var, value) do { var = value; wmb(); } while (0)
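/*
 * A minimal producer/consumer sketch (illustrative) showing how wmb() and
 * rmb() pair up: the writer orders the data store before the flag store,
 * the reader orders the flag load before the data load.
 */
static int demo_data;
static int demo_flag;

static void demo_publish(int v)
{
	demo_data = v;
	wmb();			/* data must be visible before the flag */
	demo_flag = 1;
}

static int demo_consume(void)
{
	if (!demo_flag)
		return -1;	/* nothing published yet */
	rmb();			/* flag load must happen before data load */
	return demo_data;
}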
308
309#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
310
311/* interrupt control.. */
312#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
313#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
314#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
315#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
316/* used in the idle loop; sti keeps interrupts off for one more instruction, so sti;hlt cannot miss a wakeup */
317#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
318
319#define irqs_disabled() \
320({ \
321 unsigned long flags; \
322 local_save_flags(flags); \
323 !(flags & (1<<9)); \
324})
325
326/* For spinlocks etc */
327#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
328
329void cpu_idle_wait(void);
330
331/*
332 * disable hlt during certain critical i/o operations
333 */
334#define HAVE_DISABLE_HLT
335void disable_hlt(void);
336void enable_hlt(void);
337
338#define HAVE_EAT_KEY
339void eat_key(void);
340
341extern unsigned long arch_align_stack(unsigned long sp);
342
343#endif
diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h
new file mode 100644
index 000000000000..bd950946e52c
--- /dev/null
+++ b/include/asm-x86_64/termbits.h
@@ -0,0 +1,173 @@
1#ifndef __ARCH_X8664_TERMBITS_H__
2#define __ARCH_X8664_TERMBITS_H__
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20/* c_cc characters */
21#define VINTR 0
22#define VQUIT 1
23#define VERASE 2
24#define VKILL 3
25#define VEOF 4
26#define VTIME 5
27#define VMIN 6
28#define VSWTC 7
29#define VSTART 8
30#define VSTOP 9
31#define VSUSP 10
32#define VEOL 11
33#define VREPRINT 12
34#define VDISCARD 13
35#define VWERASE 14
36#define VLNEXT 15
37#define VEOL2 16
38
39/* c_iflag bits */
40#define IGNBRK 0000001
41#define BRKINT 0000002
42#define IGNPAR 0000004
43#define PARMRK 0000010
44#define INPCK 0000020
45#define ISTRIP 0000040
46#define INLCR 0000100
47#define IGNCR 0000200
48#define ICRNL 0000400
49#define IUCLC 0001000
50#define IXON 0002000
51#define IXANY 0004000
52#define IXOFF 0010000
53#define IMAXBEL 0020000
54#define IUTF8 0040000
55
56/* c_oflag bits */
57#define OPOST 0000001
58#define OLCUC 0000002
59#define ONLCR 0000004
60#define OCRNL 0000010
61#define ONOCR 0000020
62#define ONLRET 0000040
63#define OFILL 0000100
64#define OFDEL 0000200
65#define NLDLY 0000400
66#define NL0 0000000
67#define NL1 0000400
68#define CRDLY 0003000
69#define CR0 0000000
70#define CR1 0001000
71#define CR2 0002000
72#define CR3 0003000
73#define TABDLY 0014000
74#define TAB0 0000000
75#define TAB1 0004000
76#define TAB2 0010000
77#define TAB3 0014000
78#define XTABS 0014000
79#define BSDLY 0020000
80#define BS0 0000000
81#define BS1 0020000
82#define VTDLY 0040000
83#define VT0 0000000
84#define VT1 0040000
85#define FFDLY 0100000
86#define FF0 0000000
87#define FF1 0100000
88
89/* c_cflag bit meaning */
90#define CBAUD 0010017
91#define B0 0000000 /* hang up */
92#define B50 0000001
93#define B75 0000002
94#define B110 0000003
95#define B134 0000004
96#define B150 0000005
97#define B200 0000006
98#define B300 0000007
99#define B600 0000010
100#define B1200 0000011
101#define B1800 0000012
102#define B2400 0000013
103#define B4800 0000014
104#define B9600 0000015
105#define B19200 0000016
106#define B38400 0000017
107#define EXTA B19200
108#define EXTB B38400
109#define CSIZE 0000060
110#define CS5 0000000
111#define CS6 0000020
112#define CS7 0000040
113#define CS8 0000060
114#define CSTOPB 0000100
115#define CREAD 0000200
116#define PARENB 0000400
117#define PARODD 0001000
118#define HUPCL 0002000
119#define CLOCAL 0004000
120#define CBAUDEX 0010000
121#define B57600 0010001
122#define B115200 0010002
123#define B230400 0010003
124#define B460800 0010004
125#define B500000 0010005
126#define B576000 0010006
127#define B921600 0010007
128#define B1000000 0010010
129#define B1152000 0010011
130#define B1500000 0010012
131#define B2000000 0010013
132#define B2500000 0010014
133#define B3000000 0010015
134#define B3500000 0010016
135#define B4000000 0010017
136#define CIBAUD 002003600000 /* input baud rate (not used) */
137#define CMSPAR 010000000000 /* mark or space (stick) parity */
138#define CRTSCTS 020000000000 /* flow control */
139
140/* c_lflag bits */
141#define ISIG 0000001
142#define ICANON 0000002
143#define XCASE 0000004
144#define ECHO 0000010
145#define ECHOE 0000020
146#define ECHOK 0000040
147#define ECHONL 0000100
148#define NOFLSH 0000200
149#define TOSTOP 0000400
150#define ECHOCTL 0001000
151#define ECHOPRT 0002000
152#define ECHOKE 0004000
153#define FLUSHO 0010000
154#define PENDIN 0040000
155#define IEXTEN 0100000
156
157/* tcflow() and TCXONC use these */
158#define TCOOFF 0
159#define TCOON 1
160#define TCIOFF 2
161#define TCION 3
162
163/* tcflush() and TCFLSH use these */
164#define TCIFLUSH 0
165#define TCOFLUSH 1
166#define TCIOFLUSH 2
167
168/* tcsetattr uses these */
169#define TCSANOW 0
170#define TCSADRAIN 1
171#define TCSAFLUSH 2
172
173#endif
diff --git a/include/asm-x86_64/termios.h b/include/asm-x86_64/termios.h
new file mode 100644
index 000000000000..041a91f7ddfb
--- /dev/null
+++ b/include/asm-x86_64/termios.h
@@ -0,0 +1,106 @@
1#ifndef _X8664_TERMIOS_H
2#define _X8664_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42/* line disciplines */
43#define N_TTY 0
44#define N_SLIP 1
45#define N_MOUSE 2
46#define N_PPP 3
47#define N_STRIP 4
48#define N_AX25 5
49#define N_X25 6 /* X.25 async */
50#define N_6PACK 7
51#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
52#define N_R3964 9 /* Reserved for Simatic R3964 module */
53#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
54#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
55#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
56#define N_HDLC 13 /* synchronous HDLC */
57#define N_SYNC_PPP 14 /* synchronous PPP */
58#define N_HCI 15 /* Bluetooth HCI UART */
59
60#ifdef __KERNEL__
61
62/* intr=^C quit=^\ erase=del kill=^U
63 eof=^D vtime=\0 vmin=\1 sxtc=\0
64 start=^Q stop=^S susp=^Z eol=\0
65 reprint=^R discard=^U werase=^W lnext=^V
66 eol2=\0
67*/
68#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
69
70/*
71 * Translate a "termio" structure into a "termios". Ugh.
72 */
73#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
74 unsigned short __tmp; \
75 get_user(__tmp,&(termio)->x); \
76 *(unsigned short *) &(termios)->x = __tmp; \
77}
78
79#define user_termio_to_kernel_termios(termios, termio) \
80({ \
81 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
82 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
83 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
84 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
85 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
86})
87
88/*
89 * Translate a "termios" structure into a "termio". Ugh.
90 */
91#define kernel_termios_to_user_termio(termio, termios) \
92({ \
93 put_user((termios)->c_iflag, &(termio)->c_iflag); \
94 put_user((termios)->c_oflag, &(termio)->c_oflag); \
95 put_user((termios)->c_cflag, &(termio)->c_cflag); \
96 put_user((termios)->c_lflag, &(termio)->c_lflag); \
97 put_user((termios)->c_line, &(termio)->c_line); \
98 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
99})
100
101#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
102#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
103
104#endif /* __KERNEL__ */
105
106#endif /* _X8664_TERMIOS_H */
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
new file mode 100644
index 000000000000..f4b3b249639c
--- /dev/null
+++ b/include/asm-x86_64/thread_info.h
@@ -0,0 +1,144 @@
1/* thread_info.h: x86_64 low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H
9
10#ifdef __KERNEL__
11
12#include <asm/page.h>
13#include <asm/types.h>
14#include <asm/pda.h>
15
16/*
17 * low level task data that entry.S needs immediate access to
18 * - this struct should fit entirely inside of one cache line
19 * - this struct shares the supervisor stack pages
20 */
21#ifndef __ASSEMBLY__
22struct task_struct;
23struct exec_domain;
24#include <asm/mmsegment.h>
25
26struct thread_info {
27 struct task_struct *task; /* main task structure */
28 struct exec_domain *exec_domain; /* execution domain */
29 __u32 flags; /* low level flags */
30 __u32 status; /* thread synchronous flags */
31 __u32 cpu; /* current CPU */
32 int preempt_count;
33
34 mm_segment_t addr_limit;
35 struct restart_block restart_block;
36};
37#endif
38
39/*
40 * Macros/functions for gaining access to the thread information structure.
41 * preempt_count needs to be 1 initially, until the scheduler is functional.
42 */
43#ifndef __ASSEMBLY__
44#define INIT_THREAD_INFO(tsk) \
45{ \
46 .task = &tsk, \
47 .exec_domain = &default_exec_domain, \
48 .flags = 0, \
49 .cpu = 0, \
50 .preempt_count = 1, \
51 .addr_limit = KERNEL_DS, \
52 .restart_block = { \
53 .fn = do_no_restart_syscall, \
54 }, \
55}
56
57#define init_thread_info (init_thread_union.thread_info)
58#define init_stack (init_thread_union.stack)
59
60static inline struct thread_info *current_thread_info(void)
61{
62 struct thread_info *ti;
63 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
64 return ti;
65}
66
67/* do not use in interrupt context */
68static inline struct thread_info *stack_thread_info(void)
69{
70 struct thread_info *ti;
71 __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
72 return ti;
73}
74
75/* thread information allocation */
76#define alloc_thread_info(tsk) \
77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
79#define get_thread_info(ti) get_task_struct((ti)->task)
80#define put_thread_info(ti) put_task_struct((ti)->task)
81
82#else /* !__ASSEMBLY__ */
83
84/* how to get the thread information struct from ASM */
85#define GET_THREAD_INFO(reg) \
86 movq %gs:pda_kernelstack,reg ; \
87 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
88
89#endif
90
91/*
92 * thread information flags
93 * - these are process state flags that various assembly files may need to access
94 * - pending work-to-be-done flags are in LSW
95 * - other flags in MSW
96 * Warning: layout of LSW is hardcoded in entry.S
97 */
98#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
99#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
100#define TIF_SIGPENDING 2 /* signal pending */
101#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
102#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
103#define TIF_IRET 5 /* force IRET */
104#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
105#define TIF_SECCOMP 8 /* secure computing */
106#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
107#define TIF_IA32 17 /* 32bit process */
108#define TIF_FORK 18 /* ret_from_fork */
109#define TIF_ABI_PENDING 19
110#define TIF_MEMDIE 20
111
112#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
113#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
114#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
115#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
116#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
117#define _TIF_IRET (1<<TIF_IRET)
118#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
119#define _TIF_SECCOMP (1<<TIF_SECCOMP)
120#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
121#define _TIF_IA32 (1<<TIF_IA32)
122#define _TIF_FORK (1<<TIF_FORK)
123#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
124
125/* work to do on interrupt/exception return */
126#define _TIF_WORK_MASK \
127 (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
128/* work to do on any return to user space */
129#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
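#ifndef __ASSEMBLY__
/*
 * A minimal sketch (illustrative) tying the flag bits to the accessor
 * above: test a work flag on the current thread the way the
 * return-to-user path does.
 */
static inline int demo_signal_pending(void)
{
	return current_thread_info()->flags & _TIF_SIGPENDING;
}
#endif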
130
131#define PREEMPT_ACTIVE 0x10000000
132
133/*
134 * Thread-synchronous status.
135 *
136 * This is different from the flags in that nobody else
137 * ever touches our thread-synchronous status, so we don't
138 * have to worry about atomic accesses.
139 */
140#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
141
142#endif /* __KERNEL__ */
143
144#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
new file mode 100644
index 000000000000..34f31a18f90b
--- /dev/null
+++ b/include/asm-x86_64/timex.h
@@ -0,0 +1,31 @@
1/*
2 * linux/include/asm-x86_64/timex.h
3 *
4 * x86-64 architecture timex specifications
5 */
6#ifndef _ASMx8664_TIMEX_H
7#define _ASMx8664_TIMEX_H
8
9#include <linux/config.h>
10#include <asm/8253pit.h>
11#include <asm/msr.h>
12#include <asm/vsyscall.h>
13#include <asm/hpet.h>
14
15#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
16
17typedef unsigned long long cycles_t;
18
19static inline cycles_t get_cycles (void)
20{
21 unsigned long long ret;
22
23 rdtscll(ret);
24 return ret;
25}
26
27extern unsigned int cpu_khz;
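/*
 * A minimal sketch (illustrative): time a code section in TSC cycles and
 * convert to microseconds via cpu_khz. This assumes a constant-rate TSC.
 */
static inline unsigned long demo_time_us(void (*fn)(void))
{
	cycles_t t0 = get_cycles();

	fn();
	return (unsigned long)((get_cycles() - t0) * 1000 / cpu_khz);
}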
28
29extern struct vxtime_data vxtime;
30
31#endif
diff --git a/include/asm-x86_64/tlb.h b/include/asm-x86_64/tlb.h
new file mode 100644
index 000000000000..cd4c3c590a0e
--- /dev/null
+++ b/include/asm-x86_64/tlb.h
@@ -0,0 +1,13 @@
1#ifndef TLB_H
2#define TLB_H 1
3
4
5#define tlb_start_vma(tlb, vma) do { } while (0)
6#define tlb_end_vma(tlb, vma) do { } while (0)
7#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
8
9#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
10
11#include <asm-generic/tlb.h>
12
13#endif
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
new file mode 100644
index 000000000000..2e811ac262af
--- /dev/null
+++ b/include/asm-x86_64/tlbflush.h
@@ -0,0 +1,119 @@
1#ifndef _X8664_TLBFLUSH_H
2#define _X8664_TLBFLUSH_H
3
4#include <linux/config.h>
5#include <linux/mm.h>
6#include <asm/processor.h>
7
8#define __flush_tlb() \
9 do { \
10 unsigned long tmpreg; \
11 \
12 __asm__ __volatile__( \
13 "movq %%cr3, %0; # flush TLB \n" \
14 "movq %0, %%cr3; \n" \
15 : "=r" (tmpreg) \
16 :: "memory"); \
17 } while (0)
18
19/*
20 * Global pages have to be flushed a bit differently. Not a real
21 * performance problem because this does not happen often.
22 */
23#define __flush_tlb_global() \
24 do { \
25 unsigned long tmpreg; \
26 \
27 __asm__ __volatile__( \
28 "movq %1, %%cr4; # turn off PGE \n" \
29 "movq %%cr3, %0; # flush TLB \n" \
30 "movq %0, %%cr3; \n" \
31 "movq %2, %%cr4; # turn PGE back on \n" \
32 : "=&r" (tmpreg) \
33 : "r" (mmu_cr4_features & ~X86_CR4_PGE), \
34 "r" (mmu_cr4_features) \
35 : "memory"); \
36 } while (0)
37
38extern unsigned long pgkern_mask;
39
40#define __flush_tlb_all() __flush_tlb_global()
41
42#define __flush_tlb_one(addr) \
43 __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
44
45
46/*
47 * TLB flushing:
48 *
49 * - flush_tlb() flushes the current mm struct TLBs
50 * - flush_tlb_all() flushes all processes TLBs
51 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
52 * - flush_tlb_page(vma, vmaddr) flushes one page
53 * - flush_tlb_range(vma, start, end) flushes a range of pages
54 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
55 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
56 *
57 * ..but x86_64 has somewhat limited TLB flushing capabilities;
58 * page-granular flushes (invlpg) are always available on this architecture.
59 */
60
61#ifndef CONFIG_SMP
62
63#define flush_tlb() __flush_tlb()
64#define flush_tlb_all() __flush_tlb_all()
65#define local_flush_tlb() __flush_tlb()
66
67static inline void flush_tlb_mm(struct mm_struct *mm)
68{
69 if (mm == current->active_mm)
70 __flush_tlb();
71}
72
73static inline void flush_tlb_page(struct vm_area_struct *vma,
74 unsigned long addr)
75{
76 if (vma->vm_mm == current->active_mm)
77 __flush_tlb_one(addr);
78}
79
80static inline void flush_tlb_range(struct vm_area_struct *vma,
81 unsigned long start, unsigned long end)
82{
83 if (vma->vm_mm == current->active_mm)
84 __flush_tlb();
85}
86
87#else
88
89#include <asm/smp.h>
90
91#define local_flush_tlb() \
92 __flush_tlb()
93
94extern void flush_tlb_all(void);
95extern void flush_tlb_current_task(void);
96extern void flush_tlb_mm(struct mm_struct *);
97extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
98
99#define flush_tlb() flush_tlb_current_task()
100
101static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
102{
103 flush_tlb_mm(vma->vm_mm);
104}
105
106#define TLBSTATE_OK 1
107#define TLBSTATE_LAZY 2
108
109#endif
110
111#define flush_tlb_kernel_range(start, end) flush_tlb_all()
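/*
 * A minimal sketch (illustrative) of a typical call site: after updating
 * the page table entry for one user address, flush just that translation.
 */
static inline void demo_update_one_pte(struct vm_area_struct *vma,
				       unsigned long addr)
{
	/* ... modify the PTE backing addr here ... */
	flush_tlb_page(vma, addr);
}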
112
113static inline void flush_tlb_pgtables(struct mm_struct *mm,
114 unsigned long start, unsigned long end)
115{
116 /* x86_64 does not keep any page table caches in TLB */
117}
118
119#endif /* _X8664_TLBFLUSH_H */
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
new file mode 100644
index 000000000000..67f24e0ea819
--- /dev/null
+++ b/include/asm-x86_64/topology.h
@@ -0,0 +1,68 @@
1#ifndef _ASM_X86_64_TOPOLOGY_H
2#define _ASM_X86_64_TOPOLOGY_H
3
4#include <linux/config.h>
5
6#ifdef CONFIG_DISCONTIGMEM
7
8#include <asm/mpspec.h>
9#include <asm/bitops.h>
10
11/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
12
13extern cpumask_t cpu_online_map;
14
15extern unsigned char cpu_to_node[];
16extern cpumask_t node_to_cpumask[];
17extern cpumask_t pci_bus_to_cpumask[];
18
19#ifdef CONFIG_ACPI_NUMA
20extern int __node_distance(int, int);
21#define node_distance(a,b) __node_distance(a,b)
22/* #else fallback version */
23#endif
24
25#define cpu_to_node(cpu) (cpu_to_node[cpu])
26#define parent_node(node) (node)
27#define node_to_first_cpu(node) (__ffs(node_to_cpumask[node]))
28#define node_to_cpumask(node) (node_to_cpumask[node])
29
30static inline cpumask_t __pcibus_to_cpumask(int bus)
31{
32 cpumask_t busmask = pci_bus_to_cpumask[bus];
33 cpumask_t online = cpu_online_map;
34 cpumask_t res;
35 cpus_and(res, busmask, online);
36 return res;
37}
38#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus->number)
39
40#ifdef CONFIG_NUMA
41/* sched_domains SD_NODE_INIT for x86_64 machines */
42#define SD_NODE_INIT (struct sched_domain) { \
43 .span = CPU_MASK_NONE, \
44 .parent = NULL, \
45 .groups = NULL, \
46 .min_interval = 8, \
47 .max_interval = 32, \
48 .busy_factor = 32, \
49 .imbalance_pct = 125, \
50 .cache_hot_time = (10*1000000), \
51 .cache_nice_tries = 1, \
52 .per_cpu_gain = 100, \
53 .flags = SD_LOAD_BALANCE \
54 | SD_BALANCE_NEWIDLE \
55 | SD_BALANCE_EXEC \
56 | SD_WAKE_IDLE \
57 | SD_WAKE_BALANCE, \
58 .last_balance = jiffies, \
59 .balance_interval = 1, \
60 .nr_balance_failed = 0, \
61}
62#endif
63
64#endif
65
66#include <asm-generic/topology.h>
67
68#endif
diff --git a/include/asm-x86_64/types.h b/include/asm-x86_64/types.h
new file mode 100644
index 000000000000..32bd1426b523
--- /dev/null
+++ b/include/asm-x86_64/types.h
@@ -0,0 +1,60 @@
1#ifndef _X86_64_TYPES_H
2#define _X86_64_TYPES_H
3
4#ifndef __ASSEMBLY__
5
6typedef unsigned short umode_t;
7
8/*
9 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
10 * header files exported to user space
11 */
12
13typedef __signed__ char __s8;
14typedef unsigned char __u8;
15
16typedef __signed__ short __s16;
17typedef unsigned short __u16;
18
19typedef __signed__ int __s32;
20typedef unsigned int __u32;
21
22typedef __signed__ long long __s64;
23typedef unsigned long long __u64;
24
25#endif /* __ASSEMBLY__ */
26
27/*
28 * These aren't exported outside the kernel to avoid name space clashes
29 */
30#ifdef __KERNEL__
31
32#define BITS_PER_LONG 64
33
34#ifndef __ASSEMBLY__
35
36typedef signed char s8;
37typedef unsigned char u8;
38
39typedef signed short s16;
40typedef unsigned short u16;
41
42typedef signed int s32;
43typedef unsigned int u32;
44
45typedef signed long long s64;
46typedef unsigned long long u64;
47
48typedef u64 dma64_addr_t;
49typedef u64 dma_addr_t;
50
51typedef u64 sector_t;
52#define HAVE_SECTOR_T
53
54typedef unsigned short kmem_bufctl_t;
55
56#endif /* __ASSEMBLY__ */
57
58#endif /* __KERNEL__ */
59
60#endif
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
new file mode 100644
index 000000000000..48f292752c96
--- /dev/null
+++ b/include/asm-x86_64/uaccess.h
@@ -0,0 +1,365 @@
1#ifndef __X86_64_UACCESS_H
2#define __X86_64_UACCESS_H
3
4/*
5 * User space memory access functions
6 */
7#include <linux/config.h>
8#include <linux/compiler.h>
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/prefetch.h>
12#include <asm/page.h>
13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed; with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
28#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
29
30#define get_ds() (KERNEL_DS)
31#define get_fs() (current_thread_info()->addr_limit)
32#define set_fs(x) (current_thread_info()->addr_limit = (x))
33
34#define segment_eq(a,b) ((a).seg == (b).seg)
35
36#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
37
38/*
39 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
40 */
41#define __range_not_ok(addr,size) ({ \
42 unsigned long flag,sum; \
43 __chk_user_ptr(addr); \
44 asm("# range_ok\n\r" \
45 "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
46 :"=&r" (flag), "=r" (sum) \
47 :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
48 flag; })
49
50#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
51
52/* this function will go away soon - use access_ok() instead */
53extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
54{
55 return access_ok(type,addr,size) ? 0 : -EFAULT;
56}
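/*
 * A minimal sketch (illustrative) of the classic addr_limit dance: widen
 * the limit so user-access helpers accept kernel pointers, and always
 * restore the previous limit afterwards.
 */
static inline void demo_with_kernel_ds(void (*fn)(void))
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	fn();			/* may pass kernel buffers to __user APIs */
	set_fs(old_fs);		/* always restore */
}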
57
58
59/*
60 * The exception table consists of pairs of addresses: the first is the
61 * address of an instruction that is allowed to fault, and the second is
62 * the address at which the program should continue. No registers are
63 * modified, so it is entirely up to the continuation code to figure out
64 * what to do.
65 *
66 * All the routines below use bits of fixup code that are out of line
67 * with the main instruction path. This means when everything is well,
68 * we don't even have to jump over them. Further, they do not intrude
69 * on our cache or tlb entries.
70 */
71
72struct exception_table_entry
73{
74 unsigned long insn, fixup;
75};
76
77#define ARCH_HAS_SEARCH_EXTABLE
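/*
 * A hedged sketch of how the fault path consumes the table (assumption:
 * this mirrors the generic search_extable() logic): binary-search the
 * sorted faulting-instruction addresses and resume at the fixup on a hit.
 */
static inline unsigned long
demo_search_extable(const struct exception_table_entry *first,
		    const struct exception_table_entry *last,
		    unsigned long faulting_insn)
{
	while (first <= last) {
		const struct exception_table_entry *mid =
			first + (last - first) / 2;

		if (mid->insn == faulting_insn)
			return mid->fixup;	/* continue execution here */
		if (mid->insn < faulting_insn)
			first = mid + 1;
		else
			last = mid - 1;
	}
	return 0;				/* no fixup: a real fault */
}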
78
79/*
80 * These are the main single-value transfer routines. They automatically
81 * use the right size if we just have the right pointer type.
82 *
83 * This gets kind of ugly. We want to return _two_ values in "get_user()"
84 * and yet we don't want to do any pointers, because that is too much
85 * of a performance impact. Thus we have a few rather ugly macros here,
86 * and hide all the ugliness from the user.
87 *
88 * The "__xxx" versions of the user access functions are versions that
89 * do not verify the address space, that must have been done previously
90 * with a separate "access_ok()" call (this is used when we do multiple
91 * accesses to the same area of user memory).
92 */
93
94#define __get_user_x(size,ret,x,ptr) \
95 __asm__ __volatile__("call __get_user_" #size \
96 :"=a" (ret),"=d" (x) \
97 :"c" (ptr) \
98 :"r8")
99
100/* Careful: we have to cast the result to the type of the pointer for sign reasons */
101#define get_user(x,ptr) \
102({ unsigned long __val_gu; \
103 int __ret_gu; \
104 __chk_user_ptr(ptr); \
105 switch(sizeof (*(ptr))) { \
106 case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
107 case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
108 case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
109 case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
110 default: __get_user_bad(); break; \
111 } \
112 (x) = (__typeof__(*(ptr)))__val_gu; \
113 __ret_gu; \
114})
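/*
 * A minimal usage sketch (illustrative): get_user() sizes the access from
 * the pointer type and returns 0 on success or -EFAULT on a fault.
 */
static inline int demo_fetch_int(int __user *uptr, int *out)
{
	int v;

	if (get_user(v, uptr))
		return -EFAULT;
	*out = v;
	return 0;
}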
115
116extern void __put_user_1(void);
117extern void __put_user_2(void);
118extern void __put_user_4(void);
119extern void __put_user_8(void);
120extern void __put_user_bad(void);
121
122#define __put_user_x(size,ret,x,ptr) \
123 __asm__ __volatile__("call __put_user_" #size \
124 :"=a" (ret) \
125 :"c" (ptr),"d" (x) \
126 :"r8")
127
128#define put_user(x,ptr) \
129 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
130
131#define __get_user(x,ptr) \
132 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
133#define __put_user(x,ptr) \
134 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
135
136#define __get_user_unaligned __get_user
137#define __put_user_unaligned __put_user
138
139#define __put_user_nocheck(x,ptr,size) \
140({ \
141 int __pu_err; \
142 __put_user_size((x),(ptr),(size),__pu_err); \
143 __pu_err; \
144})
145
146
147#define __put_user_check(x,ptr,size) \
148({ \
149 int __pu_err; \
150 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
151 switch (size) { \
152 case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
153 case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
154 case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \
155 case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \
156 default: __put_user_bad(); \
157 } \
158 __pu_err; \
159})
160
161#define __put_user_size(x,ptr,size,retval) \
162do { \
163 retval = 0; \
164 __chk_user_ptr(ptr); \
165 switch (size) { \
166 case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
167 case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
168 case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
169 case 8: __put_user_asm(x,ptr,retval,"q","","ir",-EFAULT); break;\
170 default: __put_user_bad(); \
171 } \
172} while (0)
173
174/* FIXME: this hack is definitely wrong -AK */
175struct __large_struct { unsigned long buf[100]; };
176#define __m(x) (*(struct __large_struct __user *)(x))
177
178/*
179 * Tell gcc we read from memory instead of writing: this is because
180 * we do not write to any memory gcc knows about, so there are no
181 * aliasing issues.
182 */
183#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
184 __asm__ __volatile__( \
185 "1: mov"itype" %"rtype"1,%2\n" \
186 "2:\n" \
187 ".section .fixup,\"ax\"\n" \
188 "3: mov %3,%0\n" \
189 " jmp 2b\n" \
190 ".previous\n" \
191 ".section __ex_table,\"a\"\n" \
192 " .align 8\n" \
193 " .quad 1b,3b\n" \
194 ".previous" \
195 : "=r"(err) \
196 : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
197
198
199#define __get_user_nocheck(x,ptr,size) \
200({ \
201 int __gu_err; \
202 unsigned long __gu_val; \
203 __get_user_size(__gu_val,(ptr),(size),__gu_err); \
204 (x) = (__typeof__(*(ptr)))__gu_val; \
205 __gu_err; \
206})
207
208extern int __get_user_1(void);
209extern int __get_user_2(void);
210extern int __get_user_4(void);
211extern int __get_user_8(void);
212extern int __get_user_bad(void);
213
214#define __get_user_size(x,ptr,size,retval) \
215do { \
216 retval = 0; \
217 __chk_user_ptr(ptr); \
218 switch (size) { \
219 case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
220 case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
221 case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
222 case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
223 default: (x) = __get_user_bad(); \
224 } \
225} while (0)
226
227#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
228 __asm__ __volatile__( \
229 "1: mov"itype" %2,%"rtype"1\n" \
230 "2:\n" \
231 ".section .fixup,\"ax\"\n" \
232 "3: mov %3,%0\n" \
233 " xor"itype" %"rtype"1,%"rtype"1\n" \
234 " jmp 2b\n" \
235 ".previous\n" \
236 ".section __ex_table,\"a\"\n" \
237 " .align 8\n" \
238 " .quad 1b,3b\n" \
239 ".previous" \
240 : "=r"(err), ltype (x) \
241 : "m"(__m(addr)), "i"(errno), "0"(err))
242
243/*
244 * Copy To/From Userspace
245 */
246
247/* Handles exceptions in both to and from, but doesn't do access_ok */
248extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);
249
250extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
251extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
252extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
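/*
 * A minimal sketch (illustrative) of the usual bulk-copy pattern: the
 * copy routines return the number of bytes that could NOT be copied, so
 * any non-zero return maps to -EFAULT.
 */
struct demo_req { int op; long arg; };	/* hypothetical request */

static inline int demo_get_req(struct demo_req *kreq,
			       const struct demo_req __user *ureq)
{
	if (copy_from_user(kreq, ureq, sizeof(*kreq)))
		return -EFAULT;
	return 0;
}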
253
254static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
255{
256 int ret = 0;
257 if (!__builtin_constant_p(size))
258 return copy_user_generic(dst,(__force void *)src,size);
259 switch (size) {
260 case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
261 return ret;
262 case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
263 return ret;
264 case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
265 return ret;
266 case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
267 return ret;
268 case 10:
269 __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
270 if (unlikely(ret)) return ret;
271 __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
272 return ret;
273 case 16:
274 __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
275 if (unlikely(ret)) return ret;
276 __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
277 return ret;
278 default:
279 return copy_user_generic(dst,(__force void *)src,size);
280 }
281}
282
283static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
284{
285 int ret = 0;
286 if (!__builtin_constant_p(size))
287 return copy_user_generic((__force void *)dst,src,size);
288 switch (size) {
289 case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
290 return ret;
291 case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
292 return ret;
293 case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
294 return ret;
295 case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
296 return ret;
297 case 10:
298 __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
299 if (unlikely(ret)) return ret;
300 asm("":::"memory");
301 __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
302 return ret;
303 case 16:
304 __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
305 if (unlikely(ret)) return ret;
306 asm("":::"memory");
307 __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
308 return ret;
309 default:
310 return copy_user_generic((__force void *)dst,src,size);
311 }
312}
313
314
315static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
316{
317 int ret = 0;
318 if (!__builtin_constant_p(size))
319 return copy_user_generic((__force void *)dst,(__force void *)src,size);
320 switch (size) {
321 case 1: {
322 u8 tmp;
323 __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
324 if (likely(!ret))
325 __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
326 return ret;
327 }
328 case 2: {
329 u16 tmp;
330 __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
331 if (likely(!ret))
332 __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
333 return ret;
334 }
335
336 case 4: {
337 u32 tmp;
338 __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
339 if (likely(!ret))
340 __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
341 return ret;
342 }
343 case 8: {
344 u64 tmp;
345 __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
346 if (likely(!ret))
347 __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
348 return ret;
349 }
350 default:
351 return copy_user_generic((__force void *)dst,(__force void *)src,size);
352 }
353}
354
355long strncpy_from_user(char *dst, const char __user *src, long count);
356long __strncpy_from_user(char *dst, const char __user *src, long count);
357long strnlen_user(const char __user *str, long n);
358long strlen_user(const char __user *str);
359unsigned long clear_user(void __user *mem, unsigned long len);
360unsigned long __clear_user(void __user *mem, unsigned long len);
361
362#define __copy_to_user_inatomic __copy_to_user
363#define __copy_from_user_inatomic __copy_from_user
364
365#endif /* __X86_64_UACCESS_H */
diff --git a/include/asm-x86_64/ucontext.h b/include/asm-x86_64/ucontext.h
new file mode 100644
index 000000000000..159a3da9e112
--- /dev/null
+++ b/include/asm-x86_64/ucontext.h
@@ -0,0 +1,12 @@
1#ifndef _ASMX8664_UCONTEXT_H
2#define _ASMX8664_UCONTEXT_H
3
4struct ucontext {
5 unsigned long uc_flags;
6 struct ucontext *uc_link;
7 stack_t uc_stack;
8 struct sigcontext uc_mcontext;
9 sigset_t uc_sigmask; /* mask last for extensibility */
10};
11
12#endif
diff --git a/include/asm-x86_64/unaligned.h b/include/asm-x86_64/unaligned.h
new file mode 100644
index 000000000000..d4bf78dc6f39
--- /dev/null
+++ b/include/asm-x86_64/unaligned.h
@@ -0,0 +1,37 @@
1#ifndef __X8664_UNALIGNED_H
2#define __X8664_UNALIGNED_H
3
4/*
5 * The x86-64 can do unaligned accesses itself.
6 *
7 * The strange macros are there to make sure these can't
8 * be misused in a way that makes them not work on other
9 * architectures where unaligned accesses aren't as simple.
10 */
11
12/**
13 * get_unaligned - get value from possibly mis-aligned location
14 * @ptr: pointer to value
15 *
16 * This macro should be used for accessing values larger in size than
17 * single bytes at locations that are expected to be improperly aligned,
18 * e.g. retrieving a u16 value from a location not u16-aligned.
19 *
20 * Note that unaligned accesses can be very expensive on some architectures.
21 */
22#define get_unaligned(ptr) (*(ptr))
23
24/**
25 * put_unaligned - put value to a possibly mis-aligned location
26 * @val: value to place
27 * @ptr: pointer to location
28 *
29 * This macro should be used for placing values larger in size than
30 * single bytes at locations that are expected to be improperly aligned,
31 * e.g. writing a u16 value to a location not u16-aligned.
32 *
33 * Note that unaligned accesses can be very expensive on some architectures.
34 */
35#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
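/*
 * A minimal sketch (illustrative): pull a possibly unaligned 32-bit value
 * out of a byte buffer, e.g. a protocol header. On x86-64 this compiles
 * to a plain load, but the macro keeps such code portable.
 */
static inline unsigned int demo_read_u32(const unsigned char *buf)
{
	return get_unaligned((const unsigned int *)buf);
}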
36
37#endif
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
new file mode 100644
index 000000000000..a8ea0164f3ab
--- /dev/null
+++ b/include/asm-x86_64/unistd.h
@@ -0,0 +1,797 @@
1#ifndef _ASM_X86_64_UNISTD_H_
2#define _ASM_X86_64_UNISTD_H_
3
4#ifndef __SYSCALL
5#define __SYSCALL(a,b)
6#endif
7
8/*
9 * This file contains the system call numbers.
10 *
11 * Note: holes are not allowed.
12 */
13
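/*
 * A hedged sketch of how the __SYSCALL double-expansion is typically
 * consumed (assumption: a table-building file redefines __SYSCALL and
 * re-includes this header). The self-contained miniature below shows the
 * same X-macro technique with illustrative names, not the kernel's own.
 */
#define DEMO_SYSCALLS(X) X(0, demo_sys_a) X(1, demo_sys_b)

static long demo_sys_a(void) { return 0; }
static long demo_sys_b(void) { return 0; }

#define DEMO_ENTRY(nr, fn) [nr] = fn,
static long (*demo_table[])(void) = { DEMO_SYSCALLS(DEMO_ENTRY) };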
14/* at least 8 syscalls per cacheline */
15#define __NR_read 0
16__SYSCALL(__NR_read, sys_read)
17#define __NR_write 1
18__SYSCALL(__NR_write, sys_write)
19#define __NR_open 2
20__SYSCALL(__NR_open, sys_open)
21#define __NR_close 3
22__SYSCALL(__NR_close, sys_close)
23#define __NR_stat 4
24__SYSCALL(__NR_stat, sys_newstat)
25#define __NR_fstat 5
26__SYSCALL(__NR_fstat, sys_newfstat)
27#define __NR_lstat 6
28__SYSCALL(__NR_lstat, sys_newlstat)
29#define __NR_poll 7
30__SYSCALL(__NR_poll, sys_poll)
31
32#define __NR_lseek 8
33__SYSCALL(__NR_lseek, sys_lseek)
34#define __NR_mmap 9
35__SYSCALL(__NR_mmap, sys_mmap)
36#define __NR_mprotect 10
37__SYSCALL(__NR_mprotect, sys_mprotect)
38#define __NR_munmap 11
39__SYSCALL(__NR_munmap, sys_munmap)
40#define __NR_brk 12
41__SYSCALL(__NR_brk, sys_brk)
42#define __NR_rt_sigaction 13
43__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction)
44#define __NR_rt_sigprocmask 14
45__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
46#define __NR_rt_sigreturn 15
47__SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn)
48
49#define __NR_ioctl 16
50__SYSCALL(__NR_ioctl, sys_ioctl)
51#define __NR_pread64 17
52__SYSCALL(__NR_pread64, sys_pread64)
53#define __NR_pwrite64 18
54__SYSCALL(__NR_pwrite64, sys_pwrite64)
55#define __NR_readv 19
56__SYSCALL(__NR_readv, sys_readv)
57#define __NR_writev 20
58__SYSCALL(__NR_writev, sys_writev)
59#define __NR_access 21
60__SYSCALL(__NR_access, sys_access)
61#define __NR_pipe 22
62__SYSCALL(__NR_pipe, sys_pipe)
63#define __NR_select 23
64__SYSCALL(__NR_select, sys_select)
65
66#define __NR_sched_yield 24
67__SYSCALL(__NR_sched_yield, sys_sched_yield)
68#define __NR_mremap 25
69__SYSCALL(__NR_mremap, sys_mremap)
70#define __NR_msync 26
71__SYSCALL(__NR_msync, sys_msync)
72#define __NR_mincore 27
73__SYSCALL(__NR_mincore, sys_mincore)
74#define __NR_madvise 28
75__SYSCALL(__NR_madvise, sys_madvise)
76#define __NR_shmget 29
77__SYSCALL(__NR_shmget, sys_shmget)
78#define __NR_shmat 30
79__SYSCALL(__NR_shmat, wrap_sys_shmat)
80#define __NR_shmctl 31
81__SYSCALL(__NR_shmctl, sys_shmctl)
82
83#define __NR_dup 32
84__SYSCALL(__NR_dup, sys_dup)
85#define __NR_dup2 33
86__SYSCALL(__NR_dup2, sys_dup2)
87#define __NR_pause 34
88__SYSCALL(__NR_pause, sys_pause)
89#define __NR_nanosleep 35
90__SYSCALL(__NR_nanosleep, sys_nanosleep)
91#define __NR_getitimer 36
92__SYSCALL(__NR_getitimer, sys_getitimer)
93#define __NR_alarm 37
94__SYSCALL(__NR_alarm, sys_alarm)
95#define __NR_setitimer 38
96__SYSCALL(__NR_setitimer, sys_setitimer)
97#define __NR_getpid 39
98__SYSCALL(__NR_getpid, sys_getpid)
99
100#define __NR_sendfile 40
101__SYSCALL(__NR_sendfile, sys_sendfile64)
102#define __NR_socket 41
103__SYSCALL(__NR_socket, sys_socket)
104#define __NR_connect 42
105__SYSCALL(__NR_connect, sys_connect)
106#define __NR_accept 43
107__SYSCALL(__NR_accept, sys_accept)
108#define __NR_sendto 44
109__SYSCALL(__NR_sendto, sys_sendto)
110#define __NR_recvfrom 45
111__SYSCALL(__NR_recvfrom, sys_recvfrom)
112#define __NR_sendmsg 46
113__SYSCALL(__NR_sendmsg, sys_sendmsg)
114#define __NR_recvmsg 47
115__SYSCALL(__NR_recvmsg, sys_recvmsg)
116
117#define __NR_shutdown 48
118__SYSCALL(__NR_shutdown, sys_shutdown)
119#define __NR_bind 49
120__SYSCALL(__NR_bind, sys_bind)
121#define __NR_listen 50
122__SYSCALL(__NR_listen, sys_listen)
123#define __NR_getsockname 51
124__SYSCALL(__NR_getsockname, sys_getsockname)
125#define __NR_getpeername 52
126__SYSCALL(__NR_getpeername, sys_getpeername)
127#define __NR_socketpair 53
128__SYSCALL(__NR_socketpair, sys_socketpair)
129#define __NR_setsockopt 54
130__SYSCALL(__NR_setsockopt, sys_setsockopt)
131#define __NR_getsockopt 55
132__SYSCALL(__NR_getsockopt, sys_getsockopt)
133
134#define __NR_clone 56
135__SYSCALL(__NR_clone, stub_clone)
136#define __NR_fork 57
137__SYSCALL(__NR_fork, stub_fork)
138#define __NR_vfork 58
139__SYSCALL(__NR_vfork, stub_vfork)
140#define __NR_execve 59
141__SYSCALL(__NR_execve, stub_execve)
142#define __NR_exit 60
143__SYSCALL(__NR_exit, sys_exit)
144#define __NR_wait4 61
145__SYSCALL(__NR_wait4, sys_wait4)
146#define __NR_kill 62
147__SYSCALL(__NR_kill, sys_kill)
148#define __NR_uname 63
149__SYSCALL(__NR_uname, sys_uname)
150
151#define __NR_semget 64
152__SYSCALL(__NR_semget, sys_semget)
153#define __NR_semop 65
154__SYSCALL(__NR_semop, sys_semop)
155#define __NR_semctl 66
156__SYSCALL(__NR_semctl, sys_semctl)
157#define __NR_shmdt 67
158__SYSCALL(__NR_shmdt, sys_shmdt)
159#define __NR_msgget 68
160__SYSCALL(__NR_msgget, sys_msgget)
161#define __NR_msgsnd 69
162__SYSCALL(__NR_msgsnd, sys_msgsnd)
163#define __NR_msgrcv 70
164__SYSCALL(__NR_msgrcv, sys_msgrcv)
165#define __NR_msgctl 71
166__SYSCALL(__NR_msgctl, sys_msgctl)
167
168#define __NR_fcntl 72
169__SYSCALL(__NR_fcntl, sys_fcntl)
170#define __NR_flock 73
171__SYSCALL(__NR_flock, sys_flock)
172#define __NR_fsync 74
173__SYSCALL(__NR_fsync, sys_fsync)
174#define __NR_fdatasync 75
175__SYSCALL(__NR_fdatasync, sys_fdatasync)
176#define __NR_truncate 76
177__SYSCALL(__NR_truncate, sys_truncate)
178#define __NR_ftruncate 77
179__SYSCALL(__NR_ftruncate, sys_ftruncate)
180#define __NR_getdents 78
181__SYSCALL(__NR_getdents, sys_getdents)
182#define __NR_getcwd 79
183__SYSCALL(__NR_getcwd, sys_getcwd)
184
185#define __NR_chdir 80
186__SYSCALL(__NR_chdir, sys_chdir)
187#define __NR_fchdir 81
188__SYSCALL(__NR_fchdir, sys_fchdir)
189#define __NR_rename 82
190__SYSCALL(__NR_rename, sys_rename)
191#define __NR_mkdir 83
192__SYSCALL(__NR_mkdir, sys_mkdir)
193#define __NR_rmdir 84
194__SYSCALL(__NR_rmdir, sys_rmdir)
195#define __NR_creat 85
196__SYSCALL(__NR_creat, sys_creat)
197#define __NR_link 86
198__SYSCALL(__NR_link, sys_link)
199#define __NR_unlink 87
200__SYSCALL(__NR_unlink, sys_unlink)
201
202#define __NR_symlink 88
203__SYSCALL(__NR_symlink, sys_symlink)
204#define __NR_readlink 89
205__SYSCALL(__NR_readlink, sys_readlink)
206#define __NR_chmod 90
207__SYSCALL(__NR_chmod, sys_chmod)
208#define __NR_fchmod 91
209__SYSCALL(__NR_fchmod, sys_fchmod)
210#define __NR_chown 92
211__SYSCALL(__NR_chown, sys_chown)
212#define __NR_fchown 93
213__SYSCALL(__NR_fchown, sys_fchown)
214#define __NR_lchown 94
215__SYSCALL(__NR_lchown, sys_lchown)
216#define __NR_umask 95
217__SYSCALL(__NR_umask, sys_umask)
218
219#define __NR_gettimeofday 96
220__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
221#define __NR_getrlimit 97
222__SYSCALL(__NR_getrlimit, sys_getrlimit)
223#define __NR_getrusage 98
224__SYSCALL(__NR_getrusage, sys_getrusage)
225#define __NR_sysinfo 99
226__SYSCALL(__NR_sysinfo, sys_sysinfo)
227#define __NR_times 100
228__SYSCALL(__NR_times, sys_times)
229#define __NR_ptrace 101
230__SYSCALL(__NR_ptrace, sys_ptrace)
231#define __NR_getuid 102
232__SYSCALL(__NR_getuid, sys_getuid)
233#define __NR_syslog 103
234__SYSCALL(__NR_syslog, sys_syslog)
235
236/* At the very end: the calls that never run during benchmarks. */
237#define __NR_getgid 104
238__SYSCALL(__NR_getgid, sys_getgid)
239#define __NR_setuid 105
240__SYSCALL(__NR_setuid, sys_setuid)
241#define __NR_setgid 106
242__SYSCALL(__NR_setgid, sys_setgid)
243#define __NR_geteuid 107
244__SYSCALL(__NR_geteuid, sys_geteuid)
245#define __NR_getegid 108
246__SYSCALL(__NR_getegid, sys_getegid)
247#define __NR_setpgid 109
248__SYSCALL(__NR_setpgid, sys_setpgid)
249#define __NR_getppid 110
250__SYSCALL(__NR_getppid, sys_getppid)
251#define __NR_getpgrp 111
252__SYSCALL(__NR_getpgrp, sys_getpgrp)
253
254#define __NR_setsid 112
255__SYSCALL(__NR_setsid, sys_setsid)
256#define __NR_setreuid 113
257__SYSCALL(__NR_setreuid, sys_setreuid)
258#define __NR_setregid 114
259__SYSCALL(__NR_setregid, sys_setregid)
260#define __NR_getgroups 115
261__SYSCALL(__NR_getgroups, sys_getgroups)
262#define __NR_setgroups 116
263__SYSCALL(__NR_setgroups, sys_setgroups)
264#define __NR_setresuid 117
265__SYSCALL(__NR_setresuid, sys_setresuid)
266#define __NR_getresuid 118
267__SYSCALL(__NR_getresuid, sys_getresuid)
268#define __NR_setresgid 119
269__SYSCALL(__NR_setresgid, sys_setresgid)
270
271#define __NR_getresgid 120
272__SYSCALL(__NR_getresgid, sys_getresgid)
273#define __NR_getpgid 121
274__SYSCALL(__NR_getpgid, sys_getpgid)
275#define __NR_setfsuid 122
276__SYSCALL(__NR_setfsuid, sys_setfsuid)
277#define __NR_setfsgid 123
278__SYSCALL(__NR_setfsgid, sys_setfsgid)
279#define __NR_getsid 124
280__SYSCALL(__NR_getsid, sys_getsid)
281#define __NR_capget 125
282__SYSCALL(__NR_capget, sys_capget)
283#define __NR_capset 126
284__SYSCALL(__NR_capset, sys_capset)
285
286#define __NR_rt_sigpending 127
287__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
288#define __NR_rt_sigtimedwait 128
289__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
290#define __NR_rt_sigqueueinfo 129
291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
292#define __NR_rt_sigsuspend 130
293__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend)
294#define __NR_sigaltstack 131
295__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
296#define __NR_utime 132
297__SYSCALL(__NR_utime, sys_utime)
298#define __NR_mknod 133
299__SYSCALL(__NR_mknod, sys_mknod)
300
301/* Only needed for a.out */
302#define __NR_uselib 134
303__SYSCALL(__NR_uselib, sys_ni_syscall)
304#define __NR_personality 135
305__SYSCALL(__NR_personality, sys_personality)
306
307#define __NR_ustat 136
308__SYSCALL(__NR_ustat, sys_ustat)
309#define __NR_statfs 137
310__SYSCALL(__NR_statfs, sys_statfs)
311#define __NR_fstatfs 138
312__SYSCALL(__NR_fstatfs, sys_fstatfs)
313#define __NR_sysfs 139
314__SYSCALL(__NR_sysfs, sys_sysfs)
315
316#define __NR_getpriority 140
317__SYSCALL(__NR_getpriority, sys_getpriority)
318#define __NR_setpriority 141
319__SYSCALL(__NR_setpriority, sys_setpriority)
320#define __NR_sched_setparam 142
321__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
322#define __NR_sched_getparam 143
323__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
324#define __NR_sched_setscheduler 144
325__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
326#define __NR_sched_getscheduler 145
327__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
328#define __NR_sched_get_priority_max 146
329__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
330#define __NR_sched_get_priority_min 147
331__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
332#define __NR_sched_rr_get_interval 148
333__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
334
335#define __NR_mlock 149
336__SYSCALL(__NR_mlock, sys_mlock)
337#define __NR_munlock 150
338__SYSCALL(__NR_munlock, sys_munlock)
339#define __NR_mlockall 151
340__SYSCALL(__NR_mlockall, sys_mlockall)
341#define __NR_munlockall 152
342__SYSCALL(__NR_munlockall, sys_munlockall)
343
344#define __NR_vhangup 153
345__SYSCALL(__NR_vhangup, sys_vhangup)
346
347#define __NR_modify_ldt 154
348__SYSCALL(__NR_modify_ldt, sys_modify_ldt)
349
350#define __NR_pivot_root 155
351__SYSCALL(__NR_pivot_root, sys_pivot_root)
352
353#define __NR__sysctl 156
354__SYSCALL(__NR__sysctl, sys_sysctl)
355
356#define __NR_prctl 157
357__SYSCALL(__NR_prctl, sys_prctl)
358#define __NR_arch_prctl 158
359__SYSCALL(__NR_arch_prctl, sys_arch_prctl)
360
361#define __NR_adjtimex 159
362__SYSCALL(__NR_adjtimex, sys_adjtimex)
363
364#define __NR_setrlimit 160
365__SYSCALL(__NR_setrlimit, sys_setrlimit)
366
367#define __NR_chroot 161
368__SYSCALL(__NR_chroot, sys_chroot)
369
370#define __NR_sync 162
371__SYSCALL(__NR_sync, sys_sync)
372
373#define __NR_acct 163
374__SYSCALL(__NR_acct, sys_acct)
375
376#define __NR_settimeofday 164
377__SYSCALL(__NR_settimeofday, sys_settimeofday)
378
379#define __NR_mount 165
380__SYSCALL(__NR_mount, sys_mount)
381#define __NR_umount2 166
382__SYSCALL(__NR_umount2, sys_umount)
383
384#define __NR_swapon 167
385__SYSCALL(__NR_swapon, sys_swapon)
386#define __NR_swapoff 168
387__SYSCALL(__NR_swapoff, sys_swapoff)
388
389#define __NR_reboot 169
390__SYSCALL(__NR_reboot, sys_reboot)
391
392#define __NR_sethostname 170
393__SYSCALL(__NR_sethostname, sys_sethostname)
394#define __NR_setdomainname 171
395__SYSCALL(__NR_setdomainname, sys_setdomainname)
396
397#define __NR_iopl 172
398__SYSCALL(__NR_iopl, stub_iopl)
399#define __NR_ioperm 173
400__SYSCALL(__NR_ioperm, sys_ioperm)
401
402#define __NR_create_module 174
403__SYSCALL(__NR_create_module, sys_ni_syscall)
404#define __NR_init_module 175
405__SYSCALL(__NR_init_module, sys_init_module)
406#define __NR_delete_module 176
407__SYSCALL(__NR_delete_module, sys_delete_module)
408#define __NR_get_kernel_syms 177
409__SYSCALL(__NR_get_kernel_syms, sys_ni_syscall)
410#define __NR_query_module 178
411__SYSCALL(__NR_query_module, sys_ni_syscall)
412
413#define __NR_quotactl 179
414__SYSCALL(__NR_quotactl, sys_quotactl)
415
416#define __NR_nfsservctl 180
417__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
418
419#define __NR_getpmsg 181 /* reserved for LiS/STREAMS */
420__SYSCALL(__NR_getpmsg, sys_ni_syscall)
421#define __NR_putpmsg 182 /* reserved for LiS/STREAMS */
422__SYSCALL(__NR_putpmsg, sys_ni_syscall)
423
424#define __NR_afs_syscall 183 /* reserved for AFS */
425__SYSCALL(__NR_afs_syscall, sys_ni_syscall)
426
427#define __NR_tuxcall 184 /* reserved for tux */
428__SYSCALL(__NR_tuxcall, sys_ni_syscall)
429
430#define __NR_security 185
431__SYSCALL(__NR_security, sys_ni_syscall)
432
433#define __NR_gettid 186
434__SYSCALL(__NR_gettid, sys_gettid)
435
436#define __NR_readahead 187
437__SYSCALL(__NR_readahead, sys_readahead)
438#define __NR_setxattr 188
439__SYSCALL(__NR_setxattr, sys_setxattr)
440#define __NR_lsetxattr 189
441__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
442#define __NR_fsetxattr 190
443__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
444#define __NR_getxattr 191
445__SYSCALL(__NR_getxattr, sys_getxattr)
446#define __NR_lgetxattr 192
447__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
448#define __NR_fgetxattr 193
449__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
450#define __NR_listxattr 194
451__SYSCALL(__NR_listxattr, sys_listxattr)
452#define __NR_llistxattr 195
453__SYSCALL(__NR_llistxattr, sys_llistxattr)
454#define __NR_flistxattr 196
455__SYSCALL(__NR_flistxattr, sys_flistxattr)
456#define __NR_removexattr 197
457__SYSCALL(__NR_removexattr, sys_removexattr)
458#define __NR_lremovexattr 198
459__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
460#define __NR_fremovexattr 199
461__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
462#define __NR_tkill 200
463__SYSCALL(__NR_tkill, sys_tkill)
464#define __NR_time 201
465__SYSCALL(__NR_time, sys_time64)
466#define __NR_futex 202
467__SYSCALL(__NR_futex, sys_futex)
468#define __NR_sched_setaffinity 203
469__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
470#define __NR_sched_getaffinity 204
471__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
472#define __NR_set_thread_area 205
473__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */
474#define __NR_io_setup 206
475__SYSCALL(__NR_io_setup, sys_io_setup)
476#define __NR_io_destroy 207
477__SYSCALL(__NR_io_destroy, sys_io_destroy)
478#define __NR_io_getevents 208
479__SYSCALL(__NR_io_getevents, sys_io_getevents)
480#define __NR_io_submit 209
481__SYSCALL(__NR_io_submit, sys_io_submit)
482#define __NR_io_cancel 210
483__SYSCALL(__NR_io_cancel, sys_io_cancel)
484#define __NR_get_thread_area 211
485__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */
486#define __NR_lookup_dcookie 212
487__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
488#define __NR_epoll_create 213
489__SYSCALL(__NR_epoll_create, sys_epoll_create)
490#define __NR_epoll_ctl_old 214
491__SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall)
492#define __NR_epoll_wait_old 215
493__SYSCALL(__NR_epoll_wait_old, sys_ni_syscall)
494#define __NR_remap_file_pages 216
495__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
496#define __NR_getdents64 217
497__SYSCALL(__NR_getdents64, sys_getdents64)
498#define __NR_set_tid_address 218
499__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
500#define __NR_restart_syscall 219
501__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
502#define __NR_semtimedop 220
503__SYSCALL(__NR_semtimedop, sys_semtimedop)
504#define __NR_fadvise64 221
505__SYSCALL(__NR_fadvise64, sys_fadvise64)
506#define __NR_timer_create 222
507__SYSCALL(__NR_timer_create, sys_timer_create)
508#define __NR_timer_settime 223
509__SYSCALL(__NR_timer_settime, sys_timer_settime)
510#define __NR_timer_gettime 224
511__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
512#define __NR_timer_getoverrun 225
513__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
514#define __NR_timer_delete 226
515__SYSCALL(__NR_timer_delete, sys_timer_delete)
516#define __NR_clock_settime 227
517__SYSCALL(__NR_clock_settime, sys_clock_settime)
518#define __NR_clock_gettime 228
519__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
520#define __NR_clock_getres 229
521__SYSCALL(__NR_clock_getres, sys_clock_getres)
522#define __NR_clock_nanosleep 230
523__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
524#define __NR_exit_group 231
525__SYSCALL(__NR_exit_group, sys_exit_group)
526#define __NR_epoll_wait 232
527__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
528#define __NR_epoll_ctl 233
529__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
530#define __NR_tgkill 234
531__SYSCALL(__NR_tgkill, sys_tgkill)
532#define __NR_utimes 235
533__SYSCALL(__NR_utimes, sys_utimes)
534#define __NR_vserver 236
535__SYSCALL(__NR_vserver, sys_ni_syscall)
538#define __NR_mbind 237
539__SYSCALL(__NR_mbind, sys_mbind)
540#define __NR_set_mempolicy 238
541__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
542#define __NR_get_mempolicy 239
543__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
544#define __NR_mq_open 240
545__SYSCALL(__NR_mq_open, sys_mq_open)
546#define __NR_mq_unlink 241
547__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
548#define __NR_mq_timedsend 242
549__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
550#define __NR_mq_timedreceive 243
551__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
552#define __NR_mq_notify 244
553__SYSCALL(__NR_mq_notify, sys_mq_notify)
554#define __NR_mq_getsetattr 245
555__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
556#define __NR_kexec_load 246
557__SYSCALL(__NR_kexec_load, sys_ni_syscall)
558#define __NR_waitid 247
559__SYSCALL(__NR_waitid, sys_waitid)
560#define __NR_add_key 248
561__SYSCALL(__NR_add_key, sys_add_key)
562#define __NR_request_key 249
563__SYSCALL(__NR_request_key, sys_request_key)
564#define __NR_keyctl 250
565__SYSCALL(__NR_keyctl, sys_keyctl)
566
567#define __NR_syscall_max __NR_keyctl
568#ifndef __NO_STUBS
569
570/* user-visible error numbers are in the range -1 to -4095 */
571
572#define __syscall_clobber "r11","rcx","memory"
573
574#define __syscall_return(type, res) \
575do { \
576 if ((unsigned long)(res) >= (unsigned long)(-127)) { \
577 errno = -(res); \
578 res = -1; \
579 } \
580 return (type) (res); \
581} while (0)
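
__syscall_return() is the userspace half of the kernel's error convention: a system call returns a small negative number on failure, and the stub converts it into errno plus a -1 return value. A minimal sketch of the same decoding, with a hypothetical raw result; the extern errno is an assumption about the surrounding C library, not part of this header:

extern int errno;

static long demo_decode(long raw)
{
	if ((unsigned long)raw >= (unsigned long)(-127)) {
		/* e.g. raw == -2 (-ENOENT): errno becomes 2, caller sees -1 */
		errno = -raw;
		return -1;
	}
	return raw;	/* success: pass the value through unchanged */
}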
582
583#ifdef __KERNEL__
584#define __ARCH_WANT_OLD_READDIR
585#define __ARCH_WANT_OLD_STAT
586#define __ARCH_WANT_SYS_ALARM
587#define __ARCH_WANT_SYS_GETHOSTNAME
588#define __ARCH_WANT_SYS_PAUSE
589#define __ARCH_WANT_SYS_SGETMASK
590#define __ARCH_WANT_SYS_SIGNAL
591#define __ARCH_WANT_SYS_UTIME
592#define __ARCH_WANT_SYS_WAITPID
593#define __ARCH_WANT_SYS_SOCKETCALL
594#define __ARCH_WANT_SYS_FADVISE64
595#define __ARCH_WANT_SYS_GETPGRP
596#define __ARCH_WANT_SYS_LLSEEK
597#define __ARCH_WANT_SYS_NICE
598#define __ARCH_WANT_SYS_OLD_GETRLIMIT
599#define __ARCH_WANT_SYS_OLDUMOUNT
600#define __ARCH_WANT_SYS_SIGPENDING
601#define __ARCH_WANT_SYS_SIGPROCMASK
602#define __ARCH_WANT_SYS_RT_SIGACTION
603#define __ARCH_WANT_COMPAT_SYS_TIME
604#endif
605
606#ifndef __KERNEL_SYSCALLS__
607
608#define __syscall "syscall"
609
610#define _syscall0(type,name) \
611type name(void) \
612{ \
613long __res; \
614__asm__ volatile (__syscall \
615 : "=a" (__res) \
616 : "0" (__NR_##name) : __syscall_clobber ); \
617__syscall_return(type,__res); \
618}
619
620#define _syscall1(type,name,type1,arg1) \
621type name(type1 arg1) \
622{ \
623long __res; \
624__asm__ volatile (__syscall \
625 : "=a" (__res) \
626 : "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \
627__syscall_return(type,__res); \
628}
629
630#define _syscall2(type,name,type1,arg1,type2,arg2) \
631type name(type1 arg1,type2 arg2) \
632{ \
633long __res; \
634__asm__ volatile (__syscall \
635 : "=a" (__res) \
636 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \
637__syscall_return(type,__res); \
638}
639
640#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
641type name(type1 arg1,type2 arg2,type3 arg3) \
642{ \
643long __res; \
644__asm__ volatile (__syscall \
645 : "=a" (__res) \
646 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
647 "d" ((long)(arg3)) : __syscall_clobber); \
648__syscall_return(type,__res); \
649}
650
651#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
652type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
653{ \
654long __res; \
655__asm__ volatile ("movq %5,%%r10 ;" __syscall \
656 : "=a" (__res) \
657 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
658 "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \
659__syscall_return(type,__res); \
660}
661
662#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
663 type5,arg5) \
664type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
665{ \
666long __res; \
667__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \
668 : "=a" (__res) \
669 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
670 "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \
671 __syscall_clobber,"r8","r10" ); \
672__syscall_return(type,__res); \
673}
674
675#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
676 type5,arg5,type6,arg6) \
677type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
678{ \
679long __res; \
680__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \
681 : "=a" (__res) \
682 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
683 "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \
684 "g" ((long)(arg6)) : \
685 __syscall_clobber,"r8","r10","r9" ); \
686__syscall_return(type,__res); \
687}
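
A usage sketch for the generators above: each invocation expands into a complete wrapper that issues the syscall instruction with the x86-64 argument registers. __NR_getpid and __NR_write are defined earlier in this file; the freestanding errno declaration is an assumption about the environment, not from the original header:

extern int errno;

_syscall0(long, getpid)			/* expands to: long getpid(void) {...} */
_syscall3(long, write, int, fd, char *, buf, unsigned long, count)

static void demo(void)
{
	char msg[] = "hello\n";

	write(1, msg, sizeof(msg) - 1);
	(void)getpid();
}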
688
689#else /* __KERNEL_SYSCALLS__ */
690
691#include <linux/syscalls.h>
692#include <asm/ptrace.h>
693
694/*
695 * We need these inline - forking from kernel space results in
696 * NO COPY ON WRITE (!!!) until an execve is executed. That is
697 * no problem except for the stack, which is handled by not letting
698 * main() use the stack at all after fork(). Thus, no function
699 * calls - which means inline code for fork too, as otherwise we
700 * would use the stack upon exit from 'fork()'.
701 *
702 * Actually only pause and fork need to be inline, so that there
703 * won't be any messing with the stack from main(), but we define
704 * some others too.
705 */
706#define __NR__exit __NR_exit
707
708static inline pid_t setsid(void)
709{
710 return sys_setsid();
711}
712
713static inline ssize_t write(unsigned int fd, char * buf, size_t count)
714{
715 return sys_write(fd, buf, count);
716}
717
718static inline ssize_t read(unsigned int fd, char * buf, size_t count)
719{
720 return sys_read(fd, buf, count);
721}
722
723static inline off_t lseek(unsigned int fd, off_t offset, unsigned int origin)
724{
725 return sys_lseek(fd, offset, origin);
726}
727
728static inline long dup(unsigned int fd)
729{
730 return sys_dup(fd);
731}
732
733/* implemented in asm in arch/x86_64/kernel/entry.S */
734extern int execve(const char *, char * const *, char * const *);
735
736static inline long open(const char * filename, int flags, int mode)
737{
738 return sys_open(filename, flags, mode);
739}
740
741static inline long close(unsigned int fd)
742{
743 return sys_close(fd);
744}
745
746static inline pid_t waitpid(int pid, int * wait_stat, int flags)
747{
748 return sys_wait4(pid, wait_stat, flags, NULL);
749}
750
751extern long sys_mmap(unsigned long addr, unsigned long len,
752 unsigned long prot, unsigned long flags,
753 unsigned long fd, unsigned long off);
754
755extern int sys_modify_ldt(int func, void *ptr, unsigned long bytecount);
756
757asmlinkage long sys_execve(char *name, char **argv, char **envp,
758 struct pt_regs regs);
759asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
760 void *parent_tid, void *child_tid,
761 struct pt_regs regs);
762asmlinkage long sys_fork(struct pt_regs regs);
763asmlinkage long sys_vfork(struct pt_regs regs);
764asmlinkage long sys_pipe(int *fildes);
765
766#endif /* __KERNEL_SYSCALLS__ */
767
768#if !defined(__ASSEMBLY__) && defined(__KERNEL__)
769
770#include <linux/linkage.h>
771#include <linux/compiler.h>
772#include <linux/types.h>
773#include <asm/ptrace.h>
774
775asmlinkage long sys_ptrace(long request, long pid,
776 unsigned long addr, long data);
777asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs);
778asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on);
779struct sigaction;
780asmlinkage long sys_rt_sigaction(int sig,
781 const struct sigaction __user *act,
782 struct sigaction __user *oact,
783 size_t sigsetsize);
784
785#endif /* !__ASSEMBLY__ && __KERNEL__ */
786
787#endif /* __NO_STUBS */
788
789/*
790 * "Conditional" syscalls
791 *
792 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
793 * but it doesn't work on all toolchains, so we just do it by hand
794 */
795#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
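
For instance, cond_syscall(sys_quotactl) expands to

asm(".weak\tsys_quotactl\n\t.set\tsys_quotactl,sys_ni_syscall");

so the symbol is a weak alias of sys_ni_syscall until a configuration that implements the call links in a strong definition.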
796
797#endif
diff --git a/include/asm-x86_64/user.h b/include/asm-x86_64/user.h
new file mode 100644
index 000000000000..12785c649ac5
--- /dev/null
+++ b/include/asm-x86_64/user.h
@@ -0,0 +1,114 @@
1#ifndef _X86_64_USER_H
2#define _X86_64_USER_H
3
4#include <asm/types.h>
5#include <asm/page.h>
6/* Core file format: The core file is written in such a way that gdb
7 can understand it and provide useful information to the user.
8 There are quite a number of obstacles to being able to view the
9 contents of the floating point registers, and until these are
10 solved you will not be able to view their contents.
11 Actually, you can read in the core file and look at the contents of
12 the user struct to find out what the floating point registers
13 contain.
14
15 The actual file contents are as follows:
16 UPAGE: 1 page consisting of a user struct that tells gdb what is present
17 in the file. Directly after this is a copy of the task_struct, which
18 is currently not used by gdb, but it may come in useful at some point.
19 All of the registers are stored as part of the upage. The upage should
20 always be only one page.
21 DATA: The data area is stored. We use current->end_text to
22 current->brk to pick up all of the user variables, plus any memory
23 that may have been malloced. No attempt is made to determine if a page
24 is demand-zero or totally unused; we just cover the entire
25 range. All of the addresses are rounded in such a way that an integral
26 number of pages is written.
27 STACK: We need the stack information in order to get a meaningful
28 backtrace. We need to write the data from (rsp) to
29 current->start_stack, so we round each of these off in order to be able
30 to write an integer number of pages.
31 The minimum core file size is 3 pages, or 12288 bytes. */
32
33/*
34 * Pentium III FXSR, SSE support
35 * Gareth Hughes <gareth@valinux.com>, May 2000
36 *
37 * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
38 * interacting with the FXSR-format floating point environment. Floating
39 * point data can be accessed in the regular format in the usual manner,
40 * and both the standard and SIMD floating point data can be accessed via
41 * the new ptrace requests. In either case, changes to the FPU environment
42 * will be reflected in the task's state as expected.
43 *
44 * x86-64 support by Andi Kleen.
45 */
46
47/* This matches the 64bit FXSAVE format as defined by AMD. It is the same
48 as the 32bit format defined by Intel, except that the selector:offset pairs for
49 data and eip are replaced with flat 64bit pointers. */
50struct user_i387_struct {
51 unsigned short cwd;
52 unsigned short swd;
53 unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
54 unsigned short fop;
55 __u64 rip;
56 __u64 rdp;
57 __u32 mxcsr;
58 __u32 mxcsr_mask;
59 __u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
60 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
61 __u32 padding[24];
62};
63
64/*
65 * Segment register layout in coredumps.
66 */
67struct user_regs_struct {
68 unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10;
69 unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
70 unsigned long rip,cs,eflags;
71 unsigned long rsp,ss;
72 unsigned long fs_base, gs_base;
73 unsigned long ds,es,fs,gs;
74};
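
This layout is what debuggers address through PTRACE_PEEKUSER: the byte offset into struct user selects a saved register, and regs sits at offset 0. A hedged userspace sketch, assuming the usual <sys/ptrace.h> and <stddef.h> interfaces:

#include <stddef.h>		/* offsetof */
#include <sys/ptrace.h>

static long peek_rip(int pid)
{
	/* regs is the first member of struct user, so the offset of rip
	   within user_regs_struct is also its offset within the upage */
	return ptrace(PTRACE_PEEKUSER, pid,
		      (void *)offsetof(struct user_regs_struct, rip),
		      (void *)0);
}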
75
76/* When the kernel dumps core, it starts by dumping the user struct -
77 this will be used by gdb to figure out where the data and stack segments
78 are within the file, and what virtual addresses to use. */
79struct user{
80/* We start with the registers, to mimic the way that "memory" is returned
81 from the ptrace(3,...) function. */
82 struct user_regs_struct regs; /* Where the registers are actually stored */
83/* ptrace does not yet supply these. Someday.... */
84 int u_fpvalid; /* True if math co-processor being used. */
85				/* Not yet used. */
86 int pad0;
87 struct user_i387_struct i387; /* Math Co-processor registers. */
88/* The rest of this junk is to help gdb figure out what goes where */
89 unsigned long int u_tsize; /* Text segment size (pages). */
90 unsigned long int u_dsize; /* Data segment size (pages). */
91 unsigned long int u_ssize; /* Stack segment size (pages). */
92 unsigned long start_code; /* Starting virtual address of text. */
93 unsigned long start_stack; /* Starting virtual address of stack area.
94				   This is actually the bottom of the stack;
95 the top of the stack is always found in the
96				   rsp register. */
97 long int signal; /* Signal that caused the core dump. */
98 int reserved; /* No longer used */
99 int pad1;
100 struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
101 /* the registers. */
102 struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
103 unsigned long magic; /* To uniquely identify a core file */
104 char u_comm[32]; /* User command that was responsible */
105 unsigned long u_debugreg[8];
106 unsigned long error_code; /* CPU error code or 0 */
107	unsigned long fault_address;	/* CR2 (the faulting address) or 0 */
108};
109#define NBPG PAGE_SIZE
110#define UPAGES 1
111#define HOST_TEXT_START_ADDR (u.start_code)
112#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
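
A sketch of how an a.out core consumer would use the macros above; both expand in terms of a struct user named u, which is how the binfmt code conventionally instantiates them (an assumption here, not shown in this header):

static unsigned long core_stack_bytes(struct user u)
{
	/* the dumped stack covers [u.start_stack, HOST_STACK_END_ADDR) */
	return HOST_STACK_END_ADDR - u.start_stack;	/* == u.u_ssize * NBPG */
}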
113
114#endif /* _X86_64_USER_H */
diff --git a/include/asm-x86_64/user32.h b/include/asm-x86_64/user32.h
new file mode 100644
index 000000000000..f769872debea
--- /dev/null
+++ b/include/asm-x86_64/user32.h
@@ -0,0 +1,69 @@
1#ifndef USER32_H
2#define USER32_H 1
3
4/* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */
5
6struct user_i387_ia32_struct {
7 u32 cwd;
8 u32 swd;
9 u32 twd;
10 u32 fip;
11 u32 fcs;
12 u32 foo;
13 u32 fos;
14 u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
15};
16
17/* FXSR (FXSAVE) frame - i.e. the FSAVE frame with extensions */
18struct user32_fxsr_struct {
19 unsigned short cwd;
20 unsigned short swd;
21 unsigned short twd; /* not compatible to 64bit twd */
22 unsigned short fop;
23 int fip;
24 int fcs;
25 int foo;
26 int fos;
27 int mxcsr;
28 int reserved;
29 int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
30 int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
31 int padding[56];
32};
33
34struct user_regs_struct32 {
35 __u32 ebx, ecx, edx, esi, edi, ebp, eax;
36 unsigned short ds, __ds, es, __es;
37 unsigned short fs, __fs, gs, __gs;
38 __u32 orig_eax, eip;
39 unsigned short cs, __cs;
40 __u32 eflags, esp;
41 unsigned short ss, __ss;
42};
43
44struct user32 {
45 struct user_regs_struct32 regs; /* Where the registers are actually stored */
46 int u_fpvalid; /* True if math co-processor being used. */
47				/* Not yet used. */
48 struct user_i387_ia32_struct i387; /* Math Co-processor registers. */
49/* The rest of this junk is to help gdb figure out what goes where */
50 __u32 u_tsize; /* Text segment size (pages). */
51 __u32 u_dsize; /* Data segment size (pages). */
52 __u32 u_ssize; /* Stack segment size (pages). */
53 __u32 start_code; /* Starting virtual address of text. */
54 __u32 start_stack; /* Starting virtual address of stack area.
55				   This is actually the bottom of the stack;
56 the top of the stack is always found in the
57 esp register. */
58 __u32 signal; /* Signal that caused the core dump. */
59	int reserved;	/* No longer used */
60 __u32 u_ar0; /* Used by gdb to help find the values for */
61 /* the registers. */
62 __u32 u_fpstate; /* Math Co-processor pointer. */
63 __u32 magic; /* To uniquely identify a core file */
64 char u_comm[32]; /* User command that was responsible */
65 int u_debugreg[8];
66};
67
68
69#endif
diff --git a/include/asm-x86_64/vga.h b/include/asm-x86_64/vga.h
new file mode 100644
index 000000000000..ef0c0e50cc95
--- /dev/null
+++ b/include/asm-x86_64/vga.h
@@ -0,0 +1,20 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6
7#ifndef _LINUX_ASM_VGA_H_
8#define _LINUX_ASM_VGA_H_
9
10/*
11 * On the PC, we can just recalculate addresses and then
12 * access the videoram directly without any black magic.
13 */
14
15#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x)
16
17#define vga_readb(x) (*(x))
18#define vga_writeb(x,y) (*(y) = (x))
19
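A minimal sketch of the intended use: map the physical text buffer once, then poke bytes through the accessors. The 0xb8000 text-mode base and the asm/io.h include (for phys_to_virt) are illustrative assumptions:

#include <asm/io.h>		/* phys_to_virt() */

static inline void vga_put_byte(unsigned int offset, unsigned char c)
{
	char *base = (char *)VGA_MAP_MEM(0xb8000);

	vga_writeb(c, base + offset);	/* expands to *(base + offset) = c */
}
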
20#endif
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
new file mode 100644
index 000000000000..b0c8d4339906
--- /dev/null
+++ b/include/asm-x86_64/vsyscall.h
@@ -0,0 +1,61 @@
1#ifndef _ASM_X86_64_VSYSCALL_H_
2#define _ASM_X86_64_VSYSCALL_H_
3
4#include <linux/seqlock.h>
5
6enum vsyscall_num {
7 __NR_vgettimeofday,
8 __NR_vtime,
9};
10
11#define VSYSCALL_START (-10UL << 20)
12#define VSYSCALL_SIZE 1024
13#define VSYSCALL_END (-2UL << 20)
14#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
15
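Because these addresses are fixed in every process, userspace can enter a vsyscall through a casted function pointer with no explicit kernel transition. A hedged sketch; the prototype mirroring gettimeofday(2) is an assumption about the vsyscall's ABI:

struct timeval;
struct timezone;

static inline long vgettimeofday(struct timeval *tv, struct timezone *tz)
{
	long (*vgtod)(struct timeval *, struct timezone *) =
		(long (*)(struct timeval *, struct timezone *))
			VSYSCALL_ADDR(__NR_vgettimeofday);

	return vgtod(tv, tz);
}
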
16#ifdef __KERNEL__
17
18#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
19#define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
20#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
21#define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
22#define __section_sysctl_vsyscall __attribute__ ((unused, __section__ (".sysctl_vsyscall"), aligned(16)))
23#define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16)))
24#define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"), aligned(16)))
25
26#define VXTIME_TSC 1
27#define VXTIME_HPET 2
28
29struct vxtime_data {
30 long hpet_address; /* HPET base address */
31 unsigned long hz; /* HPET clocks / sec */
32 int last;
33 unsigned long last_tsc;
34 long quot;
35 long tsc_quot;
36 int mode;
37};
38
39#define hpet_readl(a) readl((void *)fix_to_virt(FIX_HPET_BASE) + a)
40#define hpet_writel(d,a) writel(d, (void *)fix_to_virt(FIX_HPET_BASE) + a)
41
42/* vsyscall space (readonly) */
43extern struct vxtime_data __vxtime;
44extern struct timespec __xtime;
45extern volatile unsigned long __jiffies;
46extern unsigned long __wall_jiffies;
47extern struct timezone __sys_tz;
48extern seqlock_t __xtime_lock;
49
50/* kernel space (writeable) */
51extern struct vxtime_data vxtime;
52extern unsigned long wall_jiffies;
53extern struct timezone sys_tz;
54extern int sysctl_vsyscall;
55extern seqlock_t xtime_lock;
56
57#define ARCH_HAVE_XTIME_LOCK 1
58
59#endif /* __KERNEL__ */
60
61#endif /* _ASM_X86_64_VSYSCALL_H_ */
diff --git a/include/asm-x86_64/vsyscall32.h b/include/asm-x86_64/vsyscall32.h
new file mode 100644
index 000000000000..c631c082f8f7
--- /dev/null
+++ b/include/asm-x86_64/vsyscall32.h
@@ -0,0 +1,20 @@
1#ifndef _ASM_VSYSCALL32_H
2#define _ASM_VSYSCALL32_H 1
3
4/* Values need to match arch/x86_64/ia32/vsyscall.lds */
5
6#ifdef __ASSEMBLY__
7#define VSYSCALL32_BASE 0xffffe000
8#define VSYSCALL32_SYSEXIT (VSYSCALL32_BASE + 0x410)
9#else
10#define VSYSCALL32_BASE 0xffffe000UL
11#define VSYSCALL32_END (VSYSCALL32_BASE + PAGE_SIZE)
12#define VSYSCALL32_EHDR ((const struct elf32_hdr *) VSYSCALL32_BASE)
13
14#define VSYSCALL32_VSYSCALL ((void *)VSYSCALL32_BASE + 0x400)
15#define VSYSCALL32_SYSEXIT ((void *)VSYSCALL32_BASE + 0x410)
16#define VSYSCALL32_SIGRETURN ((void __user *)VSYSCALL32_BASE + 0x500)
17#define VSYSCALL32_RTSIGRETURN ((void __user *)VSYSCALL32_BASE + 0x600)
18#endif
19
20#endif
diff --git a/include/asm-x86_64/xor.h b/include/asm-x86_64/xor.h
new file mode 100644
index 000000000000..f942fcc21831
--- /dev/null
+++ b/include/asm-x86_64/xor.h
@@ -0,0 +1,354 @@
1/*
2 * include/asm-x86_64/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for MMX and SSE.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16
17/*
18 * Cache avoiding checksumming functions utilizing KNI instructions
19 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
20 */
21
22/*
23 * Based on
24 * High-speed RAID5 checksumming functions utilizing SSE instructions.
25 * Copyright (C) 1998 Ingo Molnar.
26 */
27
28/*
29 * x86-64 changes / gcc fixes from Andi Kleen.
30 * Copyright 2002 Andi Kleen, SuSE Labs.
31 *
32 * This hasn't been optimized for the Hammer (K8) yet, but there are
33 * likely no advantages to be gained from x86-64 here anyway.
34 */
35
36typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
37
38/* Doesn't use gcc to save the XMM registers, because there is no easy way to
39 tell it to do a clts before the register saving. */
40#define XMMS_SAVE do { \
41 preempt_disable(); \
42 asm volatile ( \
43 "movq %%cr0,%0 ;\n\t" \
44 "clts ;\n\t" \
45 "movups %%xmm0,(%1) ;\n\t" \
46 "movups %%xmm1,0x10(%1) ;\n\t" \
47 "movups %%xmm2,0x20(%1) ;\n\t" \
48 "movups %%xmm3,0x30(%1) ;\n\t" \
49 : "=&r" (cr0) \
50 : "r" (xmm_save) \
51 : "memory"); \
52} while(0)
53
54#define XMMS_RESTORE do { \
55 asm volatile ( \
56 "sfence ;\n\t" \
57 "movups (%1),%%xmm0 ;\n\t" \
58 "movups 0x10(%1),%%xmm1 ;\n\t" \
59 "movups 0x20(%1),%%xmm2 ;\n\t" \
60 "movups 0x30(%1),%%xmm3 ;\n\t" \
61 "movq %0,%%cr0 ;\n\t" \
62 : \
63 : "r" (cr0), "r" (xmm_save) \
64 : "memory"); \
65 preempt_enable(); \
66} while(0)
67
68#define OFFS(x) "16*("#x")"
69#define PF_OFFS(x) "256+16*("#x")"
70#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
71#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
72#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
73#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
74#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
75#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
76#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
77#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
78#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
79#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
80#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
81#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
82#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
83
84
85static void
86xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
87{
88 unsigned int lines = bytes >> 8;
89 unsigned long cr0;
90 xmm_store_t xmm_save[4];
91
92 XMMS_SAVE;
93
94 asm volatile (
95#undef BLOCK
96#define BLOCK(i) \
97 LD(i,0) \
98 LD(i+1,1) \
99 PF1(i) \
100 PF1(i+2) \
101 LD(i+2,2) \
102 LD(i+3,3) \
103 PF0(i+4) \
104 PF0(i+6) \
105 XO1(i,0) \
106 XO1(i+1,1) \
107 XO1(i+2,2) \
108 XO1(i+3,3) \
109 ST(i,0) \
110 ST(i+1,1) \
111 ST(i+2,2) \
112 ST(i+3,3) \
113
114
115 PF0(0)
116 PF0(2)
117
118 " .align 32 ;\n"
119 " 1: ;\n"
120
121 BLOCK(0)
122 BLOCK(4)
123 BLOCK(8)
124 BLOCK(12)
125
126 " addq %[inc], %[p1] ;\n"
127 " addq %[inc], %[p2] ;\n"
128 " decl %[cnt] ; jnz 1b"
129 : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
130 : [inc] "r" (256UL)
131 : "memory");
132
133 XMMS_RESTORE;
134}
135
136static void
137xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
138 unsigned long *p3)
139{
140 unsigned int lines = bytes >> 8;
141 xmm_store_t xmm_save[4];
142 unsigned long cr0;
143
144 XMMS_SAVE;
145
146 __asm__ __volatile__ (
147#undef BLOCK
148#define BLOCK(i) \
149 PF1(i) \
150 PF1(i+2) \
151 LD(i,0) \
152 LD(i+1,1) \
153 LD(i+2,2) \
154 LD(i+3,3) \
155 PF2(i) \
156 PF2(i+2) \
157 PF0(i+4) \
158 PF0(i+6) \
159 XO1(i,0) \
160 XO1(i+1,1) \
161 XO1(i+2,2) \
162 XO1(i+3,3) \
163 XO2(i,0) \
164 XO2(i+1,1) \
165 XO2(i+2,2) \
166 XO2(i+3,3) \
167 ST(i,0) \
168 ST(i+1,1) \
169 ST(i+2,2) \
170 ST(i+3,3) \
171
172
173 PF0(0)
174 PF0(2)
175
176 " .align 32 ;\n"
177 " 1: ;\n"
178
179 BLOCK(0)
180 BLOCK(4)
181 BLOCK(8)
182 BLOCK(12)
183
184 " addq %[inc], %[p1] ;\n"
185 " addq %[inc], %[p2] ;\n"
186 " addq %[inc], %[p3] ;\n"
187 " decl %[cnt] ; jnz 1b"
188 : [cnt] "+r" (lines),
189 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
190 : [inc] "r" (256UL)
191 : "memory");
192 XMMS_RESTORE;
193}
194
195static void
196xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
197 unsigned long *p3, unsigned long *p4)
198{
199 unsigned int lines = bytes >> 8;
200 xmm_store_t xmm_save[4];
201 unsigned long cr0;
202
203 XMMS_SAVE;
204
205 __asm__ __volatile__ (
206#undef BLOCK
207#define BLOCK(i) \
208 PF1(i) \
209 PF1(i+2) \
210 LD(i,0) \
211 LD(i+1,1) \
212 LD(i+2,2) \
213 LD(i+3,3) \
214 PF2(i) \
215 PF2(i+2) \
216 XO1(i,0) \
217 XO1(i+1,1) \
218 XO1(i+2,2) \
219 XO1(i+3,3) \
220 PF3(i) \
221 PF3(i+2) \
222 PF0(i+4) \
223 PF0(i+6) \
224 XO2(i,0) \
225 XO2(i+1,1) \
226 XO2(i+2,2) \
227 XO2(i+3,3) \
228 XO3(i,0) \
229 XO3(i+1,1) \
230 XO3(i+2,2) \
231 XO3(i+3,3) \
232 ST(i,0) \
233 ST(i+1,1) \
234 ST(i+2,2) \
235 ST(i+3,3) \
236
237
238 PF0(0)
239 PF0(2)
240
241 " .align 32 ;\n"
242 " 1: ;\n"
243
244 BLOCK(0)
245 BLOCK(4)
246 BLOCK(8)
247 BLOCK(12)
248
249 " addq %[inc], %[p1] ;\n"
250 " addq %[inc], %[p2] ;\n"
251 " addq %[inc], %[p3] ;\n"
252 " addq %[inc], %[p4] ;\n"
253 " decl %[cnt] ; jnz 1b"
254 : [cnt] "+c" (lines),
255 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
256 : [inc] "r" (256UL)
257 : "memory" );
258
259 XMMS_RESTORE;
260}
261
262static void
263xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
264 unsigned long *p3, unsigned long *p4, unsigned long *p5)
265{
266 unsigned int lines = bytes >> 8;
267 xmm_store_t xmm_save[4];
268 unsigned long cr0;
269
270 XMMS_SAVE;
271
272 __asm__ __volatile__ (
273#undef BLOCK
274#define BLOCK(i) \
275 PF1(i) \
276 PF1(i+2) \
277 LD(i,0) \
278 LD(i+1,1) \
279 LD(i+2,2) \
280 LD(i+3,3) \
281 PF2(i) \
282 PF2(i+2) \
283 XO1(i,0) \
284 XO1(i+1,1) \
285 XO1(i+2,2) \
286 XO1(i+3,3) \
287 PF3(i) \
288 PF3(i+2) \
289 XO2(i,0) \
290 XO2(i+1,1) \
291 XO2(i+2,2) \
292 XO2(i+3,3) \
293 PF4(i) \
294 PF4(i+2) \
295 PF0(i+4) \
296 PF0(i+6) \
297 XO3(i,0) \
298 XO3(i+1,1) \
299 XO3(i+2,2) \
300 XO3(i+3,3) \
301 XO4(i,0) \
302 XO4(i+1,1) \
303 XO4(i+2,2) \
304 XO4(i+3,3) \
305 ST(i,0) \
306 ST(i+1,1) \
307 ST(i+2,2) \
308 ST(i+3,3) \
309
310
311 PF0(0)
312 PF0(2)
313
314 " .align 32 ;\n"
315 " 1: ;\n"
316
317 BLOCK(0)
318 BLOCK(4)
319 BLOCK(8)
320 BLOCK(12)
321
322 " addq %[inc], %[p1] ;\n"
323 " addq %[inc], %[p2] ;\n"
324 " addq %[inc], %[p3] ;\n"
325 " addq %[inc], %[p4] ;\n"
326 " addq %[inc], %[p5] ;\n"
327 " decl %[cnt] ; jnz 1b"
328 : [cnt] "+c" (lines),
329 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
330 [p5] "+r" (p5)
331 : [inc] "r" (256UL)
332 : "memory");
333
334 XMMS_RESTORE;
335}
336
337static struct xor_block_template xor_block_sse = {
338 .name = "generic_sse",
339 .do_2 = xor_sse_2,
340 .do_3 = xor_sse_3,
341 .do_4 = xor_sse_4,
342 .do_5 = xor_sse_5,
343};
344
345#undef XOR_TRY_TEMPLATES
346#define XOR_TRY_TEMPLATES \
347 do { \
348 xor_speed(&xor_block_sse); \
349 } while (0)
350
351/* We force the use of the SSE xor block because it can write around the
352 L2 cache. We may also be able to load into only the L1 cache, depending
353 on how the cpu deals with a load to a line that is being prefetched. */
354#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
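
For context, the RAID code calibrates a template via XOR_TRY_TEMPLATES and then drives it through a generic entry point. A consumer-side sketch, assuming the xor_block() interface from drivers/md/xor.c (destination buffer first, then source buffers):

extern void xor_block(unsigned int count, unsigned int bytes, void **ptr);

static void xor_two_sources(void *dest, void *src1, void *src2,
			    unsigned int bytes)
{
	void *ptrs[3] = { dest, src1, src2 };

	xor_block(3, bytes, ptrs);	/* dest ^= src1 ^ src2 */
}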