Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/Kbuild | 11
-rw-r--r--  include/asm-x86_64/acpi.h | 2
-rw-r--r--  include/asm-x86_64/alternative.h | 136
-rw-r--r--  include/asm-x86_64/apic.h | 27
-rw-r--r--  include/asm-x86_64/apicdef.h | 2
-rw-r--r--  include/asm-x86_64/atomic.h | 43
-rw-r--r--  include/asm-x86_64/bitops.h | 8
-rw-r--r--  include/asm-x86_64/bugs.h | 1
-rw-r--r--  include/asm-x86_64/cache.h | 1
-rw-r--r--  include/asm-x86_64/calgary.h | 63
-rw-r--r--  include/asm-x86_64/calling.h | 1
-rw-r--r--  include/asm-x86_64/cpufeature.h | 3
-rw-r--r--  include/asm-x86_64/dma-mapping.h | 18
-rw-r--r--  include/asm-x86_64/dma.h | 3
-rw-r--r--  include/asm-x86_64/dwarf2.h | 1
-rw-r--r--  include/asm-x86_64/elf.h | 20
-rw-r--r--  include/asm-x86_64/fixmap.h | 1
-rw-r--r--  include/asm-x86_64/floppy.h | 8
-rw-r--r--  include/asm-x86_64/gart-mapping.h | 16
-rw-r--r--  include/asm-x86_64/hardirq.h | 1
-rw-r--r--  include/asm-x86_64/hpet.h | 2
-rw-r--r--  include/asm-x86_64/hw_irq.h | 14
-rw-r--r--  include/asm-x86_64/ia32.h | 1
-rw-r--r--  include/asm-x86_64/ia32_unistd.h | 308
-rw-r--r--  include/asm-x86_64/intel_arch_perfmon.h | 19
-rw-r--r--  include/asm-x86_64/io.h | 1
-rw-r--r--  include/asm-x86_64/io_apic.h | 1
-rw-r--r--  include/asm-x86_64/irqflags.h | 141
-rw-r--r--  include/asm-x86_64/k8.h | 14
-rw-r--r--  include/asm-x86_64/kdebug.h | 4
-rw-r--r--  include/asm-x86_64/kprobes.h | 2
-rw-r--r--  include/asm-x86_64/local.h | 26
-rw-r--r--  include/asm-x86_64/mce.h | 13
-rw-r--r--  include/asm-x86_64/mmu_context.h | 1
-rw-r--r--  include/asm-x86_64/mmzone.h | 2
-rw-r--r--  include/asm-x86_64/msi.h | 10
-rw-r--r--  include/asm-x86_64/mtrr.h | 9
-rw-r--r--  include/asm-x86_64/mutex.h | 4
-rw-r--r--  include/asm-x86_64/nmi.h | 30
-rw-r--r--  include/asm-x86_64/numa.h | 1
-rw-r--r--  include/asm-x86_64/page.h | 7
-rw-r--r--  include/asm-x86_64/param.h | 1
-rw-r--r--  include/asm-x86_64/pci.h | 5
-rw-r--r--  include/asm-x86_64/percpu.h | 4
-rw-r--r--  include/asm-x86_64/pgtable.h | 6
-rw-r--r--  include/asm-x86_64/processor.h | 12
-rw-r--r--  include/asm-x86_64/proto.h | 15
-rw-r--r--  include/asm-x86_64/rwlock.h | 8
-rw-r--r--  include/asm-x86_64/semaphore.h | 8
-rw-r--r--  include/asm-x86_64/serial.h | 1
-rw-r--r--  include/asm-x86_64/signal.h | 4
-rw-r--r--  include/asm-x86_64/smp.h | 3
-rw-r--r--  include/asm-x86_64/socket.h | 1
-rw-r--r--  include/asm-x86_64/spinlock.h | 14
-rw-r--r--  include/asm-x86_64/string.h | 3
-rw-r--r--  include/asm-x86_64/swiotlb.h | 3
-rw-r--r--  include/asm-x86_64/system.h | 126
-rw-r--r--  include/asm-x86_64/tce.h | 49
-rw-r--r--  include/asm-x86_64/thread_info.h | 19
-rw-r--r--  include/asm-x86_64/tlbflush.h | 1
-rw-r--r--  include/asm-x86_64/topology.h | 11
-rw-r--r--  include/asm-x86_64/uaccess.h | 1
-rw-r--r--  include/asm-x86_64/unistd.h | 22
-rw-r--r--  include/asm-x86_64/unwind.h | 107
-rw-r--r--  include/asm-x86_64/vga.h | 2
-rw-r--r--  include/asm-x86_64/vsyscall.h | 3
66 files changed, 778 insertions(+), 627 deletions(-)
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
new file mode 100644
index 00000000000..dc4d101e8a1
--- /dev/null
+++ b/include/asm-x86_64/Kbuild
@@ -0,0 +1,11 @@
+include include/asm-generic/Kbuild.asm
+
+ALTARCH := i386
+ARCHDEF := defined __x86_64__
+ALTARCHDEF := defined __i386__
+
+header-y += boot.h bootsetup.h cpufeature.h debugreg.h ldt.h \
+	msr.h prctl.h setup.h sigcontext32.h ucontext.h \
+	vsyscall32.h
+
+unifdef-y += mce.h mtrr.h vsyscall.h
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index aa1c7b2e438..2c95a319c05 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -162,6 +162,8 @@ extern int acpi_pci_disabled;
 
 extern u8 x86_acpiid_to_apicid[];
 
+#define ARCH_HAS_POWER_INIT 1
+
 extern int acpi_skip_timer_override;
 
 #endif /*__KERNEL__*/
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
new file mode 100644
index 00000000000..a584826cc57
--- /dev/null
+++ b/include/asm-x86_64/alternative.h
@@ -0,0 +1,136 @@
1#ifndef _X86_64_ALTERNATIVE_H
2#define _X86_64_ALTERNATIVE_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7#include <asm/cpufeature.h>
8
9struct alt_instr {
10 u8 *instr; /* original instruction */
11 u8 *replacement;
12 u8 cpuid; /* cpuid bit set for replacement */
13 u8 instrlen; /* length of original instruction */
14 u8 replacementlen; /* length of new instruction, <= instrlen */
15 u8 pad[5];
16};
17
18extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
19
20struct module;
21
22#ifdef CONFIG_SMP
23extern void alternatives_smp_module_add(struct module *mod, char *name,
24 void *locks, void *locks_end,
25 void *text, void *text_end);
26extern void alternatives_smp_module_del(struct module *mod);
27extern void alternatives_smp_switch(int smp);
28#else
29static inline void alternatives_smp_module_add(struct module *mod, char *name,
30 void *locks, void *locks_end,
31 void *text, void *text_end) {}
32static inline void alternatives_smp_module_del(struct module *mod) {}
33static inline void alternatives_smp_switch(int smp) {}
34#endif
35
36#endif
37
38/*
39 * Alternative instructions for different CPU types or capabilities.
40 *
41 * This allows to use optimized instructions even on generic binary
42 * kernels.
43 *
44 * length of oldinstr must be longer or equal the length of newinstr
45 * It can be padded with nops as needed.
46 *
47 * For non barrier like inlines please define new variants
48 * without volatile and memory clobber.
49 */
50#define alternative(oldinstr, newinstr, feature) \
51 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
52 ".section .altinstructions,\"a\"\n" \
53 " .align 8\n" \
54 " .quad 661b\n" /* label */ \
55 " .quad 663f\n" /* new instruction */ \
56 " .byte %c0\n" /* feature bit */ \
57 " .byte 662b-661b\n" /* sourcelen */ \
58 " .byte 664f-663f\n" /* replacementlen */ \
59 ".previous\n" \
60 ".section .altinstr_replacement,\"ax\"\n" \
61 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
62 ".previous" :: "i" (feature) : "memory")
63
64/*
65 * Alternative inline assembly with input.
66 *
67 * Pecularities:
68 * No memory clobber here.
69 * Argument numbers start with 1.
70 * Best is to use constraints that are fixed size (like (%1) ... "r")
71 * If you use variable sized constraints like "m" or "g" in the
72 * replacement make sure to pad to the worst case length.
73 */
74#define alternative_input(oldinstr, newinstr, feature, input...) \
75 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
76 ".section .altinstructions,\"a\"\n" \
77 " .align 8\n" \
78 " .quad 661b\n" /* label */ \
79 " .quad 663f\n" /* new instruction */ \
80 " .byte %c0\n" /* feature bit */ \
81 " .byte 662b-661b\n" /* sourcelen */ \
82 " .byte 664f-663f\n" /* replacementlen */ \
83 ".previous\n" \
84 ".section .altinstr_replacement,\"ax\"\n" \
85 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
86 ".previous" :: "i" (feature), ##input)
87
88/* Like alternative_input, but with a single output argument */
89#define alternative_io(oldinstr, newinstr, feature, output, input...) \
90 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
91 ".section .altinstructions,\"a\"\n" \
92 " .align 8\n" \
93 " .quad 661b\n" /* label */ \
94 " .quad 663f\n" /* new instruction */ \
95 " .byte %c[feat]\n" /* feature bit */ \
96 " .byte 662b-661b\n" /* sourcelen */ \
97 " .byte 664f-663f\n" /* replacementlen */ \
98 ".previous\n" \
99 ".section .altinstr_replacement,\"ax\"\n" \
100 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
101 ".previous" : output : [feat] "i" (feature), ##input)
102
103/*
104 * Alternative inline assembly for SMP.
105 *
106 * The LOCK_PREFIX macro defined here replaces the LOCK and
107 * LOCK_PREFIX macros used everywhere in the source tree.
108 *
109 * SMP alternatives use the same data structures as the other
110 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
111 * UP system running a SMP kernel. The existing apply_alternatives()
112 * works fine for patching a SMP kernel for UP.
113 *
114 * The SMP alternative tables can be kept after boot and contain both
115 * UP and SMP versions of the instructions to allow switching back to
116 * SMP at runtime, when hotplugging in a new CPU, which is especially
117 * useful in virtualized environments.
118 *
119 * The very common lock prefix is handled as special case in a
120 * separate table which is a pure address list without replacement ptr
121 * and size information. That keeps the table sizes small.
122 */
123
124#ifdef CONFIG_SMP
125#define LOCK_PREFIX \
126 ".section .smp_locks,\"a\"\n" \
127 " .align 8\n" \
128 " .quad 661f\n" /* address */ \
129 ".previous\n" \
130 "661:\n\tlock; "
131
132#else /* ! CONFIG_SMP */
133#define LOCK_PREFIX ""
134#endif
135
136#endif /* _X86_64_ALTERNATIVE_H */
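
[Note: the following usage sketch is not part of the patch. It illustrates how the alternative_input() macro above is typically used, modeled on the i386 prefetchw() helper: the oldinstr is a NOP of at least the replacement's length, and the replacement is patched in at boot only when the CPU advertises the feature bit. The helper name here is illustrative.]

static inline void example_prefetchw(const void *x)
{
	/* 3DNow! CPUs get a real prefetchw; others keep the padded NOP */
	alternative_input(ASM_NOP5,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
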
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index bdbd8935612..9c96a0a8d1b 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_APIC_H
 #define __ASM_APIC_H
 
-#include <linux/config.h>
 #include <linux/pm.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
@@ -50,7 +49,8 @@ static __inline unsigned int apic_read(unsigned long reg)
 
 static __inline__ void apic_wait_icr_idle(void)
 {
-	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
+	while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
+		cpu_relax();
 }
 
 static inline void ack_APIC_irq(void)
@@ -80,30 +80,23 @@ extern void init_apic_mappings (void);
 extern void smp_local_timer_interrupt (struct pt_regs * regs);
 extern void setup_boot_APIC_clock (void);
 extern void setup_secondary_APIC_clock (void);
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
 extern int APIC_init_uniprocessor (void);
 extern void disable_APIC_timer(void);
 extern void enable_APIC_timer(void);
 extern void clustered_apic_check(void);
 
-extern void nmi_watchdog_default(void);
-extern int setup_nmi_watchdog(char *);
+extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
+				   unsigned char msg_type, unsigned char mask);
 
-extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT	-1
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
+#define K8_APIC_EXT_LVT_BASE	0x500
+#define K8_APIC_EXT_INT_MSG_FIX	0x0
+#define K8_APIC_EXT_INT_MSG_SMI	0x2
+#define K8_APIC_EXT_INT_MSG_NMI	0x4
+#define K8_APIC_EXT_INT_MSG_EXT	0x7
+#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD	0
 
 extern int disable_timer_pin_1;
 
-extern void setup_threshold_lvt(unsigned long lvt_off);
 
 void smp_send_timer_broadcast_ipi(void);
 void switch_APIC_timer_to_ipi(void *cpumask);
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index 5a48e9bcf21..1dd40067c67 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -137,8 +137,6 @@
  */
 #define u32 unsigned int
 
-#define lapic ((volatile struct local_apic *)APIC_BASE)
-
 struct local_apic {
 
 /*000*/	struct { u32 __reserved[4]; } __reserved_01;
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index cecbf7baa6a..007e88d6d43 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -1,8 +1,7 @@
1#ifndef __ARCH_X86_64_ATOMIC__ 1#ifndef __ARCH_X86_64_ATOMIC__
2#define __ARCH_X86_64_ATOMIC__ 2#define __ARCH_X86_64_ATOMIC__
3 3
4#include <linux/config.h> 4#include <asm/alternative.h>
5#include <asm/types.h>
6 5
7/* atomic_t should be 32 bit signed type */ 6/* atomic_t should be 32 bit signed type */
8 7
@@ -53,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t;
53static __inline__ void atomic_add(int i, atomic_t *v) 52static __inline__ void atomic_add(int i, atomic_t *v)
54{ 53{
55 __asm__ __volatile__( 54 __asm__ __volatile__(
56 LOCK "addl %1,%0" 55 LOCK_PREFIX "addl %1,%0"
57 :"=m" (v->counter) 56 :"=m" (v->counter)
58 :"ir" (i), "m" (v->counter)); 57 :"ir" (i), "m" (v->counter));
59} 58}
@@ -68,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
68static __inline__ void atomic_sub(int i, atomic_t *v) 67static __inline__ void atomic_sub(int i, atomic_t *v)
69{ 68{
70 __asm__ __volatile__( 69 __asm__ __volatile__(
71 LOCK "subl %1,%0" 70 LOCK_PREFIX "subl %1,%0"
72 :"=m" (v->counter) 71 :"=m" (v->counter)
73 :"ir" (i), "m" (v->counter)); 72 :"ir" (i), "m" (v->counter));
74} 73}
@@ -87,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
87 unsigned char c; 86 unsigned char c;
88 87
89 __asm__ __volatile__( 88 __asm__ __volatile__(
90 LOCK "subl %2,%0; sete %1" 89 LOCK_PREFIX "subl %2,%0; sete %1"
91 :"=m" (v->counter), "=qm" (c) 90 :"=m" (v->counter), "=qm" (c)
92 :"ir" (i), "m" (v->counter) : "memory"); 91 :"ir" (i), "m" (v->counter) : "memory");
93 return c; 92 return c;
@@ -102,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
102static __inline__ void atomic_inc(atomic_t *v) 101static __inline__ void atomic_inc(atomic_t *v)
103{ 102{
104 __asm__ __volatile__( 103 __asm__ __volatile__(
105 LOCK "incl %0" 104 LOCK_PREFIX "incl %0"
106 :"=m" (v->counter) 105 :"=m" (v->counter)
107 :"m" (v->counter)); 106 :"m" (v->counter));
108} 107}
@@ -116,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v)
116static __inline__ void atomic_dec(atomic_t *v) 115static __inline__ void atomic_dec(atomic_t *v)
117{ 116{
118 __asm__ __volatile__( 117 __asm__ __volatile__(
119 LOCK "decl %0" 118 LOCK_PREFIX "decl %0"
120 :"=m" (v->counter) 119 :"=m" (v->counter)
121 :"m" (v->counter)); 120 :"m" (v->counter));
122} 121}
@@ -134,7 +133,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
134 unsigned char c; 133 unsigned char c;
135 134
136 __asm__ __volatile__( 135 __asm__ __volatile__(
137 LOCK "decl %0; sete %1" 136 LOCK_PREFIX "decl %0; sete %1"
138 :"=m" (v->counter), "=qm" (c) 137 :"=m" (v->counter), "=qm" (c)
139 :"m" (v->counter) : "memory"); 138 :"m" (v->counter) : "memory");
140 return c != 0; 139 return c != 0;
@@ -153,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
153 unsigned char c; 152 unsigned char c;
154 153
155 __asm__ __volatile__( 154 __asm__ __volatile__(
156 LOCK "incl %0; sete %1" 155 LOCK_PREFIX "incl %0; sete %1"
157 :"=m" (v->counter), "=qm" (c) 156 :"=m" (v->counter), "=qm" (c)
158 :"m" (v->counter) : "memory"); 157 :"m" (v->counter) : "memory");
159 return c != 0; 158 return c != 0;
@@ -173,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
173 unsigned char c; 172 unsigned char c;
174 173
175 __asm__ __volatile__( 174 __asm__ __volatile__(
176 LOCK "addl %2,%0; sets %1" 175 LOCK_PREFIX "addl %2,%0; sets %1"
177 :"=m" (v->counter), "=qm" (c) 176 :"=m" (v->counter), "=qm" (c)
178 :"ir" (i), "m" (v->counter) : "memory"); 177 :"ir" (i), "m" (v->counter) : "memory");
179 return c; 178 return c;
@@ -190,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
190{ 189{
191 int __i = i; 190 int __i = i;
192 __asm__ __volatile__( 191 __asm__ __volatile__(
193 LOCK "xaddl %0, %1;" 192 LOCK_PREFIX "xaddl %0, %1;"
194 :"=r"(i) 193 :"=r"(i)
195 :"m"(v->counter), "0"(i)); 194 :"m"(v->counter), "0"(i));
196 return i + __i; 195 return i + __i;
@@ -238,7 +237,7 @@ typedef struct { volatile long counter; } atomic64_t;
238static __inline__ void atomic64_add(long i, atomic64_t *v) 237static __inline__ void atomic64_add(long i, atomic64_t *v)
239{ 238{
240 __asm__ __volatile__( 239 __asm__ __volatile__(
241 LOCK "addq %1,%0" 240 LOCK_PREFIX "addq %1,%0"
242 :"=m" (v->counter) 241 :"=m" (v->counter)
243 :"ir" (i), "m" (v->counter)); 242 :"ir" (i), "m" (v->counter));
244} 243}
@@ -253,7 +252,7 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
253static __inline__ void atomic64_sub(long i, atomic64_t *v) 252static __inline__ void atomic64_sub(long i, atomic64_t *v)
254{ 253{
255 __asm__ __volatile__( 254 __asm__ __volatile__(
256 LOCK "subq %1,%0" 255 LOCK_PREFIX "subq %1,%0"
257 :"=m" (v->counter) 256 :"=m" (v->counter)
258 :"ir" (i), "m" (v->counter)); 257 :"ir" (i), "m" (v->counter));
259} 258}
@@ -272,7 +271,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
272 unsigned char c; 271 unsigned char c;
273 272
274 __asm__ __volatile__( 273 __asm__ __volatile__(
275 LOCK "subq %2,%0; sete %1" 274 LOCK_PREFIX "subq %2,%0; sete %1"
276 :"=m" (v->counter), "=qm" (c) 275 :"=m" (v->counter), "=qm" (c)
277 :"ir" (i), "m" (v->counter) : "memory"); 276 :"ir" (i), "m" (v->counter) : "memory");
278 return c; 277 return c;
@@ -287,7 +286,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
287static __inline__ void atomic64_inc(atomic64_t *v) 286static __inline__ void atomic64_inc(atomic64_t *v)
288{ 287{
289 __asm__ __volatile__( 288 __asm__ __volatile__(
290 LOCK "incq %0" 289 LOCK_PREFIX "incq %0"
291 :"=m" (v->counter) 290 :"=m" (v->counter)
292 :"m" (v->counter)); 291 :"m" (v->counter));
293} 292}
@@ -301,7 +300,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
301static __inline__ void atomic64_dec(atomic64_t *v) 300static __inline__ void atomic64_dec(atomic64_t *v)
302{ 301{
303 __asm__ __volatile__( 302 __asm__ __volatile__(
304 LOCK "decq %0" 303 LOCK_PREFIX "decq %0"
305 :"=m" (v->counter) 304 :"=m" (v->counter)
306 :"m" (v->counter)); 305 :"m" (v->counter));
307} 306}
@@ -319,7 +318,7 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
319 unsigned char c; 318 unsigned char c;
320 319
321 __asm__ __volatile__( 320 __asm__ __volatile__(
322 LOCK "decq %0; sete %1" 321 LOCK_PREFIX "decq %0; sete %1"
323 :"=m" (v->counter), "=qm" (c) 322 :"=m" (v->counter), "=qm" (c)
324 :"m" (v->counter) : "memory"); 323 :"m" (v->counter) : "memory");
325 return c != 0; 324 return c != 0;
@@ -338,7 +337,7 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
338 unsigned char c; 337 unsigned char c;
339 338
340 __asm__ __volatile__( 339 __asm__ __volatile__(
341 LOCK "incq %0; sete %1" 340 LOCK_PREFIX "incq %0; sete %1"
342 :"=m" (v->counter), "=qm" (c) 341 :"=m" (v->counter), "=qm" (c)
343 :"m" (v->counter) : "memory"); 342 :"m" (v->counter) : "memory");
344 return c != 0; 343 return c != 0;
@@ -358,7 +357,7 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
358 unsigned char c; 357 unsigned char c;
359 358
360 __asm__ __volatile__( 359 __asm__ __volatile__(
361 LOCK "addq %2,%0; sets %1" 360 LOCK_PREFIX "addq %2,%0; sets %1"
362 :"=m" (v->counter), "=qm" (c) 361 :"=m" (v->counter), "=qm" (c)
363 :"ir" (i), "m" (v->counter) : "memory"); 362 :"ir" (i), "m" (v->counter) : "memory");
364 return c; 363 return c;
@@ -375,7 +374,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
375{ 374{
376 long __i = i; 375 long __i = i;
377 __asm__ __volatile__( 376 __asm__ __volatile__(
378 LOCK "xaddq %0, %1;" 377 LOCK_PREFIX "xaddq %0, %1;"
379 :"=r"(i) 378 :"=r"(i)
380 :"m"(v->counter), "0"(i)); 379 :"m"(v->counter), "0"(i));
381 return i + __i; 380 return i + __i;
@@ -419,11 +418,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
419 418
420/* These are x86-specific, used by some header files */ 419/* These are x86-specific, used by some header files */
421#define atomic_clear_mask(mask, addr) \ 420#define atomic_clear_mask(mask, addr) \
422__asm__ __volatile__(LOCK "andl %0,%1" \ 421__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
423: : "r" (~(mask)),"m" (*addr) : "memory") 422: : "r" (~(mask)),"m" (*addr) : "memory")
424 423
425#define atomic_set_mask(mask, addr) \ 424#define atomic_set_mask(mask, addr) \
426__asm__ __volatile__(LOCK "orl %0,%1" \ 425__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
427: : "r" ((unsigned)mask),"m" (*(addr)) : "memory") 426: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
428 427
429/* Atomic operations are already serializing on x86 */ 428/* Atomic operations are already serializing on x86 */
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index 79212128d0f..f7ba57b1cc0 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -5,13 +5,7 @@
  * Copyright 1992, Linus Torvalds.
  */
 
-#include <linux/config.h>
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
+#include <asm/alternative.h>
 
 #define ADDR (*(volatile long *) addr)
 
diff --git a/include/asm-x86_64/bugs.h b/include/asm-x86_64/bugs.h
index 59bc68925d0..d86c5dd689f 100644
--- a/include/asm-x86_64/bugs.h
+++ b/include/asm-x86_64/bugs.h
@@ -10,7 +10,6 @@
  * void check_bugs(void);
  */
 
-#include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/msr.h>
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index f8dff1c6753..ed8a9d25272 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -4,7 +4,6 @@
 #ifndef __ARCH_X8664_CACHE_H
 #define __ARCH_X8664_CACHE_H
 
-#include <linux/config.h>
 
 /* L1 cache line size */
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
new file mode 100644
index 00000000000..4e391952424
--- /dev/null
+++ b/include/asm-x86_64/calgary.h
@@ -0,0 +1,63 @@
1/*
2 * Derived from include/asm-powerpc/iommu.h
3 *
4 * Copyright (C) IBM Corporation, 2006
5 *
6 * Author: Jon Mason <jdmason@us.ibm.com>
7 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef _ASM_X86_64_CALGARY_H
25#define _ASM_X86_64_CALGARY_H
26
27#include <linux/config.h>
28#include <linux/spinlock.h>
29#include <linux/device.h>
30#include <linux/dma-mapping.h>
31#include <asm/types.h>
32
33struct iommu_table {
34 unsigned long it_base; /* mapped address of tce table */
35 unsigned long it_hint; /* Hint for next alloc */
36 unsigned long *it_map; /* A simple allocation bitmap for now */
37 spinlock_t it_lock; /* Protects it_map */
38 unsigned int it_size; /* Size of iommu table in entries */
39 unsigned char it_busno; /* Bus number this table belongs to */
40 void __iomem *bbar;
41 u64 tar_val;
42 struct timer_list watchdog_timer;
43};
44
45#define TCE_TABLE_SIZE_UNSPECIFIED ~0
46#define TCE_TABLE_SIZE_64K 0
47#define TCE_TABLE_SIZE_128K 1
48#define TCE_TABLE_SIZE_256K 2
49#define TCE_TABLE_SIZE_512K 3
50#define TCE_TABLE_SIZE_1M 4
51#define TCE_TABLE_SIZE_2M 5
52#define TCE_TABLE_SIZE_4M 6
53#define TCE_TABLE_SIZE_8M 7
54
55#ifdef CONFIG_CALGARY_IOMMU
56extern int calgary_iommu_init(void);
57extern void detect_calgary(void);
58#else
59static inline int calgary_iommu_init(void) { return 1; }
60static inline void detect_calgary(void) { return; }
61#endif
62
63#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/calling.h b/include/asm-x86_64/calling.h
index fc2c5a6c262..6f4f63af96e 100644
--- a/include/asm-x86_64/calling.h
+++ b/include/asm-x86_64/calling.h
@@ -2,7 +2,6 @@
  * Some macros to handle stack frames in assembly.
  */
 
-#include <linux/config.h>
 
 #define R15 0
 #define R14 8
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 662964b74e3..ee792faaca0 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -46,6 +46,7 @@
 #define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
 #define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
 #define X86_FEATURE_FXSR_OPT	(1*32+25) /* FXSR optimizations */
+#define X86_FEATURE_RDTSCP	(1*32+27) /* RDTSCP */
 #define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
 #define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
 #define X86_FEATURE_3DNOW	(1*32+31) /* 3DNow! */
@@ -65,6 +66,8 @@
 #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
 #define X86_FEATURE_SYNC_RDTSC	(3*32+6) /* RDTSC syncs CPU core */
 #define X86_FEATURE_FXSAVE_LEAK	(3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
+#define X86_FEATURE_UP		(3*32+8) /* SMP kernel running on UP */
+#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 49a81a66516..b6da83dcc7a 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -6,7 +6,6 @@
6 * documentation. 6 * documentation.
7 */ 7 */
8 8
9#include <linux/config.h>
10 9
11#include <asm/scatterlist.h> 10#include <asm/scatterlist.h>
12#include <asm/io.h> 11#include <asm/io.h>
@@ -56,6 +55,13 @@ extern dma_addr_t bad_dma_address;
56extern struct dma_mapping_ops* dma_ops; 55extern struct dma_mapping_ops* dma_ops;
57extern int iommu_merge; 56extern int iommu_merge;
58 57
58static inline int valid_dma_direction(int dma_direction)
59{
60 return ((dma_direction == DMA_BIDIRECTIONAL) ||
61 (dma_direction == DMA_TO_DEVICE) ||
62 (dma_direction == DMA_FROM_DEVICE));
63}
64
59static inline int dma_mapping_error(dma_addr_t dma_addr) 65static inline int dma_mapping_error(dma_addr_t dma_addr)
60{ 66{
61 if (dma_ops->mapping_error) 67 if (dma_ops->mapping_error)
@@ -73,6 +79,7 @@ static inline dma_addr_t
73dma_map_single(struct device *hwdev, void *ptr, size_t size, 79dma_map_single(struct device *hwdev, void *ptr, size_t size,
74 int direction) 80 int direction)
75{ 81{
82 BUG_ON(!valid_dma_direction(direction));
76 return dma_ops->map_single(hwdev, ptr, size, direction); 83 return dma_ops->map_single(hwdev, ptr, size, direction);
77} 84}
78 85
@@ -80,6 +87,7 @@ static inline void
80dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, 87dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
81 int direction) 88 int direction)
82{ 89{
90 BUG_ON(!valid_dma_direction(direction));
83 dma_ops->unmap_single(dev, addr, size, direction); 91 dma_ops->unmap_single(dev, addr, size, direction);
84} 92}
85 93
@@ -92,6 +100,7 @@ static inline void
92dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 100dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
93 size_t size, int direction) 101 size_t size, int direction)
94{ 102{
103 BUG_ON(!valid_dma_direction(direction));
95 if (dma_ops->sync_single_for_cpu) 104 if (dma_ops->sync_single_for_cpu)
96 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, 105 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
97 direction); 106 direction);
@@ -102,6 +111,7 @@ static inline void
102dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, 111dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
103 size_t size, int direction) 112 size_t size, int direction)
104{ 113{
114 BUG_ON(!valid_dma_direction(direction));
105 if (dma_ops->sync_single_for_device) 115 if (dma_ops->sync_single_for_device)
106 dma_ops->sync_single_for_device(hwdev, dma_handle, size, 116 dma_ops->sync_single_for_device(hwdev, dma_handle, size,
107 direction); 117 direction);
@@ -112,6 +122,7 @@ static inline void
112dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 122dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
113 unsigned long offset, size_t size, int direction) 123 unsigned long offset, size_t size, int direction)
114{ 124{
125 BUG_ON(!valid_dma_direction(direction));
115 if (dma_ops->sync_single_range_for_cpu) { 126 if (dma_ops->sync_single_range_for_cpu) {
116 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction); 127 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
117 } 128 }
@@ -123,6 +134,7 @@ static inline void
123dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, 134dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
124 unsigned long offset, size_t size, int direction) 135 unsigned long offset, size_t size, int direction)
125{ 136{
137 BUG_ON(!valid_dma_direction(direction));
126 if (dma_ops->sync_single_range_for_device) 138 if (dma_ops->sync_single_range_for_device)
127 dma_ops->sync_single_range_for_device(hwdev, dma_handle, 139 dma_ops->sync_single_range_for_device(hwdev, dma_handle,
128 offset, size, direction); 140 offset, size, direction);
@@ -134,6 +146,7 @@ static inline void
134dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 146dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
135 int nelems, int direction) 147 int nelems, int direction)
136{ 148{
149 BUG_ON(!valid_dma_direction(direction));
137 if (dma_ops->sync_sg_for_cpu) 150 if (dma_ops->sync_sg_for_cpu)
138 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); 151 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
139 flush_write_buffers(); 152 flush_write_buffers();
@@ -143,6 +156,7 @@ static inline void
143dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 156dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
144 int nelems, int direction) 157 int nelems, int direction)
145{ 158{
159 BUG_ON(!valid_dma_direction(direction));
146 if (dma_ops->sync_sg_for_device) { 160 if (dma_ops->sync_sg_for_device) {
147 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); 161 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
148 } 162 }
@@ -153,6 +167,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
153static inline int 167static inline int
154dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) 168dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
155{ 169{
170 BUG_ON(!valid_dma_direction(direction));
156 return dma_ops->map_sg(hwdev, sg, nents, direction); 171 return dma_ops->map_sg(hwdev, sg, nents, direction);
157} 172}
158 173
@@ -160,6 +175,7 @@ static inline void
160dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, 175dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
161 int direction) 176 int direction)
162{ 177{
178 BUG_ON(!valid_dma_direction(direction));
163 dma_ops->unmap_sg(hwdev, sg, nents, direction); 179 dma_ops->unmap_sg(hwdev, sg, nents, direction);
164} 180}
165 181
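
[Note: the following sketch is not part of the patch. It shows the driver-side view of the wrappers changed above; after this patch, a bogus direction argument such as DMA_NONE trips the new BUG_ON(!valid_dma_direction(direction)) check at map time. The function and buffer names are illustrative only.]

#include <linux/dma-mapping.h>

static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* map for device reads; direction must be one of the valid values */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... hand "handle" to the hardware and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
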
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
index 6f2a817b6a7..a37c16f0628 100644
--- a/include/asm-x86_64/dma.h
+++ b/include/asm-x86_64/dma.h
@@ -1,4 +1,4 @@
-/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $
+/*
  * linux/include/asm/dma.h: Defines for using and allocating dma channels.
  * Written by Hennus Bergman, 1992.
  * High DMA channel support & info by Hannu Savolainen
@@ -8,7 +8,6 @@
 #ifndef _ASM_DMA_H
 #define _ASM_DMA_H
 
-#include <linux/config.h>
 #include <linux/spinlock.h>	/* And spinlocks */
 #include <asm/io.h>		/* need byte IO */
 #include <linux/delay.h>
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
index 07654bd155b..0744db77767 100644
--- a/include/asm-x86_64/dwarf2.h
+++ b/include/asm-x86_64/dwarf2.h
@@ -1,7 +1,6 @@
 #ifndef _DWARF2_H
 #define _DWARF2_H 1
 
-#include <linux/config.h>
 
 #ifndef __ASSEMBLY__
 #warning "asm/dwarf2.h should be only included in pure assembly files"
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index b4f8f4a41a6..a406fcb1e92 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -7,8 +7,6 @@
 
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <asm/processor.h>
-#include <asm/compat.h>
 
 /* x86-64 relocation types */
 #define R_X86_64_NONE		0	/* No reloc */
@@ -39,18 +37,23 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_i387_struct elf_fpregset_t;
 
 /*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
-	((x)->e_machine == EM_X86_64)
-
-/*
  * These are used to set parameters in the core dumps.
  */
 #define ELF_CLASS	ELFCLASS64
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_X86_64
 
+#ifdef __KERNEL__
+#include <asm/processor.h>
+#include <asm/compat.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+	((x)->e_machine == EM_X86_64)
+
+
 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
    contains a pointer to a function which might be registered using `atexit'.
    This provides a mean for the dynamic linker to call DT_FINI functions for
@@ -141,7 +144,6 @@ typedef struct user_i387_struct elf_fpregset_t;
 /* I'm not sure if we can use '-' here */
 #define ELF_PLATFORM  ("x86_64")
 
-#ifdef __KERNEL__
 extern void set_personality_64bit(void);
 #define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
 /*
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index 7b286bd21d1..0b4ffbd1a12 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -11,7 +11,6 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
index 52825ce689f..32ff5d13271 100644
--- a/include/asm-x86_64/floppy.h
+++ b/include/asm-x86_64/floppy.h
@@ -144,13 +144,11 @@ static int vdma_get_dma_residue(unsigned int dummy)
 static int fd_request_irq(void)
 {
 	if(can_use_virtual_dma)
-		return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
-				   "floppy", NULL);
+		return request_irq(FLOPPY_IRQ, floppy_hardint,
+				   IRQF_DISABLED, "floppy", NULL);
 	else
 		return request_irq(FLOPPY_IRQ, floppy_interrupt,
-				   SA_INTERRUPT|SA_SAMPLE_RANDOM,
-				   "floppy", NULL);
-
+				   IRQF_DISABLED, "floppy", NULL);
 }
 
 static unsigned long dma_mem_alloc(unsigned long size)
diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h
deleted file mode 100644
index ada497b0b55..00000000000
--- a/include/asm-x86_64/gart-mapping.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _X8664_GART_MAPPING_H
-#define _X8664_GART_MAPPING_H 1
-
-#include <linux/types.h>
-#include <asm/types.h>
-
-struct device;
-
-extern void*
-gart_alloc_coherent(struct device *dev, size_t size,
-                    dma_addr_t *dma_handle, gfp_t gfp);
-
-extern int
-gart_dma_supported(struct device *hwdev, u64 mask);
-
-#endif /* _X8664_GART_MAPPING_H */
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index 8689951e350..64a65ce2f41 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/config.h>
 #include <linux/threads.h>
 #include <linux/irq.h>
 #include <asm/pda.h>
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index 18ff7ee9e77..b39098408b6 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -55,7 +55,7 @@
 
 extern int is_hpet_enabled(void);
 extern int hpet_rtc_timer_init(void);
-extern int oem_force_hpet_timer(void);
+extern int apic_is_clustered_box(void);
 
 extern int hpet_use_timer;
 
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 0df1715dee7..48a4a5364e8 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -12,12 +12,9 @@
  * <tomsoft@informatik.tu-chemnitz.de>
  *
  * hacked by Andi Kleen for x86-64.
- *
- * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $
  */
 
 #ifndef __ASSEMBLY__
-#include <linux/config.h>
 #include <asm/atomic.h>
 #include <asm/irq.h>
 #include <linux/profile.h>
@@ -127,18 +124,9 @@ asmlinkage void IRQ_NAME(nr); \
 __asm__( \
 "\n.p2align\n" \
 "IRQ" #nr "_interrupt:\n\t" \
-	"push $" #nr "-256 ; " \
+	"push $~(" #nr ") ; " \
 	"jmp common_interrupt");
 
-#if defined(CONFIG_X86_IO_APIC)
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
-	if (IO_APIC_IRQ(i))
-		send_IPI_self(IO_APIC_VECTOR(i));
-}
-#else
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-#endif
-
 #define platform_legacy_irq(irq)	((irq) < 16)
 
 #endif
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index e6b7f2234e4..0190b7c4e31 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_64_IA32_H
 #define _ASM_X86_64_IA32_H
 
-#include <linux/config.h>
 
 #ifdef CONFIG_IA32_EMULATION
 
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index b4f4b172b15..5b52ce50733 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -4,317 +4,15 @@
4/* 4/*
5 * This file contains the system call numbers of the ia32 port, 5 * This file contains the system call numbers of the ia32 port,
6 * this is for the kernel only. 6 * this is for the kernel only.
7 * Only add syscalls here where some part of the kernel needs to know
8 * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK
7 */ 9 */
8 10
9#define __NR_ia32_restart_syscall 0 11#define __NR_ia32_restart_syscall 0
10#define __NR_ia32_exit 1 12#define __NR_ia32_exit 1
11#define __NR_ia32_fork 2
12#define __NR_ia32_read 3 13#define __NR_ia32_read 3
13#define __NR_ia32_write 4 14#define __NR_ia32_write 4
14#define __NR_ia32_open 5 15#define __NR_ia32_sigreturn 119
15#define __NR_ia32_close 6
16#define __NR_ia32_waitpid 7
17#define __NR_ia32_creat 8
18#define __NR_ia32_link 9
19#define __NR_ia32_unlink 10
20#define __NR_ia32_execve 11
21#define __NR_ia32_chdir 12
22#define __NR_ia32_time 13
23#define __NR_ia32_mknod 14
24#define __NR_ia32_chmod 15
25#define __NR_ia32_lchown 16
26#define __NR_ia32_break 17
27#define __NR_ia32_oldstat 18
28#define __NR_ia32_lseek 19
29#define __NR_ia32_getpid 20
30#define __NR_ia32_mount 21
31#define __NR_ia32_umount 22
32#define __NR_ia32_setuid 23
33#define __NR_ia32_getuid 24
34#define __NR_ia32_stime 25
35#define __NR_ia32_ptrace 26
36#define __NR_ia32_alarm 27
37#define __NR_ia32_oldfstat 28
38#define __NR_ia32_pause 29
39#define __NR_ia32_utime 30
40#define __NR_ia32_stty 31
41#define __NR_ia32_gtty 32
42#define __NR_ia32_access 33
43#define __NR_ia32_nice 34
44#define __NR_ia32_ftime 35
45#define __NR_ia32_sync 36
46#define __NR_ia32_kill 37
47#define __NR_ia32_rename 38
48#define __NR_ia32_mkdir 39
49#define __NR_ia32_rmdir 40
50#define __NR_ia32_dup 41
51#define __NR_ia32_pipe 42
52#define __NR_ia32_times 43
53#define __NR_ia32_prof 44
54#define __NR_ia32_brk 45
55#define __NR_ia32_setgid 46
56#define __NR_ia32_getgid 47
57#define __NR_ia32_signal 48
58#define __NR_ia32_geteuid 49
59#define __NR_ia32_getegid 50
60#define __NR_ia32_acct 51
61#define __NR_ia32_umount2 52
62#define __NR_ia32_lock 53
63#define __NR_ia32_ioctl 54
64#define __NR_ia32_fcntl 55
65#define __NR_ia32_mpx 56
66#define __NR_ia32_setpgid 57
67#define __NR_ia32_ulimit 58
68#define __NR_ia32_oldolduname 59
69#define __NR_ia32_umask 60
70#define __NR_ia32_chroot 61
71#define __NR_ia32_ustat 62
72#define __NR_ia32_dup2 63
73#define __NR_ia32_getppid 64
74#define __NR_ia32_getpgrp 65
75#define __NR_ia32_setsid 66
76#define __NR_ia32_sigaction 67
77#define __NR_ia32_sgetmask 68
78#define __NR_ia32_ssetmask 69
79#define __NR_ia32_setreuid 70
80#define __NR_ia32_setregid 71
81#define __NR_ia32_sigsuspend 72
82#define __NR_ia32_sigpending 73
83#define __NR_ia32_sethostname 74
84#define __NR_ia32_setrlimit 75
85#define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */
86#define __NR_ia32_getrusage 77
87#define __NR_ia32_gettimeofday 78
88#define __NR_ia32_settimeofday 79
89#define __NR_ia32_getgroups 80
90#define __NR_ia32_setgroups 81
91#define __NR_ia32_select 82
92#define __NR_ia32_symlink 83
93#define __NR_ia32_oldlstat 84
94#define __NR_ia32_readlink 85
95#define __NR_ia32_uselib 86
96#define __NR_ia32_swapon 87
97#define __NR_ia32_reboot 88
98#define __NR_ia32_readdir 89
99#define __NR_ia32_mmap 90
100#define __NR_ia32_munmap 91
101#define __NR_ia32_truncate 92
102#define __NR_ia32_ftruncate 93
103#define __NR_ia32_fchmod 94
104#define __NR_ia32_fchown 95
105#define __NR_ia32_getpriority 96
106#define __NR_ia32_setpriority 97
107#define __NR_ia32_profil 98
108#define __NR_ia32_statfs 99
109#define __NR_ia32_fstatfs 100
110#define __NR_ia32_ioperm 101
111#define __NR_ia32_socketcall 102
112#define __NR_ia32_syslog 103
113#define __NR_ia32_setitimer 104
114#define __NR_ia32_getitimer 105
115#define __NR_ia32_stat 106
116#define __NR_ia32_lstat 107
117#define __NR_ia32_fstat 108
118#define __NR_ia32_olduname 109
119#define __NR_ia32_iopl 110
120#define __NR_ia32_vhangup 111
121#define __NR_ia32_idle 112
122#define __NR_ia32_vm86old 113
123#define __NR_ia32_wait4 114
124#define __NR_ia32_swapoff 115
125#define __NR_ia32_sysinfo 116
126#define __NR_ia32_ipc 117
127#define __NR_ia32_fsync 118
128#define __NR_ia32_sigreturn 119
129#define __NR_ia32_clone 120
130#define __NR_ia32_setdomainname 121
131#define __NR_ia32_uname 122
132#define __NR_ia32_modify_ldt 123
133#define __NR_ia32_adjtimex 124
134#define __NR_ia32_mprotect 125
135#define __NR_ia32_sigprocmask 126
136#define __NR_ia32_create_module 127
137#define __NR_ia32_init_module 128
138#define __NR_ia32_delete_module 129
139#define __NR_ia32_get_kernel_syms 130
140#define __NR_ia32_quotactl 131
141#define __NR_ia32_getpgid 132
142#define __NR_ia32_fchdir 133
143#define __NR_ia32_bdflush 134
144#define __NR_ia32_sysfs 135
145#define __NR_ia32_personality 136
146#define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */
147#define __NR_ia32_setfsuid 138
148#define __NR_ia32_setfsgid 139
149#define __NR_ia32__llseek 140
150#define __NR_ia32_getdents 141
151#define __NR_ia32__newselect 142
152#define __NR_ia32_flock 143
153#define __NR_ia32_msync 144
154#define __NR_ia32_readv 145
155#define __NR_ia32_writev 146
156#define __NR_ia32_getsid 147
157#define __NR_ia32_fdatasync 148
158#define __NR_ia32__sysctl 149
159#define __NR_ia32_mlock 150
160#define __NR_ia32_munlock 151
161#define __NR_ia32_mlockall 152
162#define __NR_ia32_munlockall 153
163#define __NR_ia32_sched_setparam 154
164#define __NR_ia32_sched_getparam 155
165#define __NR_ia32_sched_setscheduler 156
166#define __NR_ia32_sched_getscheduler 157
167#define __NR_ia32_sched_yield 158
168#define __NR_ia32_sched_get_priority_max 159
169#define __NR_ia32_sched_get_priority_min 160
170#define __NR_ia32_sched_rr_get_interval 161
171#define __NR_ia32_nanosleep 162
172#define __NR_ia32_mremap 163
173#define __NR_ia32_setresuid 164
174#define __NR_ia32_getresuid 165
175#define __NR_ia32_vm86 166
176#define __NR_ia32_query_module 167
177#define __NR_ia32_poll 168
178#define __NR_ia32_nfsservctl 169
179#define __NR_ia32_setresgid 170
180#define __NR_ia32_getresgid 171
181#define __NR_ia32_prctl 172
182#define __NR_ia32_rt_sigreturn 173 16#define __NR_ia32_rt_sigreturn 173
183#define __NR_ia32_rt_sigaction 174
184#define __NR_ia32_rt_sigprocmask 175
185#define __NR_ia32_rt_sigpending 176
186#define __NR_ia32_rt_sigtimedwait 177
187#define __NR_ia32_rt_sigqueueinfo 178
188#define __NR_ia32_rt_sigsuspend 179
189#define __NR_ia32_pread 180
190#define __NR_ia32_pwrite 181
191#define __NR_ia32_chown 182
192#define __NR_ia32_getcwd 183
193#define __NR_ia32_capget 184
194#define __NR_ia32_capset 185
195#define __NR_ia32_sigaltstack 186
196#define __NR_ia32_sendfile 187
197#define __NR_ia32_getpmsg 188 /* some people actually want streams */
198#define __NR_ia32_putpmsg 189 /* some people actually want streams */
199#define __NR_ia32_vfork 190
200#define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */
201#define __NR_ia32_mmap2 192
202#define __NR_ia32_truncate64 193
203#define __NR_ia32_ftruncate64 194
204#define __NR_ia32_stat64 195
205#define __NR_ia32_lstat64 196
206#define __NR_ia32_fstat64 197
207#define __NR_ia32_lchown32 198
208#define __NR_ia32_getuid32 199
209#define __NR_ia32_getgid32 200
210#define __NR_ia32_geteuid32 201
211#define __NR_ia32_getegid32 202
212#define __NR_ia32_setreuid32 203
213#define __NR_ia32_setregid32 204
214#define __NR_ia32_getgroups32 205
215#define __NR_ia32_setgroups32 206
216#define __NR_ia32_fchown32 207
217#define __NR_ia32_setresuid32 208
218#define __NR_ia32_getresuid32 209
219#define __NR_ia32_setresgid32 210
220#define __NR_ia32_getresgid32 211
221#define __NR_ia32_chown32 212
222#define __NR_ia32_setuid32 213
223#define __NR_ia32_setgid32 214
224#define __NR_ia32_setfsuid32 215
225#define __NR_ia32_setfsgid32 216
226#define __NR_ia32_pivot_root 217
227#define __NR_ia32_mincore 218
228#define __NR_ia32_madvise 219
229#define __NR_ia32_madvise1 219 /* delete when C lib stub is removed */
230#define __NR_ia32_getdents64 220
231#define __NR_ia32_fcntl64 221
232#define __NR_ia32_tuxcall 222
233#define __NR_ia32_security 223
234#define __NR_ia32_gettid 224
235#define __NR_ia32_readahead 225
236#define __NR_ia32_setxattr 226
237#define __NR_ia32_lsetxattr 227
238#define __NR_ia32_fsetxattr 228
239#define __NR_ia32_getxattr 229
240#define __NR_ia32_lgetxattr 230
241#define __NR_ia32_fgetxattr 231
242#define __NR_ia32_listxattr 232
243#define __NR_ia32_llistxattr 233
244#define __NR_ia32_flistxattr 234
245#define __NR_ia32_removexattr 235
246#define __NR_ia32_lremovexattr 236
247#define __NR_ia32_fremovexattr 237
248#define __NR_ia32_tkill 238
249#define __NR_ia32_sendfile64 239
250#define __NR_ia32_futex 240
251#define __NR_ia32_sched_setaffinity 241
252#define __NR_ia32_sched_getaffinity 242
253#define __NR_ia32_set_thread_area 243
254#define __NR_ia32_get_thread_area 244
255#define __NR_ia32_io_setup 245
256#define __NR_ia32_io_destroy 246
257#define __NR_ia32_io_getevents 247
258#define __NR_ia32_io_submit 248
259#define __NR_ia32_io_cancel 249
260#define __NR_ia32_exit_group 252
261#define __NR_ia32_lookup_dcookie 253
262#define __NR_ia32_sys_epoll_create 254
263#define __NR_ia32_sys_epoll_ctl 255
264#define __NR_ia32_sys_epoll_wait 256
265#define __NR_ia32_remap_file_pages 257
266#define __NR_ia32_set_tid_address 258
267#define __NR_ia32_timer_create 259
268#define __NR_ia32_timer_settime (__NR_ia32_timer_create+1)
269#define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2)
270#define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3)
271#define __NR_ia32_timer_delete (__NR_ia32_timer_create+4)
272#define __NR_ia32_clock_settime (__NR_ia32_timer_create+5)
273#define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6)
274#define __NR_ia32_clock_getres (__NR_ia32_timer_create+7)
275#define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8)
276#define __NR_ia32_statfs64 268
277#define __NR_ia32_fstatfs64 269
278#define __NR_ia32_tgkill 270
279#define __NR_ia32_utimes 271
280#define __NR_ia32_fadvise64_64 272
281#define __NR_ia32_vserver 273
282#define __NR_ia32_mbind 274
283#define __NR_ia32_get_mempolicy 275
284#define __NR_ia32_set_mempolicy 276
285#define __NR_ia32_mq_open 277
286#define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1)
287#define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2)
288#define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3)
289#define __NR_ia32_mq_notify (__NR_ia32_mq_open+4)
290#define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5)
291#define __NR_ia32_kexec 283
292#define __NR_ia32_waitid 284
293/* #define __NR_sys_setaltroot 285 */
294#define __NR_ia32_add_key 286
295#define __NR_ia32_request_key 287
296#define __NR_ia32_keyctl 288
297#define __NR_ia32_ioprio_set 289
298#define __NR_ia32_ioprio_get 290
299#define __NR_ia32_inotify_init 291
300#define __NR_ia32_inotify_add_watch 292
301#define __NR_ia32_inotify_rm_watch 293
302#define __NR_ia32_migrate_pages 294
303#define __NR_ia32_openat 295
304#define __NR_ia32_mkdirat 296
305#define __NR_ia32_mknodat 297
306#define __NR_ia32_fchownat 298
307#define __NR_ia32_futimesat 299
308#define __NR_ia32_fstatat64 300
309#define __NR_ia32_unlinkat 301
310#define __NR_ia32_renameat 302
311#define __NR_ia32_linkat 303
312#define __NR_ia32_symlinkat 304
313#define __NR_ia32_readlinkat 305
314#define __NR_ia32_fchmodat 306
315#define __NR_ia32_faccessat 307
316#define __NR_ia32_pselect6 308
317#define __NR_ia32_ppoll 309
318#define __NR_ia32_unshare 310
319 17
320#endif /* _ASM_X86_64_IA32_UNISTD_H_ */ 18#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
new file mode 100644
index 00000000000..59c39643156
--- /dev/null
+++ b/include/asm-x86_64/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
+#ifndef X86_64_INTEL_ARCH_PERFMON_H
+#define X86_64_INTEL_ARCH_PERFMON_H 1
+
+#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
+#define MSR_ARCH_PERFMON_PERFCTR1		0xc2
+
+#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
+#define MSR_ARCH_PERFMON_EVENTSEL1		0x187
+
+#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1 << 22)
+#define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
+#define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
+#define ARCH_PERFMON_EVENTSEL_USR	(1 << 16)
+
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	(1 << 0)
+
+#endif	/* X86_64_INTEL_ARCH_PERFMON_H */
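
[Note: the following sketch is not part of the patch. It shows, under simplifying assumptions, how the MSR and event-select bits defined above can be combined to program architectural perfmon counter 0 for unhalted core cycles, loosely modeled on NMI-watchdog style setup; capability checks and overflow handling are omitted, and the function name is illustrative.]

#include <asm/msr.h>
#include <asm/intel_arch_perfmon.h>

static void example_start_cycle_counter(void)
{
	unsigned int evntsel;

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
		| ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;

	wrmsr(MSR_ARCH_PERFMON_PERFCTR0, 0, 0);		/* clear the counter */
	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);	/* program, not yet enabled */
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);	/* start counting */
}
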
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index a05da8a50bf..70e91fe7634 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_IO_H
 #define _ASM_IO_H
 
-#include <linux/config.h>
 
 /*
  * This file contains the definitions for the x86 IO instructions
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index 52484e82c64..fb7a0909a17 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_IO_APIC_H
 #define __ASM_IO_APIC_H
 
-#include <linux/config.h>
 #include <asm/types.h>
 #include <asm/mpspec.h>
 
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
new file mode 100644
index 00000000000..cce6937e87c
--- /dev/null
+++ b/include/asm-x86_64/irqflags.h
@@ -0,0 +1,141 @@
1/*
2 * include/asm-x86_64/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14/*
15 * Interrupt control:
16 */
17
18static inline unsigned long __raw_local_save_flags(void)
19{
20 unsigned long flags;
21
22 __asm__ __volatile__(
23 "# __raw_save_flags\n\t"
24 "pushfq ; popq %q0"
25 : "=g" (flags)
26 : /* no input */
27 : "memory"
28 );
29
30 return flags;
31}
32
33#define raw_local_save_flags(flags) \
34 do { (flags) = __raw_local_save_flags(); } while (0)
35
36static inline void raw_local_irq_restore(unsigned long flags)
37{
38 __asm__ __volatile__(
39 "pushq %0 ; popfq"
40 : /* no output */
41 :"g" (flags)
42 :"memory", "cc"
43 );
44}
45
46#ifdef CONFIG_X86_VSMP
47
48/*
49 * Interrupt control for the VSMP architecture:
50 */
51
52static inline void raw_local_irq_disable(void)
53{
54 unsigned long flags = __raw_local_save_flags();
55
56 raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
57}
58
59static inline void raw_local_irq_enable(void)
60{
61 unsigned long flags = __raw_local_save_flags();
62
63 raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
64}
65
66static inline int raw_irqs_disabled_flags(unsigned long flags)
67{
68 return !(flags & (1<<9)) || (flags & (1 << 18));
69}
70
71#else /* CONFIG_X86_VSMP */
72
73static inline void raw_local_irq_disable(void)
74{
75 __asm__ __volatile__("cli" : : : "memory");
76}
77
78static inline void raw_local_irq_enable(void)
79{
80 __asm__ __volatile__("sti" : : : "memory");
81}
82
83static inline int raw_irqs_disabled_flags(unsigned long flags)
84{
85 return !(flags & (1 << 9));
86}
87
88#endif
89
90/*
91 * For spinlocks, etc.:
92 */
93
94static inline unsigned long __raw_local_irq_save(void)
95{
96 unsigned long flags = __raw_local_save_flags();
97
98 raw_local_irq_disable();
99
100 return flags;
101}
102
103#define raw_local_irq_save(flags) \
104 do { (flags) = __raw_local_irq_save(); } while (0)
105
106static inline int raw_irqs_disabled(void)
107{
108 unsigned long flags = __raw_local_save_flags();
109
110 return raw_irqs_disabled_flags(flags);
111}
112
113/*
114 * Used in the idle loop; sti takes one instruction cycle
115 * to complete:
116 */
117static inline void raw_safe_halt(void)
118{
119 __asm__ __volatile__("sti; hlt" : : : "memory");
120}
121
122/*
123 * Used when interrupts are already enabled or to
124 * shutdown the processor:
125 */
126static inline void halt(void)
127{
128 __asm__ __volatile__("hlt": : :"memory");
129}
130
131#else /* __ASSEMBLY__: */
132# ifdef CONFIG_TRACE_IRQFLAGS
133# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
134# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
135# else
136# define TRACE_IRQS_ON
137# define TRACE_IRQS_OFF
138# endif
139#endif
140
141#endif
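
The raw_* primitives above are the arch back-end that linux/irqflags.h wraps into local_irq_save(), local_irq_restore() and friends (and, under CONFIG_TRACE_IRQFLAGS, instruments with the TRACE_IRQS_* hooks). A minimal kernel-context sketch of a direct caller, not part of the patch, assuming the non-VSMP flavour:

/* Hedged sketch: assumes a kernel build environment. */
#include <asm/irqflags.h>

static unsigned long event_count;

static void event_bump(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* pushfq/popq saves RFLAGS, then cli */
	event_count++;			/* cannot be interrupted locally here */
	raw_local_irq_restore(flags);	/* pushq/popfq puts IF back exactly as found */
}
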
diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h
new file mode 100644
index 00000000000..699dd6961ed
--- /dev/null
+++ b/include/asm-x86_64/k8.h
@@ -0,0 +1,14 @@
1#ifndef _ASM_K8_H
2#define _ASM_K8_H 1
3
4#include <linux/pci.h>
5
6extern struct pci_device_id k8_nb_ids[];
7
8extern int early_is_k8_nb(u32 value);
9extern struct pci_dev **k8_northbridges;
10extern int num_k8_northbridges;
11extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void);
13
14#endif
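
A rough sketch (not part of the patch; error handling trimmed) of how a consumer such as the GART or Calgary IOMMU code is expected to use these helpers, caching the northbridge devices once and then walking them:

#include <linux/errno.h>
#include <asm/k8.h>

static int __init walk_northbridges(void)
{
	int i;

	if (cache_k8_northbridges() < 0)
		return -ENODEV;			/* no K8 northbridges present */

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *nb = k8_northbridges[i];

		printk(KERN_INFO "K8 NB %d: %s\n", i, pci_name(nb));
	}

	k8_flush_garts();			/* e.g. after rewriting GART entries */
	return 0;
}
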
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index cf795631d9b..2b0c088e295 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -15,6 +15,8 @@ struct die_args {
15 15
16extern int register_die_notifier(struct notifier_block *); 16extern int register_die_notifier(struct notifier_block *);
17extern int unregister_die_notifier(struct notifier_block *); 17extern int unregister_die_notifier(struct notifier_block *);
18extern int register_page_fault_notifier(struct notifier_block *);
19extern int unregister_page_fault_notifier(struct notifier_block *);
18extern struct atomic_notifier_head die_chain; 20extern struct atomic_notifier_head die_chain;
19 21
20/* Grossly misnamed. */ 22/* Grossly misnamed. */
@@ -47,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str,
47 return atomic_notifier_call_chain(&die_chain, val, &args); 49 return atomic_notifier_call_chain(&die_chain, val, &args);
48} 50}
49 51
50extern int printk_address(unsigned long address); 52extern void printk_address(unsigned long address);
51extern void die(const char *,struct pt_regs *,long); 53extern void die(const char *,struct pt_regs *,long);
52extern void __die(const char *,struct pt_regs *,long); 54extern void __die(const char *,struct pt_regs *,long);
53extern void show_registers(struct pt_regs *regs); 55extern void show_registers(struct pt_regs *regs);
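
The two new prototypes give clients such as kprobes a dedicated notifier chain for fault events instead of funnelling everything through die_chain. A hedged sketch of a client (not part of the patch; it assumes DIE_PAGE_FAULT is the event value delivered on this chain, matching the corresponding fault-handler change):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

static int my_fault_event(struct notifier_block *self,
			  unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "fault at rip %lx, trapno %d\n",
		       args->regs->rip, args->trapnr);
	return NOTIFY_DONE;
}

static struct notifier_block my_fault_nb = {
	.notifier_call = my_fault_event,
};

static int __init my_fault_init(void)
{
	return register_page_fault_notifier(&my_fault_nb);
}
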
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 98a1e95ddb9..cf5317898fb 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -43,9 +43,11 @@ typedef u8 kprobe_opcode_t;
43 43
44#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 44#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
45#define ARCH_SUPPORTS_KRETPROBES 45#define ARCH_SUPPORTS_KRETPROBES
46#define ARCH_INACTIVE_KPROBE_COUNT 1
46 47
47void kretprobe_trampoline(void); 48void kretprobe_trampoline(void);
48extern void arch_remove_kprobe(struct kprobe *p); 49extern void arch_remove_kprobe(struct kprobe *p);
50#define flush_insn_slot(p) do { } while (0)
49 51
50/* Architecture specific copy of original instruction*/ 52/* Architecture specific copy of original instruction*/
51struct arch_specific_insn { 53struct arch_specific_insn {
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index cd17945bf21..e769e620022 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v)
59 * This could be done better if we moved the per cpu data directly 59 * This could be done better if we moved the per cpu data directly
60 * after GS. 60 * after GS.
61 */ 61 */
62#define cpu_local_read(v) local_read(&__get_cpu_var(v)) 62
63#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) 63/* Need to disable preemption for the cpu local counters otherwise we could
64#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) 64 still access a variable of a previous CPU in a non atomic way. */
65#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) 65#define cpu_local_wrap_v(v) \
66#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) 66 ({ local_t res__; \
67#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) 67 preempt_disable(); \
68 res__ = (v); \
69 preempt_enable(); \
70 res__; })
71#define cpu_local_wrap(v) \
72 ({ preempt_disable(); \
73 v; \
74 preempt_enable(); }) \
75
76#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
77#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
78#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
79#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
80#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
81#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
68 82
69#define __cpu_local_inc(v) cpu_local_inc(v) 83#define __cpu_local_inc(v) cpu_local_inc(v)
70#define __cpu_local_dec(v) cpu_local_dec(v) 84#define __cpu_local_dec(v) cpu_local_dec(v)
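
The point of the wrappers is that the per-CPU address lookup and the local_* operation now happen inside a single preempt_disable()/preempt_enable() pair, so a preemptible caller can no longer compute the pointer on CPU A and apply the update on CPU B. A short usage sketch (not part of the patch):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, pkt_count);

void note_packet(void)
{
	/* expands to: preempt_disable(); local_inc(&__get_cpu_var(pkt_count)); preempt_enable(); */
	cpu_local_inc(pkt_count);
}

void note_dropped(int n)
{
	cpu_local_sub(n, pkt_count);
}
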
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 7229785094e..d13687dfd69 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -67,13 +67,22 @@ struct mce_log {
67/* Software defined banks */ 67/* Software defined banks */
68#define MCE_EXTENDED_BANK 128 68#define MCE_EXTENDED_BANK 128
69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
70#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */ 70
71#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4 71#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
72#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9)
73#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9)
74#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9)
75#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9)
76#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9)
77#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
78#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
72 79
73#ifdef __KERNEL__ 80#ifdef __KERNEL__
74#include <asm/atomic.h> 81#include <asm/atomic.h>
75 82
76void mce_log(struct mce *m); 83void mce_log(struct mce *m);
84DECLARE_PER_CPU(struct sys_device, device_mce);
85
77#ifdef CONFIG_X86_MCE_INTEL 86#ifdef CONFIG_X86_MCE_INTEL
78void mce_intel_feature_init(struct cpuinfo_x86 *c); 87void mce_intel_feature_init(struct cpuinfo_x86 *c);
79#else 88#else
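
Each K8 threshold bank now reserves nine consecutive software-defined bank slots above the extended-bank base, one per threshold block, so the DRAM ECC threshold counter (bank 4, block 0) is reported as software bank 165. A stand-alone arithmetic check (plain user-space C, not part of the patch):

#include <stdio.h>

#define MCE_EXTENDED_BANK	128
#define K8_MCE_THRESHOLD_BASE	(MCE_EXTENDED_BANK + 1)	/* copied from above */

int main(void)
{
	int bank = 4, block = 0;		/* DRAM ECC lives in bank 4 */

	printf("software bank = %d\n",
	       K8_MCE_THRESHOLD_BASE + bank * 9 + block);	/* prints 165 */
	return 0;
}
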
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index 19f0c83d079..af03b9f852d 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -1,7 +1,6 @@
1#ifndef __X86_64_MMU_CONTEXT_H 1#ifndef __X86_64_MMU_CONTEXT_H
2#define __X86_64_MMU_CONTEXT_H 2#define __X86_64_MMU_CONTEXT_H
3 3
4#include <linux/config.h>
5#include <asm/desc.h> 4#include <asm/desc.h>
6#include <asm/atomic.h> 5#include <asm/atomic.h>
7#include <asm/pgalloc.h> 6#include <asm/pgalloc.h>
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 6944e7122df..c38ebdf6f42 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -4,7 +4,6 @@
4#ifndef _ASM_X86_64_MMZONE_H 4#ifndef _ASM_X86_64_MMZONE_H
5#define _ASM_X86_64_MMZONE_H 1 5#define _ASM_X86_64_MMZONE_H 1
6 6
7#include <linux/config.h>
8 7
9#ifdef CONFIG_NUMA 8#ifdef CONFIG_NUMA
10 9
@@ -43,7 +42,6 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
43 42
44#ifdef CONFIG_DISCONTIGMEM 43#ifdef CONFIG_DISCONTIGMEM
45#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) 44#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
46#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
47 45
48extern int pfn_valid(unsigned long pfn); 46extern int pfn_valid(unsigned long pfn);
49#endif 47#endif
diff --git a/include/asm-x86_64/msi.h b/include/asm-x86_64/msi.h
index 356e0e82f50..3ad2346624b 100644
--- a/include/asm-x86_64/msi.h
+++ b/include/asm-x86_64/msi.h
@@ -10,7 +10,15 @@
10#include <asm/mach_apic.h> 10#include <asm/mach_apic.h>
11#include <asm/smp.h> 11#include <asm/smp.h>
12 12
13#define LAST_DEVICE_VECTOR 232 13#define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1)
14#define MSI_TARGET_CPU_SHIFT 12 14#define MSI_TARGET_CPU_SHIFT 12
15 15
16extern struct msi_ops msi_apic_ops;
17
18static inline int msi_arch_init(void)
19{
20 msi_register(&msi_apic_ops);
21 return 0;
22}
23
16#endif /* ASM_MSI_H */ 24#endif /* ASM_MSI_H */
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
index 66ac1c0f27e..d6135b2549b 100644
--- a/include/asm-x86_64/mtrr.h
+++ b/include/asm-x86_64/mtrr.h
@@ -23,9 +23,7 @@
23#ifndef _LINUX_MTRR_H 23#ifndef _LINUX_MTRR_H
24#define _LINUX_MTRR_H 24#define _LINUX_MTRR_H
25 25
26#include <linux/config.h>
27#include <linux/ioctl.h> 26#include <linux/ioctl.h>
28#include <linux/compat.h>
29 27
30#define MTRR_IOCTL_BASE 'M' 28#define MTRR_IOCTL_BASE 'M'
31 29
@@ -102,11 +100,10 @@ static __inline__ int mtrr_del_page (int reg, unsigned long base,
102 return -ENODEV; 100 return -ENODEV;
103} 101}
104 102
105# endif 103#endif /* CONFIG_MTRR */
106
107#endif
108 104
109#ifdef CONFIG_COMPAT 105#ifdef CONFIG_COMPAT
106#include <linux/compat.h>
110 107
111struct mtrr_sentry32 108struct mtrr_sentry32
112{ 109{
@@ -138,4 +135,6 @@ struct mtrr_gentry32
138 135
139#endif /* CONFIG_COMPAT */ 136#endif /* CONFIG_COMPAT */
140 137
138#endif /* __KERNEL__ */
139
141#endif /* _LINUX_MTRR_H */ 140#endif /* _LINUX_MTRR_H */
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
index 11fbee2bd6c..06fab6de2a8 100644
--- a/include/asm-x86_64/mutex.h
+++ b/include/asm-x86_64/mutex.h
@@ -24,7 +24,7 @@ do { \
24 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 24 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
25 \ 25 \
26 __asm__ __volatile__( \ 26 __asm__ __volatile__( \
27 LOCK " decl (%%rdi) \n" \ 27 LOCK_PREFIX " decl (%%rdi) \n" \
28 " js 2f \n" \ 28 " js 2f \n" \
29 "1: \n" \ 29 "1: \n" \
30 \ 30 \
@@ -74,7 +74,7 @@ do { \
74 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 74 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
75 \ 75 \
76 __asm__ __volatile__( \ 76 __asm__ __volatile__( \
77 LOCK " incl (%%rdi) \n" \ 77 LOCK_PREFIX " incl (%%rdi) \n" \
78 " jle 2f \n" \ 78 " jle 2f \n" \
79 "1: \n" \ 79 "1: \n" \
80 \ 80 \
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index d3abfc6a8fd..efb45c894d7 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -5,26 +5,27 @@
5#define ASM_NMI_H 5#define ASM_NMI_H
6 6
7#include <linux/pm.h> 7#include <linux/pm.h>
8#include <asm/io.h>
8 9
9struct pt_regs; 10struct pt_regs;
10 11
11typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); 12typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
12 13
13/** 14/**
14 * set_nmi_callback 15 * set_nmi_callback
15 * 16 *
16 * Set a handler for an NMI. Only one handler may be 17 * Set a handler for an NMI. Only one handler may be
17 * set. Return 1 if the NMI was handled. 18 * set. Return 1 if the NMI was handled.
18 */ 19 */
19void set_nmi_callback(nmi_callback_t callback); 20void set_nmi_callback(nmi_callback_t callback);
20 21
21/** 22/**
22 * unset_nmi_callback 23 * unset_nmi_callback
23 * 24 *
24 * Remove the handler previously set. 25 * Remove the handler previously set.
25 */ 26 */
26void unset_nmi_callback(void); 27void unset_nmi_callback(void);
27 28
28#ifdef CONFIG_PM 29#ifdef CONFIG_PM
29 30
30/** Replace the PM callback routine for NMI. */ 31/** Replace the PM callback routine for NMI. */
@@ -56,4 +57,21 @@ extern int unknown_nmi_panic;
56 57
57extern int check_nmi_watchdog(void); 58extern int check_nmi_watchdog(void);
58 59
60extern void setup_apic_nmi_watchdog (void);
61extern int reserve_lapic_nmi(void);
62extern void release_lapic_nmi(void);
63extern void disable_timer_nmi_watchdog(void);
64extern void enable_timer_nmi_watchdog(void);
65extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
66
67extern void nmi_watchdog_default(void);
68extern int setup_nmi_watchdog(char *);
69
70extern unsigned int nmi_watchdog;
71#define NMI_DEFAULT -1
72#define NMI_NONE 0
73#define NMI_IO_APIC 1
74#define NMI_LOCAL_APIC 2
75#define NMI_INVALID 3
76
59#endif /* ASM_NMI_H */ 77#endif /* ASM_NMI_H */
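
These additions expose the NMI-watchdog bookkeeping that perfctr users (e.g. oprofile) need when they borrow the local APIC performance counter. The claim/release order below is a loose, hedged sketch only, not part of the patch and not copied from any in-tree driver:

#include <linux/errno.h>
#include <asm/nmi.h>

static int my_nmi_handler(struct pt_regs *regs, int cpu)
{
	/* sample regs->rip, re-arm the counter, ... */
	return 1;				/* this NMI was ours */
}

static int my_claim_nmi(void)
{
	if (nmi_watchdog == NMI_LOCAL_APIC && reserve_lapic_nmi() < 0)
		return -EBUSY;			/* the watchdog keeps the perfctr */

	disable_timer_nmi_watchdog();
	set_nmi_callback(my_nmi_handler);
	return 0;
}

static void my_release_nmi(void)
{
	unset_nmi_callback();
	enable_timer_nmi_watchdog();
	if (nmi_watchdog == NMI_LOCAL_APIC)
		release_lapic_nmi();
}
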
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index 1cc92fe0250..933ff11ece1 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -8,7 +8,6 @@ struct bootnode {
8}; 8};
9 9
10extern int compute_hash_shift(struct bootnode *nodes, int numnodes); 10extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
11extern int pxm_to_node(int nid);
12 11
13#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) 12#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
14 13
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 408185bac35..10f346165ca 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -1,7 +1,6 @@
1#ifndef _X86_64_PAGE_H 1#ifndef _X86_64_PAGE_H
2#define _X86_64_PAGE_H 2#define _X86_64_PAGE_H
3 3
4#include <linux/config.h>
5 4
6/* PAGE_SHIFT determines the page size */ 5/* PAGE_SHIFT determines the page size */
7#define PAGE_SHIFT 12 6#define PAGE_SHIFT 12
@@ -20,7 +19,7 @@
20#define EXCEPTION_STACK_ORDER 0 19#define EXCEPTION_STACK_ORDER 0
21#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) 20#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
22 21
23#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER 22#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
24#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 23#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
25 24
26#define IRQSTACK_ORDER 2 25#define IRQSTACK_ORDER 2
@@ -136,9 +135,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
136 135
137#define __HAVE_ARCH_GATE_AREA 1 136#define __HAVE_ARCH_GATE_AREA 1
138 137
139#endif /* __KERNEL__ */
140
141#include <asm-generic/memory_model.h> 138#include <asm-generic/memory_model.h>
142#include <asm-generic/page.h> 139#include <asm-generic/page.h>
143 140
141#endif /* __KERNEL__ */
142
144#endif /* _X86_64_PAGE_H */ 143#endif /* _X86_64_PAGE_H */
diff --git a/include/asm-x86_64/param.h b/include/asm-x86_64/param.h
index 5956b23b57c..a728786c3c7 100644
--- a/include/asm-x86_64/param.h
+++ b/include/asm-x86_64/param.h
@@ -2,7 +2,6 @@
2#define _ASMx86_64_PARAM_H 2#define _ASMx86_64_PARAM_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5# include <linux/config.h>
6# define HZ CONFIG_HZ /* Internal kernel timer frequency */ 5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
7# define USER_HZ 100 /* .. some user interfaces are in "ticks */ 6# define USER_HZ 100 /* .. some user interfaces are in "ticks */
8#define CLOCKS_PER_SEC (USER_HZ) /* like times() */ 7#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index 8a05af264d1..49c5e928059 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -1,7 +1,6 @@
1#ifndef __x8664_PCI_H 1#ifndef __x8664_PCI_H
2#define __x8664_PCI_H 2#define __x8664_PCI_H
3 3
4#include <linux/config.h>
5#include <asm/io.h> 4#include <asm/io.h>
6 5
7#ifdef __KERNEL__ 6#ifdef __KERNEL__
@@ -40,8 +39,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
40#include <asm/scatterlist.h> 39#include <asm/scatterlist.h>
41#include <linux/string.h> 40#include <linux/string.h>
42#include <asm/page.h> 41#include <asm/page.h>
43#include <linux/dma-mapping.h> /* for have_iommu */
44 42
43extern void pci_iommu_alloc(void);
45extern int iommu_setup(char *opt); 44extern int iommu_setup(char *opt);
46 45
47/* The PCI address space does equal the physical memory 46/* The PCI address space does equal the physical memory
@@ -53,7 +52,7 @@ extern int iommu_setup(char *opt);
53 */ 52 */
54#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) 53#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
55 54
56#ifdef CONFIG_GART_IOMMU 55#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
57 56
58/* 57/*
59 * x86-64 always supports DAC, but sometimes it is useful to force 58 * x86-64 always supports DAC, but sometimes it is useful to force
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 7f33aaf9f7b..08dd9f9dda8 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -14,6 +14,8 @@
14#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) 14#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
15#define __my_cpu_offset() read_pda(data_offset) 15#define __my_cpu_offset() read_pda(data_offset)
16 16
17#define per_cpu_offset(x) (__per_cpu_offset(x))
18
17/* Separate out the type, so (int[3], foo) works. */ 19/* Separate out the type, so (int[3], foo) works. */
18#define DEFINE_PER_CPU(type, name) \ 20#define DEFINE_PER_CPU(type, name) \
19 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name 21 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
@@ -21,6 +23,7 @@
21/* var is in discarded region: offset to particular copy we want */ 23/* var is in discarded region: offset to particular copy we want */
22#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) 24#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
23#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) 25#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
26#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
24 27
25/* A macro to avoid #include hell... */ 28/* A macro to avoid #include hell... */
26#define percpu_modcopy(pcpudst, src, size) \ 29#define percpu_modcopy(pcpudst, src, size) \
@@ -40,6 +43,7 @@ extern void setup_per_cpu_areas(void);
40 43
41#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) 44#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
42#define __get_cpu_var(var) per_cpu__##var 45#define __get_cpu_var(var) per_cpu__##var
46#define __raw_get_cpu_var(var) per_cpu__##var
43 47
44#endif /* SMP */ 48#endif /* SMP */
45 49
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 31e83c3bd02..a31ab4e68a9 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -337,14 +337,8 @@ static inline int pmd_large(pmd_t pte) {
337/* to find an entry in a page-table-directory. */ 337/* to find an entry in a page-table-directory. */
338#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 338#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
339#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address)) 339#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
340#define pud_offset_k(pgd, addr) pud_offset(pgd, addr)
341#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) 340#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
342 341
343static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
344{
345 return pud + pud_index(address);
346}
347
348/* PMD - Level 2 access */ 342/* PMD - Level 2 access */
349#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) 343#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
350#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) 344#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 37a3ec433ee..de9c3147ee4 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -12,7 +12,6 @@
12#include <asm/types.h> 12#include <asm/types.h>
13#include <asm/sigcontext.h> 13#include <asm/sigcontext.h>
14#include <asm/cpufeature.h> 14#include <asm/cpufeature.h>
15#include <linux/config.h>
16#include <linux/threads.h> 15#include <linux/threads.h>
17#include <asm/msr.h> 16#include <asm/msr.h>
18#include <asm/current.h> 17#include <asm/current.h>
@@ -70,7 +69,11 @@ struct cpuinfo_x86 {
70 cpumask_t llc_shared_map; /* cpus sharing the last level cache */ 69 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
71#endif 70#endif
72 __u8 apicid; 71 __u8 apicid;
72#ifdef CONFIG_SMP
73 __u8 booted_cores; /* number of cores as seen by OS */ 73 __u8 booted_cores; /* number of cores as seen by OS */
74 __u8 phys_proc_id; /* Physical Processor id. */
75 __u8 cpu_core_id; /* Core id. */
76#endif
74} ____cacheline_aligned; 77} ____cacheline_aligned;
75 78
76#define X86_VENDOR_INTEL 0 79#define X86_VENDOR_INTEL 0
@@ -97,6 +100,7 @@ extern char ignore_irq13;
97extern void identify_cpu(struct cpuinfo_x86 *); 100extern void identify_cpu(struct cpuinfo_x86 *);
98extern void print_cpu_info(struct cpuinfo_x86 *); 101extern void print_cpu_info(struct cpuinfo_x86 *);
99extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 102extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
103extern unsigned short num_cache_leaves;
100 104
101/* 105/*
102 * EFLAGS bits 106 * EFLAGS bits
@@ -228,8 +232,14 @@ struct tss_struct {
228 unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; 232 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
229} __attribute__((packed)) ____cacheline_aligned; 233} __attribute__((packed)) ____cacheline_aligned;
230 234
235
231extern struct cpuinfo_x86 boot_cpu_data; 236extern struct cpuinfo_x86 boot_cpu_data;
232DECLARE_PER_CPU(struct tss_struct,init_tss); 237DECLARE_PER_CPU(struct tss_struct,init_tss);
238/* Save the original ist values for checking stack pointers during debugging */
239struct orig_ist {
240 unsigned long ist[7];
241};
242DECLARE_PER_CPU(struct orig_ist, orig_ist);
233 243
234#ifdef CONFIG_X86_VSMP 244#ifdef CONFIG_X86_VSMP
235#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) 245#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 8abf2a43c94..038fe1f47e6 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -37,7 +37,6 @@ extern void ia32_sysenter_target(void);
37 37
38extern void config_acpi_tables(void); 38extern void config_acpi_tables(void);
39extern void ia32_syscall(void); 39extern void ia32_syscall(void);
40extern void iommu_hole_init(void);
41 40
42extern int pmtimer_mark_offset(void); 41extern int pmtimer_mark_offset(void);
43extern void pmtimer_resume(void); 42extern void pmtimer_resume(void);
@@ -75,7 +74,7 @@ extern void main_timer_handler(struct pt_regs *regs);
75 74
76extern unsigned long end_pfn_map; 75extern unsigned long end_pfn_map;
77 76
78extern void show_trace(unsigned long * rsp); 77extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp);
79extern void show_registers(struct pt_regs *regs); 78extern void show_registers(struct pt_regs *regs);
80 79
81extern void exception_table_check(void); 80extern void exception_table_check(void);
@@ -101,13 +100,9 @@ extern int unsynchronized_tsc(void);
101 100
102extern void select_idle_routine(const struct cpuinfo_x86 *c); 101extern void select_idle_routine(const struct cpuinfo_x86 *c);
103 102
104extern void gart_parse_options(char *);
105extern void __init no_iommu_init(void);
106
107extern unsigned long table_start, table_end; 103extern unsigned long table_start, table_end;
108 104
109extern int exception_trace; 105extern int exception_trace;
110extern int force_iommu, no_iommu;
111extern int using_apic_timer; 106extern int using_apic_timer;
112extern int disable_apic; 107extern int disable_apic;
113extern unsigned cpu_khz; 108extern unsigned cpu_khz;
@@ -116,7 +111,13 @@ extern int skip_ioapic_setup;
116extern int acpi_ht; 111extern int acpi_ht;
117extern int acpi_disabled; 112extern int acpi_disabled;
118 113
119#ifdef CONFIG_GART_IOMMU 114extern void no_iommu_init(void);
115extern int force_iommu, no_iommu;
116extern int iommu_detected;
117#ifdef CONFIG_IOMMU
118extern void gart_iommu_init(void);
119extern void gart_parse_options(char *);
120extern void iommu_hole_init(void);
120extern int fallback_aper_order; 121extern int fallback_aper_order;
121extern int fallback_aper_force; 122extern int fallback_aper_force;
122extern int iommu_aperture; 123extern int iommu_aperture;
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
index 9942cc39306..dea0e945926 100644
--- a/include/asm-x86_64/rwlock.h
+++ b/include/asm-x86_64/rwlock.h
@@ -24,7 +24,7 @@
24#define RW_LOCK_BIAS_STR "0x01000000" 24#define RW_LOCK_BIAS_STR "0x01000000"
25 25
26#define __build_read_lock_ptr(rw, helper) \ 26#define __build_read_lock_ptr(rw, helper) \
27 asm volatile(LOCK "subl $1,(%0)\n\t" \ 27 asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \
28 "js 2f\n" \ 28 "js 2f\n" \
29 "1:\n" \ 29 "1:\n" \
30 LOCK_SECTION_START("") \ 30 LOCK_SECTION_START("") \
@@ -34,7 +34,7 @@
34 ::"a" (rw) : "memory") 34 ::"a" (rw) : "memory")
35 35
36#define __build_read_lock_const(rw, helper) \ 36#define __build_read_lock_const(rw, helper) \
37 asm volatile(LOCK "subl $1,%0\n\t" \ 37 asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \
38 "js 2f\n" \ 38 "js 2f\n" \
39 "1:\n" \ 39 "1:\n" \
40 LOCK_SECTION_START("") \ 40 LOCK_SECTION_START("") \
@@ -54,7 +54,7 @@
54 } while (0) 54 } while (0)
55 55
56#define __build_write_lock_ptr(rw, helper) \ 56#define __build_write_lock_ptr(rw, helper) \
57 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ 57 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
58 "jnz 2f\n" \ 58 "jnz 2f\n" \
59 "1:\n" \ 59 "1:\n" \
60 LOCK_SECTION_START("") \ 60 LOCK_SECTION_START("") \
@@ -64,7 +64,7 @@
64 ::"a" (rw) : "memory") 64 ::"a" (rw) : "memory")
65 65
66#define __build_write_lock_const(rw, helper) \ 66#define __build_write_lock_const(rw, helper) \
67 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ 67 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
68 "jnz 2f\n" \ 68 "jnz 2f\n" \
69 "1:\n" \ 69 "1:\n" \
70 LOCK_SECTION_START("") \ 70 LOCK_SECTION_START("") \
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
index a389aa6fe80..064df08b9a0 100644
--- a/include/asm-x86_64/semaphore.h
+++ b/include/asm-x86_64/semaphore.h
@@ -106,7 +106,7 @@ static inline void down(struct semaphore * sem)
106 106
107 __asm__ __volatile__( 107 __asm__ __volatile__(
108 "# atomic down operation\n\t" 108 "# atomic down operation\n\t"
109 LOCK "decl %0\n\t" /* --sem->count */ 109 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
110 "js 2f\n" 110 "js 2f\n"
111 "1:\n" 111 "1:\n"
112 LOCK_SECTION_START("") 112 LOCK_SECTION_START("")
@@ -130,7 +130,7 @@ static inline int down_interruptible(struct semaphore * sem)
130 130
131 __asm__ __volatile__( 131 __asm__ __volatile__(
132 "# atomic interruptible down operation\n\t" 132 "# atomic interruptible down operation\n\t"
133 LOCK "decl %1\n\t" /* --sem->count */ 133 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
134 "js 2f\n\t" 134 "js 2f\n\t"
135 "xorl %0,%0\n" 135 "xorl %0,%0\n"
136 "1:\n" 136 "1:\n"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
154 154
155 __asm__ __volatile__( 155 __asm__ __volatile__(
156 "# atomic interruptible down operation\n\t" 156 "# atomic interruptible down operation\n\t"
157 LOCK "decl %1\n\t" /* --sem->count */ 157 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
158 "js 2f\n\t" 158 "js 2f\n\t"
159 "xorl %0,%0\n" 159 "xorl %0,%0\n"
160 "1:\n" 160 "1:\n"
@@ -178,7 +178,7 @@ static inline void up(struct semaphore * sem)
178{ 178{
179 __asm__ __volatile__( 179 __asm__ __volatile__(
180 "# atomic up operation\n\t" 180 "# atomic up operation\n\t"
181 LOCK "incl %0\n\t" /* ++sem->count */ 181 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
182 "jle 2f\n" 182 "jle 2f\n"
183 "1:\n" 183 "1:\n"
184 LOCK_SECTION_START("") 184 LOCK_SECTION_START("")
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
index dc752eafa68..b0496e0d72a 100644
--- a/include/asm-x86_64/serial.h
+++ b/include/asm-x86_64/serial.h
@@ -2,7 +2,6 @@
2 * include/asm-x86_64/serial.h 2 * include/asm-x86_64/serial.h
3 */ 3 */
4 4
5#include <linux/config.h>
6 5
7/* 6/*
8 * This assumes you have a 1.8432 MHz clock for your UART. 7 * This assumes you have a 1.8432 MHz clock for your UART.
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
index f8d55798535..3ede2a61973 100644
--- a/include/asm-x86_64/signal.h
+++ b/include/asm-x86_64/signal.h
@@ -3,13 +3,13 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/linkage.h>
7#include <linux/time.h> 6#include <linux/time.h>
8 7
9/* Avoid too many header ordering problems. */ 8/* Avoid too many header ordering problems. */
10struct siginfo; 9struct siginfo;
11 10
12#ifdef __KERNEL__ 11#ifdef __KERNEL__
12#include <linux/linkage.h>
13/* Most things should be clean enough to redefine this at will, if care 13/* Most things should be clean enough to redefine this at will, if care
14 is taken to make libc match. */ 14 is taken to make libc match. */
15 15
@@ -83,7 +83,6 @@ typedef unsigned long sigset_t;
83 * SA_FLAGS values: 83 * SA_FLAGS values:
84 * 84 *
85 * SA_ONSTACK indicates that a registered stack_t will be used. 85 * SA_ONSTACK indicates that a registered stack_t will be used.
86 * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
87 * SA_RESTART flag to get restarting signals (which were the default long ago) 86 * SA_RESTART flag to get restarting signals (which were the default long ago)
88 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. 87 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
89 * SA_RESETHAND clears the handler when the signal is delivered. 88 * SA_RESETHAND clears the handler when the signal is delivered.
@@ -103,7 +102,6 @@ typedef unsigned long sigset_t;
103 102
104#define SA_NOMASK SA_NODEFER 103#define SA_NOMASK SA_NODEFER
105#define SA_ONESHOT SA_RESETHAND 104#define SA_ONESHOT SA_RESETHAND
106#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
107 105
108#define SA_RESTORER 0x04000000 106#define SA_RESTORER 0x04000000
109 107
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index a4fdaeb5c39..6805e1feb30 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -5,7 +5,6 @@
5 * We need the APIC definitions automatically as part of 'smp.h' 5 * We need the APIC definitions automatically as part of 'smp.h'
6 */ 6 */
7#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
8#include <linux/config.h>
9#include <linux/threads.h> 8#include <linux/threads.h>
10#include <linux/cpumask.h> 9#include <linux/cpumask.h>
11#include <linux/bitops.h> 10#include <linux/bitops.h>
@@ -54,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info),
54 53
55extern cpumask_t cpu_sibling_map[NR_CPUS]; 54extern cpumask_t cpu_sibling_map[NR_CPUS];
56extern cpumask_t cpu_core_map[NR_CPUS]; 55extern cpumask_t cpu_core_map[NR_CPUS];
57extern u8 phys_proc_id[NR_CPUS];
58extern u8 cpu_core_id[NR_CPUS];
59extern u8 cpu_llc_id[NR_CPUS]; 56extern u8 cpu_llc_id[NR_CPUS];
60 57
61#define SMP_TRAMPOLINE_BASE 0x6000 58#define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86_64/socket.h b/include/asm-x86_64/socket.h
index f2cdbeae5d5..b4670260793 100644
--- a/include/asm-x86_64/socket.h
+++ b/include/asm-x86_64/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index fe484a699cc..248a79f0eaf 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -4,7 +4,6 @@
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <asm/rwlock.h> 5#include <asm/rwlock.h>
6#include <asm/page.h> 6#include <asm/page.h>
7#include <linux/config.h>
8 7
9/* 8/*
10 * Your basic SMP spinlocks, allowing only a single CPU anywhere 9 * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -22,7 +21,7 @@
22 21
23#define __raw_spin_lock_string \ 22#define __raw_spin_lock_string \
24 "\n1:\t" \ 23 "\n1:\t" \
25 "lock ; decl %0\n\t" \ 24 LOCK_PREFIX " ; decl %0\n\t" \
26 "js 2f\n" \ 25 "js 2f\n" \
27 LOCK_SECTION_START("") \ 26 LOCK_SECTION_START("") \
28 "2:\t" \ 27 "2:\t" \
@@ -32,15 +31,16 @@
32 "jmp 1b\n" \ 31 "jmp 1b\n" \
33 LOCK_SECTION_END 32 LOCK_SECTION_END
34 33
34#define __raw_spin_lock_string_up \
35 "\n\tdecl %0"
36
35#define __raw_spin_unlock_string \ 37#define __raw_spin_unlock_string \
36 "movl $1,%0" \ 38 "movl $1,%0" \
37 :"=m" (lock->slock) : : "memory" 39 :"=m" (lock->slock) : : "memory"
38 40
39static inline void __raw_spin_lock(raw_spinlock_t *lock) 41static inline void __raw_spin_lock(raw_spinlock_t *lock)
40{ 42{
41 __asm__ __volatile__( 43 asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
42 __raw_spin_lock_string
43 :"=m" (lock->slock) : : "memory");
44} 44}
45 45
46#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 46#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -122,12 +122,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
122 122
123static inline void __raw_read_unlock(raw_rwlock_t *rw) 123static inline void __raw_read_unlock(raw_rwlock_t *rw)
124{ 124{
125 asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); 125 asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
126} 126}
127 127
128static inline void __raw_write_unlock(raw_rwlock_t *rw) 128static inline void __raw_write_unlock(raw_rwlock_t *rw)
129{ 129{
130 asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" 130 asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
131 : "=m" (rw->lock) : : "memory"); 131 : "=m" (rw->lock) : : "memory");
132} 132}
133 133
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index ee6bf275349..9505d9f4bea 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -6,7 +6,8 @@
6/* Written 2002 by Andi Kleen */ 6/* Written 2002 by Andi Kleen */
7 7
8/* Only used for special circumstances. Stolen from i386/string.h */ 8/* Only used for special circumstances. Stolen from i386/string.h */
9static inline void * __inline_memcpy(void * to, const void * from, size_t n) 9static __always_inline void *
10__inline_memcpy(void * to, const void * from, size_t n)
10{ 11{
11unsigned long d0, d1, d2; 12unsigned long d0, d1, d2;
12__asm__ __volatile__( 13__asm__ __volatile__(
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 60757efd135..ba94ab3d267 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -1,7 +1,6 @@
1#ifndef _ASM_SWIOTLB_H 1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWTIOLB_H 1 2#define _ASM_SWTIOLB_H 1
3 3
4#include <linux/config.h>
5 4
6#include <asm/dma-mapping.h> 5#include <asm/dma-mapping.h>
7 6
@@ -43,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
43extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); 42extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
44extern void swiotlb_init(void); 43extern void swiotlb_init(void);
45 44
45extern int swiotlb_force;
46
46#ifdef CONFIG_SWIOTLB 47#ifdef CONFIG_SWIOTLB
47extern int swiotlb; 48extern int swiotlb;
48#else 49#else
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 39759898022..6bf170bceae 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -1,18 +1,12 @@
1#ifndef __ASM_SYSTEM_H 1#ifndef __ASM_SYSTEM_H
2#define __ASM_SYSTEM_H 2#define __ASM_SYSTEM_H
3 3
4#include <linux/config.h>
5#include <linux/kernel.h> 4#include <linux/kernel.h>
6#include <asm/segment.h> 5#include <asm/segment.h>
6#include <asm/alternative.h>
7 7
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9 9
10#ifdef CONFIG_SMP
11#define LOCK_PREFIX "lock ; "
12#else
13#define LOCK_PREFIX ""
14#endif
15
16#define __STR(x) #x 10#define __STR(x) #x
17#define STR(x) __STR(x) 11#define STR(x) __STR(x)
18 12
@@ -35,7 +29,7 @@
35 "thread_return:\n\t" \ 29 "thread_return:\n\t" \
36 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ 30 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
37 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 31 "movq %P[thread_info](%%rsi),%%r8\n\t" \
38 LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ 32 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
39 "movq %%rax,%%rdi\n\t" \ 33 "movq %%rax,%%rdi\n\t" \
40 "jc ret_from_fork\n\t" \ 34 "jc ret_from_fork\n\t" \
41 RESTORE_CONTEXT \ 35 RESTORE_CONTEXT \
@@ -70,82 +64,6 @@ extern void load_gs_index(unsigned);
70 ".previous" \ 64 ".previous" \
71 : :"r" (value), "r" (0)) 65 : :"r" (value), "r" (0))
72 66
73#ifdef __KERNEL__
74struct alt_instr {
75 __u8 *instr; /* original instruction */
76 __u8 *replacement;
77 __u8 cpuid; /* cpuid bit set for replacement */
78 __u8 instrlen; /* length of original instruction */
79 __u8 replacementlen; /* length of new instruction, <= instrlen */
80 __u8 pad[5];
81};
82#endif
83
84/*
85 * Alternative instructions for different CPU types or capabilities.
86 *
87 * This allows to use optimized instructions even on generic binary
88 * kernels.
89 *
90 * length of oldinstr must be longer or equal the length of newinstr
91 * It can be padded with nops as needed.
92 *
93 * For non barrier like inlines please define new variants
94 * without volatile and memory clobber.
95 */
96#define alternative(oldinstr, newinstr, feature) \
97 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
98 ".section .altinstructions,\"a\"\n" \
99 " .align 8\n" \
100 " .quad 661b\n" /* label */ \
101 " .quad 663f\n" /* new instruction */ \
102 " .byte %c0\n" /* feature bit */ \
103 " .byte 662b-661b\n" /* sourcelen */ \
104 " .byte 664f-663f\n" /* replacementlen */ \
105 ".previous\n" \
106 ".section .altinstr_replacement,\"ax\"\n" \
107 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
108 ".previous" :: "i" (feature) : "memory")
109
110/*
111 * Alternative inline assembly with input.
112 *
113 * Peculiarities:
114 * No memory clobber here.
115 * Argument numbers start with 1.
116 * Best is to use constraints that are fixed size (like (%1) ... "r")
117 * If you use variable sized constraints like "m" or "g" in the
118 * replacement make sure to pad to the worst case length.
119 */
120#define alternative_input(oldinstr, newinstr, feature, input...) \
121 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
122 ".section .altinstructions,\"a\"\n" \
123 " .align 8\n" \
124 " .quad 661b\n" /* label */ \
125 " .quad 663f\n" /* new instruction */ \
126 " .byte %c0\n" /* feature bit */ \
127 " .byte 662b-661b\n" /* sourcelen */ \
128 " .byte 664f-663f\n" /* replacementlen */ \
129 ".previous\n" \
130 ".section .altinstr_replacement,\"ax\"\n" \
131 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
132 ".previous" :: "i" (feature), ##input)
133
134/* Like alternative_input, but with a single output argument */
135#define alternative_io(oldinstr, newinstr, feature, output, input...) \
136 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
137 ".section .altinstructions,\"a\"\n" \
138 " .align 8\n" \
139 " .quad 661b\n" /* label */ \
140 " .quad 663f\n" /* new instruction */ \
141 " .byte %c[feat]\n" /* feature bit */ \
142 " .byte 662b-661b\n" /* sourcelen */ \
143 " .byte 664f-663f\n" /* replacementlen */ \
144 ".previous\n" \
145 ".section .altinstr_replacement,\"ax\"\n" \
146 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
147 ".previous" : output : [feat] "i" (feature), ##input)
148
149/* 67/*
150 * Clear and set 'TS' bit respectively 68 * Clear and set 'TS' bit respectively
151 */ 69 */
@@ -322,50 +240,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
322#endif 240#endif
323#define read_barrier_depends() do {} while(0) 241#define read_barrier_depends() do {} while(0)
324#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) 242#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
325#define set_wmb(var, value) do { var = value; wmb(); } while (0)
326 243
327#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) 244#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
328 245
329/* interrupt control.. */ 246#include <linux/irqflags.h>
330#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
331#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
332
333#ifdef CONFIG_X86_VSMP
334/* Interrupt control for VSMP architecture */
335#define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
336#define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
337
338#define irqs_disabled() \
339({ \
340 unsigned long flags; \
341 local_save_flags(flags); \
342 (flags & (1<<18)) || !(flags & (1<<9)); \
343})
344
345/* For spinlocks etc */
346#define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
347#else /* CONFIG_X86_VSMP */
348#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
349#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
350
351#define irqs_disabled() \
352({ \
353 unsigned long flags; \
354 local_save_flags(flags); \
355 !(flags & (1<<9)); \
356})
357
358/* For spinlocks etc */
359#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
360#endif
361
362/* used in the idle loop; sti takes one instruction cycle to complete */
363#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
364/* used when interrupts are already enabled or to shutdown the processor */
365#define halt() __asm__ __volatile__("hlt": : :"memory")
366 247
367void cpu_idle_wait(void); 248void cpu_idle_wait(void);
368 249
369extern unsigned long arch_align_stack(unsigned long sp); 250extern unsigned long arch_align_stack(unsigned long sp);
251extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
370 252
371#endif 253#endif
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
new file mode 100644
index 00000000000..53e9a68b333
--- /dev/null
+++ b/include/asm-x86_64/tce.h
@@ -0,0 +1,49 @@
1/*
2 * This file is derived from asm-powerpc/tce.h.
3 *
4 * Copyright (C) IBM Corporation, 2006
5 *
6 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
7 * Author: Jon Mason <jdmason@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef _ASM_X86_64_TCE_H
25#define _ASM_X86_64_TCE_H
26
27extern void* tce_table_kva[];
28extern unsigned int specified_table_size;
29struct iommu_table;
30
31#define TCE_ENTRY_SIZE 8 /* in bytes */
32
33#define TCE_READ_SHIFT 0
34#define TCE_WRITE_SHIFT 1
35#define TCE_HUBID_SHIFT 2 /* unused */
36#define TCE_RSVD_SHIFT 8 /* unused */
37#define TCE_RPN_SHIFT 12
38#define TCE_UNUSED_SHIFT 48 /* unused */
39
40#define TCE_RPN_MASK 0x0000fffffffff000ULL
41
42extern void tce_build(struct iommu_table *tbl, unsigned long index,
43 unsigned int npages, unsigned long uaddr, int direction);
44extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
45extern void* alloc_tce_table(void);
46extern void free_tce_table(void *tbl);
47extern int build_tce_table(struct pci_dev *dev, void __iomem *bbar);
48
49#endif /* _ASM_X86_64_TCE_H */
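
The shifts and mask describe the 64-bit Calgary TCE entry layout: the real page number sits at bit 12 and the two low bits grant DMA read/write access. A stand-alone sketch of the packing (plain user-space C, not part of the patch; the in-kernel tce_build() does the real work against the live table):

#include <stdio.h>

#define TCE_READ_SHIFT	0
#define TCE_WRITE_SHIFT	1
#define TCE_RPN_SHIFT	12
#define TCE_RPN_MASK	0x0000fffffffff000ULL	/* copied from above */

int main(void)
{
	unsigned long long paddr = 0x12345678000ULL;	/* page-aligned bus address */
	unsigned long long rpn = paddr >> 12;		/* real page number */
	unsigned long long tce;

	tce  = (rpn << TCE_RPN_SHIFT) & TCE_RPN_MASK;
	tce |= 1ULL << TCE_READ_SHIFT;			/* allow DMA reads */
	tce |= 1ULL << TCE_WRITE_SHIFT;			/* allow DMA writes */

	printf("TCE entry = 0x%016llx\n", tce);		/* 0x0000012345678003 */
	return 0;
}
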
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 4ac0e0a3693..2029b00351f 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -73,8 +73,21 @@ static inline struct thread_info *stack_thread_info(void)
73} 73}
74 74
75/* thread information allocation */ 75/* thread information allocation */
76#ifdef CONFIG_DEBUG_STACK_USAGE
77#define alloc_thread_info(tsk) \
78 ({ \
79 struct thread_info *ret; \
80 \
81 ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \
82 if (ret) \
83 memset(ret, 0, THREAD_SIZE); \
84 ret; \
85 })
86#else
76#define alloc_thread_info(tsk) \ 87#define alloc_thread_info(tsk) \
77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) 88 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
89#endif
90
78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) 91#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
79 92
80#else /* !__ASSEMBLY__ */ 93#else /* !__ASSEMBLY__ */
@@ -101,7 +114,7 @@ static inline struct thread_info *stack_thread_info(void)
101#define TIF_IRET 5 /* force IRET */ 114#define TIF_IRET 5 /* force IRET */
102#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 115#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
103#define TIF_SECCOMP 8 /* secure computing */ 116#define TIF_SECCOMP 8 /* secure computing */
104#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 117/* 16 free */
105#define TIF_IA32 17 /* 32bit process */ 118#define TIF_IA32 17 /* 32bit process */
106#define TIF_FORK 18 /* ret_from_fork */ 119#define TIF_FORK 18 /* ret_from_fork */
107#define TIF_ABI_PENDING 19 120#define TIF_ABI_PENDING 19
@@ -115,7 +128,6 @@ static inline struct thread_info *stack_thread_info(void)
115#define _TIF_IRET (1<<TIF_IRET) 128#define _TIF_IRET (1<<TIF_IRET)
116#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 129#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
117#define _TIF_SECCOMP (1<<TIF_SECCOMP) 130#define _TIF_SECCOMP (1<<TIF_SECCOMP)
118#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
119#define _TIF_IA32 (1<<TIF_IA32) 131#define _TIF_IA32 (1<<TIF_IA32)
120#define _TIF_FORK (1<<TIF_FORK) 132#define _TIF_FORK (1<<TIF_FORK)
121#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 133#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
@@ -137,6 +149,9 @@ static inline struct thread_info *stack_thread_info(void)
137 */ 149 */
138#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 150#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
139#define TS_COMPAT 0x0002 /* 32bit syscall active */ 151#define TS_COMPAT 0x0002 /* 32bit syscall active */
152#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
153
154#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
140 155
141#endif /* __KERNEL__ */ 156#endif /* __KERNEL__ */
142 157
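
TIF_POLLING_NRFLAG is retired in favour of a TS_POLLING bit in ->status, which only the owning task touches, so the idle loop no longer needs atomic flag operations. A hedged sketch of the scheduler-side consumer (not part of the patch, loosely modelled on resched_task()):

#include <linux/sched.h>
#include <linux/smp.h>

static void kick_cpu_if_needed(struct task_struct *p, int cpu)
{
	set_tsk_need_resched(p);

	/* A polling idle task notices TIF_NEED_RESCHED by itself (e.g. via
	 * monitor/mwait), so the reschedule IPI can be skipped for it. */
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}
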
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 4a9c20ea9b1..d16d5b60f41 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -1,7 +1,6 @@
1#ifndef _X8664_TLBFLUSH_H 1#ifndef _X8664_TLBFLUSH_H
2#define _X8664_TLBFLUSH_H 2#define _X8664_TLBFLUSH_H
3 3
4#include <linux/config.h>
5#include <linux/mm.h> 4#include <linux/mm.h>
6#include <asm/processor.h> 5#include <asm/processor.h>
7 6
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 9db54e9d17b..6e7a2e976b0 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -1,15 +1,12 @@
1#ifndef _ASM_X86_64_TOPOLOGY_H 1#ifndef _ASM_X86_64_TOPOLOGY_H
2#define _ASM_X86_64_TOPOLOGY_H 2#define _ASM_X86_64_TOPOLOGY_H
3 3
4#include <linux/config.h>
5 4
6#ifdef CONFIG_NUMA 5#ifdef CONFIG_NUMA
7 6
8#include <asm/mpspec.h> 7#include <asm/mpspec.h>
9#include <asm/bitops.h> 8#include <asm/bitops.h>
10 9
11/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
12
13extern cpumask_t cpu_online_map; 10extern cpumask_t cpu_online_map;
14 11
15extern unsigned char cpu_to_node[]; 12extern unsigned char cpu_to_node[];
@@ -58,12 +55,12 @@ extern int __node_distance(int, int);
58#endif 55#endif
59 56
60#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
61#define topology_physical_package_id(cpu) \ 58#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
62 (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) 59#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
63#define topology_core_id(cpu) \
64 (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
65#define topology_core_siblings(cpu) (cpu_core_map[cpu]) 60#define topology_core_siblings(cpu) (cpu_core_map[cpu])
66#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 61#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
62#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
63#define smt_capable() (smp_num_siblings > 1)
67#endif 64#endif
68 65
69#include <asm-generic/topology.h> 66#include <asm-generic/topology.h>
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index bddffcb591b..1e1fa003daa 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -4,7 +4,6 @@
4/* 4/*
5 * User space memory access functions 5 * User space memory access functions
6 */ 6 */
7#include <linux/config.h>
8#include <linux/compiler.h> 7#include <linux/compiler.h>
9#include <linux/errno.h> 8#include <linux/errno.h>
10#include <linux/sched.h> 9#include <linux/sched.h>
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index feb77cb8c04..80fd48e84bb 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -617,8 +617,12 @@ __SYSCALL(__NR_tee, sys_tee)
617__SYSCALL(__NR_sync_file_range, sys_sync_file_range) 617__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
618#define __NR_vmsplice 278 618#define __NR_vmsplice 278
619__SYSCALL(__NR_vmsplice, sys_vmsplice) 619__SYSCALL(__NR_vmsplice, sys_vmsplice)
620#define __NR_move_pages 279
621__SYSCALL(__NR_move_pages, sys_move_pages)
620 622
621#define __NR_syscall_max __NR_vmsplice 623#ifdef __KERNEL__
624
625#define __NR_syscall_max __NR_move_pages
622 626
623#ifndef __NO_STUBS 627#ifndef __NO_STUBS
624 628
@@ -635,7 +639,6 @@ do { \
635 return (type) (res); \ 639 return (type) (res); \
636} while (0) 640} while (0)
637 641
638#ifdef __KERNEL__
639#define __ARCH_WANT_OLD_READDIR 642#define __ARCH_WANT_OLD_READDIR
640#define __ARCH_WANT_OLD_STAT 643#define __ARCH_WANT_OLD_STAT
641#define __ARCH_WANT_SYS_ALARM 644#define __ARCH_WANT_SYS_ALARM
@@ -657,7 +660,6 @@ do { \
657#define __ARCH_WANT_SYS_RT_SIGACTION 660#define __ARCH_WANT_SYS_RT_SIGACTION
658#define __ARCH_WANT_SYS_TIME 661#define __ARCH_WANT_SYS_TIME
659#define __ARCH_WANT_COMPAT_SYS_TIME 662#define __ARCH_WANT_COMPAT_SYS_TIME
660#endif
661 663
662#ifndef __KERNEL_SYSCALLS__ 664#ifndef __KERNEL_SYSCALLS__
663 665
@@ -819,9 +821,7 @@ asmlinkage long sys_fork(struct pt_regs regs);
819asmlinkage long sys_vfork(struct pt_regs regs); 821asmlinkage long sys_vfork(struct pt_regs regs);
820asmlinkage long sys_pipe(int *fildes); 822asmlinkage long sys_pipe(int *fildes);
821 823
822#endif /* __KERNEL_SYSCALLS__ */ 824#ifndef __ASSEMBLY__
823
824#if !defined(__ASSEMBLY__) && defined(__KERNEL__)
825 825
826#include <linux/linkage.h> 826#include <linux/linkage.h>
827#include <linux/compiler.h> 827#include <linux/compiler.h>
@@ -836,9 +836,9 @@ asmlinkage long sys_rt_sigaction(int sig,
836 struct sigaction __user *oact, 836 struct sigaction __user *oact,
837 size_t sigsetsize); 837 size_t sigsetsize);
838 838
839#endif /* __ASSEMBLY__ */ 839#endif /* __ASSEMBLY__ */
840 840
841#endif /* __NO_STUBS */ 841#endif /* __KERNEL_SYSCALLS__ */
842 842
843/* 843/*
844 * "Conditional" syscalls 844 * "Conditional" syscalls
@@ -848,4 +848,8 @@ asmlinkage long sys_rt_sigaction(int sig,
848 */ 848 */
849#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 849#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
850 850
851#endif 851#endif /* __NO_STUBS */
852
853#endif /* __KERNEL__ */
854
855#endif /* _ASM_X86_64_UNISTD_H_ */
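
A hedged user-space sketch of exercising the new syscall number directly (not part of the patch; the argument order follows sys_move_pages(pid, nr_pages, pages, nodes, status, flags), and MPOL_MF_MOVE is replicated here rather than pulled from a header):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_move_pages	279
#define MPOL_MF_MOVE	(1 << 1)	/* move pages owned by this process */

int main(void)
{
	void *pages[1];
	int nodes[1] = { 0 };		/* desired target node */
	int status[1] = { -1 };		/* filled with node or -errno per page */
	long rc;

	pages[0] = malloc(4096);
	memset(pages[0], 0, 4096);	/* fault the page in first */

	rc = syscall(__NR_move_pages, 0 /* current process */, 1UL,
		     pages, nodes, status, MPOL_MF_MOVE);
	printf("move_pages: rc=%ld status[0]=%d\n", rc, status[0]);
	return 0;
}
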
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
new file mode 100644
index 00000000000..1f6e9bfb569
--- /dev/null
+++ b/include/asm-x86_64/unwind.h
@@ -0,0 +1,107 @@
1#ifndef _ASM_X86_64_UNWIND_H
2#define _ASM_X86_64_UNWIND_H
3
4/*
5 * Copyright (C) 2002-2006 Novell, Inc.
6 * Jan Beulich <jbeulich@novell.com>
7 * This code is released under version 2 of the GNU GPL.
8 */
9
10#ifdef CONFIG_STACK_UNWIND
11
12#include <linux/sched.h>
13#include <asm/ptrace.h>
14#include <asm/uaccess.h>
15#include <asm/vsyscall.h>
16
17struct unwind_frame_info
18{
19 struct pt_regs regs;
20 struct task_struct *task;
21};
22
23#define UNW_PC(frame) (frame)->regs.rip
24#define UNW_SP(frame) (frame)->regs.rsp
25#ifdef CONFIG_FRAME_POINTER
26#define UNW_FP(frame) (frame)->regs.rbp
27#define FRAME_RETADDR_OFFSET 8
28#define FRAME_LINK_OFFSET 0
29#define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1))
30#define STACK_TOP(tsk) ((tsk)->thread.rsp0)
31#endif
32/* Might need to account for the special exception and interrupt handling
33 stacks here, since normally
34 EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER,
35 but the construct is needed only for getting across the stack switch to
36 the interrupt stack - thus considering the IRQ stack itself is unnecessary,
37 and the overhead of comparing against all exception handling stacks seems
38 not desirable. */
39#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
40
41#define UNW_REGISTER_INFO \
42 PTREGS_INFO(rax), \
43 PTREGS_INFO(rdx), \
44 PTREGS_INFO(rcx), \
45 PTREGS_INFO(rbx), \
46 PTREGS_INFO(rsi), \
47 PTREGS_INFO(rdi), \
48 PTREGS_INFO(rbp), \
49 PTREGS_INFO(rsp), \
50 PTREGS_INFO(r8), \
51 PTREGS_INFO(r9), \
52 PTREGS_INFO(r10), \
53 PTREGS_INFO(r11), \
54 PTREGS_INFO(r12), \
55 PTREGS_INFO(r13), \
56 PTREGS_INFO(r14), \
57 PTREGS_INFO(r15), \
58 PTREGS_INFO(rip)
59
60static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
61 /*const*/ struct pt_regs *regs)
62{
63 info->regs = *regs;
64}
65
66static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
67{
68 extern const char thread_return[];
69
70 memset(&info->regs, 0, sizeof(info->regs));
71 info->regs.rip = (unsigned long)thread_return;
72 info->regs.cs = __KERNEL_CS;
73 __get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp);
74 info->regs.rsp = info->task->thread.rsp;
75 info->regs.ss = __KERNEL_DS;
76}
77
78extern int arch_unwind_init_running(struct unwind_frame_info *,
79 int (*callback)(struct unwind_frame_info *,
80 void *arg),
81 void *arg);
82
83static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
84{
85#if 0 /* This can only work when selector register saves/restores
86 are properly annotated (and tracked in UNW_REGISTER_INFO). */
87 return user_mode(&info->regs);
88#else
89 return (long)info->regs.rip >= 0
90 || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
91 || (long)info->regs.rsp >= 0;
92#endif
93}
94
95#else
96
97#define UNW_PC(frame) ((void)(frame), 0)
98#define UNW_SP(frame) ((void)(frame), 0)
99
100static inline int arch_unw_user_mode(const void *info)
101{
102 return 0;
103}
104
105#endif
106
107#endif /* _ASM_X86_64_UNWIND_H */
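
A minimal kernel-context sketch of the new API (not part of the patch; assumes CONFIG_STACK_UNWIND=y): capture the currently running frame and report where it starts. A real backtracer, like the reworked show_trace(), would keep stepping frames with the generic unwind() helper from linux/unwind.h.

#include <linux/kernel.h>
#include <asm/unwind.h>

static int report_top_frame(struct unwind_frame_info *info, void *arg)
{
	printk(KERN_DEBUG "unwind start: rip=%016lx rsp=%016lx\n",
	       UNW_PC(info), UNW_SP(info));
	return 0;			/* stop after reporting the first frame */
}

static void show_where_we_are(void)
{
	struct unwind_frame_info info;

	arch_unwind_init_running(&info, report_top_frame, NULL);
}
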
diff --git a/include/asm-x86_64/vga.h b/include/asm-x86_64/vga.h
index ef0c0e50cc9..0ecf68ac03a 100644
--- a/include/asm-x86_64/vga.h
+++ b/include/asm-x86_64/vga.h
@@ -12,7 +12,7 @@
12 * access the videoram directly without any black magic. 12 * access the videoram directly without any black magic.
13 */ 13 */
14 14
15#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x) 15#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
16 16
17#define vga_readb(x) (*(x)) 17#define vga_readb(x) (*(x))
18#define vga_writeb(x,y) (*(y) = (x)) 18#define vga_writeb(x,y) (*(y) = (x))
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index a85e16f56d7..146b24402a5 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -1,8 +1,6 @@
1#ifndef _ASM_X86_64_VSYSCALL_H_ 1#ifndef _ASM_X86_64_VSYSCALL_H_
2#define _ASM_X86_64_VSYSCALL_H_ 2#define _ASM_X86_64_VSYSCALL_H_
3 3
4#include <linux/seqlock.h>
5
6enum vsyscall_num { 4enum vsyscall_num {
7 __NR_vgettimeofday, 5 __NR_vgettimeofday,
8 __NR_vtime, 6 __NR_vtime,
@@ -14,6 +12,7 @@ enum vsyscall_num {
14#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) 12#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
15 13
16#ifdef __KERNEL__ 14#ifdef __KERNEL__
15#include <linux/seqlock.h>
17 16
18#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16))) 17#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
19#define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16))) 18#define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))