Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/apic.h          |   17
-rw-r--r--  include/asm-x86_64/apicdef.h       |    1
-rw-r--r--  include/asm-x86_64/atomic.h        |    3
-rw-r--r--  include/asm-x86_64/bitops.h        |   70
-rw-r--r--  include/asm-x86_64/cache.h         |   13
-rw-r--r--  include/asm-x86_64/cacheflush.h    |    4
-rw-r--r--  include/asm-x86_64/compat.h        |    7
-rw-r--r--  include/asm-x86_64/cpufeature.h    |    3
-rw-r--r--  include/asm-x86_64/desc.h          |   18
-rw-r--r--  include/asm-x86_64/dma-mapping.h   |  221
-rw-r--r--  include/asm-x86_64/dwarf2.h        |    4
-rw-r--r--  include/asm-x86_64/e820.h          |    1
-rw-r--r--  include/asm-x86_64/edac.h          |   18
-rw-r--r--  include/asm-x86_64/fixmap.h        |    2
-rw-r--r--  include/asm-x86_64/gart-mapping.h  |   16
-rw-r--r--  include/asm-x86_64/hw_irq.h        |   10
-rw-r--r--  include/asm-x86_64/i387.h          |   68
-rw-r--r--  include/asm-x86_64/ia32.h          |    2
-rw-r--r--  include/asm-x86_64/ia32_unistd.h   |   16
-rw-r--r--  include/asm-x86_64/idle.h          |   14
-rw-r--r--  include/asm-x86_64/io.h            |    5
-rw-r--r--  include/asm-x86_64/ioctl.h         |   76
-rw-r--r--  include/asm-x86_64/ipi.h           |    4
-rw-r--r--  include/asm-x86_64/irq.h           |    2
-rw-r--r--  include/asm-x86_64/kdebug.h        |   13
-rw-r--r--  include/asm-x86_64/kexec.h         |   37
-rw-r--r--  include/asm-x86_64/kprobes.h       |    4
-rw-r--r--  include/asm-x86_64/mman.h          |    1
-rw-r--r--  include/asm-x86_64/mmu_context.h   |    9
-rw-r--r--  include/asm-x86_64/mmzone.h        |   16
-rw-r--r--  include/asm-x86_64/mpspec.h        |    4
-rw-r--r--  include/asm-x86_64/mutex.h         |  113
-rw-r--r--  include/asm-x86_64/numa.h          |    5
-rw-r--r--  include/asm-x86_64/page.h          |   22
-rw-r--r--  include/asm-x86_64/param.h         |    3
-rw-r--r--  include/asm-x86_64/pci.h           |   11
-rw-r--r--  include/asm-x86_64/pda.h           |   11
-rw-r--r--  include/asm-x86_64/percpu.h        |    2
-rw-r--r--  include/asm-x86_64/pgtable.h       |   34
-rw-r--r--  include/asm-x86_64/processor.h     |   21
-rw-r--r--  include/asm-x86_64/proto.h         |   15
-rw-r--r--  include/asm-x86_64/rwlock.h        |    2
-rw-r--r--  include/asm-x86_64/segment.h       |    4
-rw-r--r--  include/asm-x86_64/smp.h           |    1
-rw-r--r--  include/asm-x86_64/swiotlb.h       |   16
-rw-r--r--  include/asm-x86_64/system.h        |   59
-rw-r--r--  include/asm-x86_64/thread_info.h   |    3
-rw-r--r--  include/asm-x86_64/timex.h         |   16
-rw-r--r--  include/asm-x86_64/topology.h      |    3
-rw-r--r--  include/asm-x86_64/uaccess.h       |    7
-rw-r--r--  include/asm-x86_64/unistd.h        |   31
-rw-r--r--  include/asm-x86_64/vsyscall.h      |    4
52 files changed, 721 insertions(+), 341 deletions(-)
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 5647b7de1749..4f6a4dc455bb 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -42,11 +42,6 @@ static __inline void apic_write(unsigned long reg, unsigned int v)
 	*((volatile unsigned int *)(APIC_BASE+reg)) = v;
 }
 
-static __inline void apic_write_atomic(unsigned long reg, unsigned int v)
-{
-	xchg((volatile unsigned int *)(APIC_BASE+reg), v);
-}
-
 static __inline unsigned int apic_read(unsigned long reg)
 {
 	return *((volatile unsigned int *)(APIC_BASE+reg));
@@ -57,10 +52,6 @@ static __inline__ void apic_wait_icr_idle(void)
 	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
 }
 
-#define FORCE_READ_AROUND_WRITE 0
-#define apic_read_around(x)
-#define apic_write_around(x,y) apic_write((x),(y))
-
 static inline void ack_APIC_irq(void)
 {
 	/*
@@ -71,7 +62,7 @@ static inline void ack_APIC_irq(void)
 	 */
 
 	/* Docs say use 0 for future compatibility */
-	apic_write_around(APIC_EOI, 0);
+	apic_write(APIC_EOI, 0);
 }
 
 extern int get_maxlvt (void);
@@ -113,6 +104,12 @@ extern int disable_timer_pin_1;
 
 extern void setup_threshold_lvt(unsigned long lvt_off);
 
+void smp_send_timer_broadcast_ipi(void);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+
+#define ARCH_APICTIMER_STOPS_ON_C3	1
+
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 extern unsigned boot_cpu_id;
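
With apic_write_atomic() and the apic_*_around() compatibility macros removed, apic_write() is the single write primitive left, and ack_APIC_irq() calls it directly. A minimal userspace sketch of the volatile-MMIO store pattern apic_write() relies on; the backing buffer and base address are stand-ins, and only the EOI offset matches the real register layout:

```c
#include <stdio.h>

static unsigned char fake_apic_page[4096];	/* stands in for APIC_BASE */
#define FAKE_APIC_BASE	((unsigned long)fake_apic_page)
#define FAKE_APIC_EOI	0xB0			/* real EOI register offset */

static void apic_write_sketch(unsigned long reg, unsigned int v)
{
	/* volatile forces exactly one ordered store, as in apic_write() */
	*((volatile unsigned int *)(FAKE_APIC_BASE + reg)) = v;
}

static unsigned int apic_read_sketch(unsigned long reg)
{
	return *((volatile unsigned int *)(FAKE_APIC_BASE + reg));
}

int main(void)
{
	apic_write_sketch(FAKE_APIC_EOI, 0);	/* docs say write 0 to ack */
	printf("EOI register now %u\n", apic_read_sketch(FAKE_APIC_EOI));
	return 0;
}
```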
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index fb1c99ac669f..decaa2d540e8 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -13,6 +13,7 @@
 #define		APIC_ID		0x20
 #define		APIC_ID_MASK	(0xFFu<<24)
 #define		GET_APIC_ID(x)	(((x)>>24)&0xFFu)
+#define		SET_APIC_ID(x)	(((x)<<24))
 #define		APIC_LVR	0x30
 #define		APIC_LVR_MASK	0xFF00FF
 #define		GET_APIC_VERSION(x)	((x)&0xFFu)
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 50db9f39274f..4b5cd553e772 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <linux/config.h>
+#include <asm/types.h>
 
 /* atomic_t should be 32 bit signed type */
 
@@ -389,6 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 #define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
@@ -424,4 +426,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index 05a0d374404b..eb4df23e1e41 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -29,7 +29,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btsl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"dIr" (nr) : "memory");
 }
 
@@ -46,7 +46,7 @@ static __inline__ void __set_bit(int nr, volatile void * addr)
 {
 	__asm__ volatile(
 		"btsl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"dIr" (nr) : "memory");
 }
 
@@ -64,7 +64,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btrl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"dIr" (nr));
 }
 
@@ -72,7 +72,7 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
 {
 	__asm__ __volatile__(
 		"btrl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"dIr" (nr));
 }
 
@@ -92,7 +92,7 @@ static __inline__ void __change_bit(int nr, volatile void * addr)
 {
 	__asm__ __volatile__(
 		"btcl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"dIr" (nr));
 }
 
@@ -109,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btcl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"dIr" (nr));
 }
 
@@ -127,7 +127,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__( LOCK_PREFIX
 		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"dIr" (nr) : "memory");
 	return oldbit;
 }
@@ -147,7 +147,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
 
 	__asm__(
 		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"dIr" (nr));
 	return oldbit;
 }
@@ -166,7 +166,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__( LOCK_PREFIX
 		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"dIr" (nr) : "memory");
 	return oldbit;
 }
@@ -186,7 +186,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
 
 	__asm__(
 		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"dIr" (nr));
 	return oldbit;
 }
@@ -198,7 +198,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__(
 		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"dIr" (nr) : "memory");
 	return oldbit;
 }
@@ -217,7 +217,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__( LOCK_PREFIX
 		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"dIr" (nr) : "memory");
 	return oldbit;
 }
@@ -340,6 +340,20 @@ static __inline__ unsigned long __ffs(unsigned long word)
 	return word;
 }
 
+/*
+ * __fls: find last bit set.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static __inline__ unsigned long __fls(unsigned long word)
+{
+	__asm__("bsrq %1,%0"
+		:"=r" (word)
+		:"rm" (word));
+	return word;
+}
+
 #ifdef __KERNEL__
 
 static inline int sched_find_first_bit(const unsigned long *b)
@@ -370,6 +384,35 @@ static __inline__ int ffs(int x)
 }
 
 /**
+ * fls64 - find last bit set in 64 bit word
+ * @x: the word to search
+ *
+ * This is defined the same way as fls.
+ */
+static __inline__ int fls64(__u64 x)
+{
+	if (x == 0)
+		return 0;
+	return __fls(x) + 1;
+}
+
+/**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ */
+static __inline__ int fls(int x)
+{
+	int r;
+
+	__asm__("bsrl %1,%0\n\t"
+		"cmovzl %2,%0"
+		: "=&r" (r) : "rm" (x), "rm" (-1));
+	return r+1;
+}
+
+/**
  * hweightN - returns the hamming weight of a N-bit word
  * @x: the word to weigh
  *
@@ -407,9 +450,6 @@ static __inline__ int ffs(int x)
 #define minix_find_first_zero_bit(addr,size) \
 	find_first_zero_bit((void*)addr,size)
 
-/* find last set bit */
-#define fls(x) generic_fls(x)
-
 #endif /* __KERNEL__ */
 
 #endif /* _X86_64_BITOPS_H */
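
The fls()/fls64() routines added above return the 1-based index of the highest set bit, with 0 for an all-zero argument; the cmovzl exists because bsr leaves its destination undefined when the source is zero. A portable userspace sketch of the same semantics, using shifts in place of bsrl/bsrq (illustrative only, not the kernel code):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int fls_sketch(int x)		/* 1-based index of highest set bit */
{
	unsigned int v = (unsigned int)x;
	int r = 0;

	while (v) {
		r++;
		v >>= 1;
	}
	return r;			/* fls(0) == 0, like the cmovzl path */
}

static int fls64_sketch(uint64_t x)	/* mirrors fls64(): zero check + __fls */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	assert(fls_sketch(0) == 0);		/* bsr is undefined on 0 */
	assert(fls_sketch(1) == 1);
	assert(fls_sketch(0x40000000) == 31);
	assert(fls64_sketch(1ULL << 63) == 64);
	printf("fls semantics check passed\n");
	return 0;
}
```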
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 33e53424128b..263f0a211ed7 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -9,6 +9,17 @@
 /* L1 cache line size */
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
+
+#ifdef CONFIG_X86_VSMP
+
+/* vSMP Internode cacheline shift */
+#define INTERNODE_CACHE_SHIFT (12)
+#ifdef CONFIG_SMP
+#define __cacheline_aligned_in_smp \
+	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
+	__attribute__((__section__(".data.page_aligned")))
+#endif
+
+#endif
 
 #endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
index b3189fb229d1..d32f7f58752a 100644
--- a/include/asm-x86_64/cacheflush.h
+++ b/include/asm-x86_64/cacheflush.h
@@ -27,4 +27,8 @@ void global_flush_tlb(void);
 int change_page_attr(struct page *page, int numpages, pgprot_t prot);
 int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
 #endif /* _X8664_CACHEFLUSH_H */
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
index f0155c38f639..b37ab8218ef0 100644
--- a/include/asm-x86_64/compat.h
+++ b/include/asm-x86_64/compat.h
@@ -198,8 +198,13 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static __inline__ void __user *compat_alloc_user_space(long len)
 {
-	struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs);
+	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *)regs->rsp - len;
 }
 
+static inline int is_compat_task(void)
+{
+	return current_thread_info()->status & TS_COMPAT;
+}
+
 #endif /* _ASM_X86_64_COMPAT_H */
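
compat_alloc_user_space() now reaches the saved register frame through task_pt_regs(), but the allocation itself is unchanged: scratch space is carved directly below the 32-bit task's saved user stack pointer. A toy sketch of that pointer math (fake_pt_regs is a stand-in for the kernel structure):

```c
#include <stdio.h>

struct fake_pt_regs {
	unsigned long rsp;	/* saved user stack pointer */
};

static void *compat_alloc_sketch(struct fake_pt_regs *regs, long len)
{
	/* the user stack grows down, so the area sits just below rsp */
	return (void *)(regs->rsp - len);
}

int main(void)
{
	struct fake_pt_regs regs = { .rsp = 0x7fff0000UL };

	printf("64 bytes of scratch at %p\n", compat_alloc_sketch(&regs, 64));
	return 0;
}
```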
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index aea308c65709..41c0ac8559be 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -61,8 +61,9 @@
 #define X86_FEATURE_K6_MTRR	(3*32+ 1) /* AMD K6 nonstandard MTRRs */
 #define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
 #define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */
-#define X86_FEATURE_K8_C	(3*32+ 4) /* C stepping K8 */
+/* 4 free */
 #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
+#define X86_FEATURE_SYNC_RDTSC	(3*32+6)  /* RDTSC syncs CPU core */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 33764869387b..eb7723a46790 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -25,7 +25,7 @@ struct n_desc_struct {
 	unsigned int a,b;
 };
 
-extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 
 enum {
 	GATE_INTERRUPT = 0xE,
@@ -79,6 +79,9 @@ extern struct desc_struct default_ldt[];
 extern struct gate_struct idt_table[];
 extern struct desc_ptr cpu_gdt_descr[];
 
+/* the cpu gdt accessor */
+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
+
 static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
 {
 	struct gate_struct s;
@@ -114,6 +117,11 @@ static inline void set_system_gate(int nr, void *func)
 	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
 }
 
+static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
+{
+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
+}
+
 static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
 					 unsigned size)
 {
@@ -139,20 +147,20 @@ static inline void set_tss_desc(unsigned cpu, void *addr)
 	 * -1? seg base+limit should be pointing to the address of the
 	 * last valid byte
 	 */
-	set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS],
+	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
 		(unsigned long)addr, DESC_TSS,
 		IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
 }
 
 static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
 {
-	set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (unsigned long)addr,
+	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
 			      DESC_LDT, size * 8 - 1);
 }
 
 static inline void set_seg_base(unsigned cpu, int entry, void *base)
 {
-	struct desc_struct *d = &cpu_gdt_table[cpu][entry];
+	struct desc_struct *d = &cpu_gdt(cpu)[entry];
 	u32 addr = (u32)(u64)base;
 	BUG_ON((u64)base >> 32);
 	d->base0 = addr & 0xffff;
@@ -194,7 +202,7 @@ static inline void set_seg_base(unsigned cpu, int entry, void *base)
 
 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-	u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
+	u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
 	gdt[0] = t->tls_array[0];
 	gdt[1] = t->tls_array[1];
 	gdt[2] = t->tls_array[2];
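
The new cpu_gdt() accessor replaces direct indexing of a static cpu_gdt_table[NR_CPUS][GDT_ENTRIES] array: each CPU's GDT is now found through its cpu_gdt_descr entry, so the tables themselves need not sit in one global array. A self-contained model of that indirection with toy types (none of these are the kernel structures):

```c
#include <stdio.h>

struct fake_desc { unsigned int a, b; };
struct fake_desc_ptr { unsigned short size; unsigned long address; };

#define FAKE_NR_CPUS		2
#define FAKE_GDT_ENTRIES	16

static struct fake_desc gdt_cpu0[FAKE_GDT_ENTRIES];
static struct fake_desc gdt_cpu1[FAKE_GDT_ENTRIES];
static struct fake_desc_ptr cpu_gdt_descr[FAKE_NR_CPUS];

/* the accessor the patch introduces, modulo the fake types */
#define cpu_gdt(cpu) ((struct fake_desc *)cpu_gdt_descr[cpu].address)

int main(void)
{
	cpu_gdt_descr[0].size = sizeof(gdt_cpu0) - 1;
	cpu_gdt_descr[0].address = (unsigned long)gdt_cpu0;
	cpu_gdt_descr[1].size = sizeof(gdt_cpu1) - 1;
	cpu_gdt_descr[1].address = (unsigned long)gdt_cpu1;

	cpu_gdt(1)[3].a = 0xdead;	/* writes CPU 1's own table */
	printf("cpu1 gdt entry 3: %#x\n", gdt_cpu1[3].a);
	return 0;
}
```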
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 36d16dfbac88..49a81a66516e 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -12,155 +12,176 @@
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
-extern dma_addr_t bad_dma_address;
-#define dma_mapping_error(x) \
-	(swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
-
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-			 gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-		       dma_addr_t dma_handle);
+struct dma_mapping_ops {
+	int		(*mapping_error)(dma_addr_t dma_addr);
+	void*		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *hwdev, void *ptr,
+				size_t size, int direction);
+	/* like map_single, but doesn't check the device mask */
+	dma_addr_t	(*map_simple)(struct device *hwdev, char *ptr,
+				size_t size, int direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
+				size_t size, int direction);
+	void		(*sync_single_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void		(*sync_single_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void		(*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void		(*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void		(*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	void		(*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
+				int nents, int direction);
+	void		(*unmap_sg)(struct device *hwdev,
+				struct scatterlist *sg, int nents,
+				int direction);
+	int		(*dma_supported)(struct device *hwdev, u64 mask);
+	int		is_phys;
+};
 
-#ifdef CONFIG_GART_IOMMU
+extern dma_addr_t bad_dma_address;
+extern struct dma_mapping_ops* dma_ops;
+extern int iommu_merge;
 
-extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
-				 int direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
-			     int direction);
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+	if (dma_ops->mapping_error)
+		return dma_ops->mapping_error(dma_addr);
 
-#else
+	return (dma_addr == bad_dma_address);
+}
 
-/* No IOMMU */
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp);
+extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+			      dma_addr_t dma_handle);
 
-static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
-					size_t size, int direction)
+static inline dma_addr_t
+dma_map_single(struct device *hwdev, void *ptr, size_t size,
+	       int direction)
 {
-	dma_addr_t addr;
-
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-	addr = virt_to_bus(ptr);
-
-	if ((addr+size) & ~*hwdev->dma_mask)
-		out_of_line_bug();
-	return addr;
+	return dma_ops->map_single(hwdev, ptr, size, direction);
 }
 
-static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
-				    size_t size, int direction)
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
+		 int direction)
 {
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-	/* Nothing to do */
+	dma_ops->unmap_single(dev, addr, size, direction);
 }
 
-#endif
-
 #define dma_map_page(dev,page,offset,size,dir) \
 	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
 
-static inline void dma_sync_single_for_cpu(struct device *hwdev,
-					   dma_addr_t dma_handle,
-					   size_t size, int direction)
-{
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-
-	if (swiotlb)
-		return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
+#define dma_unmap_page dma_unmap_single
 
+static inline void
+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+			size_t size, int direction)
+{
+	if (dma_ops->sync_single_for_cpu)
+		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
+					     direction);
 	flush_write_buffers();
 }
 
-static inline void dma_sync_single_for_device(struct device *hwdev,
-					      dma_addr_t dma_handle,
-					      size_t size, int direction)
+static inline void
+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
+			   size_t size, int direction)
 {
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-
-	if (swiotlb)
-		return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
-
+	if (dma_ops->sync_single_for_device)
+		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
+						direction);
 	flush_write_buffers();
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *hwdev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size, int direction)
+static inline void
+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size, int direction)
 {
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-
-	if (swiotlb)
-		return swiotlb_sync_single_range_for_cpu(hwdev,dma_handle,offset,size,direction);
+	if (dma_ops->sync_single_range_for_cpu) {
+		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
+	}
 
 	flush_write_buffers();
 }
 
-static inline void dma_sync_single_range_for_device(struct device *hwdev,
-						    dma_addr_t dma_handle,
-						    unsigned long offset,
-						    size_t size, int direction)
+static inline void
+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size, int direction)
 {
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-
-	if (swiotlb)
-		return swiotlb_sync_single_range_for_device(hwdev,dma_handle,offset,size,direction);
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
+						      offset, size, direction);
 
 	flush_write_buffers();
 }
 
-static inline void dma_sync_sg_for_cpu(struct device *hwdev,
-				       struct scatterlist *sg,
-				       int nelems, int direction)
+static inline void
+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+		    int nelems, int direction)
 {
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-
-	if (swiotlb)
-		return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction);
-
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
-static inline void dma_sync_sg_for_device(struct device *hwdev,
-					  struct scatterlist *sg,
-					  int nelems, int direction)
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+		       int nelems, int direction)
 {
-	if (direction == DMA_NONE)
-		out_of_line_bug();
-
-	if (swiotlb)
-		return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction);
+	if (dma_ops->sync_sg_for_device) {
+		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	}
 
 	flush_write_buffers();
 }
 
-extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-		      int nents, int direction);
-extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-			 int nents, int direction);
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+	return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
 
-#define dma_unmap_page dma_unmap_single
+static inline void
+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+	     int direction)
+{
+	dma_ops->unmap_sg(hwdev, sg, nents, direction);
+}
 
 extern int dma_supported(struct device *hwdev, u64 mask);
-extern int dma_get_cache_alignment(void);
-#define dma_is_consistent(h) 1
 
-static inline int dma_set_mask(struct device *dev, u64 mask)
+/* same for gart, swiotlb, and nommu */
+static inline int dma_get_cache_alignment(void)
 {
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-	return 0;
+	return boot_cpu_data.x86_clflush_size;
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+#define dma_is_consistent(h) 1
+
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
 {
 	flush_write_buffers();
 }
 
-#endif
+extern struct device fallback_dev;
+extern int panic_on_overflow;
+
+#endif /* _X8664_DMA_MAPPING_H */
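
The pattern this rewrite introduces is visible in dma_mapping_error(): every operation dispatches through a global dma_mapping_ops pointer (gart, swiotlb or nommu install their own table), with a generic fallback when a backend leaves a hook NULL. A compilable userspace model of that dispatch, with illustrative names throughout:

```c
#include <stdio.h>
#include <stddef.h>

typedef unsigned long fake_dma_addr_t;
#define FAKE_BAD_DMA_ADDRESS	((fake_dma_addr_t)0)

struct fake_mapping_ops {
	/* optional hook, exactly like ->mapping_error above */
	int (*mapping_error)(fake_dma_addr_t addr);
};

static int gart_mapping_error(fake_dma_addr_t addr)
{
	return addr == FAKE_BAD_DMA_ADDRESS;	/* backend-specific test */
}

static struct fake_mapping_ops nommu_ops = { .mapping_error = NULL };
static struct fake_mapping_ops gart_ops = { .mapping_error = gart_mapping_error };
static struct fake_mapping_ops *ops = &nommu_ops;

static int fake_dma_mapping_error(fake_dma_addr_t addr)
{
	if (ops->mapping_error)			/* backend hook if present */
		return ops->mapping_error(addr);
	return addr == FAKE_BAD_DMA_ADDRESS;	/* generic fallback */
}

int main(void)
{
	printf("nommu says: %d\n", fake_dma_mapping_error(0));
	ops = &gart_ops;			/* boot code picks the backend */
	printf("gart says:  %d\n", fake_dma_mapping_error(0x1000));
	return 0;
}
```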
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
index 582757fc0365..07654bd155bf 100644
--- a/include/asm-x86_64/dwarf2.h
+++ b/include/asm-x86_64/dwarf2.h
@@ -14,7 +14,7 @@
    away for older version.
  */
 
-#ifdef CONFIG_DEBUG_INFO
+#ifdef CONFIG_UNWIND_INFO
 
 #define CFI_STARTPROC .cfi_startproc
 #define CFI_ENDPROC .cfi_endproc
@@ -28,6 +28,7 @@
 #define CFI_RESTORE .cfi_restore
 #define CFI_REMEMBER_STATE .cfi_remember_state
 #define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
 
 #else
 
@@ -44,6 +45,7 @@
 #define CFI_RESTORE #
 #define CFI_REMEMBER_STATE #
 #define CFI_RESTORE_STATE #
+#define CFI_UNDEFINED #
 
 #endif
 
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index e682edc24a68..8dcc32665240 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -55,6 +55,7 @@ extern unsigned long e820_hole_size(unsigned long start_pfn,
 				    unsigned long end_pfn);
 
 extern void __init parse_memopt(char *p, char **end);
+extern void __init parse_memmapopt(char *p, char **end);
 
 extern struct e820map e820;
 #endif/*!__ASSEMBLY__*/
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h
new file mode 100644
index 000000000000..cad1cd42b4ee
--- /dev/null
+++ b/include/asm-x86_64/edac.h
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+	unsigned int *virt_addr = va;
+	u32 i;
+
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
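
atomic_scrub() works because a locked addl of zero is a full atomic read-modify-write: the value is unchanged, but the (hardware-corrected) data is written back to DRAM, clearing a correctable ECC error. A userspace sketch over an ordinary buffer; x86-only, gcc-style asm, and it uses a "+m" operand where the kernel version passes the location as an input:

```c
#include <stdio.h>
#include <stdint.h>

static void atomic_scrub_sketch(void *va, uint32_t size)
{
	unsigned int *virt_addr = va;
	uint32_t i;

	for (i = 0; i < size / 4; i++, virt_addr++)
		/* atomic add of 0: value unchanged, line written back */
		__asm__ __volatile__("lock; addl $0, %0"
				     : "+m" (*virt_addr));
}

int main(void)
{
	unsigned int buf[16] = { 1, 2, 3 };

	atomic_scrub_sketch(buf, sizeof(buf));
	printf("buf[2] still %u after scrub\n", buf[2]);
	return 0;
}
```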
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index a582cfcf2231..7b286bd21d1d 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
  * directly without translation, we catch the bug with a NULL-deference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 {
 	/*
 	 * this branch gets completely eliminated after inlining,
diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h
new file mode 100644
index 000000000000..ada497b0b55b
--- /dev/null
+++ b/include/asm-x86_64/gart-mapping.h
@@ -0,0 +1,16 @@
+#ifndef _X8664_GART_MAPPING_H
+#define _X8664_GART_MAPPING_H 1
+
+#include <linux/types.h>
+#include <asm/types.h>
+
+struct device;
+
+extern void*
+gart_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp);
+
+extern int
+gart_dma_supported(struct device *hwdev, u64 mask);
+
+#endif /* _X8664_GART_MAPPING_H */
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index c14a8c7267a6..0df1715dee71 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -46,18 +46,18 @@ struct hw_interrupt_type;
  * some of the following vectors are 'rare', they are merged
  * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
  * TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xf9 are free (reserved for future Linux use).
  */
 #define SPURIOUS_APIC_VECTOR	0xff
 #define ERROR_APIC_VECTOR	0xfe
 #define RESCHEDULE_VECTOR	0xfd
 #define CALL_FUNCTION_VECTOR	0xfc
-#define KDB_VECTOR		0xfb	/* reserved for KDB */
+/* fb free - please don't readd KDB here because it's useless
+   (hint - think what a NMI bit does to a vector) */
 #define THERMAL_APIC_VECTOR	0xfa
 #define THRESHOLD_APIC_VECTOR	0xf9
-#define INVALIDATE_TLB_VECTOR_END	0xf8
-#define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f8 used for TLB flush */
+/* f8 free */
+#define INVALIDATE_TLB_VECTOR_END	0xf7
+#define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */
 
 #define NUM_INVALIDATE_TLB_VECTORS	8
 
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index aa39cfd0e001..876eb9a2fe78 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -30,7 +30,7 @@ extern int save_i387(struct _fpstate __user *buf);
  */
 
 #define unlazy_fpu(tsk) do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) \
 		save_init_fpu(tsk); \
 } while (0)
 
@@ -46,9 +46,9 @@ static inline void tolerant_fwait(void)
 }
 
 #define clear_fpu(tsk) do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) { \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) { \
 		tolerant_fwait(); \
-		(tsk)->thread_info->status &= ~TS_USEDFPU; \
+		task_thread_info(tsk)->status &= ~TS_USEDFPU; \
 		stts(); \
 	} \
 } while (0)
@@ -75,7 +75,8 @@ extern int set_fpregs(struct task_struct *tsk,
 static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 {
 	int err;
-	asm volatile("1:  rex64 ; fxrstor (%[fx])\n\t"
+
+	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
 		     "3:  movl $-1,%[err]\n"
@@ -86,7 +87,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 		     "   .quad  1b,3b\n"
 		     ".previous"
 		     : [err] "=r" (err)
-		     : [fx] "r" (fx), "0" (0));
+#if 0 /* See comment in __fxsave_clear() below. */
+		     : [fx] "r" (fx), "m" (*fx), "0" (0));
+#else
+		     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
+#endif
 	if (unlikely(err))
 		init_fpu(current);
 	return err;
@@ -95,7 +100,8 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
 {
 	int err;
-	asm volatile("1:  rex64 ; fxsave (%[fx])\n\t"
+
+	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
 		     "3:  movl $-1,%[err]\n"
@@ -105,20 +111,53 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
 		     "   .align 8\n"
 		     "   .quad  1b,3b\n"
 		     ".previous"
-		     : [err] "=r" (err)
-		     : [fx] "r" (fx), "0" (0));
+		     : [err] "=r" (err), "=m" (*fx)
+#if 0 /* See comment in __fxsave_clear() below. */
+		     : [fx] "r" (fx), "0" (0));
+#else
+		     : [fx] "cdaSDb" (fx), "0" (0));
+#endif
 	if (unlikely(err))
 		__clear_user(fx, sizeof(struct i387_fxsave_struct));
 	return err;
 }
 
+static inline void __fxsave_clear(struct task_struct *tsk)
+{
+	/* Using "rex64; fxsave %0" is broken because, if the memory operand
+	   uses any extended registers for addressing, a second REX prefix
+	   will be generated (to the assembler, rex64 followed by semicolon
+	   is a separate instruction), and hence the 64-bitness is lost. */
+#if 0
+	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
+	   starting with gas 2.16. */
+	__asm__ __volatile__("fxsaveq %0"
+			     : "=m" (tsk->thread.i387.fxsave));
+#elif 0
+	/* Using, as a workaround, the properly prefixed form below isn't
+	   accepted by any binutils version so far released, complaining that
+	   the same type of prefix is used twice if an extended register is
+	   needed for addressing (fix submitted to mainline 2005-11-21). */
+	__asm__ __volatile__("rex64/fxsave %0"
+			     : "=m" (tsk->thread.i387.fxsave));
+#else
+	/* This, however, we can work around by forcing the compiler to select
+	   an addressing mode that doesn't require extended registers. */
+	__asm__ __volatile__("rex64/fxsave %P2(%1)"
+			     : "=m" (tsk->thread.i387.fxsave)
+			     : "cdaSDb" (tsk),
+			       "i" (offsetof(__typeof__(*tsk),
+					     thread.i387.fxsave)));
+#endif
+	__asm__ __volatile__("fnclex");
+}
+
 static inline void kernel_fpu_begin(void)
 {
 	struct thread_info *me = current_thread_info();
 	preempt_disable();
 	if (me->status & TS_USEDFPU) {
-		asm volatile("rex64 ; fxsave %0 ; fnclex"
-			      : "=m" (me->task->thread.i387.fxsave));
+		__fxsave_clear(me->task);
 		me->status &= ~TS_USEDFPU;
 		return;
 	}
@@ -131,11 +170,10 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
-static inline void save_init_fpu( struct task_struct *tsk )
+static inline void save_init_fpu(struct task_struct *tsk)
 {
-	asm volatile( "rex64 ; fxsave %0 ; fnclex"
-		      : "=m" (tsk->thread.i387.fxsave));
-	tsk->thread_info->status &= ~TS_USEDFPU;
+	__fxsave_clear(tsk);
+	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 	stts();
 }
 
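
The __fxsave_clear() comment carries the key reasoning: "rex64; fxsave %0" assembles as a standalone rex64 prefix plus a separate fxsave, so if the compiler picks r8-r15 for the memory operand the assembler emits a second REX prefix and the hand-written one is lost. The "cdaSDb" constraint restricts the operand to rcx/rdx/rax/rsi/rdi/rbx, none of which need REX for addressing. A tiny x86-64/gcc sketch showing just the constraint, with a harmless lea standing in for fxsave:

```c
#include <stdio.h>

int main(void)
{
	char buf[16];
	unsigned long out;

	/* "cdaSDb": the compiler must place %1 in one of
	   rcx/rdx/rax/rsi/rdi/rbx, never in r8-r15 */
	__asm__("lea (%1), %0" : "=r" (out) : "cdaSDb" (buf));

	printf("buf at %#lx\n", out);
	return 0;
}
```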
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index c7bc9c0525ba..e6b7f2234e43 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -169,6 +169,8 @@ int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
 struct linux_binprm;
 extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
 				unsigned long stack_top, int exec_stack);
+struct mm_struct;
+extern void ia32_pick_mmap_layout(struct mm_struct *mm);
 
 #endif
 
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index d5166ec3868d..e87cd83a0e86 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -299,7 +299,21 @@
 #define __NR_ia32_inotify_init		291
 #define __NR_ia32_inotify_add_watch	292
 #define __NR_ia32_inotify_rm_watch	293
+#define __NR_ia32_migrate_pages		294
+#define __NR_ia32_openat		295
+#define __NR_ia32_mkdirat		296
+#define __NR_ia32_mknodat		297
+#define __NR_ia32_fchownat		298
+#define __NR_ia32_futimesat		299
+#define __NR_ia32_newfstatat		300
+#define __NR_ia32_unlinkat		301
+#define __NR_ia32_renameat		302
+#define __NR_ia32_linkat		303
+#define __NR_ia32_symlinkat		304
+#define __NR_ia32_readlinkat		305
+#define __NR_ia32_fchmodat		306
+#define __NR_ia32_faccessat		307
 
-#define IA32_NR_syscalls 294	/* must be > than biggest syscall! */
+#define IA32_NR_syscalls 308	/* must be > than biggest syscall! */
 
 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/idle.h b/include/asm-x86_64/idle.h
new file mode 100644
index 000000000000..6bd47dcf2067
--- /dev/null
+++ b/include/asm-x86_64/idle.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_64_IDLE_H
+#define _ASM_X86_64_IDLE_H 1
+
+#define IDLE_START 1
+#define IDLE_END 2
+
+struct notifier_block;
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+
+void enter_idle(void);
+void exit_idle(void);
+
+#endif
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 52ff269fe054..9dac18db8291 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -143,6 +143,11 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
diff --git a/include/asm-x86_64/ioctl.h b/include/asm-x86_64/ioctl.h
index 609b663b6bf4..b279fe06dfe5 100644
--- a/include/asm-x86_64/ioctl.h
+++ b/include/asm-x86_64/ioctl.h
@@ -1,75 +1 @@
-/* $Id: ioctl.h,v 1.2 2001/07/04 09:08:13 ak Exp $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMX8664_IOCTL_H
-#define _ASMX8664_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms.  The i386 ioctl numbering scheme doesn't really enforce
- * a type field.  De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here.  Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS	8
-#define _IOC_TYPEBITS	8
-#define _IOC_SIZEBITS	14
-#define _IOC_DIRBITS	2
-
-#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT	0
-#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE	0U
-#define _IOC_WRITE	1U
-#define _IOC_READ	2U
-
-#define _IOC(dir,type,nr,size) \
-	(((dir)  << _IOC_DIRSHIFT) | \
-	 ((type) << _IOC_TYPESHIFT) | \
-	 ((nr)   << _IOC_NRSHIFT) | \
-	 ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
-
-#endif /* _ASMX8664_IOCTL_H */
+#include <asm-generic/ioctl.h>
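
Switching to <asm-generic/ioctl.h> changes nothing observable: the generic header keeps the exact encoding the deleted comments document (8-bit number, 8-bit type, 14-bit size, 2-bit direction). A quick round-trip check of that bit layout, with the macro names shortened for the sketch:

```c
#include <stdio.h>

#define NRBITS		8
#define TYPEBITS	8
#define SIZEBITS	14

#define NRSHIFT		0
#define TYPESHIFT	(NRSHIFT + NRBITS)
#define SIZESHIFT	(TYPESHIFT + TYPEBITS)
#define DIRSHIFT	(SIZESHIFT + SIZEBITS)

#define IOC_READ	2U

/* same packing as _IOC(dir,type,nr,size) */
#define IOC(dir,type,nr,size) \
	(((dir) << DIRSHIFT) | ((type) << TYPESHIFT) | \
	 ((nr) << NRSHIFT) | ((size) << SIZESHIFT))

int main(void)
{
	unsigned int nr = IOC(IOC_READ, 'T', 0x13, 4);	/* like _IOR() */

	printf("dir=%u type=%c nr=%#x size=%u\n",
	       (nr >> DIRSHIFT) & 3,
	       (char)((nr >> TYPESHIFT) & 0xff),
	       (nr >> NRSHIFT) & 0xff,
	       (nr >> SIZESHIFT) & 0x3fff);
	return 0;
}
```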
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index 022e9d340ad7..2a5c162b7d92 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -38,10 +38,6 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns
 		icr |= APIC_DM_FIXED | vector;
 		break;
 	case NMI_VECTOR:
-		/*
-		 * Setup KDB IPI to be delivered as an NMI
-		 */
-	case KDB_VECTOR:
 		icr |= APIC_DM_NMI;
 		break;
 	}
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index fb724ba37ae6..9db5a1b4f7b1 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -36,7 +36,7 @@
 #define NR_IRQ_VECTORS NR_IRQS
 #else
 #define NR_IRQS 224
-#define NR_IRQ_VECTORS 1024
+#define NR_IRQ_VECTORS (32 * NR_CPUS)
 #endif
 
 static __inline__ int irq_canonicalize(int irq)
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index f604e84c5303..b9ed4c0c8783 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -35,9 +35,16 @@ enum die_val {
 	DIE_PAGE_FAULT,
 };
 
-static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
-{
-	struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
+static inline int notify_die(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs	= regs,
+		.str	= str,
+		.err	= err,
+		.trapnr	= trap,
+		.signr	= sig
+	};
 	return notifier_call_chain(&die_chain, val, &args);
 }
 
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index 42d2ff15c592..ae28cd44bcd3 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -3,6 +3,7 @@
 
 #include <asm/page.h>
 #include <asm/proto.h>
+#include <asm/ptrace.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -26,8 +27,40 @@
 #define KEXEC_ARCH KEXEC_ARCH_X86_64
 
 #define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
 
-extern note_buf_t crash_notes[];
+/*
+ * Saving the registers of the cpu on which panic occured in
+ * crash_kexec to save a valid sp. The registers of other cpus
+ * will be saved in machine_crash_shutdown while shooting down them.
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+						struct pt_regs *oldregs)
+{
+	if (oldregs)
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	else {
+		__asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
+		__asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
+		__asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
+		__asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
+		__asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
+		__asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
+		__asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
+		__asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
+		__asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
+		__asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
+		__asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
+		__asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
+		__asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
+		__asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
+		__asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
+		__asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
+		__asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
+		__asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
+		__asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
 
+		newregs->rip = (unsigned long)current_text_addr();
+	}
+}
 #endif /* _X86_64_KEXEC_H */
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 4dd7a7e148d4..98a1e95ddb98 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -27,7 +27,10 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
+#define  __ARCH_WANT_KPROBES_INSN_SLOT
+
 struct pt_regs;
+struct kprobe;
 
 typedef u8 kprobe_opcode_t;
 #define BREAKPOINT_INSTRUCTION	0xcc
@@ -42,6 +45,7 @@ typedef u8 kprobe_opcode_t;
 #define ARCH_SUPPORTS_KRETPROBES
 
 void kretprobe_trampoline(void);
+extern void arch_remove_kprobe(struct kprobe *p);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index 78e60a4fd4ee..d0e97b74f735 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -36,6 +36,7 @@
 #define MADV_SEQUENTIAL 0x2		/* read-ahead aggressively */
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
+#define MADV_REMOVE	0x5		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index b630d52bdfb1..16e4be4de0c5 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -15,18 +15,13 @@
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_SMP
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
+#ifdef CONFIG_SMP
 	if (read_pda(mmu_state) == TLBSTATE_OK)
 		write_pda(mmu_state, TLBSTATE_LAZY);
-}
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
 #endif
+}
 
 static inline void load_cr3(pgd_t *pgd)
 {
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 69baaa8a3ce0..972c9359f7d7 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -36,22 +36,12 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 				 NODE_DATA(nid)->node_spanned_pages)
 
 #ifdef CONFIG_DISCONTIGMEM
-
 #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
 #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
 
-/* Requires pfn_valid(pfn) to be true */
-#define pfn_to_page(pfn) ({ \
-	int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); 	\
-	((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map;	\
-})
-
-#define page_to_pfn(page) \
-	(long)(((page) - page_zone(page)->zone_mem_map) + page_zone(page)->zone_start_pfn)
-
-#define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \
-			({ u8 nid__ = pfn_to_nid(pfn); \
-			   nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) < node_end_pfn(nid__); }))
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+extern int pfn_valid(unsigned long pfn);
 #endif
 
 #define local_mapnr(kvaddr) \
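
pfn_to_page(), page_to_pfn() and pfn_valid() become out-of-line functions here, but the computation the removed pfn_to_page() macro performed is unchanged: resolve the pfn's node, then index that node's own mem_map with a node-relative pfn. A toy two-node model of the same math (all helpers and the layout are made up for illustration):

```c
#include <stdio.h>

#define FAKE_PAGE_SHIFT	12

struct fake_page { int dummy; };

static struct fake_page mem_map_node0[100];
static struct fake_page mem_map_node1[100];

struct fake_node {
	unsigned long start_pfn;
	struct fake_page *mem_map;
};

static struct fake_node nodes[2] = {
	{ .start_pfn = 0,   .mem_map = mem_map_node0 },
	{ .start_pfn = 100, .mem_map = mem_map_node1 },
};

static int phys_to_nid_sketch(unsigned long addr)
{
	return (addr >> FAKE_PAGE_SHIFT) < 100 ? 0 : 1;	/* toy lookup */
}

/* what the removed macro computed: node-relative index into mem_map */
static struct fake_page *pfn_to_page_sketch(unsigned long pfn)
{
	int nid = phys_to_nid_sketch(pfn << FAKE_PAGE_SHIFT);

	return (pfn - nodes[nid].start_pfn) + nodes[nid].mem_map;
}

int main(void)
{
	printf("pfn 150 -> node 1, index %ld\n",
	       (long)(pfn_to_page_sketch(150) - mem_map_node1));
	return 0;
}
```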
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 6f8a17d105ab..14fc3ddd9031 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -76,7 +76,7 @@ struct mpc_config_bus
 {
 	unsigned char mpc_type;
 	unsigned char mpc_busid;
-	unsigned char mpc_bustype[6] __attribute((packed));
+	unsigned char mpc_bustype[6];
 };
 
 /* List of Bus Type string values, Intel MP Spec. */
@@ -188,7 +188,7 @@ extern void mp_register_lapic_address (u64 address);
 extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
 extern void mp_config_acpi_legacy_irqs (void);
-extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
+extern int mp_register_gsi (u32 gsi, int triggering, int polarity);
 #endif /*CONFIG_X86_IO_APIC*/
 #endif
 
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
new file mode 100644
index 000000000000..11fbee2bd6c0
--- /dev/null
+++ b/include/asm-x86_64/mutex.h
@@ -0,0 +1,113 @@
1/*
2 * Assembly implementation of the mutex fastpath, based on atomic
3 * decrement/increment.
4 *
5 * started by Ingo Molnar:
6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */
9#ifndef _ASM_MUTEX_H
10#define _ASM_MUTEX_H
11
12/**
13 * __mutex_fastpath_lock - decrement and call function if negative
14 * @v: pointer of type atomic_t
15 * @fail_fn: function to call if the result is negative
16 *
17 * Atomically decrements @v and calls <fail_fn> if the result is negative.
18 */
19#define __mutex_fastpath_lock(v, fail_fn) \
20do { \
21 unsigned long dummy; \
22 \
23 typecheck(atomic_t *, v); \
24 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
25 \
26 __asm__ __volatile__( \
27 LOCK " decl (%%rdi) \n" \
28 " js 2f \n" \
29 "1: \n" \
30 \
31 LOCK_SECTION_START("") \
32 "2: call "#fail_fn" \n" \
33 " jmp 1b \n" \
34 LOCK_SECTION_END \
35 \
36 :"=D" (dummy) \
37 : "D" (v) \
38 : "rax", "rsi", "rdx", "rcx", \
39 "r8", "r9", "r10", "r11", "memory"); \
40} while (0)
41
42/**
43 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
44 * from 1 to a 0 value
45 * @count: pointer of type atomic_t
46 * @fail_fn: function to call if the original value was not 1
47 *
48 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
49 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
50 * or anything the slow path function returns
51 */
52static inline int
53__mutex_fastpath_lock_retval(atomic_t *count,
54 int fastcall (*fail_fn)(atomic_t *))
55{
56 if (unlikely(atomic_dec_return(count) < 0))
57 return fail_fn(count);
58 else
59 return 0;
60}
61
62/**
63 * __mutex_fastpath_unlock - increment and call function if nonpositive
64 * @v: pointer of type atomic_t
65 * @fail_fn: function to call if the result is nonpositive
66 *
67 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
68 */
69#define __mutex_fastpath_unlock(v, fail_fn) \
70do { \
71 unsigned long dummy; \
72 \
73 typecheck(atomic_t *, v); \
74 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
75 \
76 __asm__ __volatile__( \
77 LOCK " incl (%%rdi) \n" \
78 " jle 2f \n" \
79 "1: \n" \
80 \
81 LOCK_SECTION_START("") \
82 "2: call "#fail_fn" \n" \
83 " jmp 1b \n" \
84 LOCK_SECTION_END \
85 \
86 :"=D" (dummy) \
87 : "D" (v) \
88 : "rax", "rsi", "rdx", "rcx", \
89 "r8", "r9", "r10", "r11", "memory"); \
90} while (0)
91
92#define __mutex_slowpath_needs_to_unlock() 1
93
94/**
95 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
96 *
97 * @count: pointer of type atomic_t
98 * @fail_fn: fallback function
99 *
100 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
101 * if it wasn't 1 originally. [the fallback function is never used on
102 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
103 */
104static inline int
105__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
106{
107 if (likely(atomic_cmpxchg(count, 1, 0) == 1))
108 return 1;
109 else
110 return 0;
111}
112
113#endif
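These hooks are meant to be driven by the generic mutex core; a rough sketch of the expected call pattern, with the slowpath symbol assumed (it lives in kernel/mutex.c, not this header):

	/* assumed slowpath, matching the fail_fn calling convention above */
	static fastcall void __mutex_lock_slowpath(atomic_t *lock_count);

	static void example_lock(struct mutex *lock)
	{
		/* count drops 1 -> 0 on the uncontended path; a negative
		   result takes the "js 2f" branch into the slowpath call */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}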
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index d51e56fdc3da..34e434ce3268 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -20,6 +20,11 @@ extern int numa_off;
20extern void numa_set_node(int cpu, int node); 20extern void numa_set_node(int cpu, int node);
21 21
22extern unsigned char apicid_to_node[256]; 22extern unsigned char apicid_to_node[256];
23#ifdef CONFIG_NUMA
24extern void __init init_cpu_to_node(void);
25#else
26#define init_cpu_to_node() do {} while (0)
27#endif
23 28
24#define NUMA_NO_NODE 0xff 29#define NUMA_NO_NODE 0xff
25 30
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 06e489f32472..615e3e494929 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -14,13 +14,25 @@
14#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) 14#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
15 15
16#define THREAD_ORDER 1 16#define THREAD_ORDER 1
17#ifdef __ASSEMBLY__ 17#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
18#define THREAD_SIZE (1 << (PAGE_SHIFT + THREAD_ORDER))
19#else
20#define THREAD_SIZE (1UL << (PAGE_SHIFT + THREAD_ORDER))
21#endif
22#define CURRENT_MASK (~(THREAD_SIZE-1)) 18#define CURRENT_MASK (~(THREAD_SIZE-1))
23 19
20#define EXCEPTION_STACK_ORDER 0
21#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
22
23#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
24#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
25
26#define IRQSTACK_ORDER 2
27#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
28
29#define STACKFAULT_STACK 1
30#define DOUBLEFAULT_STACK 2
31#define NMI_STACK 3
32#define DEBUG_STACK 4
33#define MCE_STACK 5
34#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
35
24#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) 36#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
25#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) 37#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
26 38
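With 4 KiB pages, the orders consolidated above work out as follows (pure arithmetic from the definitions):

	THREAD_SIZE     = PAGE_SIZE << 1 =  8 KiB  (kernel stack)
	EXCEPTION_STKSZ = PAGE_SIZE << 0 =  4 KiB  (per-IST exception stack)
	DEBUG_STKSZ     = PAGE_SIZE << 0 =  4 KiB  (same order as exceptions)
	IRQSTACKSIZE    = PAGE_SIZE << 2 = 16 KiB  (per-CPU interrupt stack)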
diff --git a/include/asm-x86_64/param.h b/include/asm-x86_64/param.h
index 40b11937180d..5956b23b57c2 100644
--- a/include/asm-x86_64/param.h
+++ b/include/asm-x86_64/param.h
@@ -1,9 +1,8 @@
1#include <linux/config.h>
2
3#ifndef _ASMx86_64_PARAM_H 1#ifndef _ASMx86_64_PARAM_H
4#define _ASMx86_64_PARAM_H 2#define _ASMx86_64_PARAM_H
5 3
6#ifdef __KERNEL__ 4#ifdef __KERNEL__
5# include <linux/config.h>
7# define HZ CONFIG_HZ /* Internal kernel timer frequency */ 6# define HZ CONFIG_HZ /* Internal kernel timer frequency */
 8# define USER_HZ 100 /* .. some user interfaces are in "ticks" */ 7# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
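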
9#define CLOCKS_PER_SEC (USER_HZ) /* like times() */ 8#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index eeb3088a1c9e..fd03e15d7ea6 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -42,18 +42,20 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
42#include <asm/scatterlist.h> 42#include <asm/scatterlist.h>
43#include <linux/string.h> 43#include <linux/string.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <linux/dma-mapping.h> /* for have_iommu */
45 46
46extern int iommu_setup(char *opt); 47extern int iommu_setup(char *opt);
47 48
48#ifdef CONFIG_GART_IOMMU
49/* The PCI address space does equal the physical memory 49/* The PCI address space does equal the physical memory
50 * address space. The networking and block device layers use 50 * address space. The networking and block device layers use
51 * this boolean for bounce buffer decisions 51 * this boolean for bounce buffer decisions
52 * 52 *
53 * On AMD64 it mostly equals, but we set it to zero to tell some subsystems 53 * On AMD64 it mostly equals, but we set it to zero if a hardware
 54 * that an IOMMU is available. 54 * IOMMU (gart) or software IOMMU (swiotlb) is available.
55 */ 55 */
56#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0) 56#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
57
58#ifdef CONFIG_GART_IOMMU
57 59
58/* 60/*
59 * x86-64 always supports DAC, but sometimes it is useful to force 61 * x86-64 always supports DAC, but sometimes it is useful to force
@@ -79,7 +81,6 @@ extern int iommu_sac_force;
79#else 81#else
80/* No IOMMU */ 82/* No IOMMU */
81 83
82#define PCI_DMA_BUS_IS_PHYS 1
83#define pci_dac_dma_supported(pci_dev, mask) 1 84#define pci_dac_dma_supported(pci_dev, mask) 1
84 85
85#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) 86#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
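The new PCI_DMA_BUS_IS_PHYS definition assumes the reworked dma-mapping.h (see the diffstat) publishes a per-implementation flag; the structure itself is outside this hunk, so this is only a sketch of the assumed shape:

	struct dma_mapping_ops {
		/* mapping/unmapping callbacks elided */
		int is_phys;	/* 1 when DMA addresses equal physical addresses */
	};
	extern struct dma_mapping_ops *dma_ops;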
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index 8733ccfa442e..c7ab38a601af 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -5,6 +5,7 @@
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/cache.h> 7#include <linux/cache.h>
8#include <asm/page.h>
8 9
9/* Per processor datastructure. %gs points to it while the kernel runs */ 10/* Per processor datastructure. %gs points to it while the kernel runs */
10struct x8664_pda { 11struct x8664_pda {
@@ -12,6 +13,9 @@ struct x8664_pda {
12 unsigned long data_offset; /* Per cpu data offset from linker address */ 13 unsigned long data_offset; /* Per cpu data offset from linker address */
13 unsigned long kernelstack; /* top of kernel stack for current */ 14 unsigned long kernelstack; /* top of kernel stack for current */
14 unsigned long oldrsp; /* user rsp for system call */ 15 unsigned long oldrsp; /* user rsp for system call */
16#if DEBUG_STKSZ > EXCEPTION_STKSZ
17 unsigned long debugstack; /* #DB/#BP stack. */
18#endif
15 int irqcount; /* Irq nesting counter. Starts with -1 */ 19 int irqcount; /* Irq nesting counter. Starts with -1 */
16 int cpunumber; /* Logical CPU number */ 20 int cpunumber; /* Logical CPU number */
17 char *irqstackptr; /* top of irqstack */ 21 char *irqstackptr; /* top of irqstack */
@@ -23,11 +27,10 @@ struct x8664_pda {
23 unsigned apic_timer_irqs; 27 unsigned apic_timer_irqs;
24} ____cacheline_aligned_in_smp; 28} ____cacheline_aligned_in_smp;
25 29
30extern struct x8664_pda *_cpu_pda[];
31extern struct x8664_pda boot_cpu_pda[];
26 32
27#define IRQSTACK_ORDER 2 33#define cpu_pda(i) (_cpu_pda[i])
28#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
29
30extern struct x8664_pda cpu_pda[];
31 34
32/* 35/*
33 * There is no fast way to get the base address of the PDA, all the accesses 36 * There is no fast way to get the base address of the PDA, all the accesses
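Turning the flat cpu_pda[] array into a pointer table lets each processor's PDA be placed in node-local memory; a hedged sketch of the boot-time wiring implied by the declarations above:

	/* sketch: early boot aims every slot at the static copy; a
	   NUMA-aware allocator may repoint individual slots later */
	static void example_pda_init(int cpu)
	{
		_cpu_pda[cpu] = &boot_cpu_pda[cpu];
		cpu_pda(cpu)->cpunumber = cpu;
	}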
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 9c71855736fb..29a6b0408f75 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,7 +11,7 @@
11 11
12#include <asm/pda.h> 12#include <asm/pda.h>
13 13
14#define __per_cpu_offset(cpu) (cpu_pda[cpu].data_offset) 14#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
15#define __my_cpu_offset() read_pda(data_offset) 15#define __my_cpu_offset() read_pda(data_offset)
16 16
17/* Separate out the type, so (int[3], foo) works. */ 17/* Separate out the type, so (int[3], foo) works. */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index ecf58c7c1650..8fbf4dd72115 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
122 122
123#define pte_same(a, b) ((a).pte == (b).pte) 123#define pte_same(a, b) ((a).pte == (b).pte)
124 124
125#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
126
125#define PMD_SIZE (1UL << PMD_SHIFT) 127#define PMD_SIZE (1UL << PMD_SHIFT)
126#define PMD_MASK (~(PMD_SIZE-1)) 128#define PMD_MASK (~(PMD_SIZE-1))
127#define PUD_SIZE (1UL << PUD_SHIFT) 129#define PUD_SIZE (1UL << PUD_SHIFT)
@@ -265,25 +267,25 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
265 */ 267 */
266#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) 268#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
267static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; } 269static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
268extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } 270static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
269extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } 271static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
270extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 272static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
271extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 273static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
272extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } 274static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
273static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 275static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
274static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; } 276static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
275 277
276extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } 278static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
277extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } 279static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
278extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } 280static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
279extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } 281static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
280extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } 282static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
281extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } 283static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
282extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } 284static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
283extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } 285static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
284extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } 286static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
285extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } 287static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
286extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; } 288static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
287 289
288struct vm_area_struct; 290struct vm_area_struct;
289 291
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 4861246548f7..8c8d88c036ed 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -227,7 +227,13 @@ struct tss_struct {
227extern struct cpuinfo_x86 boot_cpu_data; 227extern struct cpuinfo_x86 boot_cpu_data;
228DECLARE_PER_CPU(struct tss_struct,init_tss); 228DECLARE_PER_CPU(struct tss_struct,init_tss);
229 229
230#ifdef CONFIG_X86_VSMP
231#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
232#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
233#else
230#define ARCH_MIN_TASKALIGN 16 234#define ARCH_MIN_TASKALIGN 16
235#define ARCH_MIN_MMSTRUCT_ALIGN 0
236#endif
231 237
232struct thread_struct { 238struct thread_struct {
233 unsigned long rsp0; 239 unsigned long rsp0;
@@ -267,15 +273,6 @@ struct thread_struct {
267#define INIT_MMAP \ 273#define INIT_MMAP \
268{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } 274{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
269 275
270#define STACKFAULT_STACK 1
271#define DOUBLEFAULT_STACK 2
272#define NMI_STACK 3
273#define DEBUG_STACK 4
274#define MCE_STACK 5
275#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
276#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
277#define EXCEPTION_STACK_ORDER 0
278
279#define start_thread(regs,new_rip,new_rsp) do { \ 276#define start_thread(regs,new_rip,new_rsp) do { \
280 asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ 277 asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
281 load_gs_index(0); \ 278 load_gs_index(0); \
@@ -317,8 +314,8 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
317#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8)) 314#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
318 315
319extern unsigned long get_wchan(struct task_struct *p); 316extern unsigned long get_wchan(struct task_struct *p);
320#define KSTK_EIP(tsk) \ 317#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
321 (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip) 318#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
322#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ 319#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
323 320
324 321
@@ -480,4 +477,6 @@ extern unsigned long boot_option_idle_override;
480/* Boot loader type from the setup header */ 477/* Boot loader type from the setup header */
481extern int bootloader_type; 478extern int bootloader_type;
482 479
480#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
481
483#endif /* __ASM_X86_64_PROCESSOR_H */ 482#endif /* __ASM_X86_64_PROCESSOR_H */
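task_pt_regs works because the saved user-mode pt_regs sits at the very top of the kernel stack, immediately below thread.rsp0, so subtracting one struct yields its address; an illustrative reader (assumes the task is stopped):

	static unsigned long example_user_rip(struct task_struct *tsk)
	{
		struct pt_regs *regs = task_pt_regs(tsk);
		return regs->rip;	/* exactly what KSTK_EIP(tsk) expands to */
	}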
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 34501086afef..115e496c6139 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -67,8 +67,6 @@ extern void load_gs_index(unsigned gs);
67 67
68extern unsigned long end_pfn_map; 68extern unsigned long end_pfn_map;
69 69
70extern cpumask_t cpu_initialized;
71
72extern void show_trace(unsigned long * rsp); 70extern void show_trace(unsigned long * rsp);
73extern void show_registers(struct pt_regs *regs); 71extern void show_registers(struct pt_regs *regs);
74 72
@@ -91,8 +89,12 @@ extern void check_efer(void);
91 89
92extern int unhandled_signal(struct task_struct *tsk, int sig); 90extern int unhandled_signal(struct task_struct *tsk, int sig);
93 91
92extern int unsynchronized_tsc(void);
93
94extern void select_idle_routine(const struct cpuinfo_x86 *c); 94extern void select_idle_routine(const struct cpuinfo_x86 *c);
95extern void swiotlb_init(void); 95
96extern void gart_parse_options(char *);
97extern void __init no_iommu_init(void);
96 98
97extern unsigned long table_start, table_end; 99extern unsigned long table_start, table_end;
98 100
@@ -106,12 +108,17 @@ extern int skip_ioapic_setup;
106extern int acpi_ht; 108extern int acpi_ht;
107extern int acpi_disabled; 109extern int acpi_disabled;
108 110
111#ifdef CONFIG_GART_IOMMU
109extern int fallback_aper_order; 112extern int fallback_aper_order;
110extern int fallback_aper_force; 113extern int fallback_aper_force;
111extern int iommu_aperture; 114extern int iommu_aperture;
112extern int iommu_aperture_disabled;
113extern int iommu_aperture_allowed; 115extern int iommu_aperture_allowed;
116extern int iommu_aperture_disabled;
114extern int fix_aperture; 117extern int fix_aperture;
118#else
119#define iommu_aperture 0
120#define iommu_aperture_allowed 0
121#endif
115extern int force_iommu; 122extern int force_iommu;
116 123
117extern int reboot_force; 124extern int reboot_force;
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
index 8a78a4ace53c..9942cc393064 100644
--- a/include/asm-x86_64/rwlock.h
+++ b/include/asm-x86_64/rwlock.h
@@ -64,7 +64,7 @@
64 ::"a" (rw) : "memory") 64 ::"a" (rw) : "memory")
65 65
66#define __build_write_lock_const(rw, helper) \ 66#define __build_write_lock_const(rw, helper) \
67 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ 67 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
68 "jnz 2f\n" \ 68 "jnz 2f\n" \
69 "1:\n" \ 69 "1:\n" \
70 LOCK_SECTION_START("") \ 70 LOCK_SECTION_START("") \
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
index 44adaf18c11e..d4bed33fb32c 100644
--- a/include/asm-x86_64/segment.h
+++ b/include/asm-x86_64/segment.h
@@ -19,15 +19,13 @@
19#define __USER_DS 0x2b /* 5*8+3 */ 19#define __USER_DS 0x2b /* 5*8+3 */
20#define __USER_CS 0x33 /* 6*8+3 */ 20#define __USER_CS 0x33 /* 6*8+3 */
21#define __USER32_DS __USER_DS 21#define __USER32_DS __USER_DS
22#define __KERNEL16_CS (GDT_ENTRY_KERNELCS16 * 8)
23#define __KERNEL_COMPAT32_CS 0x8
24 22
25#define GDT_ENTRY_TLS 1 23#define GDT_ENTRY_TLS 1
26#define GDT_ENTRY_TSS 8 /* needs two entries */ 24#define GDT_ENTRY_TSS 8 /* needs two entries */
27#define GDT_ENTRY_LDT 10 /* needs two entries */ 25#define GDT_ENTRY_LDT 10 /* needs two entries */
28#define GDT_ENTRY_TLS_MIN 12 26#define GDT_ENTRY_TLS_MIN 12
29#define GDT_ENTRY_TLS_MAX 14 27#define GDT_ENTRY_TLS_MAX 14
30#define GDT_ENTRY_KERNELCS16 15 28/* 15 free */
31 29
32#define GDT_ENTRY_TLS_ENTRIES 3 30#define GDT_ENTRY_TLS_ENTRIES 3
33 31
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d030409a8fb5..9ccbb2cfd5c0 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -35,6 +35,7 @@ extern cpumask_t cpu_present_mask;
35extern cpumask_t cpu_possible_map; 35extern cpumask_t cpu_possible_map;
36extern cpumask_t cpu_online_map; 36extern cpumask_t cpu_online_map;
37extern cpumask_t cpu_callout_map; 37extern cpumask_t cpu_callout_map;
38extern cpumask_t cpu_initialized;
38 39
39/* 40/*
40 * Private routines/data 41 * Private routines/data
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index dddf1b218681..60757efd1353 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -3,10 +3,14 @@
3 3
4#include <linux/config.h> 4#include <linux/config.h>
5 5
6#include <asm/dma-mapping.h>
7
6/* SWIOTLB interface */ 8/* SWIOTLB interface */
7 9
8extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, 10extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
9 int dir); 11 size_t size, int dir);
12extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
13 dma_addr_t *dma_handle, gfp_t flags);
10extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, 14extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
11 size_t size, int dir); 15 size_t size, int dir);
12extern void swiotlb_sync_single_for_cpu(struct device *hwdev, 16extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
@@ -34,10 +38,10 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
34extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, 38extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
35 int nents, int direction); 39 int nents, int direction);
36extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); 40extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
37extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size,
38 dma_addr_t *dma_handle, gfp_t flags);
39extern void swiotlb_free_coherent (struct device *hwdev, size_t size, 41extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
40 void *vaddr, dma_addr_t dma_handle); 42 void *vaddr, dma_addr_t dma_handle);
43extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
44extern void swiotlb_init(void);
41 45
42#ifdef CONFIG_SWIOTLB 46#ifdef CONFIG_SWIOTLB
43extern int swiotlb; 47extern int swiotlb;
@@ -45,4 +49,6 @@ extern int swiotlb;
45#define swiotlb 0 49#define swiotlb 0
46#endif 50#endif
47 51
48#endif 52extern void pci_swiotlb_init(void);
53
 54#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 85348e02ad2e..a73f0c789d8b 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -20,8 +20,8 @@
20#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" 20#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
21 21
22/* frame pointer must be last for get_wchan */ 22/* frame pointer must be last for get_wchan */
23#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" 23#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
24#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t" 24#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
25 25
26#define __EXTRA_CLOBBER \ 26#define __EXTRA_CLOBBER \
27 ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" 27 ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
@@ -137,6 +137,21 @@ struct alt_instr {
137 "663:\n\t" newinstr "\n664:\n" /* replacement */ \ 137 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
138 ".previous" :: "i" (feature), ##input) 138 ".previous" :: "i" (feature), ##input)
139 139
140/* Like alternative_input, but with a single output argument */
141#define alternative_io(oldinstr, newinstr, feature, output, input...) \
142 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
143 ".section .altinstructions,\"a\"\n" \
144 " .align 8\n" \
145 " .quad 661b\n" /* label */ \
146 " .quad 663f\n" /* new instruction */ \
147 " .byte %c[feat]\n" /* feature bit */ \
148 " .byte 662b-661b\n" /* sourcelen */ \
149 " .byte 664f-663f\n" /* replacementlen */ \
150 ".previous\n" \
151 ".section .altinstr_replacement,\"ax\"\n" \
152 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
153 ".previous" : output : [feat] "i" (feature), ##input)
154
140/* 155/*
141 * Clear and set 'TS' bit respectively 156 * Clear and set 'TS' bit respectively
142 */ 157 */
@@ -178,6 +193,15 @@ static inline void write_cr4(unsigned long val)
178#define wbinvd() \ 193#define wbinvd() \
179 __asm__ __volatile__ ("wbinvd": : :"memory"); 194 __asm__ __volatile__ ("wbinvd": : :"memory");
180 195
196/*
197 * On SMP systems, when the scheduler does migration-cost autodetection,
198 * it needs a way to flush as much of the CPU's caches as possible.
199 */
200static inline void sched_cacheflush(void)
201{
202 wbinvd();
203}
204
181#endif /* __KERNEL__ */ 205#endif /* __KERNEL__ */
182 206
183#define nop() __asm__ __volatile__ ("nop") 207#define nop() __asm__ __volatile__ ("nop")
@@ -311,10 +335,24 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
311/* interrupt control.. */ 335/* interrupt control.. */
312#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0) 336#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
313#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc") 337#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
338
339#ifdef CONFIG_X86_VSMP
340/* Interrupt control for VSMP architecture */
341#define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
342#define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
343
344#define irqs_disabled() \
345({ \
346 unsigned long flags; \
347 local_save_flags(flags); \
348 (flags & (1<<18)) || !(flags & (1<<9)); \
349})
350
351/* For spinlocks etc */
352#define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
353#else /* CONFIG_X86_VSMP */
314#define local_irq_disable() __asm__ __volatile__("cli": : :"memory") 354#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
315#define local_irq_enable() __asm__ __volatile__("sti": : :"memory") 355#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
316/* used in the idle loop; sti takes one instruction cycle to complete */
317#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
318 356
319#define irqs_disabled() \ 357#define irqs_disabled() \
320({ \ 358({ \
@@ -325,15 +363,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
325 363
326/* For spinlocks etc */ 364/* For spinlocks etc */
327#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0) 365#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
366#endif
328 367
329void cpu_idle_wait(void); 368/* used in the idle loop; sti takes one instruction cycle to complete */
369#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
370/* used when interrupts are already enabled or to shutdown the processor */
371#define halt() __asm__ __volatile__("hlt": : :"memory")
330 372
331/* 373void cpu_idle_wait(void);
332 * disable hlt during certain critical i/o operations
333 */
334#define HAVE_DISABLE_HLT
335void disable_hlt(void);
336void enable_hlt(void);
337 374
338extern unsigned long arch_align_stack(unsigned long sp); 375extern unsigned long arch_align_stack(unsigned long sp);
339 376
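On VSMP the variants above encode "interrupts disabled" as EFLAGS.AC (bit 18) set or EFLAGS.IF (bit 9) clear; the predicate, spelled out as a plain helper purely for illustration:

	static inline int example_vsmp_irqs_disabled(unsigned long flags)
	{
		/* bit 9 = IF (hardware interrupt enable); bit 18 = AC,
		   reused here as a software disable marker */
		return (flags & (1UL << 18)) || !(flags & (1UL << 9));
	}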
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 08eb6e4f3737..4ac0e0a36934 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -76,8 +76,6 @@ static inline struct thread_info *stack_thread_info(void)
76#define alloc_thread_info(tsk) \ 76#define alloc_thread_info(tsk) \
77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) 77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) 78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
79#define get_thread_info(ti) get_task_struct((ti)->task)
80#define put_thread_info(ti) put_task_struct((ti)->task)
81 79
82#else /* !__ASSEMBLY__ */ 80#else /* !__ASSEMBLY__ */
83 81
@@ -138,6 +136,7 @@ static inline struct thread_info *stack_thread_info(void)
138 * have to worry about atomic accesses. 136 * have to worry about atomic accesses.
139 */ 137 */
140#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 138#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
139#define TS_COMPAT 0x0002 /* 32bit syscall active */
141 140
142#endif /* __KERNEL__ */ 141#endif /* __KERNEL__ */
143 142
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index f971f45d6d78..f18443fcdf04 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -10,6 +10,9 @@
10#include <asm/msr.h> 10#include <asm/msr.h>
11#include <asm/vsyscall.h> 11#include <asm/vsyscall.h>
12#include <asm/hpet.h> 12#include <asm/hpet.h>
13#include <asm/system.h>
14#include <asm/processor.h>
15#include <linux/compiler.h>
13 16
14#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */ 17#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
15 18
@@ -23,6 +26,19 @@ static inline cycles_t get_cycles (void)
23 return ret; 26 return ret;
24} 27}
25 28
29/* Like get_cycles, but make sure the CPU is synchronized. */
30static __always_inline cycles_t get_cycles_sync(void)
31{
32 unsigned long long ret;
33 unsigned eax;
34 /* Don't do an additional sync on CPUs where we know
35 RDTSC is already synchronous. */
36 alternative_io(ASM_NOP2, "cpuid", X86_FEATURE_SYNC_RDTSC,
37 "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
38 rdtscll(ret);
39 return ret;
40}
41
26extern unsigned int cpu_khz; 42extern unsigned int cpu_khz;
27 43
28extern int read_current_timer(unsigned long *timer_value); 44extern int read_current_timer(unsigned long *timer_value);
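get_cycles_sync is the right primitive when a TSC read must not float across the surrounding work; a minimal timing sketch:

	static unsigned long long example_cycles(void (*fn)(void))
	{
		unsigned long long t0 = get_cycles_sync();
		fn();
		return get_cycles_sync() - t0;	/* elapsed TSC cycles */
	}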
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index d39ebd5263ed..2fa7f27381b4 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -23,7 +23,7 @@ extern int __node_distance(int, int);
23 23
24#define cpu_to_node(cpu) (cpu_to_node[cpu]) 24#define cpu_to_node(cpu) (cpu_to_node[cpu])
25#define parent_node(node) (node) 25#define parent_node(node) (node)
26#define node_to_first_cpu(node) (__ffs(node_to_cpumask[node])) 26#define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node]))
27#define node_to_cpumask(node) (node_to_cpumask[node]) 27#define node_to_cpumask(node) (node_to_cpumask[node])
28#define pcibus_to_node(bus) ((long)(bus->sysdata)) 28#define pcibus_to_node(bus) ((long)(bus->sysdata))
29#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); 29#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus));
@@ -39,7 +39,6 @@ extern int __node_distance(int, int);
39 .max_interval = 32, \ 39 .max_interval = 32, \
40 .busy_factor = 32, \ 40 .busy_factor = 32, \
41 .imbalance_pct = 125, \ 41 .imbalance_pct = 125, \
42 .cache_hot_time = (10*1000000), \
43 .cache_nice_tries = 2, \ 42 .cache_nice_tries = 2, \
44 .busy_idx = 3, \ 43 .busy_idx = 3, \
45 .idle_idx = 2, \ 44 .idle_idx = 2, \
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 1bb8b8a24436..bddffcb591b8 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -244,7 +244,7 @@ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned le
244extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len); 244extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
245extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len); 245extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
246 246
247static inline int __copy_from_user(void *dst, const void __user *src, unsigned size) 247static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
248{ 248{
249 int ret = 0; 249 int ret = 0;
250 if (!__builtin_constant_p(size)) 250 if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@ static inline int __copy_from_user(void *dst, const void __user *src, unsigned s
273 } 273 }
274} 274}
275 275
276static inline int __copy_to_user(void __user *dst, const void *src, unsigned size) 276static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
277{ 277{
278 int ret = 0; 278 int ret = 0;
279 if (!__builtin_constant_p(size)) 279 if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@ static inline int __copy_to_user(void __user *dst, const void *src, unsigned siz
305} 305}
306 306
307 307
308static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size) 308static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
309{ 309{
310 int ret = 0; 310 int ret = 0;
311 if (!__builtin_constant_p(size)) 311 if (!__builtin_constant_p(size))
@@ -348,6 +348,7 @@ static inline int __copy_in_user(void __user *dst, const void __user *src, unsig
348long strncpy_from_user(char *dst, const char __user *src, long count); 348long strncpy_from_user(char *dst, const char __user *src, long count);
349long __strncpy_from_user(char *dst, const char __user *src, long count); 349long __strncpy_from_user(char *dst, const char __user *src, long count);
350long strnlen_user(const char __user *str, long n); 350long strnlen_user(const char __user *str, long n);
351long __strnlen_user(const char __user *str, long n);
351long strlen_user(const char __user *str); 352long strlen_user(const char __user *str);
352unsigned long clear_user(void __user *mem, unsigned long len); 353unsigned long clear_user(void __user *mem, unsigned long len);
353unsigned long __clear_user(void __user *mem, unsigned long len); 354unsigned long __clear_user(void __user *mem, unsigned long len);
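__always_inline matters here because the __builtin_constant_p(size) specialization only fires once the copy is inlined into its caller; a sketch of the intended effect, where a constant-size fetch avoids the out-of-line call:

	static int example_fetch(struct timeval *tv,
				 struct timeval __user *uptr)
	{
		/* sizeof(*tv) is a compile-time constant, so the inlined
		   switch can pick the fixed-size fast case */
		return __copy_from_user(tv, uptr, sizeof(*tv)) ? -EFAULT : 0;
	}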
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 2c42150bce0c..436d099b5b6b 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -571,8 +571,37 @@ __SYSCALL(__NR_inotify_init, sys_inotify_init)
571__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) 571__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
572#define __NR_inotify_rm_watch 255 572#define __NR_inotify_rm_watch 255
573__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) 573__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
574#define __NR_migrate_pages 256
575__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
576#define __NR_openat 257
577__SYSCALL(__NR_openat, sys_openat)
578#define __NR_mkdirat 258
579__SYSCALL(__NR_mkdirat, sys_mkdirat)
580#define __NR_mknodat 259
581__SYSCALL(__NR_mknodat, sys_mknodat)
582#define __NR_fchownat 260
583__SYSCALL(__NR_fchownat, sys_fchownat)
584#define __NR_futimesat 261
585__SYSCALL(__NR_futimesat, sys_futimesat)
586#define __NR_newfstatat 262
587__SYSCALL(__NR_newfstatat, sys_newfstatat)
588#define __NR_unlinkat 263
589__SYSCALL(__NR_unlinkat, sys_unlinkat)
590#define __NR_renameat 264
591__SYSCALL(__NR_renameat, sys_renameat)
592#define __NR_linkat 265
593__SYSCALL(__NR_linkat, sys_linkat)
594#define __NR_symlinkat 266
595__SYSCALL(__NR_symlinkat, sys_symlinkat)
596#define __NR_readlinkat 267
597__SYSCALL(__NR_readlinkat, sys_readlinkat)
598#define __NR_fchmodat 268
599__SYSCALL(__NR_fchmodat, sys_fchmodat)
600#define __NR_faccessat 269
601__SYSCALL(__NR_faccessat, sys_faccessat)
602
603#define __NR_syscall_max __NR_faccessat
574 604
575#define __NR_syscall_max __NR_inotify_rm_watch
576#ifndef __NO_STUBS 605#ifndef __NO_STUBS
577 606
578/* user-visible error numbers are in the range -1 - -4095 */ 607/* user-visible error numbers are in the range -1 - -4095 */
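Until libc grows wrappers, the new *at entries are reachable via syscall(2) from user space; a hypothetical probe (AT_FDCWD and header availability are assumptions for this vintage of userland):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <fcntl.h>

	int example_openat(const char *path)
	{
		/* __NR_openat == 257 per the table above */
		return syscall(__NR_openat, AT_FDCWD, path, O_RDONLY);
	}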
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 438a3f52f839..a85e16f56d73 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -36,8 +36,8 @@ struct vxtime_data {
36 int mode; 36 int mode;
37}; 37};
38 38
39#define hpet_readl(a) readl((void *)fix_to_virt(FIX_HPET_BASE) + a) 39#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
40#define hpet_writel(d,a) writel(d, (void *)fix_to_virt(FIX_HPET_BASE) + a) 40#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
41 41
42/* vsyscall space (readonly) */ 42/* vsyscall space (readonly) */
43extern struct vxtime_data __vxtime; 43extern struct vxtime_data __vxtime;