Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/Kbuild             |    3
-rw-r--r--  include/asm-x86_64/agp.h              |    6
-rw-r--r--  include/asm-x86_64/alternative.h      |    5
-rw-r--r--  include/asm-x86_64/apic.h             |   10
-rw-r--r--  include/asm-x86_64/atomic.h           |   65
-rw-r--r--  include/asm-x86_64/bugs.h             |   30
-rw-r--r--  include/asm-x86_64/cmpxchg.h          |  134
-rw-r--r--  include/asm-x86_64/desc.h             |   21
-rw-r--r--  include/asm-x86_64/dma-mapping.h      |    2
-rw-r--r--  include/asm-x86_64/fixmap.h           |    1
-rw-r--r--  include/asm-x86_64/genapic.h          |    4
-rw-r--r--  include/asm-x86_64/ipi.h              |   61
-rw-r--r--  include/asm-x86_64/irqflags.h         |    9
-rw-r--r--  include/asm-x86_64/kdebug.h           |   25
-rw-r--r--  include/asm-x86_64/kexec.h            |    2
-rw-r--r--  include/asm-x86_64/local.h            |  196
-rw-r--r--  include/asm-x86_64/mmu_context.h      |    1
-rw-r--r--  include/asm-x86_64/mmzone.h           |    2
-rw-r--r--  include/asm-x86_64/msr-index.h        |    1
-rw-r--r--  include/asm-x86_64/msr.h              |  285
-rw-r--r--  include/asm-x86_64/mtrr.h             |   12
-rw-r--r--  include/asm-x86_64/nmi.h              |    9
-rw-r--r--  include/asm-x86_64/page.h             |   50
-rw-r--r--  include/asm-x86_64/percpu.h           |   10
-rw-r--r--  include/asm-x86_64/pgalloc.h          |   15
-rw-r--r--  include/asm-x86_64/pgtable.h          |   43
-rw-r--r--  include/asm-x86_64/processor-flags.h  |    1
-rw-r--r--  include/asm-x86_64/processor.h        |   55
-rw-r--r--  include/asm-x86_64/proto.h            |   15
-rw-r--r--  include/asm-x86_64/scatterlist.h      |    2
-rw-r--r--  include/asm-x86_64/segment.h          |    2
-rw-r--r--  include/asm-x86_64/serial.h           |   16
-rw-r--r--  include/asm-x86_64/smp.h              |   18
-rw-r--r--  include/asm-x86_64/suspend.h          |   13
-rw-r--r--  include/asm-x86_64/system.h           |  105
-rw-r--r--  include/asm-x86_64/termbits.h         |    2
-rw-r--r--  include/asm-x86_64/thread_info.h      |    2
-rw-r--r--  include/asm-x86_64/timex.h            |    2
-rw-r--r--  include/asm-x86_64/tlbflush.h         |   33
-rw-r--r--  include/asm-x86_64/unistd.h           |    5
40 files changed, 543 insertions(+), 730 deletions(-)
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index ebd7117782a6..75a2deffca68 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -8,7 +8,7 @@ header-y += boot.h
 header-y += bootsetup.h
 header-y += debugreg.h
 header-y += ldt.h
-header-y += msr.h
+header-y += msr-index.h
 header-y += prctl.h
 header-y += ptrace-abi.h
 header-y += sigcontext32.h
@@ -16,5 +16,6 @@ header-y += ucontext.h
 header-y += vsyscall32.h
 
 unifdef-y += mce.h
+unifdef-y += msr.h
 unifdef-y += mtrr.h
 unifdef-y += vsyscall.h
diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h
index 06c52ee9c06b..de338666f3f9 100644
--- a/include/asm-x86_64/agp.h
+++ b/include/asm-x86_64/agp.h
@@ -10,8 +10,10 @@
  * with different cachability attributes for the same page.
  */
 
-int map_page_into_agp(struct page *page);
-int unmap_page_from_agp(struct page *page);
+/* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
+#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
+#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
 #define flush_agp_mappings() global_flush_tlb()
 
 /* Could use CLFLUSH here if the cpu supports it. But then it would
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index a6657b4f3e0e..a09fe85c268e 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -16,6 +16,7 @@ struct alt_instr {
 	u8  pad[5];
 };
 
+extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 
 struct module;
@@ -141,8 +142,8 @@ void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
 static inline void
 apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
 {}
-#define __start_parainstructions NULL
-#define __stop_parainstructions NULL
+#define __parainstructions NULL
+#define __parainstructions_end NULL
 #endif
 
 #endif /* _X86_64_ALTERNATIVE_H */
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 7cfb39cbd918..45e9fca1febc 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -2,6 +2,7 @@
 #define __ASM_APIC_H
 
 #include <linux/pm.h>
+#include <linux/delay.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/system.h>
@@ -47,11 +48,8 @@ static __inline unsigned int apic_read(unsigned long reg)
 	return *((volatile unsigned int *)(APIC_BASE+reg));
 }
 
-static __inline__ void apic_wait_icr_idle(void)
-{
-	while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
-		cpu_relax();
-}
+extern void apic_wait_icr_idle(void);
+extern unsigned int safe_apic_wait_icr_idle(void);
 
 static inline void ack_APIC_irq(void)
 {
@@ -83,7 +81,7 @@ extern void setup_secondary_APIC_clock (void);
 extern int APIC_init_uniprocessor (void);
 extern void disable_APIC_timer(void);
 extern void enable_APIC_timer(void);
-extern void clustered_apic_check(void);
+extern void setup_apic_routing(void);
 
 extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
 				   unsigned char msg_type, unsigned char mask);
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 706ca4b60000..f2e64634fa48 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <asm/alternative.h>
+#include <asm/cmpxchg.h>
 
 /* atomic_t should be 32 bit signed type */
 
@@ -375,8 +376,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
 	long __i = i;
 	__asm__ __volatile__(
 		LOCK_PREFIX "xaddq %0, %1;"
-		:"=r"(i)
-		:"m"(v->counter), "0"(i));
+		:"+r" (i), "+m" (v->counter)
+		: : "memory");
 	return i + __i;
 }
 
@@ -388,7 +389,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 #define atomic64_inc_return(v)  (atomic64_add_return(1,v))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
 
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
@@ -400,22 +404,49 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-	int c, old; \
-	c = atomic_read(v); \
-	for (;;) { \
-		if (unlikely(c == (u))) \
-			break; \
-		old = atomic_cmpxchg((v), c, c + (a)); \
-		if (likely(old == c)) \
-			break; \
-		c = old; \
-	} \
-	c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long c, old;
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic64_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
 __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
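
[Usage note] atomic_add_unless() above is normally reached through the atomic_inc_not_zero() wrapper; a minimal sketch of the common pattern (the my_obj type and function are hypothetical, purely for illustration):

	struct my_obj {
		atomic_t refcount;
	};

	/* Take a reference only while the object is still live (refcount != 0);
	 * returns non-zero iff the count was incremented. */
	static int my_obj_tryget(struct my_obj *obj)
	{
		return atomic_inc_not_zero(&obj->refcount);
	}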
diff --git a/include/asm-x86_64/bugs.h b/include/asm-x86_64/bugs.h
index d86c5dd689fa..b33dc04d8f42 100644
--- a/include/asm-x86_64/bugs.h
+++ b/include/asm-x86_64/bugs.h
@@ -1,28 +1,6 @@
-/*
- * include/asm-x86_64/bugs.h
- *
- * Copyright (C) 1994  Linus Torvalds
- * Copyright (C) 2000  SuSE
- *
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- *	void check_bugs(void);
- */
+#ifndef _ASM_X86_64_BUGS_H
+#define _ASM_X86_64_BUGS_H
 
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/pda.h>
+void check_bugs(void);
 
-extern void alternative_instructions(void);
-
-static void __init check_bugs(void)
-{
-	identify_cpu(&boot_cpu_data);
-#if !defined(CONFIG_SMP)
-	printk("CPU: ");
-	print_cpu_info(&boot_cpu_data);
-#endif
-	alternative_instructions();
-}
+#endif /* _ASM_X86_64_BUGS_H */
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
new file mode 100644
index 000000000000..09a6b6b6b74d
--- /dev/null
+++ b/include/asm-x86_64/cmpxchg.h
@@ -0,0 +1,134 @@
1#ifndef __ASM_CMPXCHG_H
2#define __ASM_CMPXCHG_H
3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5
6#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
7
8#define __xg(x) ((volatile long *)(x))
9
10static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
11{
12 *ptr = val;
13}
14
15#define _set_64bit set_64bit
16
17/*
18 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
19 * Note 2: xchg has side effect, so that attribute volatile is necessary,
20 * but generally the primitive is invalid, *ptr is output argument. --ANK
21 */
22static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
23{
24 switch (size) {
25 case 1:
26 __asm__ __volatile__("xchgb %b0,%1"
27 :"=q" (x)
28 :"m" (*__xg(ptr)), "0" (x)
29 :"memory");
30 break;
31 case 2:
32 __asm__ __volatile__("xchgw %w0,%1"
33 :"=r" (x)
34 :"m" (*__xg(ptr)), "0" (x)
35 :"memory");
36 break;
37 case 4:
38 __asm__ __volatile__("xchgl %k0,%1"
39 :"=r" (x)
40 :"m" (*__xg(ptr)), "0" (x)
41 :"memory");
42 break;
43 case 8:
44 __asm__ __volatile__("xchgq %0,%1"
45 :"=r" (x)
46 :"m" (*__xg(ptr)), "0" (x)
47 :"memory");
48 break;
49 }
50 return x;
51}
52
53/*
54 * Atomic compare and exchange. Compare OLD with MEM, if identical,
55 * store NEW in MEM. Return the initial value in MEM. Success is
56 * indicated by comparing RETURN with OLD.
57 */
58
59#define __HAVE_ARCH_CMPXCHG 1
60
61static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
62 unsigned long new, int size)
63{
64 unsigned long prev;
65 switch (size) {
66 case 1:
67 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
68 : "=a"(prev)
69 : "q"(new), "m"(*__xg(ptr)), "0"(old)
70 : "memory");
71 return prev;
72 case 2:
73 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
74 : "=a"(prev)
75 : "r"(new), "m"(*__xg(ptr)), "0"(old)
76 : "memory");
77 return prev;
78 case 4:
79 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
80 : "=a"(prev)
81 : "r"(new), "m"(*__xg(ptr)), "0"(old)
82 : "memory");
83 return prev;
84 case 8:
85 __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
86 : "=a"(prev)
87 : "r"(new), "m"(*__xg(ptr)), "0"(old)
88 : "memory");
89 return prev;
90 }
91 return old;
92}
93
94static inline unsigned long __cmpxchg_local(volatile void *ptr,
95 unsigned long old, unsigned long new, int size)
96{
97 unsigned long prev;
98 switch (size) {
99 case 1:
100 __asm__ __volatile__("cmpxchgb %b1,%2"
101 : "=a"(prev)
102 : "q"(new), "m"(*__xg(ptr)), "0"(old)
103 : "memory");
104 return prev;
105 case 2:
106 __asm__ __volatile__("cmpxchgw %w1,%2"
107 : "=a"(prev)
108 : "r"(new), "m"(*__xg(ptr)), "0"(old)
109 : "memory");
110 return prev;
111 case 4:
112 __asm__ __volatile__("cmpxchgl %k1,%2"
113 : "=a"(prev)
114 : "r"(new), "m"(*__xg(ptr)), "0"(old)
115 : "memory");
116 return prev;
117 case 8:
118 __asm__ __volatile__("cmpxchgq %1,%2"
119 : "=a"(prev)
120 : "r"(new), "m"(*__xg(ptr)), "0"(old)
121 : "memory");
122 return prev;
123 }
124 return old;
125}
126
127#define cmpxchg(ptr,o,n)\
128 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
129 (unsigned long)(n),sizeof(*(ptr))))
130#define cmpxchg_local(ptr,o,n)\
131 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
132 (unsigned long)(n),sizeof(*(ptr))))
133
134#endif
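
[Usage note] A rough sketch of how the cmpxchg() macro defined above is typically used — the standard compare-and-swap retry loop (variable and function names are illustrative only):

	/* Lock-free increment of a shared counter using cmpxchg(). */
	static void counter_inc(unsigned long *counter)
	{
		unsigned long old;

		do {
			old = *counter;	/* snapshot the current value */
			/* cmpxchg() returns the previous value; retry if it changed */
		} while (cmpxchg(counter, old, old + 1) != old);
	}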
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 913d6ac00033..ac991b5ca0fd 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -107,16 +107,6 @@ static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
 			      DESC_LDT, size * 8 - 1);
 }
 
-static inline void set_seg_base(unsigned cpu, int entry, void *base)
-{
-	struct desc_struct *d = &cpu_gdt(cpu)[entry];
-	u32 addr = (u32)(u64)base;
-	BUG_ON((u64)base >> 32);
-	d->base0 = addr & 0xffff;
-	d->base1 = (addr >> 16) & 0xff;
-	d->base2 = (addr >> 24) & 0xff;
-}
-
 #define LDT_entry_a(info) \
 	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 /* Don't allow setting of the lm bit. It is useless anyways because
@@ -145,16 +135,13 @@ static inline void set_seg_base(unsigned cpu, int entry, void *base)
 	      (info)->useable == 0 && \
 	      (info)->lm == 0)
 
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
+	unsigned int i;
 	u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
-	gdt[0] = t->tls_array[0];
-	gdt[1] = t->tls_array[1];
-	gdt[2] = t->tls_array[2];
+
+	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+		gdt[i] = t->tls_array[i];
 }
 
 /*
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index d2af227f06d0..6897e2a436e5 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -52,7 +52,7 @@ struct dma_mapping_ops {
 };
 
 extern dma_addr_t bad_dma_address;
-extern struct dma_mapping_ops* dma_ops;
+extern const struct dma_mapping_ops* dma_ops;
 extern int iommu_merge;
 
 static inline int dma_mapping_error(dma_addr_t dma_addr)
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index 1b620db5b9e3..e90e1677531b 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -15,7 +15,6 @@
 #include <asm/apicdef.h>
 #include <asm/page.h>
 #include <asm/vsyscall.h>
-#include <asm/vsyscall32.h>
 
 /*
  * Here we define all the compile-time 'special' virtual
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
index b80f4bb5f273..d7e516ccbaa4 100644
--- a/include/asm-x86_64/genapic.h
+++ b/include/asm-x86_64/genapic.h
@@ -29,7 +29,9 @@ struct genapic {
 	unsigned int (*phys_pkg_id)(int index_msb);
 };
 
+extern struct genapic *genapic;
 
-extern struct genapic *genapic, *genapic_force, apic_flat;
+extern struct genapic apic_flat;
+extern struct genapic apic_physflat;
 
 #endif
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index 2a5c162b7d92..a7c75ea408a8 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -18,10 +18,8 @@
18 * Subject to the GNU Public License, v.2 18 * Subject to the GNU Public License, v.2
19 */ 19 */
20 20
21#include <asm/fixmap.h>
22#include <asm/hw_irq.h> 21#include <asm/hw_irq.h>
23#include <asm/apicdef.h> 22#include <asm/apic.h>
24#include <asm/genapic.h>
25 23
26/* 24/*
27 * the following functions deal with sending IPIs between CPUs. 25 * the following functions deal with sending IPIs between CPUs.
@@ -76,10 +74,42 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign
76 apic_write(APIC_ICR, cfg); 74 apic_write(APIC_ICR, cfg);
77} 75}
78 76
77/*
78 * This is used to send an IPI with no shorthand notation (the destination is
79 * specified in bits 56 to 63 of the ICR).
80 */
81static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
82{
83 unsigned long cfg;
84
85 /*
86 * Wait for idle.
87 */
88 if (unlikely(vector == NMI_VECTOR))
89 safe_apic_wait_icr_idle();
90 else
91 apic_wait_icr_idle();
92
93 /*
94 * prepare target chip field
95 */
96 cfg = __prepare_ICR2(mask);
97 apic_write(APIC_ICR2, cfg);
98
99 /*
100 * program the ICR
101 */
102 cfg = __prepare_ICR(0, vector, dest);
103
104 /*
105 * Send the IPI. The write to APIC_ICR fires this off.
106 */
107 apic_write(APIC_ICR, cfg);
108}
79 109
80static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) 110static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
81{ 111{
82 unsigned long cfg, flags; 112 unsigned long flags;
83 unsigned long query_cpu; 113 unsigned long query_cpu;
84 114
85 /* 115 /*
@@ -88,28 +118,9 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
88 * - mbligh 118 * - mbligh
89 */ 119 */
90 local_irq_save(flags); 120 local_irq_save(flags);
91
92 for_each_cpu_mask(query_cpu, mask) { 121 for_each_cpu_mask(query_cpu, mask) {
93 /* 122 __send_IPI_dest_field(x86_cpu_to_apicid[query_cpu],
94 * Wait for idle. 123 vector, APIC_DEST_PHYSICAL);
95 */
96 apic_wait_icr_idle();
97
98 /*
99 * prepare target chip field
100 */
101 cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
102 apic_write(APIC_ICR2, cfg);
103
104 /*
105 * program the ICR
106 */
107 cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL);
108
109 /*
110 * Send the IPI. The write to APIC_ICR fires this off.
111 */
112 apic_write(APIC_ICR, cfg);
113 } 124 }
114 local_irq_restore(flags); 125 local_irq_restore(flags);
115} 126}
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
index cce6937e87c0..86e70fe23659 100644
--- a/include/asm-x86_64/irqflags.h
+++ b/include/asm-x86_64/irqflags.h
@@ -9,6 +9,7 @@
  */
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
+#include <asm/processor-flags.h>
 
 #ifndef __ASSEMBLY__
 /*
@@ -53,19 +54,19 @@ static inline void raw_local_irq_disable(void)
 {
 	unsigned long flags = __raw_local_save_flags();
 
-	raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
+	raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
 }
 
 static inline void raw_local_irq_enable(void)
 {
 	unsigned long flags = __raw_local_save_flags();
 
-	raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
+	raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & (1<<9)) || (flags & (1 << 18));
+	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
 }
 
 #else /* CONFIG_X86_VSMP */
@@ -82,7 +83,7 @@ static inline void raw_local_irq_enable(void)
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & (1 << 9));
+	return !(flags & X86_EFLAGS_IF);
 }
 
 #endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index 2b0c088e2957..74feae945a26 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,19 +5,8 @@
 
 struct pt_regs;
 
-struct die_args {
-	struct pt_regs *regs;
-	const char *str;
-	long err;
-	int trapnr;
-	int signr;
-};
-
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
 extern int register_page_fault_notifier(struct notifier_block *);
 extern int unregister_page_fault_notifier(struct notifier_block *);
-extern struct atomic_notifier_head die_chain;
 
 /* Grossly misnamed. */
 enum die_val {
@@ -33,22 +22,10 @@ enum die_val {
 	DIE_GPF,
 	DIE_CALL,
 	DIE_NMI_IPI,
+	DIE_NMI_POST,
 	DIE_PAGE_FAULT,
 };
 
-static inline int notify_die(enum die_val val, const char *str,
-	struct pt_regs *regs, long err, int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&die_chain, val, &args);
-}
-
 extern void printk_address(unsigned long address);
 extern void die(const char *,struct pt_regs *,long);
 extern void __die(const char *,struct pt_regs *,long);
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index 5fab957e1091..738e581b67f8 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -48,8 +48,6 @@
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_X86_64
 
-#define MAX_NOTE_BYTES 1024
-
 /*
  * Saving the registers of the cpu on which panic occured in
  * crash_kexec to save a valid sp. The registers of other cpus
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index e769e6200225..e87492bb0693 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -2,49 +2,183 @@
2#define _ARCH_X8664_LOCAL_H 2#define _ARCH_X8664_LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <asm/atomic.h>
5 6
6typedef struct 7typedef struct
7{ 8{
8 volatile long counter; 9 atomic_long_t a;
9} local_t; 10} local_t;
10 11
11#define LOCAL_INIT(i) { (i) } 12#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
12 13
13#define local_read(v) ((v)->counter) 14#define local_read(l) atomic_long_read(&(l)->a)
14#define local_set(v,i) (((v)->counter) = (i)) 15#define local_set(l,i) atomic_long_set(&(l)->a, (i))
15 16
16static inline void local_inc(local_t *v) 17static inline void local_inc(local_t *l)
17{ 18{
18 __asm__ __volatile__( 19 __asm__ __volatile__(
19 "incq %0" 20 "incq %0"
20 :"=m" (v->counter) 21 :"=m" (l->a.counter)
21 :"m" (v->counter)); 22 :"m" (l->a.counter));
22} 23}
23 24
24static inline void local_dec(local_t *v) 25static inline void local_dec(local_t *l)
25{ 26{
26 __asm__ __volatile__( 27 __asm__ __volatile__(
27 "decq %0" 28 "decq %0"
28 :"=m" (v->counter) 29 :"=m" (l->a.counter)
29 :"m" (v->counter)); 30 :"m" (l->a.counter));
30} 31}
31 32
32static inline void local_add(long i, local_t *v) 33static inline void local_add(long i, local_t *l)
33{ 34{
34 __asm__ __volatile__( 35 __asm__ __volatile__(
35 "addq %1,%0" 36 "addq %1,%0"
36 :"=m" (v->counter) 37 :"=m" (l->a.counter)
37 :"ir" (i), "m" (v->counter)); 38 :"ir" (i), "m" (l->a.counter));
38} 39}
39 40
40static inline void local_sub(long i, local_t *v) 41static inline void local_sub(long i, local_t *l)
41{ 42{
42 __asm__ __volatile__( 43 __asm__ __volatile__(
43 "subq %1,%0" 44 "subq %1,%0"
44 :"=m" (v->counter) 45 :"=m" (l->a.counter)
45 :"ir" (i), "m" (v->counter)); 46 :"ir" (i), "m" (l->a.counter));
46} 47}
47 48
49/**
50 * local_sub_and_test - subtract value from variable and test result
51 * @i: integer value to subtract
52 * @l: pointer to type local_t
53 *
54 * Atomically subtracts @i from @l and returns
55 * true if the result is zero, or false for all
56 * other cases.
57 */
58static __inline__ int local_sub_and_test(long i, local_t *l)
59{
60 unsigned char c;
61
62 __asm__ __volatile__(
63 "subq %2,%0; sete %1"
64 :"=m" (l->a.counter), "=qm" (c)
65 :"ir" (i), "m" (l->a.counter) : "memory");
66 return c;
67}
68
69/**
70 * local_dec_and_test - decrement and test
71 * @l: pointer to type local_t
72 *
73 * Atomically decrements @l by 1 and
74 * returns true if the result is 0, or false for all other
75 * cases.
76 */
77static __inline__ int local_dec_and_test(local_t *l)
78{
79 unsigned char c;
80
81 __asm__ __volatile__(
82 "decq %0; sete %1"
83 :"=m" (l->a.counter), "=qm" (c)
84 :"m" (l->a.counter) : "memory");
85 return c != 0;
86}
87
88/**
89 * local_inc_and_test - increment and test
90 * @l: pointer to type local_t
91 *
92 * Atomically increments @l by 1
93 * and returns true if the result is zero, or false for all
94 * other cases.
95 */
96static __inline__ int local_inc_and_test(local_t *l)
97{
98 unsigned char c;
99
100 __asm__ __volatile__(
101 "incq %0; sete %1"
102 :"=m" (l->a.counter), "=qm" (c)
103 :"m" (l->a.counter) : "memory");
104 return c != 0;
105}
106
107/**
108 * local_add_negative - add and test if negative
109 * @i: integer value to add
110 * @l: pointer to type local_t
111 *
112 * Atomically adds @i to @l and returns true
113 * if the result is negative, or false when
114 * result is greater than or equal to zero.
115 */
116static __inline__ int local_add_negative(long i, local_t *l)
117{
118 unsigned char c;
119
120 __asm__ __volatile__(
121 "addq %2,%0; sets %1"
122 :"=m" (l->a.counter), "=qm" (c)
123 :"ir" (i), "m" (l->a.counter) : "memory");
124 return c;
125}
126
127/**
128 * local_add_return - add and return
129 * @i: integer value to add
130 * @l: pointer to type local_t
131 *
132 * Atomically adds @i to @l and returns @i + @l
133 */
134static __inline__ long local_add_return(long i, local_t *l)
135{
136 long __i = i;
137 __asm__ __volatile__(
138 "xaddq %0, %1;"
139 :"+r" (i), "+m" (l->a.counter)
140 : : "memory");
141 return i + __i;
142}
143
144static __inline__ long local_sub_return(long i, local_t *l)
145{
146 return local_add_return(-i,l);
147}
148
149#define local_inc_return(l) (local_add_return(1,l))
150#define local_dec_return(l) (local_sub_return(1,l))
151
152#define local_cmpxchg(l, o, n) \
153 (cmpxchg_local(&((l)->a.counter), (o), (n)))
154/* Always has a lock prefix */
155#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
156
157/**
158 * atomic_up_add_unless - add unless the number is a given value
159 * @l: pointer of type local_t
160 * @a: the amount to add to l...
161 * @u: ...unless l is equal to u.
162 *
163 * Atomically adds @a to @l, so long as it was not @u.
164 * Returns non-zero if @l was not @u, and zero otherwise.
165 */
166#define local_add_unless(l, a, u) \
167({ \
168 long c, old; \
169 c = local_read(l); \
170 for (;;) { \
171 if (unlikely(c == (u))) \
172 break; \
173 old = local_cmpxchg((l), c, c + (a)); \
174 if (likely(old == c)) \
175 break; \
176 c = old; \
177 } \
178 c != (u); \
179})
180#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
181
48/* On x86-64 these are better than the atomic variants on SMP kernels 182/* On x86-64 these are better than the atomic variants on SMP kernels
49 because they dont use a lock prefix. */ 183 because they dont use a lock prefix. */
50#define __local_inc(l) local_inc(l) 184#define __local_inc(l) local_inc(l)
@@ -62,27 +196,27 @@ static inline void local_sub(long i, local_t *v)
62 196
63/* Need to disable preemption for the cpu local counters otherwise we could 197/* Need to disable preemption for the cpu local counters otherwise we could
64 still access a variable of a previous CPU in a non atomic way. */ 198 still access a variable of a previous CPU in a non atomic way. */
65#define cpu_local_wrap_v(v) \ 199#define cpu_local_wrap_v(l) \
66 ({ local_t res__; \ 200 ({ local_t res__; \
67 preempt_disable(); \ 201 preempt_disable(); \
68 res__ = (v); \ 202 res__ = (l); \
69 preempt_enable(); \ 203 preempt_enable(); \
70 res__; }) 204 res__; })
71#define cpu_local_wrap(v) \ 205#define cpu_local_wrap(l) \
72 ({ preempt_disable(); \ 206 ({ preempt_disable(); \
73 v; \ 207 l; \
74 preempt_enable(); }) \ 208 preempt_enable(); }) \
75 209
76#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) 210#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
77#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) 211#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
78#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) 212#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
79#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) 213#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
80#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) 214#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
81#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) 215#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
82 216
83#define __cpu_local_inc(v) cpu_local_inc(v) 217#define __cpu_local_inc(l) cpu_local_inc(l)
84#define __cpu_local_dec(v) cpu_local_dec(v) 218#define __cpu_local_dec(l) cpu_local_dec(l)
85#define __cpu_local_add(i, v) cpu_local_add((i), (v)) 219#define __cpu_local_add(i, l) cpu_local_add((i), (l))
86#define __cpu_local_sub(i, v) cpu_local_sub((i), (v)) 220#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
87 221
88#endif /* _ARCH_I386_LOCAL_H */ 222#endif /* _ARCH_X8664_LOCAL_H */
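
[Usage note] A sketch of how the cpu_local_* wrappers above are typically used for a per-CPU event counter (the counter name is made up for illustration; the wrappers themselves bracket the update with preempt_disable/preempt_enable):

	static DEFINE_PER_CPU(local_t, my_event_count) = LOCAL_INIT(0);

	static void note_event(void)
	{
		/* no LOCK prefix needed: only this CPU touches its own counter */
		cpu_local_inc(my_event_count);
	}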
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index af03b9f852d6..0cce83a78378 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -7,6 +7,7 @@
 #include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
 
 /*
  * possibly do the LDT unload here?
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index fb558fb1d211..19a89377b123 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -49,7 +49,7 @@ extern int pfn_valid(unsigned long pfn);
 
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE	(64*1024*1024)
-#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1ul))
+#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1uL))
 #endif
 
 #endif
diff --git a/include/asm-x86_64/msr-index.h b/include/asm-x86_64/msr-index.h
new file mode 100644
index 000000000000..d77a63f1ddf2
--- /dev/null
+++ b/include/asm-x86_64/msr-index.h
@@ -0,0 +1 @@
#include <asm-i386/msr-index.h>
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 902f9a58617e..d5c55b80da54 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -1,7 +1,10 @@
1#ifndef X86_64_MSR_H 1#ifndef X86_64_MSR_H
2#define X86_64_MSR_H 1 2#define X86_64_MSR_H 1
3 3
4#include <asm/msr-index.h>
5
4#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
7#include <linux/errno.h>
5/* 8/*
6 * Access to machine-specific registers (available on 586 and better only) 9 * Access to machine-specific registers (available on 586 and better only)
7 * Note: the rd* operations modify the parameters directly (without using 10 * Note: the rd* operations modify the parameters directly (without using
@@ -157,12 +160,11 @@ static inline unsigned int cpuid_edx(unsigned int op)
157 return edx; 160 return edx;
158} 161}
159 162
160#define MSR_IA32_UCODE_WRITE 0x79
161#define MSR_IA32_UCODE_REV 0x8b
162
163#ifdef CONFIG_SMP 163#ifdef CONFIG_SMP
164void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 164void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
165void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 165void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
166int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
167int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
166#else /* CONFIG_SMP */ 168#else /* CONFIG_SMP */
167static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 169static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
168{ 170{
@@ -172,269 +174,14 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
172{ 174{
173 wrmsr(msr_no, l, h); 175 wrmsr(msr_no, l, h);
174} 176}
175#endif /* CONFIG_SMP */ 177static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
176 178{
177#endif 179 return rdmsr_safe(msr_no, l, h);
178 180}
179/* AMD/K8 specific MSRs */ 181static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
180#define MSR_EFER 0xc0000080 /* extended feature register */ 182{
181#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ 183 return wrmsr_safe(msr_no, l, h);
182#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ 184}
183#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ 185#endif /* CONFIG_SMP */
184#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ 186#endif /* __ASSEMBLY__ */
185#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ 187#endif /* X86_64_MSR_H */
186#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
187#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
188/* EFER bits: */
189#define _EFER_SCE 0 /* SYSCALL/SYSRET */
190#define _EFER_LME 8 /* Long mode enable */
191#define _EFER_LMA 10 /* Long mode active (read-only) */
192#define _EFER_NX 11 /* No execute enable */
193
194#define EFER_SCE (1<<_EFER_SCE)
195#define EFER_LME (1<<_EFER_LME)
196#define EFER_LMA (1<<_EFER_LMA)
197#define EFER_NX (1<<_EFER_NX)
198
199/* Intel MSRs. Some also available on other CPUs */
200#define MSR_IA32_TSC 0x10
201#define MSR_IA32_PLATFORM_ID 0x17
202
203#define MSR_IA32_PERFCTR0 0xc1
204#define MSR_IA32_PERFCTR1 0xc2
205#define MSR_FSB_FREQ 0xcd
206
207#define MSR_MTRRcap 0x0fe
208#define MSR_IA32_BBL_CR_CTL 0x119
209
210#define MSR_IA32_SYSENTER_CS 0x174
211#define MSR_IA32_SYSENTER_ESP 0x175
212#define MSR_IA32_SYSENTER_EIP 0x176
213
214#define MSR_IA32_MCG_CAP 0x179
215#define MSR_IA32_MCG_STATUS 0x17a
216#define MSR_IA32_MCG_CTL 0x17b
217
218#define MSR_IA32_EVNTSEL0 0x186
219#define MSR_IA32_EVNTSEL1 0x187
220
221#define MSR_IA32_DEBUGCTLMSR 0x1d9
222#define MSR_IA32_LASTBRANCHFROMIP 0x1db
223#define MSR_IA32_LASTBRANCHTOIP 0x1dc
224#define MSR_IA32_LASTINTFROMIP 0x1dd
225#define MSR_IA32_LASTINTTOIP 0x1de
226
227#define MSR_IA32_PEBS_ENABLE 0x3f1
228#define MSR_IA32_DS_AREA 0x600
229#define MSR_IA32_PERF_CAPABILITIES 0x345
230
231#define MSR_MTRRfix64K_00000 0x250
232#define MSR_MTRRfix16K_80000 0x258
233#define MSR_MTRRfix16K_A0000 0x259
234#define MSR_MTRRfix4K_C0000 0x268
235#define MSR_MTRRfix4K_C8000 0x269
236#define MSR_MTRRfix4K_D0000 0x26a
237#define MSR_MTRRfix4K_D8000 0x26b
238#define MSR_MTRRfix4K_E0000 0x26c
239#define MSR_MTRRfix4K_E8000 0x26d
240#define MSR_MTRRfix4K_F0000 0x26e
241#define MSR_MTRRfix4K_F8000 0x26f
242#define MSR_MTRRdefType 0x2ff
243
244#define MSR_IA32_MC0_CTL 0x400
245#define MSR_IA32_MC0_STATUS 0x401
246#define MSR_IA32_MC0_ADDR 0x402
247#define MSR_IA32_MC0_MISC 0x403
248
249#define MSR_P6_PERFCTR0 0xc1
250#define MSR_P6_PERFCTR1 0xc2
251#define MSR_P6_EVNTSEL0 0x186
252#define MSR_P6_EVNTSEL1 0x187
253
254/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
255#define MSR_K7_EVNTSEL0 0xC0010000
256#define MSR_K7_PERFCTR0 0xC0010004
257#define MSR_K7_EVNTSEL1 0xC0010001
258#define MSR_K7_PERFCTR1 0xC0010005
259#define MSR_K7_EVNTSEL2 0xC0010002
260#define MSR_K7_PERFCTR2 0xC0010006
261#define MSR_K7_EVNTSEL3 0xC0010003
262#define MSR_K7_PERFCTR3 0xC0010007
263#define MSR_K8_TOP_MEM1 0xC001001A
264#define MSR_K8_TOP_MEM2 0xC001001D
265#define MSR_K8_SYSCFG 0xC0010010
266#define MSR_K8_HWCR 0xC0010015
267
268/* K6 MSRs */
269#define MSR_K6_EFER 0xC0000080
270#define MSR_K6_STAR 0xC0000081
271#define MSR_K6_WHCR 0xC0000082
272#define MSR_K6_UWCCR 0xC0000085
273#define MSR_K6_PSOR 0xC0000087
274#define MSR_K6_PFIR 0xC0000088
275
276/* Centaur-Hauls/IDT defined MSRs. */
277#define MSR_IDT_FCR1 0x107
278#define MSR_IDT_FCR2 0x108
279#define MSR_IDT_FCR3 0x109
280#define MSR_IDT_FCR4 0x10a
281
282#define MSR_IDT_MCR0 0x110
283#define MSR_IDT_MCR1 0x111
284#define MSR_IDT_MCR2 0x112
285#define MSR_IDT_MCR3 0x113
286#define MSR_IDT_MCR4 0x114
287#define MSR_IDT_MCR5 0x115
288#define MSR_IDT_MCR6 0x116
289#define MSR_IDT_MCR7 0x117
290#define MSR_IDT_MCR_CTRL 0x120
291
292/* VIA Cyrix defined MSRs*/
293#define MSR_VIA_FCR 0x1107
294#define MSR_VIA_LONGHAUL 0x110a
295#define MSR_VIA_RNG 0x110b
296#define MSR_VIA_BCR2 0x1147
297
298/* Intel defined MSRs. */
299#define MSR_IA32_P5_MC_ADDR 0
300#define MSR_IA32_P5_MC_TYPE 1
301#define MSR_IA32_PLATFORM_ID 0x17
302#define MSR_IA32_EBL_CR_POWERON 0x2a
303
304#define MSR_IA32_APICBASE 0x1b
305#define MSR_IA32_APICBASE_BSP (1<<8)
306#define MSR_IA32_APICBASE_ENABLE (1<<11)
307#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
308
309/* P4/Xeon+ specific */
310#define MSR_IA32_MCG_EAX 0x180
311#define MSR_IA32_MCG_EBX 0x181
312#define MSR_IA32_MCG_ECX 0x182
313#define MSR_IA32_MCG_EDX 0x183
314#define MSR_IA32_MCG_ESI 0x184
315#define MSR_IA32_MCG_EDI 0x185
316#define MSR_IA32_MCG_EBP 0x186
317#define MSR_IA32_MCG_ESP 0x187
318#define MSR_IA32_MCG_EFLAGS 0x188
319#define MSR_IA32_MCG_EIP 0x189
320#define MSR_IA32_MCG_RESERVED 0x18A
321
322#define MSR_P6_EVNTSEL0 0x186
323#define MSR_P6_EVNTSEL1 0x187
324
325#define MSR_IA32_PERF_STATUS 0x198
326#define MSR_IA32_PERF_CTL 0x199
327
328#define MSR_IA32_MPERF 0xE7
329#define MSR_IA32_APERF 0xE8
330
331#define MSR_IA32_THERM_CONTROL 0x19a
332#define MSR_IA32_THERM_INTERRUPT 0x19b
333#define MSR_IA32_THERM_STATUS 0x19c
334#define MSR_IA32_MISC_ENABLE 0x1a0
335
336#define MSR_IA32_DEBUGCTLMSR 0x1d9
337#define MSR_IA32_LASTBRANCHFROMIP 0x1db
338#define MSR_IA32_LASTBRANCHTOIP 0x1dc
339#define MSR_IA32_LASTINTFROMIP 0x1dd
340#define MSR_IA32_LASTINTTOIP 0x1de
341
342#define MSR_IA32_MC0_CTL 0x400
343#define MSR_IA32_MC0_STATUS 0x401
344#define MSR_IA32_MC0_ADDR 0x402
345#define MSR_IA32_MC0_MISC 0x403
346
347/* Pentium IV performance counter MSRs */
348#define MSR_P4_BPU_PERFCTR0 0x300
349#define MSR_P4_BPU_PERFCTR1 0x301
350#define MSR_P4_BPU_PERFCTR2 0x302
351#define MSR_P4_BPU_PERFCTR3 0x303
352#define MSR_P4_MS_PERFCTR0 0x304
353#define MSR_P4_MS_PERFCTR1 0x305
354#define MSR_P4_MS_PERFCTR2 0x306
355#define MSR_P4_MS_PERFCTR3 0x307
356#define MSR_P4_FLAME_PERFCTR0 0x308
357#define MSR_P4_FLAME_PERFCTR1 0x309
358#define MSR_P4_FLAME_PERFCTR2 0x30a
359#define MSR_P4_FLAME_PERFCTR3 0x30b
360#define MSR_P4_IQ_PERFCTR0 0x30c
361#define MSR_P4_IQ_PERFCTR1 0x30d
362#define MSR_P4_IQ_PERFCTR2 0x30e
363#define MSR_P4_IQ_PERFCTR3 0x30f
364#define MSR_P4_IQ_PERFCTR4 0x310
365#define MSR_P4_IQ_PERFCTR5 0x311
366#define MSR_P4_BPU_CCCR0 0x360
367#define MSR_P4_BPU_CCCR1 0x361
368#define MSR_P4_BPU_CCCR2 0x362
369#define MSR_P4_BPU_CCCR3 0x363
370#define MSR_P4_MS_CCCR0 0x364
371#define MSR_P4_MS_CCCR1 0x365
372#define MSR_P4_MS_CCCR2 0x366
373#define MSR_P4_MS_CCCR3 0x367
374#define MSR_P4_FLAME_CCCR0 0x368
375#define MSR_P4_FLAME_CCCR1 0x369
376#define MSR_P4_FLAME_CCCR2 0x36a
377#define MSR_P4_FLAME_CCCR3 0x36b
378#define MSR_P4_IQ_CCCR0 0x36c
379#define MSR_P4_IQ_CCCR1 0x36d
380#define MSR_P4_IQ_CCCR2 0x36e
381#define MSR_P4_IQ_CCCR3 0x36f
382#define MSR_P4_IQ_CCCR4 0x370
383#define MSR_P4_IQ_CCCR5 0x371
384#define MSR_P4_ALF_ESCR0 0x3ca
385#define MSR_P4_ALF_ESCR1 0x3cb
386#define MSR_P4_BPU_ESCR0 0x3b2
387#define MSR_P4_BPU_ESCR1 0x3b3
388#define MSR_P4_BSU_ESCR0 0x3a0
389#define MSR_P4_BSU_ESCR1 0x3a1
390#define MSR_P4_CRU_ESCR0 0x3b8
391#define MSR_P4_CRU_ESCR1 0x3b9
392#define MSR_P4_CRU_ESCR2 0x3cc
393#define MSR_P4_CRU_ESCR3 0x3cd
394#define MSR_P4_CRU_ESCR4 0x3e0
395#define MSR_P4_CRU_ESCR5 0x3e1
396#define MSR_P4_DAC_ESCR0 0x3a8
397#define MSR_P4_DAC_ESCR1 0x3a9
398#define MSR_P4_FIRM_ESCR0 0x3a4
399#define MSR_P4_FIRM_ESCR1 0x3a5
400#define MSR_P4_FLAME_ESCR0 0x3a6
401#define MSR_P4_FLAME_ESCR1 0x3a7
402#define MSR_P4_FSB_ESCR0 0x3a2
403#define MSR_P4_FSB_ESCR1 0x3a3
404#define MSR_P4_IQ_ESCR0 0x3ba
405#define MSR_P4_IQ_ESCR1 0x3bb
406#define MSR_P4_IS_ESCR0 0x3b4
407#define MSR_P4_IS_ESCR1 0x3b5
408#define MSR_P4_ITLB_ESCR0 0x3b6
409#define MSR_P4_ITLB_ESCR1 0x3b7
410#define MSR_P4_IX_ESCR0 0x3c8
411#define MSR_P4_IX_ESCR1 0x3c9
412#define MSR_P4_MOB_ESCR0 0x3aa
413#define MSR_P4_MOB_ESCR1 0x3ab
414#define MSR_P4_MS_ESCR0 0x3c0
415#define MSR_P4_MS_ESCR1 0x3c1
416#define MSR_P4_PMH_ESCR0 0x3ac
417#define MSR_P4_PMH_ESCR1 0x3ad
418#define MSR_P4_RAT_ESCR0 0x3bc
419#define MSR_P4_RAT_ESCR1 0x3bd
420#define MSR_P4_SAAT_ESCR0 0x3ae
421#define MSR_P4_SAAT_ESCR1 0x3af
422#define MSR_P4_SSU_ESCR0 0x3be
423#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
424#define MSR_P4_TBPU_ESCR0 0x3c2
425#define MSR_P4_TBPU_ESCR1 0x3c3
426#define MSR_P4_TC_ESCR0 0x3c4
427#define MSR_P4_TC_ESCR1 0x3c5
428#define MSR_P4_U2L_ESCR0 0x3b0
429#define MSR_P4_U2L_ESCR1 0x3b1
430
431/* Intel Core-based CPU performance counters */
432#define MSR_CORE_PERF_FIXED_CTR0 0x309
433#define MSR_CORE_PERF_FIXED_CTR1 0x30a
434#define MSR_CORE_PERF_FIXED_CTR2 0x30b
435#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
436#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
437#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
438#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
439
440#endif
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
index d6135b2549bf..b557c486bef8 100644
--- a/include/asm-x86_64/mtrr.h
+++ b/include/asm-x86_64/mtrr.h
@@ -135,6 +135,18 @@ struct mtrr_gentry32
 
 #endif /* CONFIG_COMPAT */
 
+#ifdef CONFIG_MTRR
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
+extern void mtrr_save_fixed_ranges(void *);
+extern void mtrr_save_state(void);
+#else
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
+#define mtrr_save_fixed_ranges(arg) do {} while (0)
+#define mtrr_save_state() do {} while (0)
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_MTRR_H */
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index 72375e7d32a8..d0a7f53b1497 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -80,4 +80,13 @@ extern int unknown_nmi_panic;
 void __trigger_all_cpu_backtrace(void);
 #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
 
+
+void lapic_watchdog_stop(void);
+int lapic_watchdog_init(unsigned nmi_hz);
+int lapic_wd_event(unsigned nmi_hz);
+unsigned lapic_adjust_nmi_hz(unsigned hz);
+int lapic_watchdog_ok(void);
+void disable_lapic_nmi_watchdog(void);
+void enable_lapic_nmi_watchdog(void);
+
 #endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 10f346165cab..dee632fa457d 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -1,14 +1,11 @@
1#ifndef _X86_64_PAGE_H 1#ifndef _X86_64_PAGE_H
2#define _X86_64_PAGE_H 2#define _X86_64_PAGE_H
3 3
4#include <linux/const.h>
4 5
5/* PAGE_SHIFT determines the page size */ 6/* PAGE_SHIFT determines the page size */
6#define PAGE_SHIFT 12 7#define PAGE_SHIFT 12
7#ifdef __ASSEMBLY__ 8#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
8#define PAGE_SIZE (0x1 << PAGE_SHIFT)
9#else
10#define PAGE_SIZE (1UL << PAGE_SHIFT)
11#endif
12#define PAGE_MASK (~(PAGE_SIZE-1)) 9#define PAGE_MASK (~(PAGE_SIZE-1))
13#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) 10#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
14 11
@@ -33,10 +30,10 @@
33#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ 30#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
34 31
35#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) 32#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
36#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) 33#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
37 34
38#define HPAGE_SHIFT PMD_SHIFT 35#define HPAGE_SHIFT PMD_SHIFT
39#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 36#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
40#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 37#define HPAGE_MASK (~(HPAGE_SIZE - 1))
41#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 38#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
42 39
@@ -64,6 +61,8 @@ typedef struct { unsigned long pgd; } pgd_t;
64 61
65typedef struct { unsigned long pgprot; } pgprot_t; 62typedef struct { unsigned long pgprot; } pgprot_t;
66 63
64extern unsigned long phys_base;
65
67#define pte_val(x) ((x).pte) 66#define pte_val(x) ((x).pte)
68#define pmd_val(x) ((x).pmd) 67#define pmd_val(x) ((x).pmd)
69#define pud_val(x) ((x).pud) 68#define pud_val(x) ((x).pud)
@@ -76,47 +75,38 @@ typedef struct { unsigned long pgprot; } pgprot_t;
76#define __pgd(x) ((pgd_t) { (x) } ) 75#define __pgd(x) ((pgd_t) { (x) } )
77#define __pgprot(x) ((pgprot_t) { (x) } ) 76#define __pgprot(x) ((pgprot_t) { (x) } )
78 77
79#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) 78#endif /* !__ASSEMBLY__ */
80#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
81#define __START_KERNEL_map 0xffffffff80000000UL
82#define __PAGE_OFFSET 0xffff810000000000UL
83 79
84#else
85#define __PHYSICAL_START CONFIG_PHYSICAL_START 80#define __PHYSICAL_START CONFIG_PHYSICAL_START
81#define __KERNEL_ALIGN 0x200000
82
86#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) 83#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
87#define __START_KERNEL_map 0xffffffff80000000 84#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
88#define __PAGE_OFFSET 0xffff810000000000 85#define __PAGE_OFFSET _AC(0xffff810000000000, UL)
89#endif /* !__ASSEMBLY__ */
90 86
91/* to align the pointer to the (next) page boundary */ 87/* to align the pointer to the (next) page boundary */
92#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) 88#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
93 89
94/* See Documentation/x86_64/mm.txt for a description of the memory map. */ 90/* See Documentation/x86_64/mm.txt for a description of the memory map. */
95#define __PHYSICAL_MASK_SHIFT 46 91#define __PHYSICAL_MASK_SHIFT 46
96#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) 92#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
97#define __VIRTUAL_MASK_SHIFT 48 93#define __VIRTUAL_MASK_SHIFT 48
98#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) 94#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
99 95
100#define KERNEL_TEXT_SIZE (40UL*1024*1024) 96#define KERNEL_TEXT_SIZE (40*1024*1024)
101#define KERNEL_TEXT_START 0xffffffff80000000UL 97#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
98#define PAGE_OFFSET __PAGE_OFFSET
102 99
103#ifndef __ASSEMBLY__ 100#ifndef __ASSEMBLY__
104 101
105#include <asm/bug.h> 102#include <asm/bug.h>
106 103
104extern unsigned long __phys_addr(unsigned long);
105
107#endif /* __ASSEMBLY__ */ 106#endif /* __ASSEMBLY__ */
108 107
109#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) 108#define __pa(x) __phys_addr((unsigned long)(x))
110 109#define __pa_symbol(x) __phys_addr((unsigned long)(x))
111/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
112 Otherwise you risk miscompilation. */
113#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
114/* __pa_symbol should be used for C visible symbols.
115 This seems to be the official gcc blessed way to do such arithmetic. */
116#define __pa_symbol(x) \
117 ({unsigned long v; \
118 asm("" : "=r" (v) : "0" (x)); \
119 __pa(v); })
120 110
121#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) 111#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
122#define __boot_va(x) __va(x) 112#define __boot_va(x) __va(x)
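
[Note] The _AC() macro used in the hunks above comes from <linux/const.h>, now included at the top of this header; it exists so one constant definition works in both C and assembler. A simplified sketch of that helper (shown for context, not part of this patch):

	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X		/* assembler: bare constant, no UL suffix */
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)	/* C: _AC(1,UL) pastes to 1UL */
	#endif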
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 5ed0ef340842..c6fbb67eac90 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,16 +11,6 @@
 
 #include <asm/pda.h>
 
-#ifdef CONFIG_MODULES
-# define PERCPU_MODULE_RESERVE 8192
-#else
-# define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-	 PERCPU_MODULE_RESERVE)
-
 #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)
 
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 4e28b6060a5e..8bb564687860 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -1,7 +1,6 @@
 #ifndef _X86_64_PGALLOC_H
 #define _X86_64_PGALLOC_H
 
-#include <asm/fixmap.h>
 #include <asm/pda.h>
 #include <linux/threads.h>
 #include <linux/mm.h>
@@ -45,24 +44,16 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	page->index = (pgoff_t)pgd_list;
-	if (pgd_list)
-		pgd_list->private = (unsigned long)&page->index;
-	pgd_list = page;
-	page->private = (unsigned long)&pgd_list;
+	list_add(&page->lru, &pgd_list);
 	spin_unlock(&pgd_lock);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-	struct page *next, **pprev, *page = virt_to_page(pgd);
+	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	next = (struct page *)page->index;
-	pprev = (struct page **)page->private;
-	*pprev = next;
-	if (next)
-		next->private = (unsigned long)pprev;
+	list_del(&page->lru);
 	spin_unlock(&pgd_lock);
 }
 
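
[Usage note] With pgd_list now kept as a standard struct list_head (see the pgtable.h hunk below), walkers can use the ordinary list iterators; a hedged sketch of the resulting pattern (caller assumed to hold pgd_lock):

	struct page *page;

	list_for_each_entry(page, &pgd_list, lru) {
		pgd_t *pgd = (pgd_t *)page_address(page);
		/* ... operate on this pgd ... */
	}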
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 730bd6028416..08b9831f2e14 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -1,22 +1,22 @@
1#ifndef _X86_64_PGTABLE_H 1#ifndef _X86_64_PGTABLE_H
2#define _X86_64_PGTABLE_H 2#define _X86_64_PGTABLE_H
3 3
4#include <linux/const.h>
5#ifndef __ASSEMBLY__
6
4/* 7/*
5 * This file contains the functions and defines necessary to modify and use 8 * This file contains the functions and defines necessary to modify and use
6 * the x86-64 page table tree. 9 * the x86-64 page table tree.
7 */ 10 */
8#include <asm/processor.h> 11#include <asm/processor.h>
9#include <asm/fixmap.h>
10#include <asm/bitops.h> 12#include <asm/bitops.h>
11#include <linux/threads.h> 13#include <linux/threads.h>
12#include <asm/pda.h> 14#include <asm/pda.h>
13 15
14extern pud_t level3_kernel_pgt[512]; 16extern pud_t level3_kernel_pgt[512];
15extern pud_t level3_physmem_pgt[512];
16extern pud_t level3_ident_pgt[512]; 17extern pud_t level3_ident_pgt[512];
17extern pmd_t level2_kernel_pgt[512]; 18extern pmd_t level2_kernel_pgt[512];
18extern pgd_t init_level4_pgt[]; 19extern pgd_t init_level4_pgt[];
19extern pgd_t boot_level4_pgt[];
20extern unsigned long __supported_pte_mask; 20extern unsigned long __supported_pte_mask;
21 21
22#define swapper_pg_dir init_level4_pgt 22#define swapper_pg_dir init_level4_pgt
@@ -31,6 +31,8 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
31extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; 31extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
32#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 32#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
33 33
34#endif /* !__ASSEMBLY__ */
35
34/* 36/*
35 * PGDIR_SHIFT determines what a top-level page table entry can map 37 * PGDIR_SHIFT determines what a top-level page table entry can map
36 */ 38 */
@@ -55,6 +57,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
55 */ 57 */
56#define PTRS_PER_PTE 512 58#define PTRS_PER_PTE 512
57 59
60#ifndef __ASSEMBLY__
61
58#define pte_ERROR(e) \ 62#define pte_ERROR(e) \
59 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) 63 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
60#define pmd_ERROR(e) \ 64#define pmd_ERROR(e) \
@@ -118,22 +122,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
118 122
119#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK)) 123#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
120 124
121#define PMD_SIZE (1UL << PMD_SHIFT) 125#endif /* !__ASSEMBLY__ */
126
127#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
122#define PMD_MASK (~(PMD_SIZE-1)) 128#define PMD_MASK (~(PMD_SIZE-1))
123#define PUD_SIZE (1UL << PUD_SHIFT) 129#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
124#define PUD_MASK (~(PUD_SIZE-1)) 130#define PUD_MASK (~(PUD_SIZE-1))
125#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 131#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
126#define PGDIR_MASK (~(PGDIR_SIZE-1)) 132#define PGDIR_MASK (~(PGDIR_SIZE-1))
127 133
128#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) 134#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
129#define FIRST_USER_ADDRESS 0 135#define FIRST_USER_ADDRESS 0
130 136
131#ifndef __ASSEMBLY__ 137#define MAXMEM _AC(0x3fffffffffff, UL)
132#define MAXMEM 0x3fffffffffffUL 138#define VMALLOC_START _AC(0xffffc20000000000, UL)
133#define VMALLOC_START 0xffffc20000000000UL 139#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
134#define VMALLOC_END 0xffffe1ffffffffffUL 140#define MODULES_VADDR _AC(0xffffffff88000000, UL)
135#define MODULES_VADDR 0xffffffff88000000UL 141#define MODULES_END _AC(0xfffffffffff00000, UL)
136#define MODULES_END 0xfffffffffff00000UL
137#define MODULES_LEN (MODULES_END - MODULES_VADDR) 142#define MODULES_LEN (MODULES_END - MODULES_VADDR)
138 143
139#define _PAGE_BIT_PRESENT 0 144#define _PAGE_BIT_PRESENT 0
@@ -159,7 +164,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
159#define _PAGE_GLOBAL 0x100 /* Global TLB entry */ 164#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
160 165
161#define _PAGE_PROTNONE 0x080 /* If not present */ 166#define _PAGE_PROTNONE 0x080 /* If not present */
162#define _PAGE_NX (1UL<<_PAGE_BIT_NX) 167#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
163 168
164#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) 169#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
165#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 170#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -221,6 +226,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
221#define __S110 PAGE_SHARED_EXEC 226#define __S110 PAGE_SHARED_EXEC
222#define __S111 PAGE_SHARED_EXEC 227#define __S111 PAGE_SHARED_EXEC
223 228
229#ifndef __ASSEMBLY__
230
224static inline unsigned long pgd_bad(pgd_t pgd) 231static inline unsigned long pgd_bad(pgd_t pgd)
225{ 232{
226 return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); 233 return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
@@ -403,20 +410,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
403#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 410#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
404 411
405extern spinlock_t pgd_lock; 412extern spinlock_t pgd_lock;
406extern struct page *pgd_list; 413extern struct list_head pgd_list;
407void vmalloc_sync_all(void);
408
409#endif /* !__ASSEMBLY__ */
410 414
411extern int kern_addr_valid(unsigned long addr); 415extern int kern_addr_valid(unsigned long addr);
412 416
413#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 417#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
414 remap_pfn_range(vma, vaddr, pfn, size, prot) 418 remap_pfn_range(vma, vaddr, pfn, size, prot)
415 419
416#define MK_IOSPACE_PFN(space, pfn) (pfn)
417#define GET_IOSPACE(pfn) 0
418#define GET_PFN(pfn) (pfn)
419
420#define HAVE_ARCH_UNMAPPED_AREA 420#define HAVE_ARCH_UNMAPPED_AREA
421 421
422#define pgtable_cache_init() do { } while (0) 422#define pgtable_cache_init() do { } while (0)
@@ -437,5 +437,6 @@ extern int kern_addr_valid(unsigned long addr);
437#define __HAVE_ARCH_PTEP_SET_WRPROTECT 437#define __HAVE_ARCH_PTEP_SET_WRPROTECT
438#define __HAVE_ARCH_PTE_SAME 438#define __HAVE_ARCH_PTE_SAME
439#include <asm-generic/pgtable.h> 439#include <asm-generic/pgtable.h>
440#endif /* !__ASSEMBLY__ */
440 441
441#endif /* _X86_64_PGTABLE_H */ 442#endif /* _X86_64_PGTABLE_H */
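
The pgtable.h hunks above swap literal constants such as 1UL << PMD_SHIFT for _AC(1,UL) and pull in <linux/const.h>, so the same #defines can be consumed from assembly as well as C (hence the reshuffled #ifndef __ASSEMBLY__ guards). A paraphrased sketch of the _AC() helper, assuming the usual <linux/const.h> definition rather than quoting it verbatim:

/* Sketch of the "assembler constant" trick behind _AC(): */
#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* assembler: drop the C type suffix */
#else
#define __AC(X,Y)	(X##Y)		/* C: paste the suffix, e.g. 1 ## UL */
#define _AC(X,Y)	__AC(X,Y)
#endif

/* So PMD_SIZE == (_AC(1,UL) << PMD_SHIFT) expands to (1UL) << 21 in C code
 * and to plain 1 << 21 in assembly, where the "UL" suffix would be a
 * syntax error. */
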
diff --git a/include/asm-x86_64/processor-flags.h b/include/asm-x86_64/processor-flags.h
new file mode 100644
index 000000000000..ec99a57b2c6a
--- /dev/null
+++ b/include/asm-x86_64/processor-flags.h
@@ -0,0 +1 @@
#include <asm-i386/processor-flags.h>
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 76552d72804c..461ffe4c1fcc 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -20,6 +20,7 @@
20#include <asm/percpu.h> 20#include <asm/percpu.h>
21#include <linux/personality.h> 21#include <linux/personality.h>
22#include <linux/cpumask.h> 22#include <linux/cpumask.h>
23#include <asm/processor-flags.h>
23 24
24#define TF_MASK 0x00000100 25#define TF_MASK 0x00000100
25#define IF_MASK 0x00000200 26#define IF_MASK 0x00000200
@@ -103,42 +104,6 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
103extern unsigned short num_cache_leaves; 104extern unsigned short num_cache_leaves;
104 105
105/* 106/*
106 * EFLAGS bits
107 */
108#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
109#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
110#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
111#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
112#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
113#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
114#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
115#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
116#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
117#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
118#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
119#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
120#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
121#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
122#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
123#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
124#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
125
126/*
127 * Intel CPU features in CR4
128 */
129#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
130#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
131#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
132#define X86_CR4_DE 0x0008 /* enable debugging extensions */
133#define X86_CR4_PSE 0x0010 /* enable page size extensions */
134#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
135#define X86_CR4_MCE 0x0040 /* Machine check enable */
136#define X86_CR4_PGE 0x0080 /* enable global pages */
137#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
138#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
139#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
140
141/*
142 * Save the cr4 feature set we're using (ie 107 * Save the cr4 feature set we're using (ie
143 * Pentium 4MB enable and PPro Global page 108 * Pentium 4MB enable and PPro Global page
144 * enable), so that any CPU's that boot up 109 * enable), so that any CPU's that boot up
@@ -201,7 +166,7 @@ struct i387_fxsave_struct {
201 u32 mxcsr; 166 u32 mxcsr;
202 u32 mxcsr_mask; 167 u32 mxcsr_mask;
203 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ 168 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
204 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */ 169 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
205 u32 padding[24]; 170 u32 padding[24];
206} __attribute__ ((aligned (16))); 171} __attribute__ ((aligned (16)));
207 172
@@ -427,22 +392,6 @@ static inline void prefetchw(void *x)
427#define cpu_relax() rep_nop() 392#define cpu_relax() rep_nop()
428 393
429/* 394/*
430 * NSC/Cyrix CPU configuration register indexes
431 */
432#define CX86_CCR0 0xc0
433#define CX86_CCR1 0xc1
434#define CX86_CCR2 0xc2
435#define CX86_CCR3 0xc3
436#define CX86_CCR4 0xe8
437#define CX86_CCR5 0xe9
438#define CX86_CCR6 0xea
439#define CX86_CCR7 0xeb
440#define CX86_DIR0 0xfe
441#define CX86_DIR1 0xff
442#define CX86_ARR_BASE 0xc4
443#define CX86_RCR_BASE 0xdc
444
445/*
446 * NSC/Cyrix CPU indexed register access macros 395 * NSC/Cyrix CPU indexed register access macros
447 */ 396 */
448 397
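
The EFLAGS and CR4 bit definitions removed here now come from the shared <asm/processor-flags.h> included at the top of processor.h, so existing users of X86_EFLAGS_* and X86_CR4_* compile unchanged. A minimal sketch of a typical consumer, assuming only the pushfq instruction semantics and the X86_EFLAGS_IF constant (the helper names are illustrative, not part of the patch):

static inline unsigned long read_rflags(void)
{
	unsigned long flags;
	/* pushfq/popq pair: copy RFLAGS into a general purpose register */
	asm volatile("pushfq ; popq %0" : "=r" (flags) : : "memory");
	return flags;
}

static inline int interrupts_enabled(void)
{
	return (read_rflags() & X86_EFLAGS_IF) != 0;
}
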
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index b6e65a699f2a..85255db1e82d 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -11,18 +11,9 @@ struct pt_regs;
11extern void start_kernel(void); 11extern void start_kernel(void);
12extern void pda_init(int); 12extern void pda_init(int);
13 13
14extern void zap_low_mappings(int cpu);
15
16extern void early_idt_handler(void); 14extern void early_idt_handler(void);
17 15
18extern void mcheck_init(struct cpuinfo_x86 *c); 16extern void mcheck_init(struct cpuinfo_x86 *c);
19#ifdef CONFIG_MTRR
20extern void mtrr_ap_init(void);
21extern void mtrr_bp_init(void);
22#else
23#define mtrr_ap_init() do {} while (0)
24#define mtrr_bp_init() do {} while (0)
25#endif
26extern void init_memory_mapping(unsigned long start, unsigned long end); 17extern void init_memory_mapping(unsigned long start, unsigned long end);
27 18
28extern void system_call(void); 19extern void system_call(void);
@@ -82,7 +73,6 @@ extern void syscall32_cpu_init(void);
82extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); 73extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
83 74
84extern void early_quirks(void); 75extern void early_quirks(void);
85extern void quirk_intel_irqbalance(void);
86extern void check_efer(void); 76extern void check_efer(void);
87 77
88extern int unhandled_signal(struct task_struct *tsk, int sig); 78extern int unhandled_signal(struct task_struct *tsk, int sig);
@@ -93,6 +83,7 @@ extern unsigned long table_start, table_end;
93 83
94extern int exception_trace; 84extern int exception_trace;
95extern unsigned cpu_khz; 85extern unsigned cpu_khz;
86extern unsigned tsc_khz;
96 87
97extern void no_iommu_init(void); 88extern void no_iommu_init(void);
98extern int force_iommu, no_iommu; 89extern int force_iommu, no_iommu;
@@ -121,8 +112,12 @@ extern int gsi_irq_sharing(int gsi);
121 112
122extern void smp_local_timer_interrupt(void); 113extern void smp_local_timer_interrupt(void);
123 114
115extern int force_mwait;
116
124long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); 117long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
125 118
119void i8254_timer_resume(void);
120
126#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1)) 121#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
127#define round_down(x,y) ((x) & ~((y)-1)) 122#define round_down(x,y) ((x) & ~((y)-1))
128 123
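
proto.h keeps the round_up()/round_down() macros shown at the end of this hunk; they rely on the alignment argument being a power of two so that ~(y-1) is a valid mask. A couple of worked values, only to spell out the arithmetic:

/* With y a power of two, ~(y-1) masks off the low alignment bits:
 *   round_up(0x1234, 0x1000)   == 0x2000   (0x1234 + 0xfff = 0x2233, then masked)
 *   round_down(0x1234, 0x1000) == 0x1000
 *   round_up(0x2000, 0x1000)   == 0x2000   (already aligned, unchanged)
 */
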
diff --git a/include/asm-x86_64/scatterlist.h b/include/asm-x86_64/scatterlist.h
index 49d89f8129cd..eaf7ada27e14 100644
--- a/include/asm-x86_64/scatterlist.h
+++ b/include/asm-x86_64/scatterlist.h
@@ -1,6 +1,8 @@
1#ifndef _X8664_SCATTERLIST_H 1#ifndef _X8664_SCATTERLIST_H
2#define _X8664_SCATTERLIST_H 2#define _X8664_SCATTERLIST_H
3 3
4#include <asm/types.h>
5
4struct scatterlist { 6struct scatterlist {
5 struct page *page; 7 struct page *page;
6 unsigned int offset; 8 unsigned int offset;
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
index 334ddcdd8f92..adf2bf1e187c 100644
--- a/include/asm-x86_64/segment.h
+++ b/include/asm-x86_64/segment.h
@@ -6,7 +6,7 @@
6#define __KERNEL_CS 0x10 6#define __KERNEL_CS 0x10
7#define __KERNEL_DS 0x18 7#define __KERNEL_DS 0x18
8 8
9#define __KERNEL32_CS 0x38 9#define __KERNEL32_CS 0x08
10 10
11/* 11/*
12 * we cannot use the same code segment descriptor for user and kernel 12 * we cannot use the same code segment descriptor for user and kernel
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
index b0496e0d72a6..8ebd765c674a 100644
--- a/include/asm-x86_64/serial.h
+++ b/include/asm-x86_64/serial.h
@@ -11,19 +11,3 @@
11 * megabits/second; but this requires the faster clock. 11 * megabits/second; but this requires the faster clock.
12 */ 12 */
13#define BASE_BAUD ( 1843200 / 16 ) 13#define BASE_BAUD ( 1843200 / 16 )
14
15/* Standard COM flags (except for COM4, because of the 8514 problem) */
16#ifdef CONFIG_SERIAL_DETECT_IRQ
17#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
18#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
19#else
20#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
21#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
22#endif
23
24#define SERIAL_PORT_DFNS \
25 /* UART CLK PORT IRQ FLAGS */ \
26 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
27 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
28 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
29 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
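
The surviving BASE_BAUD definition is just the legacy 1.8432 MHz UART input clock divided by the 16x oversampling divisor, spelled out:

/* 1843200 Hz / 16 = 115200, so a stock 16550 with divisor 1 runs at 115200 baud */
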
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index de592a408c07..3f303d2365ed 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -10,10 +10,9 @@
10#include <linux/init.h> 10#include <linux/init.h>
11extern int disable_apic; 11extern int disable_apic;
12 12
13#include <asm/fixmap.h>
14#include <asm/mpspec.h> 13#include <asm/mpspec.h>
15#include <asm/io_apic.h>
16#include <asm/apic.h> 14#include <asm/apic.h>
15#include <asm/io_apic.h>
17#include <asm/thread_info.h> 16#include <asm/thread_info.h>
18 17
19#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
@@ -38,7 +37,6 @@ extern void lock_ipi_call_lock(void);
38extern void unlock_ipi_call_lock(void); 37extern void unlock_ipi_call_lock(void);
39extern int smp_num_siblings; 38extern int smp_num_siblings;
40extern void smp_send_reschedule(int cpu); 39extern void smp_send_reschedule(int cpu);
41void smp_stop_cpu(void);
42 40
43extern cpumask_t cpu_sibling_map[NR_CPUS]; 41extern cpumask_t cpu_sibling_map[NR_CPUS];
44extern cpumask_t cpu_core_map[NR_CPUS]; 42extern cpumask_t cpu_core_map[NR_CPUS];
@@ -59,12 +57,6 @@ static inline int num_booting_cpus(void)
59 57
60#define raw_smp_processor_id() read_pda(cpunumber) 58#define raw_smp_processor_id() read_pda(cpunumber)
61 59
62static inline int hard_smp_processor_id(void)
63{
64 /* we don't want to mark this access volatile - bad code generation */
65 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
66}
67
68extern int __cpu_disable(void); 60extern int __cpu_disable(void);
69extern void __cpu_die(unsigned int cpu); 61extern void __cpu_die(unsigned int cpu);
70extern void prefill_possible_map(void); 62extern void prefill_possible_map(void);
@@ -73,7 +65,13 @@ extern unsigned __cpuinitdata disabled_cpus;
73 65
74#define NO_PROC_ID 0xFF /* No processor magic marker */ 66#define NO_PROC_ID 0xFF /* No processor magic marker */
75 67
76#endif 68#endif /* CONFIG_SMP */
69
70static inline int hard_smp_processor_id(void)
71{
72 /* we don't want to mark this access volatile - bad code generation */
73 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
74}
77 75
78/* 76/*
79 * Some lowlevel functions might want to know about 77 * Some lowlevel functions might want to know about
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
index bc7f81715e5e..9c3f8de90d2d 100644
--- a/include/asm-x86_64/suspend.h
+++ b/include/asm-x86_64/suspend.h
@@ -17,6 +17,7 @@ struct saved_context {
17 u16 ds, es, fs, gs, ss; 17 u16 ds, es, fs, gs, ss;
18 unsigned long gs_base, gs_kernel_base, fs_base; 18 unsigned long gs_base, gs_kernel_base, fs_base;
19 unsigned long cr0, cr2, cr3, cr4, cr8; 19 unsigned long cr0, cr2, cr3, cr4, cr8;
20 unsigned long efer;
20 u16 gdt_pad; 21 u16 gdt_pad;
21 u16 gdt_limit; 22 u16 gdt_limit;
22 unsigned long gdt_base; 23 unsigned long gdt_base;
@@ -44,12 +45,12 @@ extern unsigned long saved_context_eflags;
44extern void fix_processor_context(void); 45extern void fix_processor_context(void);
45 46
46#ifdef CONFIG_ACPI_SLEEP 47#ifdef CONFIG_ACPI_SLEEP
47extern unsigned long saved_eip; 48extern unsigned long saved_rip;
48extern unsigned long saved_esp; 49extern unsigned long saved_rsp;
49extern unsigned long saved_ebp; 50extern unsigned long saved_rbp;
50extern unsigned long saved_ebx; 51extern unsigned long saved_rbx;
51extern unsigned long saved_esi; 52extern unsigned long saved_rsi;
52extern unsigned long saved_edi; 53extern unsigned long saved_rdi;
53 54
54/* routines for saving/restoring kernel state */ 55/* routines for saving/restoring kernel state */
55extern int acpi_save_state_mem(void); 56extern int acpi_save_state_mem(void);
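
The new efer slot in struct saved_context is there so the suspend path can preserve the EFER MSR (NX/LME/SCE bits) across a sleep state. A minimal sketch, assuming the usual rdmsrl()/wrmsrl() accessors from <asm/msr.h> and MSR_EFER from <asm/msr-index.h>; the function names are illustrative, not taken from the patch:

static void save_efer(struct saved_context *ctxt)
{
	rdmsrl(MSR_EFER, ctxt->efer);	/* on suspend: stash EFER */
}

static void restore_efer(const struct saved_context *ctxt)
{
	wrmsrl(MSR_EFER, ctxt->efer);	/* on resume: write it back before relying on NX again */
}
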
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index bd376bc8c4ab..ead9f9a56234 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
6#include <asm/alternative.h> 6#include <asm/cmpxchg.h>
7 7
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9 9
@@ -39,7 +39,7 @@
39 [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ 39 [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
40 [ti_flags] "i" (offsetof(struct thread_info, flags)),\ 40 [ti_flags] "i" (offsetof(struct thread_info, flags)),\
41 [tif_fork] "i" (TIF_FORK), \ 41 [tif_fork] "i" (TIF_FORK), \
42 [thread_info] "i" (offsetof(struct task_struct, thread_info)), \ 42 [thread_info] "i" (offsetof(struct task_struct, stack)), \
43 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ 43 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
44 : "memory", "cc" __EXTRA_CLOBBER) 44 : "memory", "cc" __EXTRA_CLOBBER)
45 45
@@ -89,6 +89,11 @@ static inline unsigned long read_cr3(void)
89 return cr3; 89 return cr3;
90} 90}
91 91
92static inline void write_cr3(unsigned long val)
93{
94 asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
95}
96
92static inline unsigned long read_cr4(void) 97static inline unsigned long read_cr4(void)
93{ 98{
94 unsigned long cr4; 99 unsigned long cr4;
@@ -98,7 +103,7 @@ static inline unsigned long read_cr4(void)
98 103
99static inline void write_cr4(unsigned long val) 104static inline void write_cr4(unsigned long val)
100{ 105{
101 asm volatile("movq %0,%%cr4" :: "r" (val)); 106 asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
102} 107}
103 108
104#define stts() write_cr0(8 | read_cr0()) 109#define stts() write_cr0(8 | read_cr0())
@@ -119,100 +124,6 @@ static inline void sched_cacheflush(void)
119 124
120#define nop() __asm__ __volatile__ ("nop") 125#define nop() __asm__ __volatile__ ("nop")
121 126
122#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
123
124#define tas(ptr) (xchg((ptr),1))
125
126#define __xg(x) ((volatile long *)(x))
127
128static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
129{
130 *ptr = val;
131}
132
133#define _set_64bit set_64bit
134
135/*
136 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
137 * Note 2: xchg has side effect, so that attribute volatile is necessary,
138 * but generally the primitive is invalid, *ptr is output argument. --ANK
139 */
140static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
141{
142 switch (size) {
143 case 1:
144 __asm__ __volatile__("xchgb %b0,%1"
145 :"=q" (x)
146 :"m" (*__xg(ptr)), "0" (x)
147 :"memory");
148 break;
149 case 2:
150 __asm__ __volatile__("xchgw %w0,%1"
151 :"=r" (x)
152 :"m" (*__xg(ptr)), "0" (x)
153 :"memory");
154 break;
155 case 4:
156 __asm__ __volatile__("xchgl %k0,%1"
157 :"=r" (x)
158 :"m" (*__xg(ptr)), "0" (x)
159 :"memory");
160 break;
161 case 8:
162 __asm__ __volatile__("xchgq %0,%1"
163 :"=r" (x)
164 :"m" (*__xg(ptr)), "0" (x)
165 :"memory");
166 break;
167 }
168 return x;
169}
170
171/*
172 * Atomic compare and exchange. Compare OLD with MEM, if identical,
173 * store NEW in MEM. Return the initial value in MEM. Success is
174 * indicated by comparing RETURN with OLD.
175 */
176
177#define __HAVE_ARCH_CMPXCHG 1
178
179static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
180 unsigned long new, int size)
181{
182 unsigned long prev;
183 switch (size) {
184 case 1:
185 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
186 : "=a"(prev)
187 : "q"(new), "m"(*__xg(ptr)), "0"(old)
188 : "memory");
189 return prev;
190 case 2:
191 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
192 : "=a"(prev)
193 : "r"(new), "m"(*__xg(ptr)), "0"(old)
194 : "memory");
195 return prev;
196 case 4:
197 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
198 : "=a"(prev)
199 : "r"(new), "m"(*__xg(ptr)), "0"(old)
200 : "memory");
201 return prev;
202 case 8:
203 __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
204 : "=a"(prev)
205 : "r"(new), "m"(*__xg(ptr)), "0"(old)
206 : "memory");
207 return prev;
208 }
209 return old;
210}
211
212#define cmpxchg(ptr,o,n)\
213 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
214 (unsigned long)(n),sizeof(*(ptr))))
215
216#ifdef CONFIG_SMP 127#ifdef CONFIG_SMP
217#define smp_mb() mb() 128#define smp_mb() mb()
218#define smp_rmb() rmb() 129#define smp_rmb() rmb()
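
system.h now gets xchg()/cmpxchg() from <asm/cmpxchg.h> instead of defining them inline; the large block removed above moved there wholesale, and the calling convention is unchanged: cmpxchg(ptr, old, new) returns the value that was in *ptr, and the store only happened if that value equals old. A minimal sketch of the usual retry loop built on top of it ("counter" is an illustrative variable, not from the patch):

static inline void add_ulong(volatile unsigned long *counter, unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *counter;
		new = old + delta;
		/* retry if another CPU updated *counter in between */
	} while (cmpxchg(counter, old, new) != old);
}
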
diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h
index 6cfc3bb10c1a..7405756dd41b 100644
--- a/include/asm-x86_64/termbits.h
+++ b/include/asm-x86_64/termbits.h
@@ -160,7 +160,7 @@ struct ktermios {
160#define CMSPAR 010000000000 /* mark or space (stick) parity */ 160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */ 161#define CRTSCTS 020000000000 /* flow control */
162 162
163#define IBSHIFT 8 /* Shift from CBAUD to CIBAUD */ 163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164 164
165/* c_lflag bits */ 165/* c_lflag bits */
166#define ISIG 0000001 166#define ISIG 0000001
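
IBSHIFT is corrected from 8 to 16 here because on x86-64 the CIBAUD input-speed field sits 16 bits above CBAUD, not 8. A sketch of the relationship, assuming the CBAUD/CIBAUD values defined earlier in this header ("input_baud_bits" is an illustrative helper name):

/* CIBAUD == CBAUD << IBSHIFT, so the input-speed bits are extracted as: */
static inline unsigned int input_baud_bits(const struct ktermios *t)
{
	return (t->c_cflag >> IBSHIFT) & CBAUD;
}
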
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 74a6c74397f7..10bb5a8ed688 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -162,7 +162,7 @@ static inline struct thread_info *stack_thread_info(void)
162#define TS_COMPAT 0x0002 /* 32bit syscall active */ 162#define TS_COMPAT 0x0002 /* 32bit syscall active */
163#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */ 163#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
164 164
165#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) 165#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
166 166
167#endif /* __KERNEL__ */ 167#endif /* __KERNEL__ */
168 168
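
tsk_is_polling() now goes through task_thread_info() instead of dereferencing a thread_info pointer directly, matching the rename of the task_struct field to "stack" that also shows up in the system.h switch_to() offsets above. Assuming the generic <linux/sched.h> definition of this era, the accessor is roughly:

/* thread_info lives at the bottom of the kernel stack, and task->stack
 * points there, so (sketch, not the verbatim header): */
#define task_thread_info(task)	((struct thread_info *)(task)->stack)
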
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index 8c6808a3fba4..f6527e1b6c1c 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -27,6 +27,6 @@ extern int read_current_timer(unsigned long *timer_value);
27#define NS_SCALE 10 /* 2^10, carefully chosen */ 27#define NS_SCALE 10 /* 2^10, carefully chosen */
28#define US_SCALE 32 /* 2^32, arbitralrily chosen */ 28#define US_SCALE 32 /* 2^32, arbitralrily chosen */
29 29
30extern void mark_tsc_unstable(void); 30extern void mark_tsc_unstable(char *msg);
31extern void set_cyc2ns_scale(unsigned long khz); 31extern void set_cyc2ns_scale(unsigned long khz);
32#endif 32#endif
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 983bd296c81a..512401b8725f 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -3,41 +3,18 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
6 6#include <asm/system.h>
7static inline unsigned long get_cr3(void)
8{
9 unsigned long cr3;
10 asm volatile("mov %%cr3,%0" : "=r" (cr3));
11 return cr3;
12}
13
14static inline void set_cr3(unsigned long cr3)
15{
16 asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
17}
18 7
19static inline void __flush_tlb(void) 8static inline void __flush_tlb(void)
20{ 9{
21 set_cr3(get_cr3()); 10 write_cr3(read_cr3());
22}
23
24static inline unsigned long get_cr4(void)
25{
26 unsigned long cr4;
27 asm volatile("mov %%cr4,%0" : "=r" (cr4));
28 return cr4;
29}
30
31static inline void set_cr4(unsigned long cr4)
32{
33 asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
34} 11}
35 12
36static inline void __flush_tlb_all(void) 13static inline void __flush_tlb_all(void)
37{ 14{
38 unsigned long cr4 = get_cr4(); 15 unsigned long cr4 = read_cr4();
39 set_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */ 16 write_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */
40 set_cr4(cr4); /* write old PGE again and flush TLBs */ 17 write_cr4(cr4); /* write old PGE again and flush TLBs */
41} 18}
42 19
43#define __flush_tlb_one(addr) \ 20#define __flush_tlb_one(addr) \
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index c5f596e71faa..595703949df3 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -619,8 +619,8 @@ __SYSCALL(__NR_sync_file_range, sys_sync_file_range)
619__SYSCALL(__NR_vmsplice, sys_vmsplice) 619__SYSCALL(__NR_vmsplice, sys_vmsplice)
620#define __NR_move_pages 279 620#define __NR_move_pages 279
621__SYSCALL(__NR_move_pages, sys_move_pages) 621__SYSCALL(__NR_move_pages, sys_move_pages)
622 622#define __NR_utimensat 280
623#define __NR_syscall_max __NR_move_pages 623__SYSCALL(__NR_utimensat, sys_utimensat)
624 624
625#ifndef __NO_STUBS 625#ifndef __NO_STUBS
626#define __ARCH_WANT_OLD_READDIR 626#define __ARCH_WANT_OLD_READDIR
@@ -655,7 +655,6 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
655#include <asm/ptrace.h> 655#include <asm/ptrace.h>
656 656
657asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs); 657asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs);
658asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on);
659struct sigaction; 658struct sigaction;
660asmlinkage long sys_rt_sigaction(int sig, 659asmlinkage long sys_rt_sigaction(int sig,
661 const struct sigaction __user *act, 660 const struct sigaction __user *act,