author     Ingo Molnar <mingo@elte.hu>    2010-05-20 08:38:55 -0400
committer  Ingo Molnar <mingo@elte.hu>    2010-05-20 08:38:55 -0400
commit     dfacc4d6c98b89609250269f518c1f54c30454ef (patch)
tree       e7effbee7bdc85d18f7b26ab9cb5c9f700d1481a /arch/x86/include/asm
parent     f869097e884d8cb65b2bb7831ca57b7dffb66fdd (diff)
parent     85cb68b27c428d477169f3aa46c72dba103a17bd (diff)
Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/alternative-asm.h  |    4
-rw-r--r--  arch/x86/include/asm/alternative.h      |   20
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h  |   34
-rw-r--r--  arch/x86/include/asm/arch_hweight.h     |   61
-rw-r--r--  arch/x86/include/asm/atomic.h           |   25
-rw-r--r--  arch/x86/include/asm/atomic64_32.h      |  278
-rw-r--r--  arch/x86/include/asm/atomic64_64.h      |   25
-rw-r--r--  arch/x86/include/asm/bitops.h           |    4
-rw-r--r--  arch/x86/include/asm/boot.h             |    2
-rw-r--r--  arch/x86/include/asm/cacheflush.h       |   44
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h       |    3
-rw-r--r--  arch/x86/include/asm/cpufeature.h       |   58
-rw-r--r--  arch/x86/include/asm/dwarf2.h           |   12
-rw-r--r--  arch/x86/include/asm/e820.h             |    7
-rw-r--r--  arch/x86/include/asm/hardirq.h          |    2
-rw-r--r--  arch/x86/include/asm/hyperv.h           |   11
-rw-r--r--  arch/x86/include/asm/hypervisor.h       |   27
-rw-r--r--  arch/x86/include/asm/i387.h             |  129
-rw-r--r--  arch/x86/include/asm/i8253.h            |    2
-rw-r--r--  arch/x86/include/asm/io_apic.h          |   13
-rw-r--r--  arch/x86/include/asm/k8.h               |    5
-rw-r--r--  arch/x86/include/asm/mpspec.h           |   10
-rw-r--r--  arch/x86/include/asm/mshyperv.h         |   14
-rw-r--r--  arch/x86/include/asm/percpu.h           |   24
-rw-r--r--  arch/x86/include/asm/processor.h        |   10
-rw-r--r--  arch/x86/include/asm/thread_info.h      |    1
-rw-r--r--  arch/x86/include/asm/traps.h            |    2
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h        |  247
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h        |    2
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h       |  528
-rw-r--r--  arch/x86/include/asm/vmware.h           |   27
-rw-r--r--  arch/x86/include/asm/xsave.h            |    7
32 files changed, 922 insertions, 716 deletions
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index b97f786a48d5..a63a68be1cce 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -6,8 +6,8 @@
 	.macro LOCK_PREFIX
 1:	lock
 	.section .smp_locks,"a"
-	_ASM_ALIGN
-	_ASM_PTR 1b
+	.balign 4
+	.long 1b - .
 	.previous
 	.endm
 #else
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index b09ec55650b3..03b6bb5394a0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -28,20 +28,20 @@
  */
 
 #ifdef CONFIG_SMP
-#define LOCK_PREFIX \
+#define LOCK_PREFIX_HERE \
 		".section .smp_locks,\"a\"\n"	\
-		_ASM_ALIGN "\n"			\
-		_ASM_PTR "661f\n" /* address */	\
+		".balign 4\n"			\
+		".long 671f - .\n" /* offset */	\
 		".previous\n"			\
-		"661:\n\tlock; "
+		"671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
 
 #else /* ! CONFIG_SMP */
+#define LOCK_PREFIX_HERE ""
 #define LOCK_PREFIX ""
 #endif
 
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
 struct alt_instr {
 	u8 *instr;		/* original instruction */
 	u8 *replacement;
@@ -96,6 +96,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	".previous"
 
 /*
+ * This must be included *after* the definition of ALTERNATIVE due to
+ * <asm/arch_hweight.h>
+ */
+#include <asm/cpufeature.h>
+
+/*
  * Alternative instructions for different CPU types or capabilities.
  *
  * This allows to use optimized instructions even on generic binary
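Note on the .smp_locks change above: entries are now 4-byte PC-relative offsets rather than pointer-sized absolute addresses, which shrinks the table on 64-bit kernels. A minimal sketch of how a consumer would recover each lock-prefix address from such an entry (kernel s32/u8 types assumed; the function name is illustrative, not the commit's code):

    /* Sketch: walk a table of 4-byte PC-relative offsets and recover
     * the absolute address each entry refers to.  'start' and 'end'
     * delimit the .smp_locks section. */
    static void walk_smp_locks(const s32 *start, const s32 *end)
    {
    	const s32 *poff;

    	for (poff = start; poff < end; poff++) {
    		/* entry stores (target - &entry), so add it back: */
    		u8 *ptr = (u8 *)poff + *poff;
    		/* 'ptr' now points at the lock prefix byte */
    	}
    }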
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 86a0ff0aeac7..7014e88bc779 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -174,6 +174,40 @@
 					(~((1ULL << (12 + ((lvl) * 9))) - 1)))
 #define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
 
+/*
+ * Returns the page table level to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_LEVEL(pagesize) \
+		((__ffs(pagesize) - 12) / 9)
+/*
+ * Returns the number of ptes to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_PTE_COUNT(pagesize) \
+		(1ULL << ((__ffs(pagesize) - 12) % 9))
+
+/*
+ * Aligns a given io-virtual address to a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_ALIGN(address, pagesize) \
+		((address) & ~((pagesize) - 1))
+/*
+ * Creates an IOMMU PTE for an address and a given pagesize
+ * The PTE has no permission bits set
+ * Pagesize is expected to be a power-of-two larger than 4096
+ */
+#define PAGE_SIZE_PTE(address, pagesize)		\
+		(((address) | ((pagesize) - 1)) &	\
+		 (~(pagesize >> 1)) & PM_ADDR_MASK)
+
+/*
+ * Takes a PTE value with mode=0x07 and returns the page size it maps
+ */
+#define PTE_PAGE_SIZE(pte) \
+	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
+
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
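A quick sanity check of the page-size math added above, worked by hand (not part of the commit): a 64 KiB page gives __ffs(0x10000) = 16, so PAGE_SIZE_LEVEL = (16-12)/9 = 0 and PAGE_SIZE_PTE_COUNT = 1 << ((16-12)%9) = 16 level-0 PTEs; a 2 MiB page gives __ffs = 21, level 1, and a single PTE. The same arithmetic as a standalone userspace check (on a power of two, the kernel's __ffs() equals GCC's __builtin_ctzl()):

    #include <assert.h>

    int main(void)
    {
    	unsigned long sz64k = 0x10000UL, sz2m = 0x200000UL;

    	assert((__builtin_ctzl(sz64k) - 12) / 9 == 0);           /* level 0  */
    	assert(1UL << ((__builtin_ctzl(sz64k) - 12) % 9) == 16); /* 16 ptes  */
    	assert((__builtin_ctzl(sz2m) - 12) / 9 == 1);            /* level 1  */
    	assert(1UL << ((__builtin_ctzl(sz2m) - 12) % 9) == 1);   /* 1 pte    */
    	return 0;
    }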
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..9686c3d9ff73
--- /dev/null
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#ifdef CONFIG_64BIT
+/* popcnt %edi, %eax -- redundant REX prefix for alignment */
+#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %rdi, %rax */
+#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+#define REG_IN "D"
+#define REG_OUT "a"
+#else
+/* popcnt %eax, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
+#define REG_IN "a"
+#define REG_OUT "a"
+#endif
+
+/*
+ * __sw_hweightXX are called from within the alternatives below
+ * and callee-clobbered registers need to be taken care of. See
+ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+ * compiler switches.
+ */
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	unsigned int res = 0;
+
+	asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+		     : "="REG_OUT (res)
+		     : REG_IN (w));
+
+	return res;
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __arch_hweight32(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+	unsigned long res = 0;
+
+#ifdef CONFIG_X86_32
+	return  __arch_hweight32((u32)w) +
+		__arch_hweight32((u32)(w >> 32));
+#else
+	asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+		     : "="REG_OUT (res)
+		     : REG_IN (w));
+#endif /* CONFIG_X86_32 */
+
+	return res;
+}
+
+#endif
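The ALTERNATIVE() wrapper in this new header patches the call to the __sw_hweightXX software routines into a single popcnt instruction on CPUs that report X86_FEATURE_POPCNT. A rough userspace analogue of the same fallback idea, using the compiler builtin where the hardware path is available (illustrative only, not kernel code):

    /* Illustrative analogue: use hardware popcnt when the compiler may
     * emit it (-mpopcnt defines __POPCNT__), else a software loop. */
    static unsigned int sw_hweight32(unsigned int w)
    {
    	unsigned int res = 0;

    	while (w) {
    		res += w & 1;
    		w >>= 1;
    	}
    	return res;
    }

    unsigned int hweight32(unsigned int w)
    {
    #ifdef __POPCNT__
    	return __builtin_popcount(w);
    #else
    	return sw_hweight32(w);
    #endif
    }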
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 8f8217b9bdac..952a826ac4e5 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -22,7 +22,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return v->counter;
+	return (*(volatile int *)&(v)->counter);
 }
 
 /**
@@ -246,6 +246,29 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+/*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline int atomic_dec_if_positive(atomic_t *v)
+{
+	int c, old, dec;
+	c = atomic_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 /**
  * atomic_inc_short - increment of a short integer
  * @v: pointer to type int
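atomic_dec_if_positive() above is the classic cmpxchg retry loop: read the counter, compute the tentative result, and only commit if no other CPU changed the value in between. A hedged usage sketch (the resource-pool counter and helper name are hypothetical, not from this commit):

    /* Sketch: take one unit from a pool without driving it negative. */
    static int try_take_resource(atomic_t *pool)
    {
    	if (atomic_dec_if_positive(pool) < 0)
    		return -EBUSY;	/* pool was already empty */
    	return 0;		/* one unit claimed */
    }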
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 03027bf28de5..2a934aa19a43 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,109 +14,193 @@ typedef struct {
 
 #define ATOMIC64_INIT(val)	{ (val) }
 
-extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+#ifdef CONFIG_X86_CMPXCHG64
+#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#else
+#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#endif
+
+#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+
+/**
+ * atomic64_cmpxchg - cmpxchg atomic64 variable
+ * @p: pointer to type atomic64_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	return cmpxchg64(&v->counter, o, n);
+}
 
 /**
  * atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
  *
- * Atomically xchgs the value of @ptr to @new_val and returns
+ * Atomically xchgs the value of @v to @n and returns
  * the old value.
  */
-extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
+{
+	long long o;
+	unsigned high = (unsigned)(n >> 32);
+	unsigned low = (unsigned)n;
+	asm volatile(ATOMIC64_ALTERNATIVE(xchg)
+		     : "=A" (o), "+b" (low), "+c" (high)
+		     : "S" (v)
+		     : "memory"
+		     );
+	return o;
+}
 
 /**
  * atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
  *
- * Atomically sets the value of @ptr to @new_val.
+ * Atomically sets the value of @v to @n.
  */
-extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned high = (unsigned)(i >> 32);
+	unsigned low = (unsigned)i;
+	asm volatile(ATOMIC64_ALTERNATIVE(set)
+		     : "+b" (low), "+c" (high)
+		     : "S" (v)
+		     : "eax", "edx", "memory"
+		     );
+}
 
 /**
  * atomic64_read - read atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically reads the value of @ptr and returns it.
+ * Atomically reads the value of @v and returns it.
  */
-static inline u64 atomic64_read(atomic64_t *ptr)
+static inline long long atomic64_read(atomic64_t *v)
 {
-	u64 res;
-
-	/*
-	 * Note, we inline this atomic64_t primitive because
-	 * it only clobbers EAX/EDX and leaves the others
-	 * untouched. We also (somewhat subtly) rely on the
-	 * fact that cmpxchg8b returns the current 64-bit value
-	 * of the memory location we are touching:
-	 */
-	asm volatile(
-		"mov %%ebx, %%eax\n\t"
-		"mov %%ecx, %%edx\n\t"
-		LOCK_PREFIX "cmpxchg8b %1\n"
-		: "=&A" (res)
-		: "m" (*ptr)
-		);
-
-	return res;
-}
-
-extern u64 atomic64_read(atomic64_t *ptr);
+	long long r;
+	asm volatile(ATOMIC64_ALTERNATIVE(read)
+		     : "=A" (r), "+c" (v)
+		     : : "memory"
+		     );
+	return r;
+}
 
 /**
  * atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ * Atomically adds @i to @v and returns @i + *@v
  */
-extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE(add_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
 
 /*
  * Other variants with different arithmetic operators:
  */
-extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
-extern u64 atomic64_inc_return(atomic64_t *ptr);
-extern u64 atomic64_dec_return(atomic64_t *ptr);
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
+
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+	long long a;
+	asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
+		     : "=A" (a)
+		     : "S" (v)
+		     : "memory", "ecx"
+		     );
+	return a;
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+	long long a;
+	asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
+		     : "=A" (a)
+		     : "S" (v)
+		     : "memory", "ecx"
+		     );
+	return a;
+}
 
 /**
  * atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
- * Atomically adds @delta to @ptr.
+ * Atomically adds @i to @v.
  */
-extern void atomic64_add(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
 
 /**
  * atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
  *
- * Atomically subtracts @delta from @ptr.
+ * Atomically subtracts @i from @v.
  */
-extern void atomic64_sub(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_sub(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
 
 /**
  * atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
  *
- * Atomically subtracts @delta from @ptr and returns
+ * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
  * other cases.
  */
-extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+	return atomic64_sub_return(i, v) == 0;
+}
 
 /**
  * atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically increments @ptr by 1.
+ * Atomically increments @v by 1.
  */
-extern void atomic64_inc(atomic64_t *ptr);
+static inline void atomic64_inc(atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
+		     : : "S" (v)
+		     : "memory", "eax", "ecx", "edx"
+		     );
+}
 
 /**
  * atomic64_dec - decrement atomic64 variable
@@ -124,37 +208,97 @@ extern void atomic64_inc(atomic64_t *ptr);
  *
  * Atomically decrements @ptr by 1.
  */
-extern void atomic64_dec(atomic64_t *ptr);
+static inline void atomic64_dec(atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
+		     : : "S" (v)
+		     : "memory", "eax", "ecx", "edx"
+		     );
+}
 
 /**
  * atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically decrements @ptr by 1 and
+ * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
  * cases.
  */
-extern int atomic64_dec_and_test(atomic64_t *ptr);
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+	return atomic64_dec_return(v) == 0;
+}
 
 /**
  * atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically increments @ptr by 1
+ * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-extern int atomic64_inc_and_test(atomic64_t *ptr);
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+	return atomic64_inc_return(v) == 0;
+}
 
 /**
  * atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
- * Atomically adds @delta to @ptr and returns true
+ * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
+static inline int atomic64_add_negative(long long i, atomic64_t *v)
+{
+	return atomic64_add_return(i, v) < 0;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned low = (unsigned)u;
+	unsigned high = (unsigned)(u >> 32);
+	asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
+		     : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
+		     : : "memory");
+	return (int)a;
+}
+
+
+static inline int atomic64_inc_not_zero(atomic64_t *v)
+{
+	int r;
+	asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
+		     : "=a" (r)
+		     : "S" (v)
+		     : "ecx", "edx", "memory"
+		     );
+	return r;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long long r;
+	asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
+		     : "=A" (r)
+		     : "S" (v)
+		     : "ecx", "memory"
+		     );
+	return r;
+}
+
+#undef ATOMIC64_ALTERNATIVE
+#undef ATOMIC64_ALTERNATIVE_
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
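The asm bodies above call out-of-line helpers (atomic64_*_cx8 or, on pre-cmpxchg8b CPUs, atomic64_*_386) with a fixed register convention: the atomic64_t pointer travels in a single register and the 64-bit value in the %edx:%eax pair (the "A" constraint). Callers are unaffected and use the ordinary API; a hedged sketch (the counter name is hypothetical):

    /* Sketch: the API is unchanged for callers; the alternatives
     * machinery selects the cmpxchg8b or i386 helper at boot. */
    static atomic64_t bytes_seen = ATOMIC64_INIT(0);

    static void account(long long n)
    {
    	atomic64_add(n, &bytes_seen);
    }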
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 51c5b4056929..49fd1ea22951 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
  */
 static inline long atomic64_read(const atomic64_t *v)
 {
-	return v->counter;
+	return (*(volatile long *)&(v)->counter);
 }
 
 /**
@@ -221,4 +221,27 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long c, old, dec;
+	c = atomic64_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic64_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 #endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 02b47a603fc8..545776efeb16 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -444,7 +444,9 @@ static inline int fls(int x)
 
 #define ARCH_HAS_FAST_MULTIPLIER 1
 
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 7a1065958ba9..3b62ab56c7a0 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -24,7 +24,7 @@
 #define MIN_KERNEL_ALIGN	(_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
 
 #if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
-	(CONFIG_PHYSICAL_ALIGN < (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2))
+	(CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
 #error "Invalid value for CONFIG_PHYSICAL_ALIGN"
 #endif
 
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 634c40a739a6..c70068d05f70 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -44,9 +44,6 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_WC				PG_arch_1
-PAGEFLAG(WC, WC)
-
 #ifdef CONFIG_X86_PAT
 /*
  * X86 PAT uses page flags WC and Uncached together to keep track of
@@ -55,16 +52,24 @@ PAGEFLAG(WC, WC)
  * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
  * been changed from its default (value of -1 used to denote this).
  * Note we do not support _PAGE_CACHE_UC here.
- *
- * Caller must hold memtype_lock for atomicity.
  */
+
+#define _PGMT_DEFAULT		0
+#define _PGMT_WC		(1UL << PG_arch_1)
+#define _PGMT_UC_MINUS		(1UL << PG_uncached)
+#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
+
 static inline unsigned long get_page_memtype(struct page *pg)
 {
-	if (!PageUncached(pg) && !PageWC(pg))
+	unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+	if (pg_flags == _PGMT_DEFAULT)
 		return -1;
-	else if (!PageUncached(pg) && PageWC(pg))
+	else if (pg_flags == _PGMT_WC)
 		return _PAGE_CACHE_WC;
-	else if (PageUncached(pg) && !PageWC(pg))
+	else if (pg_flags == _PGMT_UC_MINUS)
 		return _PAGE_CACHE_UC_MINUS;
 	else
 		return _PAGE_CACHE_WB;
@@ -72,25 +77,26 @@ static inline unsigned long get_page_memtype(struct page *pg)
 
 static inline void set_page_memtype(struct page *pg, unsigned long memtype)
 {
+	unsigned long memtype_flags = _PGMT_DEFAULT;
+	unsigned long old_flags;
+	unsigned long new_flags;
+
 	switch (memtype) {
 	case _PAGE_CACHE_WC:
-		ClearPageUncached(pg);
-		SetPageWC(pg);
+		memtype_flags = _PGMT_WC;
 		break;
 	case _PAGE_CACHE_UC_MINUS:
-		SetPageUncached(pg);
-		ClearPageWC(pg);
+		memtype_flags = _PGMT_UC_MINUS;
 		break;
 	case _PAGE_CACHE_WB:
-		SetPageUncached(pg);
-		SetPageWC(pg);
-		break;
-	default:
-	case -1:
-		ClearPageUncached(pg);
-		ClearPageWC(pg);
+		memtype_flags = _PGMT_WB;
 		break;
 	}
+
+	do {
+		old_flags = pg->flags;
+		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
 }
 #else
 static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
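set_page_memtype() now updates its two page-flag bits with a lock-free cmpxchg loop instead of depending on memtype_lock, so concurrent updates to unrelated bits in page->flags are never lost. The same read/modify/compare-exchange pattern in portable form, as a sketch using C11 atomics rather than the kernel's cmpxchg():

    #include <stdatomic.h>

    /* Sketch: replace only the bits under 'mask', retrying if another
     * thread raced with us in between the load and the CAS. */
    static void set_bits_under_mask(_Atomic unsigned long *flags,
    				unsigned long mask, unsigned long newbits)
    {
    	unsigned long old = atomic_load(flags);

    	while (!atomic_compare_exchange_weak(flags, &old,
    					     (old & ~mask) | newbits))
    		;	/* 'old' was reloaded by the failed CAS; retry */
    }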
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ffb9bb6b6c37..8859e12dd3cf 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -271,7 +271,8 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 	__typeof__(*(ptr)) __ret;				\
 	__typeof__(*(ptr)) __old = (o);				\
 	__typeof__(*(ptr)) __new = (n);				\
-	alternative_io("call cmpxchg8b_emu",			\
+	alternative_io(LOCK_PREFIX_HERE				\
+		       "call cmpxchg8b_emu",			\
 		       "lock; cmpxchg8b (%%esi)" ,		\
 		       X86_FEATURE_CX8,				\
 		       "=A" (__ret),				\
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0cd82d068613..dca9c545f44e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -161,6 +161,7 @@
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
 #define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
+#define X86_FEATURE_CPB		(7*32+ 2) /* AMD Core Performance Boost */
 
 /* Virtualization flags: Linux defined */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
@@ -175,6 +176,7 @@
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
+#include <asm/asm.h>
 #include <linux/bitops.h>
 
 extern const char * const x86_cap_flags[NCAPINTS*32];
@@ -283,6 +285,62 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+/*
+ * Static testing of CPU features. Used the same as boot_cpu_has().
+ * These are only valid after alternatives have run, but will statically
+ * patch the target code for additional performance.
+ *
+ */
+static __always_inline __pure bool __static_cpu_has(u8 bit)
+{
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+	asm goto("1: jmp %l[t_no]\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 _ASM_ALIGN "\n"
+		 _ASM_PTR "1b\n"
+		 _ASM_PTR "0\n"		/* no replacement */
+		 " .byte %P0\n"		/* feature bit */
+		 " .byte 2b - 1b\n"	/* source len */
+		 " .byte 0\n"		/* replacement len */
+		 " .byte 0xff + 0 - (2b-1b)\n"	/* padding */
+		 ".previous\n"
+		 : : "i" (bit) : : t_no);
+	return true;
+t_no:
+	return false;
+#else
+	u8 flag;
+	/* Open-coded due to __stringify() in ALTERNATIVE() */
+	asm volatile("1: movb $0,%0\n"
+		     "2:\n"
+		     ".section .altinstructions,\"a\"\n"
+		     _ASM_ALIGN "\n"
+		     _ASM_PTR "1b\n"
+		     _ASM_PTR "3f\n"
+		     " .byte %P1\n"		/* feature bit */
+		     " .byte 2b - 1b\n"		/* source len */
+		     " .byte 4f - 3f\n"		/* replacement len */
+		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "3: movb $1,%0\n"
+		     "4:\n"
+		     ".previous\n"
+		     : "=qm" (flag) : "i" (bit));
+	return flag;
+#endif
+}
+
+#define static_cpu_has(bit)					\
+(								\
+	__builtin_constant_p(boot_cpu_has(bit)) ?		\
+		boot_cpu_has(bit) :				\
+	(__builtin_constant_p(bit) && !((bit) & ~0xff)) ?	\
+		__static_cpu_has(bit) :				\
+		boot_cpu_has(bit)				\
+)
+
 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 
 #endif /* _ASM_X86_CPUFEATURE_H */
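static_cpu_has() trades a memory test for a patched branch: the asm goto form emits a jmp that the alternatives machinery later rewrites in place, so after boot the feature check costs essentially nothing on the hot path. Typical usage, as the i387.h hunk further down also shows with use_xsave(); a sketch with hypothetical helper names:

    /* Sketch: hot-path feature test.  Only valid once alternatives
     * have patched the kernel at boot; init code should keep using
     * boot_cpu_has().  The two copy helpers are hypothetical. */
    static void copy_page_example(void *dst, void *src)
    {
    	if (static_cpu_has(X86_FEATURE_XMM2))
    		copy_page_sse2(dst, src);	/* hypothetical fast path */
    	else
    		copy_page_generic(dst, src);	/* hypothetical fallback */
    }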
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index ae6253ab9029..733f7e91e7a9 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -34,6 +34,18 @@
 #define CFI_SIGNAL_FRAME
 #endif
 
+#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+	/*
+	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+	 * The latter we currently just discard since we don't do DWARF
+	 * unwinding at runtime. So only the offline DWARF information is
+	 * useful to anyone. Note we should not use this directive if this
+	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
+	 * changed so it doesn't discard .eh_frame.
+	 */
+	.cfi_sections .debug_frame
+#endif
+
 #else
 
 /*
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 0e22296790d3..ec8a52d14ab1 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -45,7 +45,12 @@
 #define E820_NVS	4
 #define E820_UNUSABLE	5
 
-/* reserved RAM used by kernel itself */
+/*
+ * reserved RAM used by kernel itself
+ * if CONFIG_INTEL_TXT is enabled, memory of this type will be
+ * included in the S3 integrity calculation and so should not include
+ * any memory that BIOS might alter over the S3 transition
+ */
 #define E820_RESERVED_KERN        128
 
 #ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f8576427cfe..aeab29aee617 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)
+#define inc_irq_stat(member)	percpu_inc(irq_stat.member)
 
 #define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
 
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
index e153a2b3889a..5df477ac3af7 100644
--- a/arch/x86/include/asm/hyperv.h
+++ b/arch/x86/include/asm/hyperv.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_KVM_HYPERV_H
-#define _ASM_X86_KVM_HYPERV_H
+#ifndef _ASM_X86_HYPERV_H
+#define _ASM_X86_HYPERV_H
 
 #include <linux/types.h>
 
@@ -14,6 +14,10 @@
 #define HYPERV_CPUID_ENLIGHTMENT_INFO		0x40000004
 #define HYPERV_CPUID_IMPLEMENT_LIMITS		0x40000005
 
+#define HYPERV_HYPERVISOR_PRESENT_BIT		0x80000000
+#define HYPERV_CPUID_MIN			0x40000005
+#define HYPERV_CPUID_MAX			0x4000ffff
+
 /*
  * Feature identification. EAX indicates which features are available
  * to the partition based upon the current partition privileges.
@@ -129,6 +133,9 @@
 /* MSR used to provide vcpu index */
 #define HV_X64_MSR_VP_INDEX			0x40000002
 
+/* MSR used to read the per-partition time reference counter */
+#define HV_X64_MSR_TIME_REF_COUNT		0x40000020
+
 /* Define the virtual APIC registers */
 #define HV_X64_MSR_EOI				0x40000070
 #define HV_X64_MSR_ICR				0x40000071
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b78c0941e422..70abda7058c8 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -17,10 +17,33 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
  */
-#ifndef ASM_X86__HYPERVISOR_H
-#define ASM_X86__HYPERVISOR_H
+#ifndef _ASM_X86_HYPERVISOR_H
+#define _ASM_X86_HYPERVISOR_H
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);
 extern void init_hypervisor_platform(void);
 
+/*
+ * x86 hypervisor information
+ */
+struct hypervisor_x86 {
+	/* Hypervisor name */
+	const char	*name;
+
+	/* Detection routine */
+	bool		(*detect)(void);
+
+	/* Adjust CPU feature bits (run once per CPU) */
+	void		(*set_cpu_features)(struct cpuinfo_x86 *);
+
+	/* Platform setup (run once per boot) */
+	void		(*init_platform)(void);
+};
+
+extern const struct hypervisor_x86 *x86_hyper;
+
+/* Recognized hypervisors */
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+
 #endif
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index da2930924501..c991b3a7b904 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -16,7 +16,9 @@
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
 #include <linux/hardirq.h>
+#include <linux/slab.h>
 #include <asm/asm.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/user.h>
@@ -56,6 +58,11 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 #define X87_FSW_ES (1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_xsave(void)
+{
+	return static_cpu_has(X86_FEATURE_XSAVE);
+}
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
@@ -91,15 +98,15 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
    values. The kernel data segment can be sometimes 0 and sometimes
    new user value. Both should be ok.
    Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
 {
-	struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	struct xsave_struct *xstate = &fpu->state->xsave;
+	struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
 	/*
 	 * xsave header may indicate the init state of the FP.
 	 */
-	if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+	if (use_xsave() &&
 	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
 		return;
 
@@ -111,6 +118,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
 		       X86_FEATURE_FXSAVE_LEAK);
 }
 
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+	fpu_clear(&tsk->thread.fpu);
+}
+
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
@@ -135,7 +147,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	return err;
 }
 
-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
 {
 	/* Using "rex64; fxsave %0" is broken because, if the memory operand
 	   uses any extended registers for addressing, a second REX prefix
@@ -145,42 +157,45 @@ static inline void fxsave(struct task_struct *tsk)
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave));
 #elif 0
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
 	   needed for addressing (fix submitted to mainline 2005-11-21). */
 	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave));
 #else
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
 	__asm__ __volatile__("rex64/fxsave (%1)"
-			     : "=m" (tsk->thread.xstate->fxsave)
-			     : "cdaSDb" (&tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave)
+			     : "cdaSDb" (&fpu->state->fxsave));
 #endif
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE)
-		xsave(tsk);
+	if (use_xsave())
+		fpu_xsave(fpu);
 	else
-		fxsave(tsk);
+		fpu_fxsave(fpu);
+
+	fpu_clear(fpu);
+}
 
-	clear_fpu_state(tsk);
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	fpu_save_init(&tsk->thread.fpu);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #else  /* CONFIG_X86_32 */
 
 #ifdef CONFIG_MATH_EMULATION
-extern void finit_task(struct task_struct *tsk);
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
-static inline void finit_task(struct task_struct *tsk)
-{
-}
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
 static inline void tolerant_fwait(void)
@@ -216,13 +231,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 /*
  * These must be called with preempt disabled
  */
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE) {
-		struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	if (use_xsave()) {
+		struct xsave_struct *xstate = &fpu->state->xsave;
+		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
-		xsave(tsk);
+		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
@@ -246,8 +261,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 		"fxsave %[fx]\n"
 		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
 		X86_FEATURE_FXSR,
-		[fx] "m" (tsk->thread.xstate->fxsave),
-		[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+		[fx] "m" (fpu->state->fxsave),
+		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
 clear_state:
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
@@ -259,17 +274,34 @@ clear_state:
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
 end:
+	;
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	fpu_save_init(&tsk->thread.fpu);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
+
 #endif	/* CONFIG_X86_64 */
 
-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE)
-		return xrstor_checking(&tsk->thread.xstate->xsave);
+	return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+	if (use_xsave())
+		return fpu_xrstor_checking(fpu);
 	else
-		return fxrstor_checking(&tsk->thread.xstate->fxsave);
+		return fpu_fxrstor_checking(fpu);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
 /*
@@ -397,30 +429,59 @@ static inline void clear_fpu(struct task_struct *tsk)
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.xstate->fxsave.cwd;
+		return tsk->thread.fpu.state->fxsave.cwd;
 	} else {
-		return (unsigned short)tsk->thread.xstate->fsave.cwd;
+		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
 	}
 }
 
 static inline unsigned short get_fpu_swd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.xstate->fxsave.swd;
+		return tsk->thread.fpu.state->fxsave.swd;
 	} else {
-		return (unsigned short)tsk->thread.xstate->fsave.swd;
+		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
 	}
 }
 
 static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
 	if (cpu_has_xmm) {
-		return tsk->thread.xstate->fxsave.mxcsr;
+		return tsk->thread.fpu.state->fxsave.mxcsr;
 	} else {
 		return MXCSR_DEFAULT;
 	}
 }
 
+static bool fpu_allocated(struct fpu *fpu)
+{
+	return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+	if (fpu_allocated(fpu))
+		return 0;
+	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+	if (!fpu->state)
+		return -ENOMEM;
+	WARN_ON((unsigned long)fpu->state & 15);
+	return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+	if (fpu->state) {
+		kmem_cache_free(task_xstate_cachep, fpu->state);
+		fpu->state = NULL;
+	}
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+	memcpy(dst->state, src->state, xstate_size);
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
index 1edbf89680fd..fc1f579fb965 100644
--- a/arch/x86/include/asm/i8253.h
+++ b/arch/x86/include/asm/i8253.h
@@ -6,7 +6,7 @@
 #define PIT_CH0		0x40
 #define PIT_CH2		0x42
 
-extern spinlock_t i8253_lock;
+extern raw_spinlock_t i8253_lock;
 
 extern struct clock_event_device *global_clock_event;
 
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 35832a03a515..63cb4096c3dc 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -159,7 +159,6 @@ struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 		 struct io_apic_irq_attr *irq_attr);
 void setup_IO_APIC_irq_extra(u32 gsi);
-extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 extern void ioapic_insert_resources(void);
 
@@ -180,12 +179,13 @@ extern void ioapic_write_entry(int apic, int pin,
 extern void setup_ioapic_ids_from_mpc(void);
 
 struct mp_ioapic_gsi{
-	int gsi_base;
-	int gsi_end;
+	u32 gsi_base;
+	u32 gsi_end;
 };
 extern struct mp_ioapic_gsi  mp_gsi_routing[];
-int mp_find_ioapic(int gsi);
-int mp_find_ioapic_pin(int ioapic, int gsi);
+extern u32 gsi_end;
+int mp_find_ioapic(u32 gsi);
+int mp_find_ioapic_pin(int ioapic, u32 gsi);
 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void __init pre_init_apic_IRQ0(void);
 
@@ -197,7 +197,8 @@ static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void)	{ }
 static inline void ioapic_insert_resources(void) { }
 static inline void probe_nr_irqs_gsi(void)	{ }
-static inline int mp_find_ioapic(int gsi) { return 0; }
+#define gsi_end (NR_IRQS_LEGACY - 1)
+static inline int mp_find_ioapic(u32 gsi) { return 0; }
 
 struct io_apic_irq_attr;
 static inline int io_apic_set_pci_routing(struct device *dev, int irq,
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
index f70e60071fe8..af00bd1d2089 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int k8_scan_nodes(void);
 
 #ifdef CONFIG_K8_NB
+extern int num_k8_northbridges;
+
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
 	return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
 }
+
 #else
+#define num_k8_northbridges 0
+
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
 	return NULL;
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index d8bf23a88d05..c82868e9f905 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -105,16 +105,6 @@ extern void mp_config_acpi_legacy_irqs(void);
 struct device;
 extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
 				 int active_high_low);
-extern int acpi_probe_gsi(void);
-#ifdef CONFIG_X86_IO_APIC
-extern int mp_find_ioapic(int gsi);
-extern int mp_find_ioapic_pin(int ioapic, int gsi);
-#endif
-#else /* !CONFIG_ACPI: */
-static inline int acpi_probe_gsi(void)
-{
-	return 0;
-}
 #endif /* CONFIG_ACPI */
 
 #define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
new file mode 100644
index 000000000000..79ce5685ab64
--- /dev/null
+++ b/arch/x86/include/asm/mshyperv.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_MSHYPER_H
+#define _ASM_X86_MSHYPER_H
+
+#include <linux/types.h>
+#include <asm/hyperv.h>
+
+struct ms_hyperv_info {
+	u32 features;
+	u32 hints;
+};
+
+extern struct ms_hyperv_info ms_hyperv;
+
+#endif
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 66a272dfd8b8..0ec6d12d84e6 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -190,6 +190,29 @@ do { \
 	pfo_ret__;					\
 })
 
+#define percpu_unary_op(op, var)			\
+({							\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm(op "b "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	case 2:						\
+		asm(op "w "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	case 4:						\
+		asm(op "l "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	case 8:						\
+		asm(op "q "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+})
+
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
@@ -207,6 +230,7 @@ do { \
 #define percpu_and(var, val)		percpu_to_op("and", var, val)
 #define percpu_or(var, val)		percpu_to_op("or", var, val)
 #define percpu_xor(var, val)		percpu_to_op("xor", var, val)
+#define percpu_inc(var)		percpu_unary_op("inc", var)
 
 #define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
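percpu_unary_op() sizes the instruction to the variable the same way percpu_to_op() does for binary ops, so percpu_inc(var) compiles to a single segment-prefixed incb/incw/incl/incq with no temporary register. The hardirq.h hunk earlier converts inc_irq_stat() to it; a hedged usage sketch (irq_stat and its __nmi_count field are declared in <asm/hardirq.h>, shown here only for illustration):

    /* Sketch: bump a per-cpu statistics counter in one instruction. */
    static inline void count_nmi_example(void)
    {
    	percpu_inc(irq_stat.__nmi_count);
    }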
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 32428b410b55..5a51379dcbe4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -113,7 +113,6 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 #endif
-	unsigned int		x86_hyper_vendor;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL	0
@@ -127,9 +126,6 @@ struct cpuinfo_x86 {
 
 #define X86_VENDOR_UNKNOWN	0xff
 
-#define X86_HYPER_VENDOR_NONE	0
-#define X86_HYPER_VENDOR_VMWARE	1
-
 /*
  * capabilities of CPUs
  */
@@ -380,6 +376,10 @@ union thread_xstate {
 	struct xsave_struct		xsave;
 };
 
+struct fpu {
+	union thread_xstate *state;
+};
+
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
@@ -457,7 +457,7 @@ struct thread_struct {
 	unsigned long		trap_no;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
-	union thread_xstate	*xstate;
+	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index d017ed5502e2..d4092fac226b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -242,7 +242,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TS_POLLING		0x0004	/* true if in idle loop
 					   and not sleeping */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
-#define TS_XSAVE		0x0010	/* Use xsave/xrstor */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4da91ad69e0d..f66cda56781d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition)
 
 extern int panic_on_unrecovered_nmi;
 
-void math_error(void __user *);
+void math_error(struct pt_regs *, int, int);
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
 asmlinkage void smp_thermal_interrupt(void);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index b414d2b401f6..aa558ac0306e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -27,13 +27,14 @@
  * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
  *
  * We will use 31 sets, one for sending BAU messages from each of the 32
- * cpu's on the node.
+ * cpu's on the uvhub.
  *
  * TLB shootdown will use the first of the 8 descriptors of each set.
  * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
  */
 
 #define UV_ITEMS_PER_DESCRIPTOR		8
+#define MAX_BAU_CONCURRENT		3
 #define UV_CPUS_PER_ACT_STATUS		32
 #define UV_ACT_STATUS_MASK		0x3
 #define UV_ACT_STATUS_SIZE		2
@@ -45,6 +46,9 @@
45#define UV_PAYLOADQ_PNODE_SHIFT 49 46#define UV_PAYLOADQ_PNODE_SHIFT 49
46#define UV_PTC_BASENAME "sgi_uv/ptc_statistics" 47#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
47#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask)) 48#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
49#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
50#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
51#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
48 52
49/* 53/*
50 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 54 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
@@ -55,15 +59,29 @@
55#define DESC_STATUS_SOURCE_TIMEOUT 3 59#define DESC_STATUS_SOURCE_TIMEOUT 3
56 60
57/* 61/*
 58 * source side thresholds at which message retries print a warning 62 * source side thresholds at which message retries print a warning
59 */ 63 */
60#define SOURCE_TIMEOUT_LIMIT 20 64#define SOURCE_TIMEOUT_LIMIT 20
61#define DESTINATION_TIMEOUT_LIMIT 20 65#define DESTINATION_TIMEOUT_LIMIT 20
62 66
63/* 67/*
68 * misc. delays, in microseconds
69 */
70#define THROTTLE_DELAY 10
71#define TIMEOUT_DELAY 10
72#define BIOS_TO 1000
73/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
74
75/*
 76 * thresholds at which to use IPI to free resources
77 */
78#define PLUGSB4RESET 100
79#define TIMEOUTSB4RESET 100
80
81/*
64 * number of entries in the destination side payload queue 82 * number of entries in the destination side payload queue
65 */ 83 */
66#define DEST_Q_SIZE 17 84#define DEST_Q_SIZE 20
67/* 85/*
68 * number of destination side software ack resources 86 * number of destination side software ack resources
69 */ 87 */
@@ -72,9 +90,10 @@
72/* 90/*
73 * completion statuses for sending a TLB flush message 91 * completion statuses for sending a TLB flush message
74 */ 92 */
75#define FLUSH_RETRY 1 93#define FLUSH_RETRY_PLUGGED 1
76#define FLUSH_GIVEUP 2 94#define FLUSH_RETRY_TIMEOUT 2
77#define FLUSH_COMPLETE 3 95#define FLUSH_GIVEUP 3
96#define FLUSH_COMPLETE 4
78 97
79/* 98/*
80 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor) 99 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -86,14 +105,14 @@
86 * 'base_dest_nodeid' field of the header corresponds to the 105 * 'base_dest_nodeid' field of the header corresponds to the
87 * destination nodeID associated with that specified bit. 106 * destination nodeID associated with that specified bit.
88 */ 107 */
89struct bau_target_nodemask { 108struct bau_target_uvhubmask {
90 unsigned long bits[BITS_TO_LONGS(256)]; 109 unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
91}; 110};
92 111
93/* 112/*
94 * mask of cpu's on a node 113 * mask of cpu's on a uvhub
95 * (during initialization we need to check that unsigned long has 114 * (during initialization we need to check that unsigned long has
96 * enough bits for max. cpu's per node) 115 * enough bits for max. cpu's per uvhub)
97 */ 116 */
98struct bau_local_cpumask { 117struct bau_local_cpumask {
99 unsigned long bits; 118 unsigned long bits;
@@ -135,8 +154,8 @@ struct bau_msg_payload {
135struct bau_msg_header { 154struct bau_msg_header {
136 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 155 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
137 /* bits 5:0 */ 156 /* bits 5:0 */
138 unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ 157 unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
139 /* bits 20:6 */ /* first bit in node_map */ 158 /* bits 20:6 */ /* first bit in uvhub map */
140 unsigned int command:8; /* message type */ 159 unsigned int command:8; /* message type */
141 /* bits 28:21 */ 160 /* bits 28:21 */
142 /* 0x38: SN3net EndPoint Message */ 161 /* 0x38: SN3net EndPoint Message */
@@ -146,26 +165,38 @@ struct bau_msg_header {
146 unsigned int rsvd_2:9; /* must be zero */ 165 unsigned int rsvd_2:9; /* must be zero */
147 /* bits 40:32 */ 166 /* bits 40:32 */
148 /* Suppl_A is 56-41 */ 167 /* Suppl_A is 56-41 */
149 unsigned int payload_2a:8;/* becomes byte 16 of msg */ 168 unsigned int sequence:16;/* message sequence number */
150 /* bits 48:41 */ /* not currently using */ 169 /* bits 56:41 */ /* becomes bytes 16-17 of msg */
151 unsigned int payload_2b:8;/* becomes byte 17 of msg */
152 /* bits 56:49 */ /* not currently using */
153 /* Address field (96:57) is never used as an 170 /* Address field (96:57) is never used as an
154 address (these are address bits 42:3) */ 171 address (these are address bits 42:3) */
172
155 unsigned int rsvd_3:1; /* must be zero */ 173 unsigned int rsvd_3:1; /* must be zero */
156 /* bit 57 */ 174 /* bit 57 */
157 /* address bits 27:4 are payload */ 175 /* address bits 27:4 are payload */
158 /* these 24 bits become bytes 12-14 of msg */ 176 /* these next 24 (58-81) bits become bytes 12-14 of msg */
177
178 /* bits 65:58 land in byte 12 */
159 unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */ 179 unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
160 /* bit 58 */ 180 /* bit 58 */
161 181 unsigned int msg_type:3; /* software type of the message*/
162 unsigned int payload_1a:5;/* not currently used */ 182 /* bits 61:59 */
163 /* bits 63:59 */ 183 unsigned int canceled:1; /* message canceled, resource to be freed*/
164 unsigned int payload_1b:8;/* not currently used */ 184 /* bit 62 */
165 /* bits 71:64 */ 185 unsigned int payload_1a:1;/* not currently used */
166 unsigned int payload_1c:8;/* not currently used */ 186 /* bit 63 */
167 /* bits 79:72 */ 187 unsigned int payload_1b:2;/* not currently used */
168 unsigned int payload_1d:2;/* not currently used */ 188 /* bits 65:64 */
189
190 /* bits 73:66 land in byte 13 */
191 unsigned int payload_1ca:6;/* not currently used */
192 /* bits 71:66 */
193 unsigned int payload_1c:2;/* not currently used */
194 /* bits 73:72 */
195
196 /* bits 81:74 land in byte 14 */
197 unsigned int payload_1d:6;/* not currently used */
198 /* bits 79:74 */
199 unsigned int payload_1e:2;/* not currently used */
169 /* bits 81:80 */ 200 /* bits 81:80 */
170 201
171 unsigned int rsvd_4:7; /* must be zero */ 202 unsigned int rsvd_4:7; /* must be zero */
@@ -178,7 +209,7 @@ struct bau_msg_header {
178 /* bits 95:90 */ 209 /* bits 95:90 */
179 unsigned int rsvd_6:5; /* must be zero */ 210 unsigned int rsvd_6:5; /* must be zero */
180 /* bits 100:96 */ 211 /* bits 100:96 */
181 unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */ 212 unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
182 /* bit 101*/ 213 /* bit 101*/
183 unsigned int fairness:3;/* usually zero */ 214 unsigned int fairness:3;/* usually zero */
184 /* bits 104:102 */ 215 /* bits 104:102 */
@@ -191,13 +222,18 @@ struct bau_msg_header {
191 /* bits 127:107 */ 222 /* bits 127:107 */
192}; 223};
193 224
225/* see msg_type: */
226#define MSG_NOOP 0
227#define MSG_REGULAR 1
228#define MSG_RETRY 2
229
194/* 230/*
195 * The activation descriptor: 231 * The activation descriptor:
196 * The format of the message to send, plus all accompanying control 232 * The format of the message to send, plus all accompanying control
197 * Should be 64 bytes 233 * Should be 64 bytes
198 */ 234 */
199struct bau_desc { 235struct bau_desc {
200 struct bau_target_nodemask distribution; 236 struct bau_target_uvhubmask distribution;
201 /* 237 /*
202 * message template, consisting of header and payload: 238 * message template, consisting of header and payload:
203 */ 239 */
@@ -237,19 +273,25 @@ struct bau_payload_queue_entry {
237 unsigned short acknowledge_count; /* filled in by destination */ 273 unsigned short acknowledge_count; /* filled in by destination */
238 /* 16 bits, bytes 10-11 */ 274 /* 16 bits, bytes 10-11 */
239 275
240 unsigned short replied_to:1; /* sent as 0 by the source */ 276 /* these next 3 bytes come from bits 58-81 of the message header */
241 /* 1 bit */ 277 unsigned short replied_to:1; /* sent as 0 by the source */
242 unsigned short unused1:7; /* not currently using */ 278 unsigned short msg_type:3; /* software message type */
243 /* 7 bits: byte 12) */ 279 unsigned short canceled:1; /* sent as 0 by the source */
280 unsigned short unused1:3; /* not currently using */
281 /* byte 12 */
244 282
245 unsigned char unused2[2]; /* not currently using */ 283 unsigned char unused2a; /* not currently using */
246 /* bytes 13-14 */ 284 /* byte 13 */
285 unsigned char unused2; /* not currently using */
286 /* byte 14 */
247 287
248 unsigned char sw_ack_vector; /* filled in by the hardware */ 288 unsigned char sw_ack_vector; /* filled in by the hardware */
249 /* byte 15 (bits 127:120) */ 289 /* byte 15 (bits 127:120) */
250 290
251 unsigned char unused4[3]; /* not currently using bytes 17-19 */ 291 unsigned short sequence; /* message sequence number */
252 /* bytes 17-19 */ 292 /* bytes 16-17 */
293 unsigned char unused4[2]; /* not currently using bytes 18-19 */
294 /* bytes 18-19 */
253 295
254 int number_of_cpus; /* filled in at destination */ 296 int number_of_cpus; /* filled in at destination */
255 /* 32 bits, bytes 20-23 (aligned) */ 297 /* 32 bits, bytes 20-23 (aligned) */
@@ -259,63 +301,93 @@ struct bau_payload_queue_entry {
259}; 301};
260 302
261/* 303/*
262 * one for every slot in the destination payload queue 304 * one per-cpu; to locate the software tables
263 */
264struct bau_msg_status {
265 struct bau_local_cpumask seen_by; /* map of cpu's */
266};
267
268/*
269 * one for every slot in the destination software ack resources
270 */
271struct bau_sw_ack_status {
272 struct bau_payload_queue_entry *msg; /* associated message */
273 int watcher; /* cpu monitoring, or -1 */
274};
275
276/*
277 * one on every node and per-cpu; to locate the software tables
278 */ 305 */
279struct bau_control { 306struct bau_control {
280 struct bau_desc *descriptor_base; 307 struct bau_desc *descriptor_base;
281 struct bau_payload_queue_entry *bau_msg_head;
282 struct bau_payload_queue_entry *va_queue_first; 308 struct bau_payload_queue_entry *va_queue_first;
283 struct bau_payload_queue_entry *va_queue_last; 309 struct bau_payload_queue_entry *va_queue_last;
284 struct bau_msg_status *msg_statuses; 310 struct bau_payload_queue_entry *bau_msg_head;
285 int *watching; /* pointer to array */ 311 struct bau_control *uvhub_master;
312 struct bau_control *socket_master;
313 unsigned long timeout_interval;
314 atomic_t active_descriptor_count;
315 int max_concurrent;
316 int max_concurrent_constant;
317 int retry_message_scans;
318 int plugged_tries;
319 int timeout_tries;
320 int ipi_attempts;
321 int conseccompletes;
322 short cpu;
323 short uvhub_cpu;
324 short uvhub;
325 short cpus_in_socket;
326 short cpus_in_uvhub;
327 unsigned short message_number;
328 unsigned short uvhub_quiesce;
329 short socket_acknowledge_count[DEST_Q_SIZE];
330 cycles_t send_message;
331 spinlock_t masks_lock;
332 spinlock_t uvhub_lock;
333 spinlock_t queue_lock;
286}; 334};
287 335
288/* 336/*
289 * This structure is allocated per_cpu for UV TLB shootdown statistics. 337 * This structure is allocated per_cpu for UV TLB shootdown statistics.
290 */ 338 */
291struct ptc_stats { 339struct ptc_stats {
292 unsigned long ptc_i; /* number of IPI-style flushes */ 340 /* sender statistics */
293 unsigned long requestor; /* number of nodes this cpu sent to */ 341 unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
294 unsigned long requestee; /* times cpu was remotely requested */ 342 unsigned long s_requestor; /* number of shootdown requests */
295 unsigned long alltlb; /* times all tlb's on this cpu were flushed */ 343 unsigned long s_stimeout; /* source side timeouts */
296 unsigned long onetlb; /* times just one tlb on this cpu was flushed */ 344 unsigned long s_dtimeout; /* destination side timeouts */
297 unsigned long s_retry; /* retries on source side timeouts */ 345 unsigned long s_time; /* time spent in sending side */
298 unsigned long d_retry; /* retries on destination side timeouts */ 346 unsigned long s_retriesok; /* successful retries */
299 unsigned long sflush; /* cycles spent in uv_flush_tlb_others */ 347 unsigned long s_ntargcpu; /* number of cpus targeted */
300 unsigned long dflush; /* cycles spent on destination side */ 348 unsigned long s_ntarguvhub; /* number of uvhubs targeted */
301 unsigned long retriesok; /* successes on retries */ 349 unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */
302 unsigned long nomsg; /* interrupts with no message */ 350 unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */
303 unsigned long multmsg; /* interrupts with multiple messages */ 351 unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */
304 unsigned long ntargeted;/* nodes targeted */ 352 unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */
353 unsigned long s_ntarguvhub1; /* number of times == 1 target hub */
354 unsigned long s_resets_plug; /* ipi-style resets from plug state */
355 unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
356 unsigned long s_busy; /* status stayed busy past s/w timer */
357 unsigned long s_throttles; /* waits in throttle */
358 unsigned long s_retry_messages; /* retry broadcasts */
359 /* destination statistics */
360 unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
361 unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
362 unsigned long d_multmsg; /* interrupts with multiple messages */
363 unsigned long d_nomsg; /* interrupts with no message */
364 unsigned long d_time; /* time spent on destination side */
365 unsigned long d_requestee; /* number of messages processed */
366 unsigned long d_retries; /* number of retry messages processed */
367 unsigned long d_canceled; /* number of messages canceled by retries */
368 unsigned long d_nocanceled; /* retries that found nothing to cancel */
369 unsigned long d_resets; /* number of ipi-style requests processed */
370 unsigned long d_rcanceled; /* number of messages canceled by resets */
305}; 371};
306 372
307static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp) 373static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
308{ 374{
309 return constant_test_bit(node, &dstp->bits[0]); 375 return constant_test_bit(uvhub, &dstp->bits[0]);
310} 376}
311static inline void bau_node_set(int node, struct bau_target_nodemask *dstp) 377static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
312{ 378{
313 __set_bit(node, &dstp->bits[0]); 379 __set_bit(uvhub, &dstp->bits[0]);
314} 380}
315static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits) 381static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
382 int nbits)
316{ 383{
317 bitmap_zero(&dstp->bits[0], nbits); 384 bitmap_zero(&dstp->bits[0], nbits);
318} 385}
386static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
387{
388 return bitmap_weight((unsigned long *)&dstp->bits[0],
389 UV_DISTRIBUTION_SIZE);
390}
319 391
320static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits) 392static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
321{ 393{
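
The renamed helpers above are thin wrappers over an ordinary bitmap sized by UV_DISTRIBUTION_SIZE (per the comment earlier in this header the distribution field is 256 bits, and that value is assumed below). A hedged userspace sketch of the call pattern, with the kernel bit ops swapped for portable equivalents:

	/* Userspace mock of the uvhub-mask helpers; kernel bit ops are
	 * replaced with portable equivalents for illustration. */
	#include <stdio.h>
	#include <string.h>

	#define UV_DISTRIBUTION_SIZE 256	/* assumed: 256 distribution bits */
	#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	struct bau_target_uvhubmask {
		unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
	};

	static void uvhubs_clear(struct bau_target_uvhubmask *m)
	{
		memset(m->bits, 0, sizeof(m->bits));
	}

	static void uvhub_set(int uvhub, struct bau_target_uvhubmask *m)
	{
		m->bits[uvhub / BITS_PER_LONG] |= 1UL << (uvhub % BITS_PER_LONG);
	}

	static int uvhub_weight(const struct bau_target_uvhubmask *m)
	{
		int i, w = 0;

		for (i = 0; i < BITS_TO_LONGS(UV_DISTRIBUTION_SIZE); i++)
			w += __builtin_popcountl(m->bits[i]);
		return w;
	}

	int main(void)
	{
		struct bau_target_uvhubmask dist;

		uvhubs_clear(&dist);
		uvhub_set(3, &dist);
		uvhub_set(64, &dist);
		printf("targeted uvhubs: %d\n", uvhub_weight(&dist)); /* 2 */
		return 0;
	}
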
@@ -328,4 +400,35 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
328extern void uv_bau_message_intr1(void); 400extern void uv_bau_message_intr1(void);
329extern void uv_bau_timeout_intr1(void); 401extern void uv_bau_timeout_intr1(void);
330 402
403struct atomic_short {
404 short counter;
405};
406
407/**
408 * atomic_read_short - read a short atomic variable
409 * @v: pointer of type atomic_short
410 *
411 * Atomically reads the value of @v.
412 */
413static inline int atomic_read_short(const struct atomic_short *v)
414{
415 return v->counter;
416}
417
418/**
419 * atomic_add_short_return - add and return a short int
420 * @i: short value to add
421 * @v: pointer of type atomic_short
422 *
423 * Atomically adds @i to @v and returns @i + @v
424 */
425static inline int atomic_add_short_return(short i, struct atomic_short *v)
426{
427 short __i = i;
428 asm volatile(LOCK_PREFIX "xaddw %0, %1"
429 : "+r" (i), "+m" (v->counter)
430 : : "memory");
431 return i + __i;
432}
433
331#endif /* _ASM_X86_UV_UV_BAU_H */ 434#endif /* _ASM_X86_UV_UV_BAU_H */
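
Note on atomic_add_short_return() above: xaddw writes the old counter value back into i, so i + __i is the old value plus the increment, i.e. the post-add value. A userspace sketch of the same idiom, with the lock prefix spelled directly since this is not built against the kernel's LOCK_PREFIX machinery:

	/* Same xadd idiom in userspace; "lock" is written out here. */
	#include <stdio.h>

	struct atomic_short { short counter; };

	static inline int atomic_add_short_return(short i, struct atomic_short *v)
	{
		short __i = i;

		asm volatile("lock; xaddw %0, %1"
				: "+r" (i), "+m" (v->counter)
				: : "memory");
		return i + __i;	/* i now holds the old counter value */
	}

	int main(void)
	{
		struct atomic_short quiesce = { .counter = 5 };

		/* prints 7: xaddw swapped the old 5 into i, then 2 was re-added */
		printf("%d\n", atomic_add_short_return(2, &quiesce));
		printf("counter=%d\n", quiesce.counter);	/* 7 */
		return 0;
	}

A 16-bit atomic is presumably what short fields such as uvhub_quiesce in struct bau_control above are manipulated with.
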
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 14cc74ba5d23..bf6b88ef8eeb 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -307,7 +307,7 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset
307 * Access Global MMR space using the MMR space located at the top of physical 307 * Access Global MMR space using the MMR space located at the top of physical
308 * memory. 308 * memory.
309 */ 309 */
310static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset) 310static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
311{ 311{
312 return __va(UV_GLOBAL_MMR64_BASE | 312 return __va(UV_GLOBAL_MMR64_BASE |
313 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); 313 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
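
Returning volatile void __iomem * (rather than unsigned long *) lets sparse type-check MMR accesses and pushes callers toward the MMIO accessors. A kernel-context sketch of a reader/writer pair built on it, assuming the usual readq()/writeq(); the example_* names are placeholders, not this header's actual helpers:

	/* Kernel-context sketch, not verbatim from this commit. */
	#include <asm/io.h>
	#include <asm/uv/uv_hub.h>

	static inline unsigned long example_read_global_mmr64(int pnode,
							      unsigned long offset)
	{
		return readq(uv_global_mmr64_address(pnode, offset));
	}

	static inline void example_write_global_mmr64(int pnode,
						      unsigned long offset,
						      unsigned long val)
	{
		writeq(val, uv_global_mmr64_address(pnode, offset));
	}
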
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 2cae46c7c8a2..b2f2d2e05cec 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -1,4 +1,3 @@
1
2/* 1/*
3 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
@@ -15,13 +14,25 @@
15#define UV_MMR_ENABLE (1UL << 63) 14#define UV_MMR_ENABLE (1UL << 63)
16 15
17/* ========================================================================= */ 16/* ========================================================================= */
17/* UVH_BAU_DATA_BROADCAST */
18/* ========================================================================= */
19#define UVH_BAU_DATA_BROADCAST 0x61688UL
20#define UVH_BAU_DATA_BROADCAST_32 0x0440
21
22#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
23#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
24
25union uvh_bau_data_broadcast_u {
26 unsigned long v;
27 struct uvh_bau_data_broadcast_s {
28 unsigned long enable : 1; /* RW */
29 unsigned long rsvd_1_63: 63; /* */
30 } s;
31};
32
33/* ========================================================================= */
18/* UVH_BAU_DATA_CONFIG */ 34/* UVH_BAU_DATA_CONFIG */
19/* ========================================================================= */ 35/* ========================================================================= */
20#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
21#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
22#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
23#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
24/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
25#define UVH_BAU_DATA_CONFIG 0x61680UL 36#define UVH_BAU_DATA_CONFIG 0x61680UL
26#define UVH_BAU_DATA_CONFIG_32 0x0438 37#define UVH_BAU_DATA_CONFIG_32 0x0438
27 38
@@ -604,6 +615,68 @@ union uvh_lb_bau_intd_software_acknowledge_u {
604#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70 615#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
605 616
606/* ========================================================================= */ 617/* ========================================================================= */
618/* UVH_LB_BAU_MISC_CONTROL */
619/* ========================================================================= */
620#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
621#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
622
623#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
624#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
625#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
626#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
627#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
628#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
629#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
630#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
631#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
632#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
633#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
634#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
635#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
636#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
637#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
638#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
639#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
640#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
641#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
642#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
643#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
644#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
645#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
646#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
647#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
648#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
649#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
650#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
651#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
652#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
653#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
654#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
655
656union uvh_lb_bau_misc_control_u {
657 unsigned long v;
658 struct uvh_lb_bau_misc_control_s {
659 unsigned long rejection_delay : 8; /* RW */
660 unsigned long apic_mode : 1; /* RW */
661 unsigned long force_broadcast : 1; /* RW */
662 unsigned long force_lock_nop : 1; /* RW */
663 unsigned long csi_agent_presence_vector : 3; /* RW */
664 unsigned long descriptor_fetch_mode : 1; /* RW */
665 unsigned long enable_intd_soft_ack_mode : 1; /* RW */
666 unsigned long intd_soft_ack_timeout_period : 4; /* RW */
667 unsigned long enable_dual_mapping_mode : 1; /* RW */
668 unsigned long vga_io_port_decode_enable : 1; /* RW */
669 unsigned long vga_io_port_16_bit_decode : 1; /* RW */
670 unsigned long suppress_dest_registration : 1; /* RW */
671 unsigned long programmed_initial_priority : 3; /* RW */
672 unsigned long use_incoming_priority : 1; /* RW */
673 unsigned long enable_programmed_initial_priority : 1; /* RW */
674 unsigned long rsvd_29_47 : 19; /* */
675 unsigned long fun : 16; /* RW */
676 } s;
677};
678
679/* ========================================================================= */
607/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ 680/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
608/* ========================================================================= */ 681/* ========================================================================= */
609#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL 682#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
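
The union added above gives two views of the same MMR: the raw 64-bit word v and the named bitfields in s. A hedged kernel-context sketch of a read-modify-write through it; uv_read_global_mmr64()/uv_write_global_mmr64() are assumed from uv_hub.h, and the timeout value 0xb mirrors the UV_INTD_SOFT_ACK_TIMEOUT_PERIOD define this series moves into uv_bau.h:

	/* Kernel-context sketch: enable INTD soft-ack mode and set its
	 * timeout period via the bitfield view, then write back the raw word. */
	#include <asm/uv/uv_hub.h>
	#include <asm/uv/uv_mmrs.h>

	static void example_enable_soft_ack(int pnode)
	{
		union uvh_lb_bau_misc_control_u misc;

		misc.v = uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		misc.s.enable_intd_soft_ack_mode = 1;
		/* 0xb: per the comment removed above, timebase 7 (168ms)
		 * times 3 ticks gives roughly 500ms */
		misc.s.intd_soft_ack_timeout_period = 0xb;
		uv_write_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL, misc.v);
	}
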
@@ -681,334 +754,6 @@ union uvh_lb_bau_sb_descriptor_base_u {
681}; 754};
682 755
683/* ========================================================================= */ 756/* ========================================================================= */
684/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
685/* ========================================================================= */
686#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
687
688#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
689#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
690#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
691#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
692#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
693#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
694#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
695#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
696#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
697#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
698#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
699#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
700#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
701#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
702#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
703#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
704#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
705#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
706#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
707#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
708#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
709#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
710#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
711#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
712#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
713#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
714#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
715#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
716#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
717#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
718#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
719#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
720#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
721#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
722#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
723#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
724#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
725#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
726#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
727#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
728#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
729#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
730#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
731#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
732#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
733#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
734#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
735#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
736#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
737#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
738#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
739#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
740#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
741#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
742#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
743#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
744#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
745#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
746#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
747#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
748#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
749#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
750#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
751#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
752#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
753#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
754#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
755#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
756#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
757#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
758#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
759#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
760#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
761#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
762#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
763#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
764#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
765#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
766#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
767#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
768#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
769#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
770#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
771#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
772#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
773#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
774
775union uvh_lb_mcast_aoerr0_rpt_enable_u {
776 unsigned long v;
777 struct uvh_lb_mcast_aoerr0_rpt_enable_s {
778 unsigned long mcast_obese_msg : 1; /* RW */
779 unsigned long mcast_data_sb_err : 1; /* RW */
780 unsigned long mcast_nack_buff_parity : 1; /* RW */
781 unsigned long mcast_timeout : 1; /* RW */
782 unsigned long mcast_inactive_reply : 1; /* RW */
783 unsigned long mcast_upgrade_error : 1; /* RW */
784 unsigned long mcast_reg_count_underflow : 1; /* RW */
785 unsigned long mcast_rep_obese_msg : 1; /* RW */
786 unsigned long ucache_req_runt_msg : 1; /* RW */
787 unsigned long ucache_req_obese_msg : 1; /* RW */
788 unsigned long ucache_req_data_sb_err : 1; /* RW */
789 unsigned long ucache_rep_runt_msg : 1; /* RW */
790 unsigned long ucache_rep_obese_msg : 1; /* RW */
791 unsigned long ucache_rep_data_sb_err : 1; /* RW */
792 unsigned long ucache_rep_command_err : 1; /* RW */
793 unsigned long ucache_pend_timeout : 1; /* RW */
794 unsigned long macc_req_runt_msg : 1; /* RW */
795 unsigned long macc_req_obese_msg : 1; /* RW */
796 unsigned long macc_req_data_sb_err : 1; /* RW */
797 unsigned long macc_rep_runt_msg : 1; /* RW */
798 unsigned long macc_rep_obese_msg : 1; /* RW */
799 unsigned long macc_rep_data_sb_err : 1; /* RW */
800 unsigned long macc_amo_timeout : 1; /* RW */
801 unsigned long macc_put_timeout : 1; /* RW */
802 unsigned long macc_spurious_event : 1; /* RW */
803 unsigned long ioh_destination_table_parity : 1; /* RW */
804 unsigned long get_had_error_reply : 1; /* RW */
805 unsigned long get_timeout : 1; /* RW */
806 unsigned long lock_manager_had_error_reply : 1; /* RW */
807 unsigned long put_had_error_reply : 1; /* RW */
808 unsigned long put_timeout : 1; /* RW */
809 unsigned long sb_activation_overrun : 1; /* RW */
810 unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
811 unsigned long completed_gb_activation_timeout : 1; /* RW */
812 unsigned long descriptor_buffer_0_parity : 1; /* RW */
813 unsigned long descriptor_buffer_1_parity : 1; /* RW */
814 unsigned long socket_destination_table_parity : 1; /* RW */
815 unsigned long bau_reply_payload_corruption : 1; /* RW */
816 unsigned long io_port_destination_table_parity : 1; /* RW */
817 unsigned long intd_soft_ack_timeout : 1; /* RW */
818 unsigned long int_rep_obese_msg : 1; /* RW */
819 unsigned long int_rep_command_err : 1; /* RW */
820 unsigned long int_timeout : 1; /* RW */
821 unsigned long rsvd_43_63 : 21; /* */
822 } s;
823};
824
825/* ========================================================================= */
826/* UVH_LOCAL_INT0_CONFIG */
827/* ========================================================================= */
828#define UVH_LOCAL_INT0_CONFIG 0x61000UL
829
830#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
831#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
832#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
833#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
834#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
835#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
836#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
837#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
838#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
839#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
840#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
841#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
842#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
843#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
844#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
845#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
846
847union uvh_local_int0_config_u {
848 unsigned long v;
849 struct uvh_local_int0_config_s {
850 unsigned long vector_ : 8; /* RW */
851 unsigned long dm : 3; /* RW */
852 unsigned long destmode : 1; /* RW */
853 unsigned long status : 1; /* RO */
854 unsigned long p : 1; /* RO */
855 unsigned long rsvd_14 : 1; /* */
856 unsigned long t : 1; /* RO */
857 unsigned long m : 1; /* RW */
858 unsigned long rsvd_17_31: 15; /* */
859 unsigned long apic_id : 32; /* RW */
860 } s;
861};
862
863/* ========================================================================= */
864/* UVH_LOCAL_INT0_ENABLE */
865/* ========================================================================= */
866#define UVH_LOCAL_INT0_ENABLE 0x65000UL
867
868#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
869#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
870#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
871#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
872#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
873#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
874#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
875#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
876#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
877#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
878#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
879#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
880#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
881#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
882#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
883#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
884#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
885#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
886#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
887#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
888#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
889#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
890#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
891#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
892#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
893#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
894#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
895#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
896#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
897#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
898#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
899#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
900#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
901#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
902#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
903#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
904#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
905#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
906#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
907#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
908#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
909#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
910#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
911#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
912#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
913#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
914#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
915#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
916#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
917#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
918#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
919#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
920#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
921#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
922#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
923#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
924#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
925#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
926#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
927#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
928#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
929#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
930#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
931#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
932#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
933#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
934#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
935#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
936#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
937#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
938#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
939#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
940#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
941#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
942#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
943#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
944#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
945#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
946#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
947#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
948#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
949#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
950#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
951#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
952#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
953#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
954#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
955#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
956#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
957#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
958
959union uvh_local_int0_enable_u {
960 unsigned long v;
961 struct uvh_local_int0_enable_s {
962 unsigned long lb_hcerr : 1; /* RW */
963 unsigned long gr0_hcerr : 1; /* RW */
964 unsigned long gr1_hcerr : 1; /* RW */
965 unsigned long lh_hcerr : 1; /* RW */
966 unsigned long rh_hcerr : 1; /* RW */
967 unsigned long xn_hcerr : 1; /* RW */
968 unsigned long si_hcerr : 1; /* RW */
969 unsigned long lb_aoerr0 : 1; /* RW */
970 unsigned long gr0_aoerr0 : 1; /* RW */
971 unsigned long gr1_aoerr0 : 1; /* RW */
972 unsigned long lh_aoerr0 : 1; /* RW */
973 unsigned long rh_aoerr0 : 1; /* RW */
974 unsigned long xn_aoerr0 : 1; /* RW */
975 unsigned long si_aoerr0 : 1; /* RW */
976 unsigned long lb_aoerr1 : 1; /* RW */
977 unsigned long gr0_aoerr1 : 1; /* RW */
978 unsigned long gr1_aoerr1 : 1; /* RW */
979 unsigned long lh_aoerr1 : 1; /* RW */
980 unsigned long rh_aoerr1 : 1; /* RW */
981 unsigned long xn_aoerr1 : 1; /* RW */
982 unsigned long si_aoerr1 : 1; /* RW */
983 unsigned long rh_vpi_int : 1; /* RW */
984 unsigned long system_shutdown_int : 1; /* RW */
985 unsigned long lb_irq_int_0 : 1; /* RW */
986 unsigned long lb_irq_int_1 : 1; /* RW */
987 unsigned long lb_irq_int_2 : 1; /* RW */
988 unsigned long lb_irq_int_3 : 1; /* RW */
989 unsigned long lb_irq_int_4 : 1; /* RW */
990 unsigned long lb_irq_int_5 : 1; /* RW */
991 unsigned long lb_irq_int_6 : 1; /* RW */
992 unsigned long lb_irq_int_7 : 1; /* RW */
993 unsigned long lb_irq_int_8 : 1; /* RW */
994 unsigned long lb_irq_int_9 : 1; /* RW */
995 unsigned long lb_irq_int_10 : 1; /* RW */
996 unsigned long lb_irq_int_11 : 1; /* RW */
997 unsigned long lb_irq_int_12 : 1; /* RW */
998 unsigned long lb_irq_int_13 : 1; /* RW */
999 unsigned long lb_irq_int_14 : 1; /* RW */
1000 unsigned long lb_irq_int_15 : 1; /* RW */
1001 unsigned long l1_nmi_int : 1; /* RW */
1002 unsigned long stop_clock : 1; /* RW */
1003 unsigned long asic_to_l1 : 1; /* RW */
1004 unsigned long l1_to_asic : 1; /* RW */
1005 unsigned long ltc_int : 1; /* RW */
1006 unsigned long la_seq_trigger : 1; /* RW */
1007 unsigned long rsvd_45_63 : 19; /* */
1008 } s;
1009};
1010
1011/* ========================================================================= */
1012/* UVH_NODE_ID */ 757/* UVH_NODE_ID */
1013/* ========================================================================= */ 758/* ========================================================================= */
1014#define UVH_NODE_ID 0x0UL 759#define UVH_NODE_ID 0x0UL
@@ -1112,26 +857,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
1112}; 857};
1113 858
1114/* ========================================================================= */ 859/* ========================================================================= */
1115/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
1116/* ========================================================================= */
1117#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
1118
1119#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1120#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1121#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1122#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1123
1124union uvh_rh_gam_cfg_overlay_config_mmr_u {
1125 unsigned long v;
1126 struct uvh_rh_gam_cfg_overlay_config_mmr_s {
1127 unsigned long rsvd_0_25: 26; /* */
1128 unsigned long base : 20; /* RW */
1129 unsigned long rsvd_46_62: 17; /* */
1130 unsigned long enable : 1; /* RW */
1131 } s;
1132};
1133
1134/* ========================================================================= */
1135/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 860/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
1136/* ========================================================================= */ 861/* ========================================================================= */
1137#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 862#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -1263,101 +988,6 @@ union uvh_rtc1_int_config_u {
1263}; 988};
1264 989
1265/* ========================================================================= */ 990/* ========================================================================= */
1266/* UVH_RTC2_INT_CONFIG */
1267/* ========================================================================= */
1268#define UVH_RTC2_INT_CONFIG 0x61600UL
1269
1270#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
1271#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1272#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
1273#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
1274#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
1275#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1276#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
1277#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1278#define UVH_RTC2_INT_CONFIG_P_SHFT 13
1279#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
1280#define UVH_RTC2_INT_CONFIG_T_SHFT 15
1281#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
1282#define UVH_RTC2_INT_CONFIG_M_SHFT 16
1283#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
1284#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
1285#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1286
1287union uvh_rtc2_int_config_u {
1288 unsigned long v;
1289 struct uvh_rtc2_int_config_s {
1290 unsigned long vector_ : 8; /* RW */
1291 unsigned long dm : 3; /* RW */
1292 unsigned long destmode : 1; /* RW */
1293 unsigned long status : 1; /* RO */
1294 unsigned long p : 1; /* RO */
1295 unsigned long rsvd_14 : 1; /* */
1296 unsigned long t : 1; /* RO */
1297 unsigned long m : 1; /* RW */
1298 unsigned long rsvd_17_31: 15; /* */
1299 unsigned long apic_id : 32; /* RW */
1300 } s;
1301};
1302
1303/* ========================================================================= */
1304/* UVH_RTC3_INT_CONFIG */
1305/* ========================================================================= */
1306#define UVH_RTC3_INT_CONFIG 0x61640UL
1307
1308#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
1309#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1310#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
1311#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
1312#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
1313#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1314#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
1315#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1316#define UVH_RTC3_INT_CONFIG_P_SHFT 13
1317#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
1318#define UVH_RTC3_INT_CONFIG_T_SHFT 15
1319#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
1320#define UVH_RTC3_INT_CONFIG_M_SHFT 16
1321#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
1322#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
1323#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1324
1325union uvh_rtc3_int_config_u {
1326 unsigned long v;
1327 struct uvh_rtc3_int_config_s {
1328 unsigned long vector_ : 8; /* RW */
1329 unsigned long dm : 3; /* RW */
1330 unsigned long destmode : 1; /* RW */
1331 unsigned long status : 1; /* RO */
1332 unsigned long p : 1; /* RO */
1333 unsigned long rsvd_14 : 1; /* */
1334 unsigned long t : 1; /* RO */
1335 unsigned long m : 1; /* RW */
1336 unsigned long rsvd_17_31: 15; /* */
1337 unsigned long apic_id : 32; /* RW */
1338 } s;
1339};
1340
1341/* ========================================================================= */
1342/* UVH_RTC_INC_RATIO */
1343/* ========================================================================= */
1344#define UVH_RTC_INC_RATIO 0x350000UL
1345
1346#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
1347#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
1348#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
1349#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
1350
1351union uvh_rtc_inc_ratio_u {
1352 unsigned long v;
1353 struct uvh_rtc_inc_ratio_s {
1354 unsigned long fraction : 20; /* RW */
1355 unsigned long ratio : 3; /* RW */
1356 unsigned long rsvd_23_63: 41; /* */
1357 } s;
1358};
1359
1360/* ========================================================================= */
1361/* UVH_SI_ADDR_MAP_CONFIG */ 991/* UVH_SI_ADDR_MAP_CONFIG */
1362/* ========================================================================= */ 992/* ========================================================================= */
1363#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL 993#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
deleted file mode 100644
index e49ed6d2fd4e..000000000000
--- a/arch/x86/include/asm/vmware.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (C) 2008, VMware, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more
13 * details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20#ifndef ASM_X86__VMWARE_H
21#define ASM_X86__VMWARE_H
22
23extern void vmware_platform_setup(void);
24extern int vmware_platform(void);
25extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
26
27#endif
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index ddc04ccad03b..2c4390cae228 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
37 void __user *fpstate, 37 void __user *fpstate,
38 struct _fpx_sw_bytes *sw); 38 struct _fpx_sw_bytes *sw);
39 39
40static inline int xrstor_checking(struct xsave_struct *fx) 40static inline int fpu_xrstor_checking(struct fpu *fpu)
41{ 41{
42 struct xsave_struct *fx = &fpu->state->xsave;
42 int err; 43 int err;
43 44
44 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" 45 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
110 : "memory"); 111 : "memory");
111} 112}
112 113
113static inline void xsave(struct task_struct *tsk) 114static inline void fpu_xsave(struct fpu *fpu)
114{ 115{
115 /* This, however, we can work around by forcing the compiler to select 116 /* This, however, we can work around by forcing the compiler to select
116 an addressing mode that doesn't require extended registers. */ 117 an addressing mode that doesn't require extended registers. */
117 __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27" 118 __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
118 : : "D" (&(tsk->thread.xstate->xsave)), 119 : : "D" (&(fpu->state->xsave)),
119 "a" (-1), "d"(-1) : "memory"); 120 "a" (-1), "d"(-1) : "memory");
120} 121}
121#endif 122#endif