Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/amd_nb.h | 6
-rw-r--r--  arch/x86/include/asm/apic.h | 1
-rw-r--r--  arch/x86/include/asm/apicdef.h | 2
-rw-r--r--  arch/x86/include/asm/archrandom.h | 75
-rw-r--r--  arch/x86/include/asm/atomic.h | 8
-rw-r--r--  arch/x86/include/asm/atomic64_64.h | 6
-rw-r--r--  arch/x86/include/asm/cmpxchg.h | 205
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h | 114
-rw-r--r--  arch/x86/include/asm/cmpxchg_64.h | 131
-rw-r--r--  arch/x86/include/asm/compat.h | 3
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 5
-rw-r--r--  arch/x86/include/asm/elf.h | 31
-rw-r--r--  arch/x86/include/asm/intel_scu_ipc.h | 34
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 4
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 14
-rw-r--r--  arch/x86/include/asm/mach_traps.h | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 5
-rw-r--r--  arch/x86/include/asm/mrst.h | 16
-rw-r--r--  arch/x86/include/asm/msr-index.h | 2
-rw-r--r--  arch/x86/include/asm/msr.h | 9
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 6
-rw-r--r--  arch/x86/include/asm/processor.h | 4
-rw-r--r--  arch/x86/include/asm/rwsem.h | 8
-rw-r--r--  arch/x86/include/asm/spinlock.h | 114
-rw-r--r--  arch/x86/include/asm/spinlock_types.h | 22
-rw-r--r--  arch/x86/include/asm/system.h | 1
-rw-r--r--  arch/x86/include/asm/timer.h | 23
-rw-r--r--  arch/x86/include/asm/unistd_32.h | 4
-rw-r--r--  arch/x86/include/asm/unistd_64.h | 4
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 7
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 37
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 1
-rw-r--r--  arch/x86/include/asm/vmx.h | 12
-rw-r--r--  arch/x86/include/asm/x86_init.h | 3
-rw-r--r--  arch/x86/include/asm/xen/grant_table.h | 7 (deleted)
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h | 8
-rw-r--r--  arch/x86/include/asm/xen/interface.h | 1
37 files changed, 548 insertions(+), 387 deletions(-)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 67f87f257611..8e41071704a5 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
 extern int amd_get_subcaches(int);
 extern int amd_set_subcaches(int, int);
 
+struct amd_l3_cache {
+	unsigned indices;
+	u8	 subcaches[4];
+};
+
 struct amd_northbridge {
 	struct pci_dev *misc;
 	struct pci_dev *link;
+	struct amd_l3_cache l3_cache;
 };
 
 struct amd_northbridge_info {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 9b7273cb2193..1a6c09af048f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -49,6 +49,7 @@ extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
+extern unsigned int lapic_timer_frequency;
 
 #ifdef CONFIG_SMP
 extern void __inquire_remote_apic(int apicid);
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 34595d5e1038..3925d8007864 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -100,7 +100,9 @@
 #define	APIC_TIMER_BASE_CLKIN		0x0
 #define	APIC_TIMER_BASE_TMBASE		0x1
 #define	APIC_TIMER_BASE_DIV		0x2
+#define	APIC_LVT_TIMER_ONESHOT		(0 << 17)
 #define	APIC_LVT_TIMER_PERIODIC		(1 << 17)
+#define	APIC_LVT_TIMER_TSCDEADLINE	(2 << 17)
 #define	APIC_LVT_MASKED			(1 << 16)
 #define	APIC_LVT_LEVEL_TRIGGER		(1 << 15)
 #define	APIC_LVT_REMOTE_IRR		(1 << 14)
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
new file mode 100644
index 000000000000..0d9ec770f2f8
--- /dev/null
+++ b/arch/x86/include/asm/archrandom.h
@@ -0,0 +1,75 @@
+/*
+ * This file is part of the Linux kernel.
+ *
+ * Copyright (c) 2011, Intel Corporation
+ * Authors: Fenghua Yu <fenghua.yu@intel.com>,
+ *          H. Peter Anvin <hpa@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef ASM_X86_ARCHRANDOM_H
+#define ASM_X86_ARCHRANDOM_H
+
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+#define RDRAND_RETRY_LOOPS	10
+
+#define RDRAND_INT	".byte 0x0f,0xc7,0xf0"
+#ifdef CONFIG_X86_64
+# define RDRAND_LONG	".byte 0x48,0x0f,0xc7,0xf0"
+#else
+# define RDRAND_LONG	RDRAND_INT
+#endif
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#define GET_RANDOM(name, type, rdrand, nop)			\
+static inline int name(type *v)					\
+{								\
+	int ok;							\
+	alternative_io("movl $0, %0\n\t"			\
+		       nop,					\
+		       "\n1: " rdrand "\n\t"			\
+		       "jc 2f\n\t"				\
+		       "decl %0\n\t"				\
+		       "jnz 1b\n\t"				\
+		       "2:",					\
+		       X86_FEATURE_RDRAND,			\
+		       ASM_OUTPUT2("=r" (ok), "=a" (*v)),	\
+		       "0" (RDRAND_RETRY_LOOPS));		\
+	return ok;						\
+}
+
+#ifdef CONFIG_X86_64
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
+
+#else
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+extern void x86_init_rdrand(struct cpuinfo_x86 *c);
+
+#endif /* ASM_X86_ARCHRANDOM_H */
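
A note on the construct above: GET_RANDOM() emits a retry loop around the raw RDRAND opcode, and alternative_io() patches in the "movl $0, %0" failure path on CPUs without X86_FEATURE_RDRAND, so callers only ever test the return value. A minimal caller sketch (the helper name and fallback are illustrative, not part of this patch):

	/* Sketch only: arch_get_random_long() returns nonzero on success. */
	static unsigned long get_seed_word(void)
	{
		unsigned long v;

		if (arch_get_random_long(&v))	/* RDRAND succeeded within RDRAND_RETRY_LOOPS */
			return v;
		return 0;			/* caller must treat 0 as "no hardware entropy" */
	}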
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 10572e309ab2..58cb6d4085f7 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -172,18 +172,14 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	int __i;
 #ifdef CONFIG_M386
+	int __i;
 	unsigned long flags;
 	if (unlikely(boot_cpu_data.x86 <= 3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
-	__i = i;
-	asm volatile(LOCK_PREFIX "xaddl %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 017594d403f6..0e1cbfc8ee06 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -170,11 +170,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	long __i = i;
-	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index a460fa088d4c..5d3acdf5a7a6 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -1,5 +1,210 @@
+#ifndef ASM_X86_CMPXCHG_H
+#define ASM_X86_CMPXCHG_H
+
+#include <linux/compiler.h>
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+/*
+ * Non-existant functions to indicate usage errors at link time
+ * (or compile-time if the compiler implements __compiletime_error().
+ */
+extern void __xchg_wrong_size(void)
+	__compiletime_error("Bad argument size for xchg");
+extern void __cmpxchg_wrong_size(void)
+	__compiletime_error("Bad argument size for cmpxchg");
+extern void __xadd_wrong_size(void)
+	__compiletime_error("Bad argument size for xadd");
+
+/*
+ * Constants for operation sizes. On 32-bit, the 64-bit size it set to
+ * -1 because sizeof will never return -1, thereby making those switch
+ * case statements guaranteeed dead code which the compiler will
+ * eliminate, and allowing the "missing symbol in the default case" to
+ * indicate a usage error.
+ */
+#define __X86_CASE_B	1
+#define __X86_CASE_W	2
+#define __X86_CASE_L	4
+#ifdef CONFIG_64BIT
+#define __X86_CASE_Q	8
+#else
+#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
+#endif
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
+ */
+#define __xchg(x, ptr, size)						\
+({									\
+	__typeof(*(ptr)) __x = (x);					\
+	switch (size) {							\
+	case __X86_CASE_B:						\
+	{								\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
+		asm volatile("xchgb %0,%1"				\
+			     : "=q" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_W:						\
+	{								\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
+		asm volatile("xchgw %0,%1"				\
+			     : "=r" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_L:						\
+	{								\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
+		asm volatile("xchgl %0,%1"				\
+			     : "=r" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_Q:						\
+	{								\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
+		asm volatile("xchgq %0,%1"				\
+			     : "=r" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	default:							\
+		__xchg_wrong_size();					\
+	}								\
+	__x;								\
+})
+
+#define xchg(ptr, v)							\
+	__xchg((v), (ptr), sizeof(*ptr))
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __raw_cmpxchg(ptr, old, new, size, lock)			\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	switch (size) {							\
+	case __X86_CASE_B:						\
+	{								\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
+		asm volatile(lock "cmpxchgb %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "q" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_W:						\
+	{								\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
+		asm volatile(lock "cmpxchgw %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_L:						\
+	{								\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
+		asm volatile(lock "cmpxchgl %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_Q:						\
+	{								\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
+		asm volatile(lock "cmpxchgq %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	default:							\
+		__cmpxchg_wrong_size();					\
+	}								\
+	__ret;								\
+})
+
+#define __cmpxchg(ptr, old, new, size)					\
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+
+#define __sync_cmpxchg(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "")
+
 #ifdef CONFIG_X86_32
 # include "cmpxchg_32.h"
 #else
 # include "cmpxchg_64.h"
 #endif
+
+#ifdef __HAVE_ARCH_CMPXCHG
+#define cmpxchg(ptr, old, new)						\
+	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define sync_cmpxchg(ptr, old, new)					\
+	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define cmpxchg_local(ptr, old, new)					\
+	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
+#endif
+
+#define __xadd(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "xaddb %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "xaddw %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "xaddl %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "xaddq %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__xadd_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+
+#endif	/* ASM_X86_CMPXCHG_H */
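
The point of xadd() returning the pre-increment value is that "add and fetch the new value" falls out as a one-liner, which is exactly how the atomic.h, atomic64_64.h, rwsem.h and uv_bau.h hunks in this commit use it. A sketch of the pattern (it mirrors atomic_add_return() above; the helper name is illustrative, not part of the patch):

	/* xadd() hands back what *ptr held before the locked add. */
	static inline int add_return_sketch(int i, atomic_t *v)
	{
		return i + xadd(&v->counter, i);	/* old + inc == new value */
	}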
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 3deb7250624c..fbebb07dd80b 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -1,61 +1,11 @@
 #ifndef _ASM_X86_CMPXCHG_32_H
 #define _ASM_X86_CMPXCHG_32_H
 
-#include <linux/bitops.h> /* for LOCK_PREFIX */
-
 /*
  * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
  * you need to test for the feature in boot_cpu_data.
  */
 
-extern void __xchg_wrong_size(void);
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
- */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
-
 /*
  * CMPXCHG8B only writes to the target if we had the previous
  * value in registers, otherwise it acts as a read and gives us the
@@ -84,72 +34,8 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		       : "memory");
 }
 
-extern void __cmpxchg_wrong_size(void);
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-#define __raw_cmpxchg(ptr, old, new, size, lock)			\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__typeof__(*(ptr)) __old = (old);				\
-	__typeof__(*(ptr)) __new = (new);				\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile(lock "cmpxchgb %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "q" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile(lock "cmpxchgw %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile(lock "cmpxchgl %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__cmpxchg_wrong_size();					\
-	}								\
-	__ret;								\
-})
-
-#define __cmpxchg(ptr, old, new, size)					\
-	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
-
-#define __sync_cmpxchg(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
-
-#define __cmpxchg_local(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "")
-
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-
-#define cmpxchg(ptr, old, new)						\
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define sync_cmpxchg(ptr, old, new)					\
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define cmpxchg_local(ptr, old, new)					\
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 7cf5c0a24434..285da02c38fa 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -1,144 +1,13 @@
 #ifndef _ASM_X86_CMPXCHG_64_H
 #define _ASM_X86_CMPXCHG_64_H
 
-#include <asm/alternative.h> /* Provides LOCK_PREFIX */
-
 static inline void set_64bit(volatile u64 *ptr, u64 val)
 {
 	*ptr = val;
 }
 
-extern void __xchg_wrong_size(void);
-extern void __cmpxchg_wrong_size(void);
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
- */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 8:								\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
-
 #define __HAVE_ARCH_CMPXCHG 1
 
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-#define __raw_cmpxchg(ptr, old, new, size, lock)			\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__typeof__(*(ptr)) __old = (old);				\
-	__typeof__(*(ptr)) __new = (new);				\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile(lock "cmpxchgb %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "q" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile(lock "cmpxchgw %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile(lock "cmpxchgl %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 8:								\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile(lock "cmpxchgq %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__cmpxchg_wrong_size();					\
-	}								\
-	__ret;								\
-})
-
-#define __cmpxchg(ptr, old, new, size)					\
-	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
-
-#define __sync_cmpxchg(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
-
-#define __cmpxchg_local(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "")
-
-#define cmpxchg(ptr, old, new)						\
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define sync_cmpxchg(ptr, old, new)					\
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define cmpxchg_local(ptr, old, new)					\
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
-
 #define cmpxchg64(ptr, o, n)						\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 1d9cd27c2920..30d737ef2a42 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -108,7 +108,8 @@ struct compat_statfs {
 	compat_fsid_t	f_fsid;
 	int		f_namelen;	/* SunOS ignores this field. */
 	int		f_frsize;
-	int		f_spare[5];
+	int		f_flags;
+	int		f_spare[4];
 };
 
 #define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 88b23a43f340..f3444f700f36 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -114,12 +114,14 @@
 #define X86_FEATURE_CX16	(4*32+13) /* CMPXCHG16B */
 #define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
 #define X86_FEATURE_PDCM	(4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID	(4*32+17) /* Process Context Identifiers */
 #define X86_FEATURE_DCA		(4*32+18) /* Direct Cache Access */
 #define X86_FEATURE_XMM4_1	(4*32+19) /* "sse4_1" SSE-4.1 */
 #define X86_FEATURE_XMM4_2	(4*32+20) /* "sse4_2" SSE-4.2 */
 #define X86_FEATURE_X2APIC	(4*32+21) /* x2APIC */
 #define X86_FEATURE_MOVBE	(4*32+22) /* MOVBE instruction */
 #define X86_FEATURE_POPCNT	(4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER	(4*32+24) /* Tsc deadline timer */
 #define X86_FEATURE_AES		(4*32+25) /* AES instructions */
 #define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
@@ -257,7 +259,9 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
+#define cpu_has_ssse3		boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
+#define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
 #define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
@@ -285,6 +289,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
290#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 295#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f2ad2163109d..5f962df30d0f 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -4,6 +4,7 @@
 /*
  * ELF register definitions..
  */
+#include <linux/thread_info.h>
 
 #include <asm/ptrace.h>
 #include <asm/user.h>
@@ -320,4 +321,34 @@ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
+/*
+ * True on X86_32 or when emulating IA32 on X86_64
+ */
+static inline int mmap_is_ia32(void)
+{
+#ifdef CONFIG_X86_32
+	return 1;
+#endif
+#ifdef CONFIG_IA32_EMULATION
+	if (test_thread_flag(TIF_IA32))
+		return 1;
+#endif
+	return 0;
+}
+
+/* The first two values are special, do not change. See align_addr() */
+enum align_flags {
+	ALIGN_VA_32	= BIT(0),
+	ALIGN_VA_64	= BIT(1),
+	ALIGN_VDSO	= BIT(2),
+	ALIGN_TOPDOWN	= BIT(3),
+};
+
+struct va_alignment {
+	int flags;
+	unsigned long mask;
+} ____cacheline_aligned;
+
+extern struct va_alignment va_align;
+extern unsigned long align_addr(unsigned long, struct file *, enum align_flags);
 #endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 29f66793cc55..925b605eb5c6 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -1,11 +1,17 @@
 #ifndef _ASM_X86_INTEL_SCU_IPC_H_
 #define _ASM_X86_INTEL_SCU_IPC_H_
 
-#define IPCMSG_VRTC	0xFA	 /* Set vRTC device */
+#include <linux/notifier.h>
 
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPCMSG_WARM_RESET	0xF0
+#define IPCMSG_COLD_RESET	0xF1
+#define IPCMSG_SOFT_RESET	0xF2
+#define IPCMSG_COLD_BOOT	0xF3
+
+#define IPCMSG_VRTC	0xFA	 /* Set vRTC device */
+	/* Command id associated with message IPCMSG_VRTC */
+	#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
+	#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
 
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
@@ -44,4 +50,24 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
 /* Update FW version */
 int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
 
+extern struct blocking_notifier_head intel_scu_notifier;
+
+static inline void intel_scu_notifier_add(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&intel_scu_notifier, nb);
+}
+
+static inline void intel_scu_notifier_remove(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&intel_scu_notifier, nb);
+}
+
+static inline int intel_scu_notifier_post(unsigned long v, void *p)
+{
+	return blocking_notifier_call_chain(&intel_scu_notifier, v, p);
+}
+
+#define		SCU_AVAILABLE		1
+#define		SCU_DOWN		2
+
 #endif
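
The new notifier hooks let client drivers learn when the SCU firmware comes up or goes down. A hedged sketch of a subscriber (the driver and callback names are hypothetical, not from this patch):

	static int scu_event(struct notifier_block *nb, unsigned long action, void *data)
	{
		if (action == SCU_AVAILABLE)
			pr_info("SCU is up\n");
		return NOTIFY_OK;
	}

	static struct notifier_block scu_nb = {
		.notifier_call = scu_event,
	};

	/* in the client's init path: */
	intel_scu_notifier_add(&scu_nb);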
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 6040d115ef51..a026507893e9 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -262,7 +262,7 @@ struct x86_emulate_ctxt {
 	struct operand dst;
 	bool has_seg_override;
 	u8 seg_override;
-	unsigned int d;
+	u64 d;
 	int (*execute)(struct x86_emulate_ctxt *ctxt);
 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 	/* modrm */
@@ -275,6 +275,8 @@ struct x86_emulate_ctxt {
 	unsigned long _eip;
 	/* Fields above regs are cleared together. */
 	unsigned long regs[NR_VCPU_REGS];
+	struct operand memop;
+	struct operand *memopp;
 	struct fetch_cache fetch;
 	struct read_cache io_read;
 	struct read_cache mem_read;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dd51c83aa5de..b4973f4dab98 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -26,7 +26,8 @@
 #include <asm/mtrr.h>
 #include <asm/msr-index.h>
 
-#define KVM_MAX_VCPUS 64
+#define KVM_MAX_VCPUS 254
+#define KVM_SOFT_MAX_VCPUS 64
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -264,6 +265,7 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
 			  bool prefault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
@@ -411,8 +413,9 @@ struct kvm_vcpu_arch {
 	u32  tsc_catchup_mult;
 	s8   tsc_catchup_shift;
 
-	bool nmi_pending;
-	bool nmi_injected;
+	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
+	unsigned nmi_pending; /* NMI queued after currently running handler */
+	bool nmi_injected;    /* Trying to inject an NMI this entry */
 
 	struct mtrr_state_type mtrr_state;
 	u32 pat;
@@ -628,14 +631,13 @@ struct kvm_x86_ops {
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage);
-
-	const struct trace_print_flags *exit_reasons_str;
 };
 
 struct kvm_arch_async_pf {
@@ -672,6 +674,8 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
 
+u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
+
 /* control of guest tsc rate supported? */
 extern bool kvm_has_tsc_control;
 /* minimum supported tsc_khz for guests */
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index 72a8b52e7dfd..a01e7ec7d237 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -17,7 +17,7 @@
 #define NMI_REASON_CLEAR_IOCHK	0x08
 #define NMI_REASON_CLEAR_MASK	0x0f
 
-static inline unsigned char get_nmi_reason(void)
+static inline unsigned char default_get_nmi_reason(void)
 {
 	return inb(NMI_REASON_PORT);
 }
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c9321f34e55b..0e8ae57d3656 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -201,7 +201,10 @@ int mce_notify_irq(void);
201void mce_notify_process(void); 201void mce_notify_process(void);
202 202
203DECLARE_PER_CPU(struct mce, injectm); 203DECLARE_PER_CPU(struct mce, injectm);
204extern struct file_operations mce_chrdev_ops; 204
205extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
206 const char __user *ubuf,
207 size_t usize, loff_t *off));
205 208
206/* 209/*
207 * Exception handler 210 * Exception handler
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 719f00b28ff5..93f79094c224 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
 static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
 	return __mrst_cpu_chip;
 }
 
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu()    (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
 enum mrst_timer_options {
 	MRST_TIMER_DEFAULT,
 	MRST_TIMER_APBT_ONLY,
@@ -44,6 +53,13 @@ enum mrst_timer_options {
 
 extern enum mrst_timer_options mrst_timer_options;
 
+/*
+ * Penwell uses spread spectrum clock, so the freq number is not exactly
+ * the same as reported by MSR based on SDM.
+ */
+#define PENWELL_FSB_FREQ_83SKU	83200
+#define PENWELL_FSB_FREQ_100SKU	99840
+
 #define SFI_MTMR_MAX_NUM 8
 #define SFI_MRTC_MAX	8
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d52609aeeab8..a6962d9161a0 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -229,6 +229,8 @@
 #define MSR_IA32_APICBASE_ENABLE	(1<<11)
 #define MSR_IA32_APICBASE_BASE		(0xfffff<<12)
 
+#define MSR_IA32_TSCDEADLINE		0x000006e0
+
 #define MSR_IA32_UCODE_WRITE		0x00000079
 #define MSR_IA32_UCODE_REV		0x0000008b
 
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274cd..95203d40ffdd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 	return native_write_msr_safe(msr, low, high);
 }
 
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
 #define rdmsr_safe(msr, p1, p2)					\
 ({								\
 	int __err;						\
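
Concretely, rdmsr_safe() evaluates to nonzero when the read faulted, so it can probe MSRs that may not exist, but per the new comment only once trap_init() has run. A sketch of the intended use (the MSR chosen here is illustrative):

	u32 lo, hi;

	if (rdmsr_safe(MSR_IA32_TSCDEADLINE, &lo, &hi))
		pr_warn("TSC-deadline MSR not implemented\n");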
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 704526734bef..e38197806853 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -99,10 +99,10 @@ struct pci_raw_ops {
 				int reg, int len, u32 val);
 };
 
-extern struct pci_raw_ops *raw_pci_ops;
-extern struct pci_raw_ops *raw_pci_ext_ops;
+extern const struct pci_raw_ops *raw_pci_ops;
+extern const struct pci_raw_ops *raw_pci_ext_ops;
 
-extern struct pci_raw_ops pci_direct_conf1;
+extern const struct pci_raw_ops pci_direct_conf1;
 extern bool port_cf9_safe;
 
 /* arch_initcall level */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 0d1171c97729..b650435ffb53 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -111,6 +111,7 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 #endif
+	u32			microcode;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL	0
@@ -179,7 +180,8 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 	      "=b" (*ebx),
 	      "=c" (*ecx),
 	      "=d" (*edx)
-	    : "0" (*eax), "2" (*ecx));
+	    : "0" (*eax), "2" (*ecx)
+	    : "memory");
 }
 
 static inline void load_cr3(pgd_t *pgdir)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index df4cd32b4cc6..2dbe4a721ce5 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -204,13 +204,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	long tmp = delta;
-
-	asm volatile(LOCK_PREFIX "xadd %0,%1"
-		     : "+r" (tmp), "+m" (sem->count)
-		     : : "memory");
-
-	return tmp + delta;
+	return delta + xadd(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index ee67edf86fdd..972c260919a3 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -49,109 +49,49 @@
  * issues and should be optimal for the uncontended case. Note the tail must be
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
- *
- * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
- * save some instructions and make the code more elegant. There really isn't
- * much between them in performance though, especially as locks are out of line.
  */
-#if (NR_CPUS < 256)
-#define TICKET_SHIFT 8
-
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	short inc = 0x0100;
+	register struct __raw_tickets inc = { .tail = 1 };
 
-	asm volatile (
-		LOCK_PREFIX "xaddw %w0, %1\n"
-		"1:\t"
-		"cmpb %h0, %b0\n\t"
-		"je 2f\n\t"
-		"rep ; nop\n\t"
-		"movb %1, %b0\n\t"
-		/* don't need lfence here, because loads are in-order */
-		"jmp 1b\n"
-		"2:"
-		: "+Q" (inc), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	inc = xadd(&lock->tickets, inc);
+
+	for (;;) {
+		if (inc.head == inc.tail)
+			break;
+		cpu_relax();
+		inc.head = ACCESS_ONCE(lock->tickets.head);
+	}
+	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp, new;
+	arch_spinlock_t old, new;
 
-	asm volatile("movzwl %2, %0\n\t"
-		     "cmpb %h0,%b0\n\t"
-		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
+	old.tickets = ACCESS_ONCE(lock->tickets);
+	if (old.tickets.head != old.tickets.tail)
+		return 0;
+
+	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
 
-	return tmp;
+	/* cmpxchg is a full barrier, so nothing can move before it */
+	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
+#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
 #else
-#define TICKET_SHIFT 16
-
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
-{
-	int inc = 0x00010000;
-	int tmp;
-
-	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
-		     "movzwl %w0, %2\n\t"
-		     "shrl $16, %0\n\t"
-		     "1:\t"
-		     "cmpl %0, %2\n\t"
-		     "je 2f\n\t"
-		     "rep ; nop\n\t"
-		     "movzwl %1, %2\n\t"
-		     /* don't need lfence here, because loads are in-order */
-		     "jmp 1b\n"
-		     "2:"
-		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
-		     :
-		     : "memory", "cc");
-}
-
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
-{
-	int tmp;
-	int new;
-
-	asm volatile("movl %2,%0\n\t"
-		     "movl %0,%1\n\t"
-		     "roll $16, %0\n\t"
-		     "cmpl %0,%1\n\t"
-		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
-
-	return tmp;
-}
-
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
@@ -159,16 +99,16 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->slock);
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+	return !!(tmp.tail ^ tmp.head);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->slock);
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
 }
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 7c7a486fcb68..8ebd5df7451e 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,11 +5,29 @@
 # error "please don't include this file directly"
 #endif
 
+#include <linux/types.h>
+
+#if (CONFIG_NR_CPUS < 256)
+typedef u8  __ticket_t;
+typedef u16 __ticketpair_t;
+#else
+typedef u16 __ticket_t;
+typedef u32 __ticketpair_t;
+#endif
+
+#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
+#define TICKET_MASK	((__ticket_t)((1 << TICKET_SHIFT) - 1))
+
 typedef struct arch_spinlock {
-	unsigned int slock;
+	union {
+		__ticketpair_t head_tail;
+		struct __raw_tickets {
+			__ticket_t head, tail;
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 #include <asm/rwlock.h>
 
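
With the union in place, the locked xadd in __ticket_spin_lock() above grabs a ticket (tail) and observes the current owner (head) in one atomic operation. The same FIFO idea in self-contained C11, purely to illustrate the semantics (not kernel code; the kernel version also handles the NR_CPUS-dependent widths and paravirt):

	#include <stdatomic.h>

	struct ticket_lock {
		_Atomic unsigned short head;	/* ticket now being served */
		_Atomic unsigned short tail;	/* next ticket to hand out */
	};

	static void ticket_lock(struct ticket_lock *l)
	{
		unsigned short me = atomic_fetch_add(&l->tail, 1);	/* like xadd() */

		while (atomic_load(&l->head) != me)
			;	/* spin; the kernel uses cpu_relax() here */
	}

	static void ticket_unlock(struct ticket_lock *l)
	{
		atomic_fetch_add(&l->head, 1);	/* like the incb/incw unlock */
	}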
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c2ff2a1d845e..2d2f01ce6dcb 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
+bool set_pm_idle_to_default(void);
 
 void stop_this_cpu(void *dummy);
 
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index fa7b9176b76c..431793e5d484 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -32,6 +32,22 @@ extern int no_timer_check;
32 * (mathieu.desnoyers@polymtl.ca) 32 * (mathieu.desnoyers@polymtl.ca)
33 * 33 *
34 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 34 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
35 *
36 * In:
37 *
38 * ns = cycles * cyc2ns_scale / SC
39 *
40 * Although we may still have enough bits to store the value of ns,
41 * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
42 * leading to an incorrect result.
43 *
44 * To avoid this, we can decompose 'cycles' into quotient and remainder
45 * of division by SC. Then,
46 *
47 * ns = (quot * SC + rem) * cyc2ns_scale / SC
48 * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
49 *
50 * - sqazi@google.com
35 */ 51 */
36 52
37DECLARE_PER_CPU(unsigned long, cyc2ns); 53DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
41 57
42static inline unsigned long long __cycles_2_ns(unsigned long long cyc) 58static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
43{ 59{
60 unsigned long long quot;
61 unsigned long long rem;
44 int cpu = smp_processor_id(); 62 int cpu = smp_processor_id();
45 unsigned long long ns = per_cpu(cyc2ns_offset, cpu); 63 unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
46 ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR; 64 quot = (cyc >> CYC2NS_SCALE_FACTOR);
65 rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
66 ns += quot * per_cpu(cyc2ns, cpu) +
67 ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
47 return ns; 68 return ns;
48} 69}
49 70
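
To see why the split matters, a rough worked example, assuming CYC2NS_SCALE_FACTOR is 10 (SC = 1024), which matches contemporaneous kernels but is not shown in this hunk:

	unsigned long long cyc   = 1ULL << 54;	/* ~70 days of cycles at 3 GHz */
	unsigned long long scale = 1024;	/* cyc2ns for a ~1 GHz clock */

	/* old formula: (cyc * scale) >> 10 -- the product is 2^64 and wraps to 0 */

	unsigned long long quot = cyc >> 10;
	unsigned long long rem  = cyc & 1023;
	unsigned long long ns   = quot * scale + ((rem * scale) >> 10);	/* 2^54, correct */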
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 593485b38ab3..599c77d38f33 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -352,10 +352,12 @@
 #define __NR_syncfs		344
 #define __NR_sendmmsg		345
 #define __NR_setns		346
+#define __NR_process_vm_readv	347
+#define __NR_process_vm_writev	348
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 347
+#define NR_syscalls 349
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 0a6ba337a2eb..0431f193c3f2 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
 __SYSCALL(__NR_setns, sys_setns)
 #define __NR_getcpu				309
 __SYSCALL(__NR_getcpu, sys_getcpu)
+#define __NR_process_vm_readv			310
+__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
+#define __NR_process_vm_writev			311
+__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
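
From userspace, the two new syscalls copy memory directly between process address spaces. A sketch of a reader (glibc later grew wrappers for these; on older libcs, call syscall(__NR_process_vm_readv, ...) instead):

	#include <sys/uio.h>

	static ssize_t peek(pid_t pid, void *remote_addr, void *buf, size_t len)
	{
		struct iovec local  = { .iov_base = buf,         .iov_len = len };
		struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

		return process_vm_readv(pid, &local, 1, &remote, 1, 0);
	}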
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 37d369859c8e..8e862aaf0d90 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -55,6 +55,7 @@
55#define UV_BAU_TUNABLES_DIR "sgi_uv" 55#define UV_BAU_TUNABLES_DIR "sgi_uv"
56#define UV_BAU_TUNABLES_FILE "bau_tunables" 56#define UV_BAU_TUNABLES_FILE "bau_tunables"
57#define WHITESPACE " \t\n" 57#define WHITESPACE " \t\n"
58#define uv_mmask ((1UL << uv_hub_info->m_val) - 1)
58#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask)) 59#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
59#define cpubit_isset(cpu, bau_local_cpumask) \ 60#define cpubit_isset(cpu, bau_local_cpumask) \
60 test_bit((cpu), (bau_local_cpumask).bits) 61 test_bit((cpu), (bau_local_cpumask).bits)
@@ -656,11 +657,7 @@ static inline int atomic_read_short(const struct atomic_short *v)
656 */ 657 */
657static inline int atom_asr(short i, struct atomic_short *v) 658static inline int atom_asr(short i, struct atomic_short *v)
658{ 659{
659 short __i = i; 660 return i + xadd(&v->counter, i);
660 asm volatile(LOCK_PREFIX "xaddw %0, %1"
661 : "+r" (i), "+m" (v->counter)
662 : : "memory");
663 return i + __i;
664} 661}
665 662
666/* 663/*
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index f26544a15214..54a13aaebc40 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -46,6 +46,13 @@
46 * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant 46 * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
47 * of the nasid for socket usage. 47 * of the nasid for socket usage.
48 * 48 *
49 * GPA - (global physical address) a socket physical address converted
50 * so that it can be used by the GRU as a global address. Socket
51 * physical addresses 1) need additional NASID (node) bits added
52 * to the high end of the address, and 2) unaliased if the
53 * partition does not have a physical address 0. In addition, on
54 * UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
55 *
49 * 56 *
50 * NumaLink Global Physical Address Format: 57 * NumaLink Global Physical Address Format:
51 * +--------------------------------+---------------------+ 58 * +--------------------------------+---------------------+
@@ -141,6 +148,8 @@ struct uv_hub_info_s {
141 unsigned int gnode_extra; 148 unsigned int gnode_extra;
142 unsigned char hub_revision; 149 unsigned char hub_revision;
143 unsigned char apic_pnode_shift; 150 unsigned char apic_pnode_shift;
151 unsigned char m_shift;
152 unsigned char n_lshift;
144 unsigned long gnode_upper; 153 unsigned long gnode_upper;
145 unsigned long lowmem_remap_top; 154 unsigned long lowmem_remap_top;
146 unsigned long lowmem_remap_base; 155 unsigned long lowmem_remap_base;
@@ -177,6 +186,16 @@ static inline int is_uv2_hub(void)
177 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE; 186 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
178} 187}
179 188
189static inline int is_uv2_1_hub(void)
190{
191 return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE;
192}
193
194static inline int is_uv2_2_hub(void)
195{
196 return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE + 1;
197}
198
180union uvh_apicid { 199union uvh_apicid {
181 unsigned long v; 200 unsigned long v;
182 struct uvh_apicid_s { 201 struct uvh_apicid_s {
@@ -276,7 +295,10 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
276{ 295{
277 if (paddr < uv_hub_info->lowmem_remap_top) 296 if (paddr < uv_hub_info->lowmem_remap_top)
278 paddr |= uv_hub_info->lowmem_remap_base; 297 paddr |= uv_hub_info->lowmem_remap_base;
279 return paddr | uv_hub_info->gnode_upper; 298 paddr |= uv_hub_info->gnode_upper;
299 paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
300 ((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
301 return paddr;
280} 302}
281 303
282 304
@@ -300,16 +322,19 @@ static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
300 unsigned long remap_base = uv_hub_info->lowmem_remap_base; 322 unsigned long remap_base = uv_hub_info->lowmem_remap_base;
301 unsigned long remap_top = uv_hub_info->lowmem_remap_top; 323 unsigned long remap_top = uv_hub_info->lowmem_remap_top;
302 324
325 gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
326 ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
327 gpa = gpa & uv_hub_info->gpa_mask;
303 if (paddr >= remap_base && paddr < remap_base + remap_top) 328 if (paddr >= remap_base && paddr < remap_base + remap_top)
304 paddr -= remap_base; 329 paddr -= remap_base;
305 return paddr; 330 return paddr;
306} 331}
307 332
308 333
309/* gnode -> pnode */ 334/* gpa -> pnode */
310static inline unsigned long uv_gpa_to_gnode(unsigned long gpa) 335static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
311{ 336{
312 return gpa >> uv_hub_info->m_val; 337 return gpa >> uv_hub_info->n_lshift;
313} 338}
314 339
315/* gpa -> pnode */ 340/* gpa -> pnode */
@@ -320,6 +345,12 @@ static inline int uv_gpa_to_pnode(unsigned long gpa)
320 return uv_gpa_to_gnode(gpa) & n_mask; 345 return uv_gpa_to_gnode(gpa) & n_mask;
321} 346}
322 347
348/* gpa -> node offset*/
349static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
350{
351 return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
352}
353
323/* pnode, offset --> socket virtual */ 354/* pnode, offset --> socket virtual */
324static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset) 355static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
325{ 356{
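
The recurring (x << m_shift) >> m_shift idiom simply clears every bit at or above position 64 - m_shift, isolating the node-offset field. A worked example, under the assumption that m_shift == 64 - m_val (the concrete values below are made up for illustration):

	unsigned long gpa     = 0x00AB000012345678UL;	/* hypothetical GPA */
	unsigned int  m_shift = 38;			/* i.e. m_val == 26 */
	unsigned long offset  = (gpa << m_shift) >> m_shift;
	/* offset == gpa & ((1UL << 26) - 1) == 0x02345678 */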
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 10474fb1185d..cf1d73643f60 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -57,6 +57,7 @@
57 57
58#define UV1_HUB_PART_NUMBER 0x88a5 58#define UV1_HUB_PART_NUMBER 0x88a5
59#define UV2_HUB_PART_NUMBER 0x8eb8 59#define UV2_HUB_PART_NUMBER 0x8eb8
60#define UV2_HUB_PART_NUMBER_X 0x1111
60 61
61/* Compat: if this #define is present, UV headers support UV2 */ 62/* Compat: if this #define is present, UV headers support UV2 */
62#define UV2_HUB_IS_SUPPORTED 1 63#define UV2_HUB_IS_SUPPORTED 1
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 2caf290e9895..31f180c21ce9 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -350,6 +350,18 @@ enum vmcs_field {
 #define DEBUG_REG_ACCESS_REG(eq)	(((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */
 
 
+/*
+ * Exit Qualifications for APIC-Access
+ */
+#define APIC_ACCESS_OFFSET		0xfff	/* 11:0, offset within the APIC page */
+#define APIC_ACCESS_TYPE		0xf000	/* 15:12, access type */
+#define TYPE_LINEAR_APIC_INST_READ	(0 << 12)
+#define TYPE_LINEAR_APIC_INST_WRITE	(1 << 12)
+#define TYPE_LINEAR_APIC_INST_FETCH	(2 << 12)
+#define TYPE_LINEAR_APIC_EVENT		(3 << 12)
+#define TYPE_PHYSICAL_APIC_EVENT	(10 << 12)
+#define TYPE_PHYSICAL_APIC_INST		(15 << 12)
+
 /* segment AR */
 #define SEGMENT_AR_L_MASK (1 << 13)
 
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index d3d859035af9..1971e652d24b 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -152,6 +152,7 @@ struct x86_cpuinit_ops {
152/** 152/**
153 * struct x86_platform_ops - platform specific runtime functions 153 * struct x86_platform_ops - platform specific runtime functions
154 * @calibrate_tsc: calibrate TSC 154 * @calibrate_tsc: calibrate TSC
155 * @wallclock_init: init the wallclock device
155 * @get_wallclock: get time from HW clock like RTC etc. 156 * @get_wallclock: get time from HW clock like RTC etc.
156 * @set_wallclock: set time back to HW clock 157 * @set_wallclock: set time back to HW clock
157 * @is_untracked_pat_range exclude from PAT logic 158 * @is_untracked_pat_range exclude from PAT logic
@@ -160,11 +161,13 @@ struct x86_cpuinit_ops {
160 */ 161 */
161struct x86_platform_ops { 162struct x86_platform_ops {
162 unsigned long (*calibrate_tsc)(void); 163 unsigned long (*calibrate_tsc)(void);
164 void (*wallclock_init)(void);
163 unsigned long (*get_wallclock)(void); 165 unsigned long (*get_wallclock)(void);
164 int (*set_wallclock)(unsigned long nowtime); 166 int (*set_wallclock)(unsigned long nowtime);
165 void (*iommu_shutdown)(void); 167 void (*iommu_shutdown)(void);
166 bool (*is_untracked_pat_range)(u64 start, u64 end); 168 bool (*is_untracked_pat_range)(u64 start, u64 end);
167 void (*nmi_init)(void); 169 void (*nmi_init)(void);
170 unsigned char (*get_nmi_reason)(void);
168 int (*i8042_detect)(void); 171 int (*i8042_detect)(void);
169}; 172};
170 173
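
Together with the mach_traps.h rename above (get_nmi_reason() becoming default_get_nmi_reason()), this hook lets a platform substitute its own NMI-reason read for the legacy port 0x61 inb(). A hedged sketch of the override pattern (the platform names here are illustrative, not from this series):

	static unsigned char myplat_get_nmi_reason(void)
	{
		return 0;	/* no legacy port 0x61 on this platform */
	}

	static void __init myplat_init(void)
	{
		x86_platform.get_nmi_reason = myplat_get_nmi_reason;
	}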
diff --git a/arch/x86/include/asm/xen/grant_table.h b/arch/x86/include/asm/xen/grant_table.h
deleted file mode 100644
index fdbbb45767a6..000000000000
--- a/arch/x86/include/asm/xen/grant_table.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_X86_XEN_GRANT_TABLE_H
-#define _ASM_X86_XEN_GRANT_TABLE_H
-
-#define xen_alloc_vm_area(size)	alloc_vm_area(size)
-#define xen_free_vm_area(area)	free_vm_area(area)
-
-#endif /* _ASM_X86_XEN_GRANT_TABLE_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 417777de5a40..5728852fb90f 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -47,6 +47,7 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
 #include <xen/interface/physdev.h>
+#include <xen/interface/platform.h>
 
 /*
  * The hypercall asms have to meet several constraints:
@@ -301,6 +302,13 @@ HYPERVISOR_set_timer_op(u64 timeout)
 }
 
 static inline int
+HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
+{
+	platform_op->interface_version = XENPF_INTERFACE_VERSION;
+	return _hypercall1(int, dom0_op, platform_op);
+}
+
+static inline int
 HYPERVISOR_set_debugreg(int reg, unsigned long value)
 {
 	return _hypercall2(int, set_debugreg, reg, value);
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 5d4922ad4b9b..a1f2db5f1170 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -55,6 +55,7 @@ DEFINE_GUEST_HANDLE(char);
 DEFINE_GUEST_HANDLE(int);
 DEFINE_GUEST_HANDLE(long);
 DEFINE_GUEST_HANDLE(void);
+DEFINE_GUEST_HANDLE(uint64_t);
 #endif
 
 #ifndef HYPERVISOR_VIRT_START