Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/acpi.h  2
-rw-r--r--  arch/x86/include/asm/alternative-asm.h  4
-rw-r--r--  arch/x86/include/asm/alternative.h  20
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h  34
-rw-r--r--  arch/x86/include/asm/apic.h  13
-rw-r--r--  arch/x86/include/asm/arch_hweight.h  61
-rw-r--r--  arch/x86/include/asm/atomic.h  25
-rw-r--r--  arch/x86/include/asm/atomic64_32.h  278
-rw-r--r--  arch/x86/include/asm/atomic64_64.h  25
-rw-r--r--  arch/x86/include/asm/bitops.h  4
-rw-r--r--  arch/x86/include/asm/boot.h  2
-rw-r--r--  arch/x86/include/asm/cacheflush.h  46
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h  3
-rw-r--r--  arch/x86/include/asm/cpufeature.h  65
-rw-r--r--  arch/x86/include/asm/ds.h  302
-rw-r--r--  arch/x86/include/asm/dwarf2.h  12
-rw-r--r--  arch/x86/include/asm/e820.h  7
-rw-r--r--  arch/x86/include/asm/hardirq.h  2
-rw-r--r--  arch/x86/include/asm/hpet.h  1
-rw-r--r--  arch/x86/include/asm/hw_breakpoint.h  10
-rw-r--r--  arch/x86/include/asm/hyperv.h  11
-rw-r--r--  arch/x86/include/asm/hypervisor.h  27
-rw-r--r--  arch/x86/include/asm/i387.h  129
-rw-r--r--  arch/x86/include/asm/i8253.h  2
-rw-r--r--  arch/x86/include/asm/insn.h  2
-rw-r--r--  arch/x86/include/asm/inst.h  96
-rw-r--r--  arch/x86/include/asm/intel_scu_ipc.h  55
-rw-r--r--  arch/x86/include/asm/io.h  1
-rw-r--r--  arch/x86/include/asm/io_apic.h  13
-rw-r--r--  arch/x86/include/asm/k8.h  5
-rw-r--r--  arch/x86/include/asm/kgdb.h  3
-rw-r--r--  arch/x86/include/asm/kprobes.h  2
-rw-r--r--  arch/x86/include/asm/kvm.h  17
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h  46
-rw-r--r--  arch/x86/include/asm/kvm_host.h  80
-rw-r--r--  arch/x86/include/asm/kvm_para.h  13
-rw-r--r--  arch/x86/include/asm/mce.h  8
-rw-r--r--  arch/x86/include/asm/mpspec.h  10
-rw-r--r--  arch/x86/include/asm/mshyperv.h  14
-rw-r--r--  arch/x86/include/asm/msr-index.h  22
-rw-r--r--  arch/x86/include/asm/pci_x86.h  2
-rw-r--r--  arch/x86/include/asm/percpu.h  26
-rw-r--r--  arch/x86/include/asm/perf_event.h  76
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h  795
-rw-r--r--  arch/x86/include/asm/processor.h  47
-rw-r--r--  arch/x86/include/asm/ptrace-abi.h  57
-rw-r--r--  arch/x86/include/asm/ptrace.h  6
-rw-r--r--  arch/x86/include/asm/pvclock-abi.h  4
-rw-r--r--  arch/x86/include/asm/pvclock.h  1
-rw-r--r--  arch/x86/include/asm/rdc321x_defs.h  12
-rw-r--r--  arch/x86/include/asm/scatterlist.h  5
-rw-r--r--  arch/x86/include/asm/svm.h  9
-rw-r--r--  arch/x86/include/asm/thread_info.h  15
-rw-r--r--  arch/x86/include/asm/topology.h  26
-rw-r--r--  arch/x86/include/asm/traps.h  2
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h  247
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h  2
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h  528
-rw-r--r--  arch/x86/include/asm/vmware.h  27
-rw-r--r--  arch/x86/include/asm/vmx.h  12
-rw-r--r--  arch/x86/include/asm/xsave.h  7
61 files changed, 2111 insertions(+), 1267 deletions(-)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 56f462cf22d2..aa2c39d968fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -85,7 +85,6 @@ extern int acpi_ioapic;
 extern int acpi_noirq;
 extern int acpi_strict;
 extern int acpi_disabled;
-extern int acpi_ht;
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
@@ -97,7 +96,6 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
 static inline void disable_acpi(void)
 {
 	acpi_disabled = 1;
-	acpi_ht = 0;
 	acpi_pci_disabled = 1;
 	acpi_noirq = 1;
 }
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index b97f786a48d5..a63a68be1cce 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -6,8 +6,8 @@
 .macro LOCK_PREFIX
 1:	lock
 	.section .smp_locks,"a"
-	_ASM_ALIGN
-	_ASM_PTR 1b
+	.balign 4
+	.long 1b - .
 	.previous
 	.endm
 #else
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index b09ec55650b3..03b6bb5394a0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -28,20 +28,20 @@
  */
 
 #ifdef CONFIG_SMP
-#define LOCK_PREFIX \
+#define LOCK_PREFIX_HERE \
 		".section .smp_locks,\"a\"\n"	\
-		_ASM_ALIGN "\n"			\
-		_ASM_PTR "661f\n" /* address */	\
+		".balign 4\n"			\
+		".long 671f - .\n" /* offset */	\
 		".previous\n"			\
-		"661:\n\tlock; "
+		"671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
 
 #else /* ! CONFIG_SMP */
+#define LOCK_PREFIX_HERE ""
 #define LOCK_PREFIX ""
 #endif
 
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
 struct alt_instr {
 	u8 *instr;		/* original instruction */
 	u8 *replacement;
@@ -96,6 +96,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
96 ".previous" 96 ".previous"
97 97
98/* 98/*
99 * This must be included *after* the definition of ALTERNATIVE due to
100 * <asm/arch_hweight.h>
101 */
102#include <asm/cpufeature.h>
103
104/*
99 * Alternative instructions for different CPU types or capabilities. 105 * Alternative instructions for different CPU types or capabilities.
100 * 106 *
101 * This allows to use optimized instructions even on generic binary 107 * This allows to use optimized instructions even on generic binary
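For illustration (not part of this commit, and my_atomic_inc is an assumed name): every instruction written through LOCK_PREFIX now records a 32-bit offset to itself in .smp_locks, which is what lets a uniprocessor boot patch the lock prefix away. A minimal sketch of a consumer:

	static inline void my_atomic_inc(int *v)
	{
		/* expands to "671:\n\tlock; incl %0" on SMP and logs
		 * the site in .smp_locks via LOCK_PREFIX_HERE */
		asm volatile(LOCK_PREFIX "incl %0" : "+m" (*v));
	}
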
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 86a0ff0aeac7..7014e88bc779 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -174,6 +174,40 @@
 				 (~((1ULL << (12 + ((lvl) * 9))) - 1)))
 #define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
 
+/*
+ * Returns the page table level to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_LEVEL(pagesize) \
+		((__ffs(pagesize) - 12) / 9)
+/*
+ * Returns the number of ptes to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_PTE_COUNT(pagesize) \
+		(1ULL << ((__ffs(pagesize) - 12) % 9))
+
+/*
+ * Aligns a given io-virtual address to a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_ALIGN(address, pagesize) \
+		((address) & ~((pagesize) - 1))
+/*
+ * Creates an IOMMU PTE for an address and a given pagesize
+ * The PTE has no permission bits set
+ * Pagesize is expected to be a power-of-two larger than 4096
+ */
+#define PAGE_SIZE_PTE(address, pagesize)		\
+		(((address) | ((pagesize) - 1)) &	\
+		 (~(pagesize >> 1)) & PM_ADDR_MASK)
+
+/*
+ * Takes a PTE value with mode=0x07 and returns the page size it maps
+ */
+#define PTE_PAGE_SIZE(pte) \
+	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
+
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
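A quick worked example of the macro arithmetic above (illustrative only; lowest_set() is a plain-C stand-in for the kernel's __ffs()):

	#include <stdio.h>

	/* index of the lowest set bit; input must be non-zero */
	static unsigned int lowest_set(unsigned long long x)
	{
		unsigned int i = 0;
		while (!(x & 1)) { x >>= 1; i++; }
		return i;
	}

	int main(void)
	{
		unsigned long long pagesize = 1ULL << 25;	/* 32 MiB */

		/* PAGE_SIZE_LEVEL: (25 - 12) / 9 = 1 -> level-1 table */
		printf("level = %u\n", (lowest_set(pagesize) - 12) / 9);
		/* PAGE_SIZE_PTE_COUNT: 1 << ((25 - 12) % 9) = 16 ptes */
		printf("ptes  = %llu\n",
		       1ULL << ((lowest_set(pagesize) - 12) % 9));
		return 0;
	}
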
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index b4ac2cdcb64f..1fa03e04ae44 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -373,6 +373,7 @@ extern atomic_t init_deasserted;
 extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
 #endif
 
+#ifdef CONFIG_X86_LOCAL_APIC
 static inline u32 apic_read(u32 reg)
 {
 	return apic->read(reg);
@@ -403,10 +404,19 @@ static inline u32 safe_apic_wait_icr_idle(void)
 	return apic->safe_wait_icr_idle();
 }
 
+#else /* CONFIG_X86_LOCAL_APIC */
+
+static inline u32 apic_read(u32 reg) { return 0; }
+static inline void apic_write(u32 reg, u32 val) { }
+static inline u64 apic_icr_read(void) { return 0; }
+static inline void apic_icr_write(u32 low, u32 high) { }
+static inline void apic_wait_icr_idle(void) { }
+static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 static inline void ack_APIC_irq(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
 	/*
 	 * ack_APIC_irq() actually gets compiled as a single instruction
 	 * ... yummie.
@@ -414,7 +424,6 @@ static inline void ack_APIC_irq(void)
 
 	/* Docs say use 0 for future compatibility */
 	apic_write(APIC_EOI, 0);
-#endif
 }
 
 static inline unsigned default_get_apic_id(unsigned long x)
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..9686c3d9ff73
--- /dev/null
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#ifdef CONFIG_64BIT
+/* popcnt %edi, %eax -- redundant REX prefix for alignment */
+#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %rdi, %rax */
+#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+#define REG_IN "D"
+#define REG_OUT "a"
+#else
+/* popcnt %eax, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
+#define REG_IN "a"
+#define REG_OUT "a"
+#endif
+
+/*
+ * __sw_hweightXX are called from within the alternatives below
+ * and callee-clobbered registers need to be taken care of. See
+ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+ * compiler switches.
+ */
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	unsigned int res = 0;
+
+	asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+		     : "="REG_OUT (res)
+		     : REG_IN (w));
+
+	return res;
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __arch_hweight32(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+	unsigned long res = 0;
+
+#ifdef CONFIG_X86_32
+	return  __arch_hweight32((u32)w) +
+		__arch_hweight32((u32)(w >> 32));
+#else
+	asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+		     : "="REG_OUT (res)
+		     : REG_IN (w));
+#endif /* CONFIG_X86_32 */
+
+	return res;
+}
+
+#endif
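For reference, a minimal sketch of the kind of software fallback the ALTERNATIVE above dispatches to when the CPU lacks POPCNT; this is the classic parallel bit-count, written portably, and not the kernel's exact __sw_hweight32:

	static unsigned int sw_hweight32(unsigned int w)
	{
		w -= (w >> 1) & 0x55555555;			/* sum bit pairs */
		w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);/* sum nibbles */
		w  = (w + (w >> 4)) & 0x0f0f0f0f;		/* sum bytes */
		return (w * 0x01010101) >> 24;			/* add the four bytes */
	}
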
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 8f8217b9bdac..952a826ac4e5 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -22,7 +22,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return v->counter;
+	return (*(volatile int *)&(v)->counter);
 }
 
 /**
@@ -246,6 +246,29 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+/*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline int atomic_dec_if_positive(atomic_t *v)
+{
+	int c, old, dec;
+	c = atomic_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 /**
  * atomic_inc_short - increment of a short integer
  * @v: pointer to type int
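A hedged usage sketch of the new helper (slots and try_get_slot are illustrative names, not kernel API): because the return value is the old value minus one, a negative result means the counter was already zero and was left untouched.

	static atomic_t slots = ATOMIC_INIT(4);

	static int try_get_slot(void)
	{
		/* >= 0 means we successfully took one slot */
		return atomic_dec_if_positive(&slots) >= 0;
	}
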
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 03027bf28de5..2a934aa19a43 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,109 +14,193 @@ typedef struct {
 
 #define ATOMIC64_INIT(val)	{ (val) }
 
-extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+#ifdef CONFIG_X86_CMPXCHG64
+#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#else
+#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#endif
+
+#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+
+/**
+ * atomic64_cmpxchg - cmpxchg atomic64 variable
+ * @p: pointer to type atomic64_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	return cmpxchg64(&v->counter, o, n);
+}
 
 /**
  * atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
  *
- * Atomically xchgs the value of @ptr to @new_val and returns
+ * Atomically xchgs the value of @v to @n and returns
  * the old value.
  */
-extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
+{
+	long long o;
+	unsigned high = (unsigned)(n >> 32);
+	unsigned low = (unsigned)n;
+	asm volatile(ATOMIC64_ALTERNATIVE(xchg)
+		     : "=A" (o), "+b" (low), "+c" (high)
+		     : "S" (v)
+		     : "memory"
+		     );
+	return o;
+}
 
 /**
  * atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
  *
- * Atomically sets the value of @ptr to @new_val.
+ * Atomically sets the value of @v to @n.
  */
-extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned high = (unsigned)(i >> 32);
+	unsigned low = (unsigned)i;
+	asm volatile(ATOMIC64_ALTERNATIVE(set)
+		     : "+b" (low), "+c" (high)
+		     : "S" (v)
+		     : "eax", "edx", "memory"
+		     );
+}
 
 /**
  * atomic64_read - read atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically reads the value of @ptr and returns it.
+ * Atomically reads the value of @v and returns it.
  */
-static inline u64 atomic64_read(atomic64_t *ptr)
+static inline long long atomic64_read(atomic64_t *v)
 {
-	u64 res;
-
-	/*
-	 * Note, we inline this atomic64_t primitive because
-	 * it only clobbers EAX/EDX and leaves the others
-	 * untouched. We also (somewhat subtly) rely on the
-	 * fact that cmpxchg8b returns the current 64-bit value
-	 * of the memory location we are touching:
-	 */
-	asm volatile(
-		"mov %%ebx, %%eax\n\t"
-		"mov %%ecx, %%edx\n\t"
-		LOCK_PREFIX "cmpxchg8b %1\n"
-		: "=&A" (res)
-		: "m" (*ptr)
-		);
-
-	return res;
-}
-
-extern u64 atomic64_read(atomic64_t *ptr);
+	long long r;
+	asm volatile(ATOMIC64_ALTERNATIVE(read)
+		     : "=A" (r), "+c" (v)
+		     : : "memory"
+		     );
+	return r;
+ }
 
 /**
  * atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ * Atomically adds @i to @v and returns @i + *@v
  */
-extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE(add_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
 
 /*
  * Other variants with different arithmetic operators:
  */
-extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
-extern u64 atomic64_inc_return(atomic64_t *ptr);
-extern u64 atomic64_dec_return(atomic64_t *ptr);
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
+
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+	long long a;
+	asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
+		     : "=A" (a)
+		     : "S" (v)
+		     : "memory", "ecx"
+		     );
+	return a;
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+	long long a;
+	asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
+		     : "=A" (a)
+		     : "S" (v)
+		     : "memory", "ecx"
+		     );
+	return a;
+}
 
 /**
  * atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
- * Atomically adds @delta to @ptr.
+ * Atomically adds @i to @v.
  */
-extern void atomic64_add(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
 
 /**
  * atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
  *
- * Atomically subtracts @delta from @ptr.
+ * Atomically subtracts @i from @v.
  */
-extern void atomic64_sub(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_sub(long long i, atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
+		     : "+A" (i), "+c" (v)
+		     : : "memory"
+		     );
+	return i;
+}
 
 /**
  * atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
  *
- * Atomically subtracts @delta from @ptr and returns
+ * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
  * other cases.
  */
-extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+	return atomic64_sub_return(i, v) == 0;
+}
 
 /**
  * atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically increments @ptr by 1.
+ * Atomically increments @v by 1.
  */
-extern void atomic64_inc(atomic64_t *ptr);
+static inline void atomic64_inc(atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
+		     : : "S" (v)
+		     : "memory", "eax", "ecx", "edx"
+		     );
+}
 
 /**
  * atomic64_dec - decrement atomic64 variable
@@ -124,37 +208,97 @@ extern void atomic64_inc(atomic64_t *ptr);
  *
  * Atomically decrements @ptr by 1.
  */
-extern void atomic64_dec(atomic64_t *ptr);
+static inline void atomic64_dec(atomic64_t *v)
+{
+	asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
+		     : : "S" (v)
+		     : "memory", "eax", "ecx", "edx"
+		     );
+}
 
 /**
  * atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically decrements @ptr by 1 and
+ * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
  * cases.
  */
-extern int atomic64_dec_and_test(atomic64_t *ptr);
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+	return atomic64_dec_return(v) == 0;
+}
 
 /**
  * atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
  *
- * Atomically increments @ptr by 1
+ * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-extern int atomic64_inc_and_test(atomic64_t *ptr);
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+	return atomic64_inc_return(v) == 0;
+}
 
 /**
  * atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
- * Atomically adds @delta to @ptr and returns true
+ * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
+static inline int atomic64_add_negative(long long i, atomic64_t *v)
+{
+	return atomic64_add_return(i, v) < 0;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned low = (unsigned)u;
+	unsigned high = (unsigned)(u >> 32);
+	asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
+		     : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
+		     : : "memory");
+	return (int)a;
+}
+
+
+static inline int atomic64_inc_not_zero(atomic64_t *v)
+{
+	int r;
+	asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
+		     : "=a" (r)
+		     : "S" (v)
+		     : "ecx", "edx", "memory"
+		     );
+	return r;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long long r;
+	asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
+		     : "=A" (r)
+		     : "S" (v)
+		     : "ecx", "memory"
+		     );
+	return r;
+}
+
+#undef ATOMIC64_ALTERNATIVE
+#undef ATOMIC64_ALTERNATIVE_
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
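Conceptually, the out-of-line atomic64_*_cx8 helpers these alternatives call are compare-exchange loops over cmpxchg8b. A hedged sketch of that loop in portable C11, purely for illustration (not the kernel's assembly):

	#include <stdatomic.h>
	#include <stdint.h>

	static int64_t add_return_sketch(_Atomic int64_t *v, int64_t i)
	{
		int64_t old = atomic_load(v);

		/* retry until no other CPU changed *v between load and CAS;
		 * on failure, 'old' is refreshed with the current value */
		while (!atomic_compare_exchange_weak(v, &old, old + i))
			;
		return old + i;
	}
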
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 51c5b4056929..49fd1ea22951 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
  */
 static inline long atomic64_read(const atomic64_t *v)
 {
-	return v->counter;
+	return (*(volatile long *)&(v)->counter);
 }
 
24/** 24/**
@@ -221,4 +221,27 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long c, old, dec;
+	c = atomic64_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic64_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 #endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 02b47a603fc8..545776efeb16 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -444,7 +444,9 @@ static inline int fls(int x)
 
 #define ARCH_HAS_FAST_MULTIPLIER 1
 
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 7a1065958ba9..3b62ab56c7a0 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -24,7 +24,7 @@
 #define MIN_KERNEL_ALIGN	(_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
 
 #if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
-	(CONFIG_PHYSICAL_ALIGN < (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2))
+	(CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
 #error "Invalid value for CONFIG_PHYSICAL_ALIGN"
 #endif
 
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 634c40a739a6..63e35ec9075c 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -44,9 +44,6 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_WC				PG_arch_1
-PAGEFLAG(WC, WC)
-
 #ifdef CONFIG_X86_PAT
 /*
  * X86 PAT uses page flags WC and Uncached together to keep track of
@@ -55,16 +52,24 @@ PAGEFLAG(WC, WC)
  * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
  * been changed from its default (value of -1 used to denote this).
  * Note we do not support _PAGE_CACHE_UC here.
- *
- * Caller must hold memtype_lock for atomicity.
  */
+
+#define _PGMT_DEFAULT		0
+#define _PGMT_WC		(1UL << PG_arch_1)
+#define _PGMT_UC_MINUS		(1UL << PG_uncached)
+#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
+
 static inline unsigned long get_page_memtype(struct page *pg)
 {
-	if (!PageUncached(pg) && !PageWC(pg))
+	unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+	if (pg_flags == _PGMT_DEFAULT)
 		return -1;
-	else if (!PageUncached(pg) && PageWC(pg))
+	else if (pg_flags == _PGMT_WC)
 		return _PAGE_CACHE_WC;
-	else if (PageUncached(pg) && !PageWC(pg))
+	else if (pg_flags == _PGMT_UC_MINUS)
 		return _PAGE_CACHE_UC_MINUS;
 	else
 		return _PAGE_CACHE_WB;
@@ -72,25 +77,26 @@ static inline unsigned long get_page_memtype(struct page *pg)
 
 static inline void set_page_memtype(struct page *pg, unsigned long memtype)
 {
+	unsigned long memtype_flags = _PGMT_DEFAULT;
+	unsigned long old_flags;
+	unsigned long new_flags;
+
 	switch (memtype) {
 	case _PAGE_CACHE_WC:
-		ClearPageUncached(pg);
-		SetPageWC(pg);
+		memtype_flags = _PGMT_WC;
 		break;
 	case _PAGE_CACHE_UC_MINUS:
-		SetPageUncached(pg);
-		ClearPageWC(pg);
+		memtype_flags = _PGMT_UC_MINUS;
 		break;
 	case _PAGE_CACHE_WB:
-		SetPageUncached(pg);
-		SetPageWC(pg);
-		break;
-	default:
-	case -1:
-		ClearPageUncached(pg);
-		ClearPageWC(pg);
+		memtype_flags = _PGMT_WB;
 		break;
 	}
+
+	do {
+		old_flags = pg->flags;
+		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
 }
 #else
 static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
@@ -139,9 +145,11 @@ int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
 int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
 /*
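The cmpxchg loop in set_page_memtype() above replaces the old memtype_lock requirement with a lock-free read-modify-write. A hedged, self-contained sketch of that pattern in C11 (the names word and FLAG_MASK are illustrative, not kernel API):

	#include <stdatomic.h>

	#define FLAG_MASK	0x3UL	/* the two flag bits being managed */

	static void set_flags_lockfree(_Atomic unsigned long *word,
				       unsigned long bits)
	{
		unsigned long old = atomic_load(word);
		unsigned long desired;

		do {
			/* recompute from a fresh snapshot each iteration */
			desired = (old & ~FLAG_MASK) | bits;
		} while (!atomic_compare_exchange_weak(word, &old, desired));
	}
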
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ffb9bb6b6c37..8859e12dd3cf 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -271,7 +271,8 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 	__typeof__(*(ptr)) __ret;				\
 	__typeof__(*(ptr)) __old = (o);				\
 	__typeof__(*(ptr)) __new = (n);				\
-	alternative_io("call cmpxchg8b_emu",			\
+	alternative_io(LOCK_PREFIX_HERE				\
+		       "call cmpxchg8b_emu",			\
 		       "lock; cmpxchg8b (%%esi)" ,		\
 		       X86_FEATURE_CX8,				\
 		       "=A" (__ret),				\
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0cd82d068613..468145914389 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -161,6 +161,7 @@
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
 #define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
+#define X86_FEATURE_CPB		(7*32+ 2) /* AMD Core Performance Boost */
 
 /* Virtualization flags: Linux defined */
 #define X86_FEATURE_TPR_SHADOW  (8*32+ 0) /* Intel TPR Shadow */
@@ -175,6 +176,7 @@
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
+#include <asm/asm.h>
 #include <linux/bitops.h>
 
 extern const char * const x86_cap_flags[NCAPINTS*32];
@@ -283,6 +285,69 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+/*
+ * Static testing of CPU features. Used the same as boot_cpu_has().
+ * These are only valid after alternatives have run, but will statically
+ * patch the target code for additional performance.
+ *
+ */
+static __always_inline __pure bool __static_cpu_has(u8 bit)
+{
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+	asm goto("1: jmp %l[t_no]\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 _ASM_ALIGN "\n"
+		 _ASM_PTR "1b\n"
+		 _ASM_PTR "0\n"			/* no replacement */
+		 " .byte %P0\n"			/* feature bit */
+		 " .byte 2b - 1b\n"		/* source len */
+		 " .byte 0\n"			/* replacement len */
+		 " .byte 0xff + 0 - (2b-1b)\n"	/* padding */
+		 ".previous\n"
+		 : : "i" (bit) : : t_no);
+	return true;
+ t_no:
+	return false;
+#else
+	u8 flag;
+	/* Open-coded due to __stringify() in ALTERNATIVE() */
+	asm volatile("1: movb $0,%0\n"
+		     "2:\n"
+		     ".section .altinstructions,\"a\"\n"
+		     _ASM_ALIGN "\n"
+		     _ASM_PTR "1b\n"
+		     _ASM_PTR "3f\n"
+		     " .byte %P1\n"		/* feature bit */
+		     " .byte 2b - 1b\n"		/* source len */
+		     " .byte 4f - 3f\n"		/* replacement len */
+		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "3: movb $1,%0\n"
+		     "4:\n"
+		     ".previous\n"
+		     : "=qm" (flag) : "i" (bit));
+	return flag;
+#endif
+}
+
+#if __GNUC__ >= 4
+#define static_cpu_has(bit)					\
+(								\
+	__builtin_constant_p(boot_cpu_has(bit)) ?		\
+		boot_cpu_has(bit) :				\
+	(__builtin_constant_p(bit) && !((bit) & ~0xff)) ?	\
+		__static_cpu_has(bit) :				\
+		boot_cpu_has(bit)				\
+)
+#else
+/*
+ * gcc 3.x is too stupid to do the static test; fall back to dynamic.
+ */
+#define static_cpu_has(bit) boot_cpu_has(bit)
+#endif
+
 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 
 #endif /* _ASM_X86_CPUFEATURE_H */
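A hedged usage sketch (fast_copy and slow_copy are assumed, illustrative helpers): static_cpu_has() reads like boot_cpu_has() at the call site, but once alternatives have run, the branch is patched into a constant jump or fall-through rather than a runtime bit test.

	static void copy_best(void *dst, const void *src, size_t len)
	{
		if (static_cpu_has(X86_FEATURE_XMM2))
			fast_copy(dst, src, len);	/* patched in when SSE2 exists */
		else
			slow_copy(dst, src, len);	/* patched in otherwise */
	}
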
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
deleted file mode 100644
index 70dac199b093..000000000000
--- a/arch/x86/include/asm/ds.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Debug Store (DS) support
- *
- * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for branch trace store (BTS) and
- * precise-event based sampling (PEBS).
- *
- * It manages:
- * - DS and BTS hardware configuration
- * - buffer overflow handling (to be done)
- * - buffer access
- *
- * It does not do:
- * - security checking (is the caller allowed to trace the task)
- * - buffer allocation (memory accounting)
- *
- *
- * Copyright (C) 2007-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
- */
-
-#ifndef _ASM_X86_DS_H
-#define _ASM_X86_DS_H
-
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/err.h>
-
-
-#ifdef CONFIG_X86_DS
-
-struct task_struct;
-struct ds_context;
-struct ds_tracer;
-struct bts_tracer;
-struct pebs_tracer;
-
-typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
-typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
-
-
-/*
- * A list of features plus corresponding macros to talk about them in
- * the ds_request function's flags parameter.
- *
- * We use the enum to index an array of corresponding control bits;
- * we use the macro to index a flags bit-vector.
- */
-enum ds_feature {
-	dsf_bts = 0,
-	dsf_bts_kernel,
-#define BTS_KERNEL (1 << dsf_bts_kernel)
-	/* trace kernel-mode branches */
-
-	dsf_bts_user,
-#define BTS_USER (1 << dsf_bts_user)
-	/* trace user-mode branches */
-
-	dsf_bts_overflow,
-	dsf_bts_max,
-	dsf_pebs = dsf_bts_max,
-
-	dsf_pebs_max,
-	dsf_ctl_max = dsf_pebs_max,
-	dsf_bts_timestamps = dsf_ctl_max,
-#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps)
-	/* add timestamps into BTS trace */
-
-#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS)
-};
-
-
-/*
- * Request BTS or PEBS
- *
- * Due to alignement constraints, the actual buffer may be slightly
- * smaller than the requested or provided buffer.
- *
- * Returns a pointer to a tracer structure on success, or
- * ERR_PTR(errcode) on failure.
- *
- * The interrupt threshold is independent from the overflow callback
- * to allow users to use their own overflow interrupt handling mechanism.
- *
- * The function might sleep.
- *
- * task: the task to request recording for
- * cpu:  the cpu to request recording for
- * base: the base pointer for the (non-pageable) buffer;
- * size: the size of the provided buffer in bytes
- * ovfl: pointer to a function to be called on buffer overflow;
- *       NULL if cyclic buffer requested
- * th: the interrupt threshold in records from the end of the buffer;
- *     -1 if no interrupt threshold is requested.
- * flags: a bit-mask of the above flags
- */
-extern struct bts_tracer *ds_request_bts_task(struct task_struct *task,
-					      void *base, size_t size,
-					      bts_ovfl_callback_t ovfl,
-					      size_t th, unsigned int flags);
-extern struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
-					     bts_ovfl_callback_t ovfl,
-					     size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
-						void *base, size_t size,
-						pebs_ovfl_callback_t ovfl,
-						size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_cpu(int cpu,
-					       void *base, size_t size,
-					       pebs_ovfl_callback_t ovfl,
-					       size_t th, unsigned int flags);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Must be called with irq's enabled.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern void ds_release_bts(struct bts_tracer *tracer);
-extern void ds_suspend_bts(struct bts_tracer *tracer);
-extern void ds_resume_bts(struct bts_tracer *tracer);
-extern void ds_release_pebs(struct pebs_tracer *tracer);
-extern void ds_suspend_pebs(struct pebs_tracer *tracer);
-extern void ds_resume_pebs(struct pebs_tracer *tracer);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Cpu tracers must call this on the traced cpu.
- * Task tracers must call ds_release_~_noirq() for themselves.
- *
- * May be called with irq's disabled.
- *
- * Returns 0 if successful;
- * -EPERM if the cpu tracer does not trace the current cpu.
- * -EPERM if the task tracer does not trace itself.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_release_bts_noirq(struct bts_tracer *tracer);
-extern int ds_suspend_bts_noirq(struct bts_tracer *tracer);
-extern int ds_resume_bts_noirq(struct bts_tracer *tracer);
-extern int ds_release_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_suspend_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_resume_pebs_noirq(struct pebs_tracer *tracer);
-
-
-/*
- * The raw DS buffer state as it is used for BTS and PEBS recording.
- *
- * This is the low-level, arch-dependent interface for working
- * directly on the raw trace data.
- */
-struct ds_trace {
-	/* the number of bts/pebs records */
-	size_t n;
-	/* the size of a bts/pebs record in bytes */
-	size_t size;
-	/* pointers into the raw buffer:
-	   - to the first entry */
-	void *begin;
-	/* - one beyond the last entry */
-	void *end;
-	/* - one beyond the newest entry */
-	void *top;
-	/* - the interrupt threshold */
-	void *ith;
-	/* flags given on ds_request() */
-	unsigned int flags;
-};
-
-/*
- * An arch-independent view on branch trace data.
- */
-enum bts_qualifier {
-	bts_invalid,
-#define BTS_INVALID bts_invalid
-
-	bts_branch,
-#define BTS_BRANCH bts_branch
-
-	bts_task_arrives,
-#define BTS_TASK_ARRIVES bts_task_arrives
-
-	bts_task_departs,
-#define BTS_TASK_DEPARTS bts_task_departs
-
-	bts_qual_bit_size = 4,
-	bts_qual_max = (1 << bts_qual_bit_size),
-};
-
-struct bts_struct {
-	__u64 qualifier;
-	union {
-		/* BTS_BRANCH */
-		struct {
-			__u64 from;
-			__u64 to;
-		} lbr;
-		/* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
-		struct {
-			__u64 clock;
-			pid_t pid;
-		} event;
-	} variant;
-};
-
-
-/*
- * The BTS state.
- *
- * This gives access to the raw DS state and adds functions to provide
- * an arch-independent view of the BTS data.
- */
-struct bts_trace {
-	struct ds_trace ds;
-
-	int (*read)(struct bts_tracer *tracer, const void *at,
-		    struct bts_struct *out);
-	int (*write)(struct bts_tracer *tracer, const struct bts_struct *in);
-};
-
-
-/*
- * The PEBS state.
- *
- * This gives access to the raw DS state and the PEBS-specific counter
- * reset value.
- */
-struct pebs_trace {
-	struct ds_trace ds;
-
-	/* the number of valid counters in the below array */
-	unsigned int counters;
-
-#define MAX_PEBS_COUNTERS 4
-	/* the counter reset value */
-	unsigned long long counter_reset[MAX_PEBS_COUNTERS];
-};
-
-
-/*
- * Read the BTS or PEBS trace.
- *
- * Returns a view on the trace collected for the parameter tracer.
- *
- * The view remains valid as long as the traced task is not running or
- * the tracer is suspended.
- * Writes into the trace buffer are not reflected.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer);
-extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer);
-
-
-/*
- * Reset the write pointer of the BTS/PEBS buffer.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_reset_bts(struct bts_tracer *tracer);
-extern int ds_reset_pebs(struct pebs_tracer *tracer);
-
-/*
- * Set the PEBS counter reset value.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_pebs()
- * counter: the index of the counter
- * value: the new counter reset value
- */
-extern int ds_set_pebs_reset(struct pebs_tracer *tracer,
-			     unsigned int counter, u64 value);
-
-/*
- * Initialization
- */
-struct cpuinfo_x86;
-extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
-
-/*
- * Context switch work
- */
-extern void ds_switch_to(struct task_struct *prev, struct task_struct *next);
-
-#else /* CONFIG_X86_DS */
-
-struct cpuinfo_x86;
-static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
-static inline void ds_switch_to(struct task_struct *prev,
-				struct task_struct *next) {}
-
-#endif /* CONFIG_X86_DS */
-#endif /* _ASM_X86_DS_H */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index ae6253ab9029..733f7e91e7a9 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -34,6 +34,18 @@
 #define CFI_SIGNAL_FRAME
 #endif
 
+#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+	/*
+	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+	 * The latter we currently just discard since we don't do DWARF
+	 * unwinding at runtime. So only the offline DWARF information is
+	 * useful to anyone. Note we should not use this directive if this
+	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
+	 * changed so it doesn't discard .eh_frame.
+	 */
+	.cfi_sections .debug_frame
+#endif
+
 #else
 
 /*
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 0e22296790d3..ec8a52d14ab1 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -45,7 +45,12 @@
 #define E820_NVS	4
 #define E820_UNUSABLE	5
 
-/* reserved RAM used by kernel itself */
+/*
+ * reserved RAM used by kernel itself
+ * if CONFIG_INTEL_TXT is enabled, memory of this type will be
+ * included in the S3 integrity calculation and so should not include
+ * any memory that BIOS might alter over the S3 transition
+ */
 #define E820_RESERVED_KERN        128
 
 #ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f8576427cfe..aeab29aee617 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)
+#define inc_irq_stat(member)	percpu_inc(irq_stat.member)
 
 #define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
 
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1d5c08a1bdfd..004e6e25e913 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,6 +68,7 @@ extern unsigned long force_hpet_address;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
+extern u8 hpet_readback_cmp;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 2a1bd8f4f23a..942255310e6a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -41,12 +41,16 @@ struct arch_hw_breakpoint {
 /* Total number of available HW breakpoint registers */
 #define HBP_NUM 4
 
+static inline int hw_breakpoint_slots(int type)
+{
+	return HBP_NUM;
+}
+
 struct perf_event;
 struct pmu;
 
-extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
-					 struct task_struct *tsk);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
 					   unsigned long val, void *data);
 
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
index e153a2b3889a..5df477ac3af7 100644
--- a/arch/x86/include/asm/hyperv.h
+++ b/arch/x86/include/asm/hyperv.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_KVM_HYPERV_H
-#define _ASM_X86_KVM_HYPERV_H
+#ifndef _ASM_X86_HYPERV_H
+#define _ASM_X86_HYPERV_H
 
 #include <linux/types.h>
 
@@ -14,6 +14,10 @@
 #define HYPERV_CPUID_ENLIGHTMENT_INFO		0x40000004
 #define HYPERV_CPUID_IMPLEMENT_LIMITS		0x40000005
 
+#define HYPERV_HYPERVISOR_PRESENT_BIT		0x80000000
+#define HYPERV_CPUID_MIN			0x40000005
+#define HYPERV_CPUID_MAX			0x4000ffff
+
 /*
  * Feature identification. EAX indicates which features are available
  * to the partition based upon the current partition privileges.
@@ -129,6 +133,9 @@
 /* MSR used to provide vcpu index */
 #define HV_X64_MSR_VP_INDEX			0x40000002
 
+/* MSR used to read the per-partition time reference counter */
+#define HV_X64_MSR_TIME_REF_COUNT		0x40000020
+
 /* Define the virtual APIC registers */
 #define HV_X64_MSR_EOI				0x40000070
 #define HV_X64_MSR_ICR				0x40000071
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b78c0941e422..70abda7058c8 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -17,10 +17,33 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
  */
-#ifndef ASM_X86__HYPERVISOR_H
-#define ASM_X86__HYPERVISOR_H
+#ifndef _ASM_X86_HYPERVISOR_H
+#define _ASM_X86_HYPERVISOR_H
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);
 extern void init_hypervisor_platform(void);
 
+/*
+ * x86 hypervisor information
+ */
+struct hypervisor_x86 {
+	/* Hypervisor name */
+	const char	*name;
+
+	/* Detection routine */
+	bool		(*detect)(void);
+
+	/* Adjust CPU feature bits (run once per CPU) */
+	void		(*set_cpu_features)(struct cpuinfo_x86 *);
+
+	/* Platform setup (run once per boot) */
+	void		(*init_platform)(void);
+};
+
+extern const struct hypervisor_x86 *x86_hyper;
+
+/* Recognized hypervisors */
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+
 #endif
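A hedged sketch of what a descriptor in this scheme looks like; example_detect() and the "Example" entry are invented for illustration and correspond to no actual kernel hypervisor:

	static bool example_detect(void)
	{
		/* e.g. probe a CPUID hypervisor leaf and return true on a match */
		return false;
	}

	static const struct hypervisor_x86 x86_hyper_example = {
		.name	= "Example",
		.detect	= example_detect,
	};
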
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index da2930924501..c991b3a7b904 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -16,7 +16,9 @@
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
 #include <linux/hardirq.h>
+#include <linux/slab.h>
 #include <asm/asm.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/user.h>
@@ -56,6 +58,11 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 #define X87_FSW_ES (1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_xsave(void)
+{
+	return static_cpu_has(X86_FEATURE_XSAVE);
+}
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
@@ -91,15 +98,15 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
    values. The kernel data segment can be sometimes 0 and sometimes
    new user value. Both should be ok.
    Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
 {
-	struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	struct xsave_struct *xstate = &fpu->state->xsave;
+	struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
 	/*
 	 * xsave header may indicate the init state of the FP.
 	 */
-	if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+	if (use_xsave() &&
 	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
 		return;
 
@@ -111,6 +118,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
 			 X86_FEATURE_FXSAVE_LEAK);
 }
 
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+	fpu_clear(&tsk->thread.fpu);
+}
+
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
@@ -135,7 +147,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	return err;
 }
 
-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
 {
 	/* Using "rex64; fxsave %0" is broken because, if the memory operand
 	   uses any extended registers for addressing, a second REX prefix
@@ -145,42 +157,45 @@ static inline void fxsave(struct task_struct *tsk)
145 /* Using "fxsaveq %0" would be the ideal choice, but is only supported 157 /* Using "fxsaveq %0" would be the ideal choice, but is only supported
146 starting with gas 2.16. */ 158 starting with gas 2.16. */
147 __asm__ __volatile__("fxsaveq %0" 159 __asm__ __volatile__("fxsaveq %0"
148 : "=m" (tsk->thread.xstate->fxsave)); 160 : "=m" (fpu->state->fxsave));
149#elif 0 161#elif 0
150 /* Using, as a workaround, the properly prefixed form below isn't 162 /* Using, as a workaround, the properly prefixed form below isn't
151 accepted by any binutils version so far released, complaining that 163 accepted by any binutils version so far released, complaining that
152 the same type of prefix is used twice if an extended register is 164 the same type of prefix is used twice if an extended register is
153 needed for addressing (fix submitted to mainline 2005-11-21). */ 165 needed for addressing (fix submitted to mainline 2005-11-21). */
154 __asm__ __volatile__("rex64/fxsave %0" 166 __asm__ __volatile__("rex64/fxsave %0"
155 : "=m" (tsk->thread.xstate->fxsave)); 167 : "=m" (fpu->state->fxsave));
156#else 168#else
157 /* This, however, we can work around by forcing the compiler to select 169 /* This, however, we can work around by forcing the compiler to select
158 an addressing mode that doesn't require extended registers. */ 170 an addressing mode that doesn't require extended registers. */
159 __asm__ __volatile__("rex64/fxsave (%1)" 171 __asm__ __volatile__("rex64/fxsave (%1)"
160 : "=m" (tsk->thread.xstate->fxsave) 172 : "=m" (fpu->state->fxsave)
161 : "cdaSDb" (&tsk->thread.xstate->fxsave)); 173 : "cdaSDb" (&fpu->state->fxsave));
162#endif 174#endif
163} 175}
164 176
165static inline void __save_init_fpu(struct task_struct *tsk) 177static inline void fpu_save_init(struct fpu *fpu)
166{ 178{
167 if (task_thread_info(tsk)->status & TS_XSAVE) 179 if (use_xsave())
168 xsave(tsk); 180 fpu_xsave(fpu);
169 else 181 else
170 fxsave(tsk); 182 fpu_fxsave(fpu);
183
184 fpu_clear(fpu);
185}
171 186
172 clear_fpu_state(tsk); 187static inline void __save_init_fpu(struct task_struct *tsk)
188{
189 fpu_save_init(&tsk->thread.fpu);
173 task_thread_info(tsk)->status &= ~TS_USEDFPU; 190 task_thread_info(tsk)->status &= ~TS_USEDFPU;
174} 191}
175 192
176#else /* CONFIG_X86_32 */ 193#else /* CONFIG_X86_32 */
177 194
178#ifdef CONFIG_MATH_EMULATION 195#ifdef CONFIG_MATH_EMULATION
179extern void finit_task(struct task_struct *tsk); 196extern void finit_soft_fpu(struct i387_soft_struct *soft);
180#else 197#else
181static inline void finit_task(struct task_struct *tsk) 198static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
182{
183}
184#endif 199#endif
185 200
186static inline void tolerant_fwait(void) 201static inline void tolerant_fwait(void)
@@ -216,13 +231,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 /*
  * These must be called with preempt disabled
  */
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE) {
-		struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	if (use_xsave()) {
+		struct xsave_struct *xstate = &fpu->state->xsave;
+		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
-		xsave(tsk);
+		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
@@ -246,8 +261,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
246 "fxsave %[fx]\n" 261 "fxsave %[fx]\n"
247 "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", 262 "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
248 X86_FEATURE_FXSR, 263 X86_FEATURE_FXSR,
249 [fx] "m" (tsk->thread.xstate->fxsave), 264 [fx] "m" (fpu->state->fxsave),
250 [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); 265 [fsw] "m" (fpu->state->fxsave.swd) : "memory");
251clear_state: 266clear_state:
252 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception 267 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
253 is pending. Clear the x87 state here by setting it to fixed 268 is pending. Clear the x87 state here by setting it to fixed
@@ -259,17 +274,34 @@ clear_state:
259 X86_FEATURE_FXSAVE_LEAK, 274 X86_FEATURE_FXSAVE_LEAK,
260 [addr] "m" (safe_address)); 275 [addr] "m" (safe_address));
261end: 276end:
277 ;
278}
279
280static inline void __save_init_fpu(struct task_struct *tsk)
281{
282 fpu_save_init(&tsk->thread.fpu);
262 task_thread_info(tsk)->status &= ~TS_USEDFPU; 283 task_thread_info(tsk)->status &= ~TS_USEDFPU;
263} 284}
264 285
286
265#endif /* CONFIG_X86_64 */ 287#endif /* CONFIG_X86_64 */
266 288
267static inline int restore_fpu_checking(struct task_struct *tsk) 289static inline int fpu_fxrstor_checking(struct fpu *fpu)
268{ 290{
269 if (task_thread_info(tsk)->status & TS_XSAVE) 291 return fxrstor_checking(&fpu->state->fxsave);
270 return xrstor_checking(&tsk->thread.xstate->xsave); 292}
293
294static inline int fpu_restore_checking(struct fpu *fpu)
295{
296 if (use_xsave())
297 return fpu_xrstor_checking(fpu);
271 else 298 else
272 return fxrstor_checking(&tsk->thread.xstate->fxsave); 299 return fpu_fxrstor_checking(fpu);
300}
301
302static inline int restore_fpu_checking(struct task_struct *tsk)
303{
304 return fpu_restore_checking(&tsk->thread.fpu);
273} 305}
274 306
275/* 307/*
@@ -397,30 +429,59 @@ static inline void clear_fpu(struct task_struct *tsk)
397static inline unsigned short get_fpu_cwd(struct task_struct *tsk) 429static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
398{ 430{
399 if (cpu_has_fxsr) { 431 if (cpu_has_fxsr) {
400 return tsk->thread.xstate->fxsave.cwd; 432 return tsk->thread.fpu.state->fxsave.cwd;
401 } else { 433 } else {
402 return (unsigned short)tsk->thread.xstate->fsave.cwd; 434 return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
403 } 435 }
404} 436}
405 437
406static inline unsigned short get_fpu_swd(struct task_struct *tsk) 438static inline unsigned short get_fpu_swd(struct task_struct *tsk)
407{ 439{
408 if (cpu_has_fxsr) { 440 if (cpu_has_fxsr) {
409 return tsk->thread.xstate->fxsave.swd; 441 return tsk->thread.fpu.state->fxsave.swd;
410 } else { 442 } else {
411 return (unsigned short)tsk->thread.xstate->fsave.swd; 443 return (unsigned short)tsk->thread.fpu.state->fsave.swd;
412 } 444 }
413} 445}
414 446
415static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) 447static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
416{ 448{
417 if (cpu_has_xmm) { 449 if (cpu_has_xmm) {
418 return tsk->thread.xstate->fxsave.mxcsr; 450 return tsk->thread.fpu.state->fxsave.mxcsr;
419 } else { 451 } else {
420 return MXCSR_DEFAULT; 452 return MXCSR_DEFAULT;
421 } 453 }
422} 454}
423 455
456static inline bool fpu_allocated(struct fpu *fpu)
457{
458 return fpu->state != NULL;
459}
460
461static inline int fpu_alloc(struct fpu *fpu)
462{
463 if (fpu_allocated(fpu))
464 return 0;
465 fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
466 if (!fpu->state)
467 return -ENOMEM;
468 WARN_ON((unsigned long)fpu->state & 15);
469 return 0;
470}
471
472static inline void fpu_free(struct fpu *fpu)
473{
474 if (fpu->state) {
475 kmem_cache_free(task_xstate_cachep, fpu->state);
476 fpu->state = NULL;
477 }
478}
479
480static inline void fpu_copy(struct fpu *dst, struct fpu *src)
481{
482 memcpy(dst->state, src->state, xstate_size);
483}
484
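
For illustration, the new fpu_* helpers compose over a task's lifetime roughly as in the sketch below. copy_task_fpu() is hypothetical (presumably the real callers are the task duplication/teardown paths reworked elsewhere in this series), and the snippet assumes the usual <linux/sched.h> context:

	/* Sketch of the fpu_* helper lifecycle; copy_task_fpu() is hypothetical. */
	static int copy_task_fpu(struct task_struct *dst, struct task_struct *src)
	{
		int err;

		err = fpu_alloc(&dst->thread.fpu);	/* lazily allocate state */
		if (err)
			return err;			/* -ENOMEM */
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
		return 0;
	}

	/* ...and on task teardown: fpu_free(&tsk->thread.fpu); */
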
424#endif /* __ASSEMBLY__ */ 485#endif /* __ASSEMBLY__ */
425 486
426#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5 487#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
index 1edbf89680fd..fc1f579fb965 100644
--- a/arch/x86/include/asm/i8253.h
+++ b/arch/x86/include/asm/i8253.h
@@ -6,7 +6,7 @@
6#define PIT_CH0 0x40 6#define PIT_CH0 0x40
7#define PIT_CH2 0x42 7#define PIT_CH2 0x42
8 8
9extern spinlock_t i8253_lock; 9extern raw_spinlock_t i8253_lock;
10 10
11extern struct clock_event_device *global_clock_event; 11extern struct clock_event_device *global_clock_event;
12 12
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 96c2e0ad04ca..88c765e16410 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -68,6 +68,8 @@ struct insn {
68 const insn_byte_t *next_byte; 68 const insn_byte_t *next_byte;
69}; 69};
70 70
71#define MAX_INSN_SIZE 16
72
71#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) 73#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
72#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) 74#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
73#define X86_MODRM_RM(modrm) ((modrm) & 0x07) 75#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 14cf526091f9..280bf7fb6aba 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -7,7 +7,66 @@
7 7
8#ifdef __ASSEMBLY__ 8#ifdef __ASSEMBLY__
9 9
10#define REG_NUM_INVALID 100
11
12#define REG_TYPE_R64 0
13#define REG_TYPE_XMM 1
14#define REG_TYPE_INVALID 100
15
16 .macro R64_NUM opd r64
17 \opd = REG_NUM_INVALID
18 .ifc \r64,%rax
19 \opd = 0
20 .endif
21 .ifc \r64,%rcx
22 \opd = 1
23 .endif
24 .ifc \r64,%rdx
25 \opd = 2
26 .endif
27 .ifc \r64,%rbx
28 \opd = 3
29 .endif
30 .ifc \r64,%rsp
31 \opd = 4
32 .endif
33 .ifc \r64,%rbp
34 \opd = 5
35 .endif
36 .ifc \r64,%rsi
37 \opd = 6
38 .endif
39 .ifc \r64,%rdi
40 \opd = 7
41 .endif
42 .ifc \r64,%r8
43 \opd = 8
44 .endif
45 .ifc \r64,%r9
46 \opd = 9
47 .endif
48 .ifc \r64,%r10
49 \opd = 10
50 .endif
51 .ifc \r64,%r11
52 \opd = 11
53 .endif
54 .ifc \r64,%r12
55 \opd = 12
56 .endif
57 .ifc \r64,%r13
58 \opd = 13
59 .endif
60 .ifc \r64,%r14
61 \opd = 14
62 .endif
63 .ifc \r64,%r15
64 \opd = 15
65 .endif
66 .endm
67
10 .macro XMM_NUM opd xmm 68 .macro XMM_NUM opd xmm
69 \opd = REG_NUM_INVALID
11 .ifc \xmm,%xmm0 70 .ifc \xmm,%xmm0
12 \opd = 0 71 \opd = 0
13 .endif 72 .endif
@@ -58,13 +117,25 @@
58 .endif 117 .endif
59 .endm 118 .endm
60 119
120 .macro REG_TYPE type reg
121 R64_NUM reg_type_r64 \reg
122 XMM_NUM reg_type_xmm \reg
123 .if reg_type_r64 <> REG_NUM_INVALID
124 \type = REG_TYPE_R64
125 .elseif reg_type_xmm <> REG_NUM_INVALID
126 \type = REG_TYPE_XMM
127 .else
128 \type = REG_TYPE_INVALID
129 .endif
130 .endm
131
61 .macro PFX_OPD_SIZE 132 .macro PFX_OPD_SIZE
62 .byte 0x66 133 .byte 0x66
63 .endm 134 .endm
64 135
65 .macro PFX_REX opd1 opd2 136 .macro PFX_REX opd1 opd2 W=0
66 .if (\opd1 | \opd2) & 8 137 .if ((\opd1 | \opd2) & 8) || \W
67 .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) 138 .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
68 .endif 139 .endif
69 .endm 140 .endm
70 141
@@ -145,6 +216,25 @@
145 .byte 0x0f, 0x38, 0xdf 216 .byte 0x0f, 0x38, 0xdf
146 MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2 217 MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
147 .endm 218 .endm
219
220 .macro MOVQ_R64_XMM opd1 opd2
221 REG_TYPE movq_r64_xmm_opd1_type \opd1
222 .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
223 XMM_NUM movq_r64_xmm_opd1 \opd1
224 R64_NUM movq_r64_xmm_opd2 \opd2
225 .else
226 R64_NUM movq_r64_xmm_opd1 \opd1
227 XMM_NUM movq_r64_xmm_opd2 \opd2
228 .endif
229 PFX_OPD_SIZE
230 PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
231 .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
232 .byte 0x0f, 0x7e
233 .else
234 .byte 0x0f, 0x6e
235 .endif
236 MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
237 .endm
148#endif 238#endif
149 239
150#endif 240#endif
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
new file mode 100644
index 000000000000..4470c9ad4a3e
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -0,0 +1,55 @@
1#ifndef _ASM_X86_INTEL_SCU_IPC_H_
2#define _ASM_X86_INTEL_SCU_IPC_H_
3
4/* Read single register */
5int intel_scu_ipc_ioread8(u16 addr, u8 *data);
6
7/* Read two sequential registers */
8int intel_scu_ipc_ioread16(u16 addr, u16 *data);
9
10/* Read four sequential registers */
11int intel_scu_ipc_ioread32(u16 addr, u32 *data);
12
13/* Read a vector */
14int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
15
16/* Write single register */
17int intel_scu_ipc_iowrite8(u16 addr, u8 data);
18
19/* Write two sequential registers */
20int intel_scu_ipc_iowrite16(u16 addr, u16 data);
21
22/* Write four sequential registers */
23int intel_scu_ipc_iowrite32(u16 addr, u32 data);
24
25/* Write a vector */
26int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
27
28/* Update single register based on the mask */
29int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
30
31/*
32 * Indirect register read
33 * Can be used when the SCCB (System Controller Configuration Block)
34 * register bit HRIM (Honor Restricted IPC Messages, bit 23) is set
35 */
36int intel_scu_ipc_register_read(u32 addr, u32 *data);
37
38/*
39 * Indirect register write
40 * Can be used when the SCCB (System Controller Configuration Block)
41 * register bit HRIM (Honor Restricted IPC Messages, bit 23) is set
42 */
43int intel_scu_ipc_register_write(u32 addr, u32 data);
44
45/* Issue commands to the SCU with or without data */
46int intel_scu_ipc_simple_command(int cmd, int sub);
47int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
48 u32 *out, int outlen);
49/* I2C control API */
50int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
51
52/* Update FW version */
53int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
54
55#endif
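
A hedged usage sketch of this API; the register address and mask values are invented for illustration, and the 0/-errno return convention is inferred from the prototypes:

	#include <linux/types.h>
	#include <asm/intel_scu_ipc.h>

	static int scu_set_bit0(void)
	{
		u8 val;
		int err;

		err = intel_scu_ipc_ioread8(0x20, &val);	/* 0x20: made-up address */
		if (err)
			return err;
		/* Read-modify-write done SCU-side: set bit 0, leave the rest alone. */
		return intel_scu_ipc_update_register(0x20, 0x01, 0x01);
	}
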
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index a1dcfa3ab17d..30a3e9776123 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -347,6 +347,7 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
347extern void __iomem *early_memremap(resource_size_t phys_addr, 347extern void __iomem *early_memremap(resource_size_t phys_addr,
348 unsigned long size); 348 unsigned long size);
349extern void early_iounmap(void __iomem *addr, unsigned long size); 349extern void early_iounmap(void __iomem *addr, unsigned long size);
350extern void fixup_early_ioremap(void);
350 351
351#define IO_SPACE_LIMIT 0xffff 352#define IO_SPACE_LIMIT 0xffff
352 353
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 35832a03a515..63cb4096c3dc 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -159,7 +159,6 @@ struct io_apic_irq_attr;
159extern int io_apic_set_pci_routing(struct device *dev, int irq, 159extern int io_apic_set_pci_routing(struct device *dev, int irq,
160 struct io_apic_irq_attr *irq_attr); 160 struct io_apic_irq_attr *irq_attr);
161void setup_IO_APIC_irq_extra(u32 gsi); 161void setup_IO_APIC_irq_extra(u32 gsi);
162extern int (*ioapic_renumber_irq)(int ioapic, int irq);
163extern void ioapic_init_mappings(void); 162extern void ioapic_init_mappings(void);
164extern void ioapic_insert_resources(void); 163extern void ioapic_insert_resources(void);
165 164
@@ -180,12 +179,13 @@ extern void ioapic_write_entry(int apic, int pin,
180extern void setup_ioapic_ids_from_mpc(void); 179extern void setup_ioapic_ids_from_mpc(void);
181 180
182struct mp_ioapic_gsi{ 181struct mp_ioapic_gsi{
183 int gsi_base; 182 u32 gsi_base;
184 int gsi_end; 183 u32 gsi_end;
185}; 184};
186extern struct mp_ioapic_gsi mp_gsi_routing[]; 185extern struct mp_ioapic_gsi mp_gsi_routing[];
187int mp_find_ioapic(int gsi); 186extern u32 gsi_end;
188int mp_find_ioapic_pin(int ioapic, int gsi); 187int mp_find_ioapic(u32 gsi);
188int mp_find_ioapic_pin(int ioapic, u32 gsi);
189void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); 189void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
190extern void __init pre_init_apic_IRQ0(void); 190extern void __init pre_init_apic_IRQ0(void);
191 191
@@ -197,7 +197,8 @@ static const int timer_through_8259 = 0;
197static inline void ioapic_init_mappings(void) { } 197static inline void ioapic_init_mappings(void) { }
198static inline void ioapic_insert_resources(void) { } 198static inline void ioapic_insert_resources(void) { }
199static inline void probe_nr_irqs_gsi(void) { } 199static inline void probe_nr_irqs_gsi(void) { }
200static inline int mp_find_ioapic(int gsi) { return 0; } 200#define gsi_end (NR_IRQS_LEGACY - 1)
201static inline int mp_find_ioapic(u32 gsi) { return 0; }
201 202
202struct io_apic_irq_attr; 203struct io_apic_irq_attr;
203static inline int io_apic_set_pci_routing(struct device *dev, int irq, 204static inline int io_apic_set_pci_routing(struct device *dev, int irq,
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
index f70e60071fe8..af00bd1d2089 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
16extern int k8_scan_nodes(void); 16extern int k8_scan_nodes(void);
17 17
18#ifdef CONFIG_K8_NB 18#ifdef CONFIG_K8_NB
19extern int num_k8_northbridges;
20
19static inline struct pci_dev *node_to_k8_nb_misc(int node) 21static inline struct pci_dev *node_to_k8_nb_misc(int node)
20{ 22{
21 return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; 23 return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
22} 24}
25
23#else 26#else
27#define num_k8_northbridges 0
28
24static inline struct pci_dev *node_to_k8_nb_misc(int node) 29static inline struct pci_dev *node_to_k8_nb_misc(int node)
25{ 30{
26 return NULL; 31 return NULL;
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index e6c6c808489f..006da3687cdc 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -76,4 +76,7 @@ static inline void arch_kgdb_breakpoint(void)
76#define BREAK_INSTR_SIZE 1 76#define BREAK_INSTR_SIZE 1
77#define CACHE_FLUSH_IS_SAFE 1 77#define CACHE_FLUSH_IS_SAFE 1
78 78
79extern int kgdb_ll_trap(int cmd, const char *str,
80 struct pt_regs *regs, long err, int trap, int sig);
81
79#endif /* _ASM_X86_KGDB_H */ 82#endif /* _ASM_X86_KGDB_H */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4ffa345a8ccb..547882539157 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -24,6 +24,7 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/ptrace.h> 25#include <linux/ptrace.h>
26#include <linux/percpu.h> 26#include <linux/percpu.h>
27#include <asm/insn.h>
27 28
28#define __ARCH_WANT_KPROBES_INSN_SLOT 29#define __ARCH_WANT_KPROBES_INSN_SLOT
29 30
@@ -36,7 +37,6 @@ typedef u8 kprobe_opcode_t;
36#define RELATIVEJUMP_SIZE 5 37#define RELATIVEJUMP_SIZE 5
37#define RELATIVECALL_OPCODE 0xe8 38#define RELATIVECALL_OPCODE 0xe8
38#define RELATIVE_ADDR_SIZE 4 39#define RELATIVE_ADDR_SIZE 4
39#define MAX_INSN_SIZE 16
40#define MAX_STACK_SIZE 64 40#define MAX_STACK_SIZE 64
41#define MIN_STACK_SIZE(ADDR) \ 41#define MIN_STACK_SIZE(ADDR) \
42 (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ 42 (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index f46b79f6c16c..ff90055c7f0b 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -21,6 +21,7 @@
21#define __KVM_HAVE_PIT_STATE2 21#define __KVM_HAVE_PIT_STATE2
22#define __KVM_HAVE_XEN_HVM 22#define __KVM_HAVE_XEN_HVM
23#define __KVM_HAVE_VCPU_EVENTS 23#define __KVM_HAVE_VCPU_EVENTS
24#define __KVM_HAVE_DEBUGREGS
24 25
25/* Architectural interrupt line count. */ 26/* Architectural interrupt line count. */
26#define KVM_NR_INTERRUPTS 256 27#define KVM_NR_INTERRUPTS 256
@@ -257,6 +258,11 @@ struct kvm_reinject_control {
257/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */ 258/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
258#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001 259#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
259#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 260#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
261#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
262
263/* Interrupt shadow states */
264#define KVM_X86_SHADOW_INT_MOV_SS 0x01
265#define KVM_X86_SHADOW_INT_STI 0x02
260 266
261/* for KVM_GET/SET_VCPU_EVENTS */ 267/* for KVM_GET/SET_VCPU_EVENTS */
262struct kvm_vcpu_events { 268struct kvm_vcpu_events {
@@ -271,7 +277,7 @@ struct kvm_vcpu_events {
271 __u8 injected; 277 __u8 injected;
272 __u8 nr; 278 __u8 nr;
273 __u8 soft; 279 __u8 soft;
274 __u8 pad; 280 __u8 shadow;
275 } interrupt; 281 } interrupt;
276 struct { 282 struct {
277 __u8 injected; 283 __u8 injected;
@@ -284,4 +290,13 @@ struct kvm_vcpu_events {
284 __u32 reserved[10]; 290 __u32 reserved[10];
285}; 291};
286 292
293/* for KVM_GET/SET_DEBUGREGS */
294struct kvm_debugregs {
295 __u64 db[4];
296 __u64 dr6;
297 __u64 dr7;
298 __u64 flags;
299 __u64 reserved[9];
300};
301
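
Userspace reaches this structure through the KVM_GET_DEBUGREGS/KVM_SET_DEBUGREGS vcpu ioctls that __KVM_HAVE_DEBUGREGS advertises; a minimal sketch, assuming vcpu_fd is an already-created vcpu descriptor:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int kvm_set_dr7(int vcpu_fd, __u64 dr7)
	{
		struct kvm_debugregs dbg;

		if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg) < 0)
			return -1;
		dbg.dr7 = dr7;		/* tweak only the control register */
		return ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
	}
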
287#endif /* _ASM_X86_KVM_H */ 302#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7a6f54fa13ba..0b2729bf2070 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -11,6 +11,8 @@
11#ifndef _ASM_X86_KVM_X86_EMULATE_H 11#ifndef _ASM_X86_KVM_X86_EMULATE_H
12#define _ASM_X86_KVM_X86_EMULATE_H 12#define _ASM_X86_KVM_X86_EMULATE_H
13 13
14#include <asm/desc_defs.h>
15
14struct x86_emulate_ctxt; 16struct x86_emulate_ctxt;
15 17
16/* 18/*
@@ -63,6 +65,15 @@ struct x86_emulate_ops {
63 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); 65 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
64 66
65 /* 67 /*
68 * write_std: Write bytes of standard (non-emulated/special) memory.
69 * Used for descriptor writing.
70 * @addr: [IN ] Linear address to which to write.
71 * @val: [IN ] Value to write to memory, zero-extended to 'u_long'.
72 * @bytes: [IN ] Number of bytes to write to memory.
73 */
74 int (*write_std)(unsigned long addr, void *val,
75 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
76 /*
66 * fetch: Read bytes of standard (non-emulated/special) memory. 77 * fetch: Read bytes of standard (non-emulated/special) memory.
67 * Used for instruction fetch. 78 * Used for instruction fetch.
68 * @addr: [IN ] Linear address from which to read. 79 * @addr: [IN ] Linear address from which to read.
@@ -109,6 +120,23 @@ struct x86_emulate_ops {
109 unsigned int bytes, 120 unsigned int bytes,
110 struct kvm_vcpu *vcpu); 121 struct kvm_vcpu *vcpu);
111 122
123 int (*pio_in_emulated)(int size, unsigned short port, void *val,
124 unsigned int count, struct kvm_vcpu *vcpu);
125
126 int (*pio_out_emulated)(int size, unsigned short port, const void *val,
127 unsigned int count, struct kvm_vcpu *vcpu);
128
129 bool (*get_cached_descriptor)(struct desc_struct *desc,
130 int seg, struct kvm_vcpu *vcpu);
131 void (*set_cached_descriptor)(struct desc_struct *desc,
132 int seg, struct kvm_vcpu *vcpu);
133 u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
134 void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
135 void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
136 ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
137 void (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
138 int (*cpl)(struct kvm_vcpu *vcpu);
139 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
112}; 140};
113 141
114/* Type, address-of, and value of an instruction's operand. */ 142/* Type, address-of, and value of an instruction's operand. */
@@ -124,6 +152,12 @@ struct fetch_cache {
124 unsigned long end; 152 unsigned long end;
125}; 153};
126 154
155struct read_cache {
156 u8 data[1024];
157 unsigned long pos;
158 unsigned long end;
159};
160
127struct decode_cache { 161struct decode_cache {
128 u8 twobyte; 162 u8 twobyte;
129 u8 b; 163 u8 b;
@@ -139,7 +173,7 @@ struct decode_cache {
139 u8 seg_override; 173 u8 seg_override;
140 unsigned int d; 174 unsigned int d;
141 unsigned long regs[NR_VCPU_REGS]; 175 unsigned long regs[NR_VCPU_REGS];
142 unsigned long eip, eip_orig; 176 unsigned long eip;
143 /* modrm */ 177 /* modrm */
144 u8 modrm; 178 u8 modrm;
145 u8 modrm_mod; 179 u8 modrm_mod;
@@ -151,16 +185,15 @@ struct decode_cache {
151 void *modrm_ptr; 185 void *modrm_ptr;
152 unsigned long modrm_val; 186 unsigned long modrm_val;
153 struct fetch_cache fetch; 187 struct fetch_cache fetch;
188 struct read_cache io_read;
154}; 189};
155 190
156#define X86_SHADOW_INT_MOV_SS 1
157#define X86_SHADOW_INT_STI 2
158
159struct x86_emulate_ctxt { 191struct x86_emulate_ctxt {
160 /* Register state before/after emulation. */ 192 /* Register state before/after emulation. */
161 struct kvm_vcpu *vcpu; 193 struct kvm_vcpu *vcpu;
162 194
163 unsigned long eflags; 195 unsigned long eflags;
196 unsigned long eip; /* eip before instruction emulation */
164 /* Emulated execution mode, represented by an X86EMUL_MODE value. */ 197 /* Emulated execution mode, represented by an X86EMUL_MODE value. */
165 int mode; 198 int mode;
166 u32 cs_base; 199 u32 cs_base;
@@ -168,6 +201,7 @@ struct x86_emulate_ctxt {
168 /* interruptibility state, as a result of execution of STI or MOV SS */ 201 /* interruptibility state, as a result of execution of STI or MOV SS */
169 int interruptibility; 202 int interruptibility;
170 203
204 bool restart; /* restart string instruction after writeback */
171 /* decode cache */ 205 /* decode cache */
172 struct decode_cache decode; 206 struct decode_cache decode;
173}; 207};
@@ -194,5 +228,9 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
194 struct x86_emulate_ops *ops); 228 struct x86_emulate_ops *ops);
195int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, 229int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
196 struct x86_emulate_ops *ops); 230 struct x86_emulate_ops *ops);
231int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
232 struct x86_emulate_ops *ops,
233 u16 tss_selector, int reason,
234 bool has_error_code, u32 error_code);
197 235
198#endif /* _ASM_X86_KVM_X86_EMULATE_H */ 236#endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79ca37d..76f5483cffec 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -171,15 +171,15 @@ struct kvm_pte_chain {
171union kvm_mmu_page_role { 171union kvm_mmu_page_role {
172 unsigned word; 172 unsigned word;
173 struct { 173 struct {
174 unsigned glevels:4;
175 unsigned level:4; 174 unsigned level:4;
175 unsigned cr4_pae:1;
176 unsigned quadrant:2; 176 unsigned quadrant:2;
177 unsigned pad_for_nice_hex_output:6; 177 unsigned pad_for_nice_hex_output:6;
178 unsigned direct:1; 178 unsigned direct:1;
179 unsigned access:3; 179 unsigned access:3;
180 unsigned invalid:1; 180 unsigned invalid:1;
181 unsigned cr4_pge:1;
182 unsigned nxe:1; 181 unsigned nxe:1;
182 unsigned cr0_wp:1;
183 }; 183 };
184}; 184};
185 185
@@ -187,8 +187,6 @@ struct kvm_mmu_page {
187 struct list_head link; 187 struct list_head link;
188 struct hlist_node hash_link; 188 struct hlist_node hash_link;
189 189
190 struct list_head oos_link;
191
192 /* 190 /*
193 * The following two entries are used to key the shadow page in the 191 * The following two entries are used to key the shadow page in the
194 * hash table. 192 * hash table.
@@ -204,9 +202,9 @@ struct kvm_mmu_page {
204 * in this shadow page. 202 * in this shadow page.
205 */ 203 */
206 DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); 204 DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
207 int multimapped; /* More than one parent_pte? */ 205 bool multimapped; /* More than one parent_pte? */
208 int root_count; /* Currently serving as active root */
209 bool unsync; 206 bool unsync;
207 int root_count; /* Currently serving as active root */
210 unsigned int unsync_children; 208 unsigned int unsync_children;
211 union { 209 union {
212 u64 *parent_pte; /* !multimapped */ 210 u64 *parent_pte; /* !multimapped */
@@ -224,14 +222,9 @@ struct kvm_pv_mmu_op_buffer {
224 222
225struct kvm_pio_request { 223struct kvm_pio_request {
226 unsigned long count; 224 unsigned long count;
227 int cur_count;
228 gva_t guest_gva;
229 int in; 225 int in;
230 int port; 226 int port;
231 int size; 227 int size;
232 int string;
233 int down;
234 int rep;
235}; 228};
236 229
237/* 230/*
@@ -320,6 +313,7 @@ struct kvm_vcpu_arch {
320 struct kvm_queued_exception { 313 struct kvm_queued_exception {
321 bool pending; 314 bool pending;
322 bool has_error_code; 315 bool has_error_code;
316 bool reinject;
323 u8 nr; 317 u8 nr;
324 u32 error_code; 318 u32 error_code;
325 } exception; 319 } exception;
@@ -362,8 +356,8 @@ struct kvm_vcpu_arch {
362 u64 *mce_banks; 356 u64 *mce_banks;
363 357
364 /* used for guest single stepping over the given code position */ 358 /* used for guest single stepping over the given code position */
365 u16 singlestep_cs;
366 unsigned long singlestep_rip; 359 unsigned long singlestep_rip;
360
367 /* fields used by HYPER-V emulation */ 361 /* fields used by HYPER-V emulation */
368 u64 hv_vapic; 362 u64 hv_vapic;
369}; 363};
@@ -389,6 +383,7 @@ struct kvm_arch {
389 unsigned int n_free_mmu_pages; 383 unsigned int n_free_mmu_pages;
390 unsigned int n_requested_mmu_pages; 384 unsigned int n_requested_mmu_pages;
391 unsigned int n_alloc_mmu_pages; 385 unsigned int n_alloc_mmu_pages;
386 atomic_t invlpg_counter;
392 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 387 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
393 /* 388 /*
394 * Hash table of struct kvm_mmu_page. 389 * Hash table of struct kvm_mmu_page.
@@ -461,11 +456,6 @@ struct kvm_vcpu_stat {
461 u32 nmi_injections; 456 u32 nmi_injections;
462}; 457};
463 458
464struct descriptor_table {
465 u16 limit;
466 unsigned long base;
467} __attribute__((packed));
468
469struct kvm_x86_ops { 459struct kvm_x86_ops {
470 int (*cpu_has_kvm_support)(void); /* __init */ 460 int (*cpu_has_kvm_support)(void); /* __init */
471 int (*disabled_by_bios)(void); /* __init */ 461 int (*disabled_by_bios)(void); /* __init */
@@ -503,12 +493,11 @@ struct kvm_x86_ops {
503 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 493 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
504 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); 494 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
505 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); 495 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
506 void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 496 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
507 void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 497 void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
508 void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 498 void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
509 void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 499 void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
510 int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest); 500 void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
511 int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
512 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); 501 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
513 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); 502 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
514 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); 503 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -527,7 +516,8 @@ struct kvm_x86_ops {
527 void (*set_irq)(struct kvm_vcpu *vcpu); 516 void (*set_irq)(struct kvm_vcpu *vcpu);
528 void (*set_nmi)(struct kvm_vcpu *vcpu); 517 void (*set_nmi)(struct kvm_vcpu *vcpu);
529 void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, 518 void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
530 bool has_error_code, u32 error_code); 519 bool has_error_code, u32 error_code,
520 bool reinject);
531 int (*interrupt_allowed)(struct kvm_vcpu *vcpu); 521 int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
532 int (*nmi_allowed)(struct kvm_vcpu *vcpu); 522 int (*nmi_allowed)(struct kvm_vcpu *vcpu);
533 bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); 523 bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -541,6 +531,8 @@ struct kvm_x86_ops {
541 int (*get_lpage_level)(void); 531 int (*get_lpage_level)(void);
542 bool (*rdtscp_supported)(void); 532 bool (*rdtscp_supported)(void);
543 533
534 void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
535
544 const struct trace_print_flags *exit_reasons_str; 536 const struct trace_print_flags *exit_reasons_str;
545}; 537};
546 538
@@ -587,23 +579,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
587void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); 579void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
588void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); 580void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
589void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); 581void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
590void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
591 unsigned long *rflags);
592 582
593unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
594void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
595 unsigned long *rflags);
596void kvm_enable_efer_bits(u64); 583void kvm_enable_efer_bits(u64);
597int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); 584int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
598int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); 585int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
599 586
600struct x86_emulate_ctxt; 587struct x86_emulate_ctxt;
601 588
602int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, 589int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
603 int size, unsigned port);
604int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
605 int size, unsigned long count, int down,
606 gva_t address, int rep, unsigned port);
607void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); 590void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
608int kvm_emulate_halt(struct kvm_vcpu *vcpu); 591int kvm_emulate_halt(struct kvm_vcpu *vcpu);
609int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); 592int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
@@ -616,12 +599,15 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
616void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); 599void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
617int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); 600int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
618 601
619int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); 602int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
603 bool has_error_code, u32 error_code);
620 604
621void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); 605void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
622void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); 606void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
623void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); 607void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
624void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); 608void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
609int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
610int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
625unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); 611unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
626void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); 612void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
627void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); 613void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -634,6 +620,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
634 620
635void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); 621void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
636void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); 622void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
623void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
624void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
637void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, 625void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
638 u32 error_code); 626 u32 error_code);
639bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); 627bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
@@ -649,8 +637,6 @@ int emulator_write_emulated(unsigned long addr,
649 unsigned int bytes, 637 unsigned int bytes,
650 struct kvm_vcpu *vcpu); 638 struct kvm_vcpu *vcpu);
651 639
652unsigned long segment_base(u16 selector);
653
654void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); 640void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
655void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 641void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
656 const u8 *new, int bytes, 642 const u8 *new, int bytes,
@@ -675,7 +661,6 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
675void kvm_enable_tdp(void); 661void kvm_enable_tdp(void);
676void kvm_disable_tdp(void); 662void kvm_disable_tdp(void);
677 663
678int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
679int complete_pio(struct kvm_vcpu *vcpu); 664int complete_pio(struct kvm_vcpu *vcpu);
680bool kvm_check_iopl(struct kvm_vcpu *vcpu); 665bool kvm_check_iopl(struct kvm_vcpu *vcpu);
681 666
@@ -724,23 +709,6 @@ static inline void kvm_load_ldt(u16 sel)
724 asm("lldt %0" : : "rm"(sel)); 709 asm("lldt %0" : : "rm"(sel));
725} 710}
726 711
727static inline void kvm_get_idt(struct descriptor_table *table)
728{
729 asm("sidt %0" : "=m"(*table));
730}
731
732static inline void kvm_get_gdt(struct descriptor_table *table)
733{
734 asm("sgdt %0" : "=m"(*table));
735}
736
737static inline unsigned long kvm_read_tr_base(void)
738{
739 u16 tr;
740 asm("str %0" : "=g"(tr));
741 return segment_base(tr);
742}
743
744#ifdef CONFIG_X86_64 712#ifdef CONFIG_X86_64
745static inline unsigned long read_msr(unsigned long msr) 713static inline unsigned long read_msr(unsigned long msr)
746{ 714{
@@ -826,4 +794,6 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
826void kvm_define_shared_msr(unsigned index, u32 msr); 794void kvm_define_shared_msr(unsigned index, u32 msr);
827void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); 795void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
828 796
797bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
798
829#endif /* _ASM_X86_KVM_HOST_H */ 799#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index ffae1420e7d7..05eba5e9a8e8 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -16,10 +16,23 @@
16#define KVM_FEATURE_CLOCKSOURCE 0 16#define KVM_FEATURE_CLOCKSOURCE 0
17#define KVM_FEATURE_NOP_IO_DELAY 1 17#define KVM_FEATURE_NOP_IO_DELAY 1
18#define KVM_FEATURE_MMU_OP 2 18#define KVM_FEATURE_MMU_OP 2
19/* This indicates that the new set of kvmclock MSRs
20 * are available. The use of 0x11 and 0x12 is deprecated.
21 */
22#define KVM_FEATURE_CLOCKSOURCE2 3
23
24/* The last 8 bits are used to indicate how to interpret the flags field
25 * in the pvclock structure. If no bits are set, all flags are ignored.
26 */
27#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
19 28
20#define MSR_KVM_WALL_CLOCK 0x11 29#define MSR_KVM_WALL_CLOCK 0x11
21#define MSR_KVM_SYSTEM_TIME 0x12 30#define MSR_KVM_SYSTEM_TIME 0x12
22 31
32/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
33#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
34#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
35
23#define KVM_MAX_MMU_OP_BATCH 32 36#define KVM_MAX_MMU_OP_BATCH 32
24 37
25/* Operations for KVM_HC_MMU_OP */ 38/* Operations for KVM_HC_MMU_OP */
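
On the guest side, registering kvmclock against the new MSR numbers looks roughly like the sketch below; the low bit of the written physical address enables the clock, and a real caller would first check the KVM_FEATURE_CLOCKSOURCE2 CPUID bit:

	#include <asm/msr.h>
	#include <asm/page.h>
	#include <asm/pvclock-abi.h>

	static struct pvclock_vcpu_time_info hv_clock;

	static void kvm_register_clock_sketch(void)
	{
		/* Hand the host our per-vcpu time page; bit 0 = enable. */
		wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, __pa(&hv_clock) | 1);
	}
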
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6c3fdd631ed3..f32a4301c4d4 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -225,5 +225,13 @@ extern void mcheck_intel_therm_init(void);
225static inline void mcheck_intel_therm_init(void) { } 225static inline void mcheck_intel_therm_init(void) { }
226#endif 226#endif
227 227
228/*
229 * Used by APEI to report memory errors via /dev/mcelog
230 */
231
232struct cper_sec_mem_err;
233extern void apei_mce_report_mem_error(int corrected,
234 struct cper_sec_mem_err *mem_err);
235
228#endif /* __KERNEL__ */ 236#endif /* __KERNEL__ */
229#endif /* _ASM_X86_MCE_H */ 237#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index d8bf23a88d05..c82868e9f905 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -105,16 +105,6 @@ extern void mp_config_acpi_legacy_irqs(void);
105struct device; 105struct device;
106extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level, 106extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
107 int active_high_low); 107 int active_high_low);
108extern int acpi_probe_gsi(void);
109#ifdef CONFIG_X86_IO_APIC
110extern int mp_find_ioapic(int gsi);
111extern int mp_find_ioapic_pin(int ioapic, int gsi);
112#endif
113#else /* !CONFIG_ACPI: */
114static inline int acpi_probe_gsi(void)
115{
116 return 0;
117}
118#endif /* CONFIG_ACPI */ 108#endif /* CONFIG_ACPI */
119 109
120#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) 110#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
new file mode 100644
index 000000000000..79ce5685ab64
--- /dev/null
+++ b/arch/x86/include/asm/mshyperv.h
@@ -0,0 +1,14 @@
1#ifndef _ASM_X86_MSHYPER_H
2#define _ASM_X86_MSHYPER_H
3
4#include <linux/types.h>
5#include <asm/hyperv.h>
6
7struct ms_hyperv_info {
8 u32 features;
9 u32 hints;
10};
11
12extern struct ms_hyperv_info ms_hyperv;
13
14#endif
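
ms_hyperv is presumably filled from the Hyper-V CPUID leaves during vendor detection; a consumer would test feature bits roughly as below. The HV_X64_MSR_TIME_REF_COUNT_AVAILABLE flag name is an assumption, taken to come from asm/hyperv.h:

	#include <asm/mshyperv.h>

	static int hv_has_ref_counter(void)
	{
		/* Assumed feature-bit name from asm/hyperv.h. */
		return !!(ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE);
	}
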
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4604e6a54d36..b49d8ca228f6 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -71,11 +71,14 @@
71#define MSR_IA32_LASTINTTOIP 0x000001de 71#define MSR_IA32_LASTINTTOIP 0x000001de
72 72
73/* DEBUGCTLMSR bits (others vary by model): */ 73/* DEBUGCTLMSR bits (others vary by model): */
74#define _DEBUGCTLMSR_LBR 0 /* last branch recording */ 74#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
75#define _DEBUGCTLMSR_BTF 1 /* single-step on branches */ 75#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
76 76#define DEBUGCTLMSR_TR (1UL << 6)
77#define DEBUGCTLMSR_LBR (1UL << _DEBUGCTLMSR_LBR) 77#define DEBUGCTLMSR_BTS (1UL << 7)
78#define DEBUGCTLMSR_BTF (1UL << _DEBUGCTLMSR_BTF) 78#define DEBUGCTLMSR_BTINT (1UL << 8)
79#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
80#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
81#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
79 82
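
For illustration, turning on branch trace store with the new bit names could look like this sketch; rdmsrl() and update_debugctlmsr() are existing kernel helpers, and enabling BTS also needs a DS buffer configured, which is out of scope here:

	#include <asm/msr.h>
	#include <asm/processor.h>

	static void bts_enable_sketch(void)
	{
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		/* Trace taken branches to the BTS buffer, interrupt on threshold. */
		debugctl |= DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT;
		update_debugctlmsr(debugctl);
	}
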
80#define MSR_IA32_MC0_CTL 0x00000400 83#define MSR_IA32_MC0_CTL 0x00000400
81#define MSR_IA32_MC0_STATUS 0x00000401 84#define MSR_IA32_MC0_STATUS 0x00000401
@@ -199,8 +202,9 @@
199#define MSR_IA32_EBL_CR_POWERON 0x0000002a 202#define MSR_IA32_EBL_CR_POWERON 0x0000002a
200#define MSR_IA32_FEATURE_CONTROL 0x0000003a 203#define MSR_IA32_FEATURE_CONTROL 0x0000003a
201 204
202#define FEATURE_CONTROL_LOCKED (1<<0) 205#define FEATURE_CONTROL_LOCKED (1<<0)
203#define FEATURE_CONTROL_VMXON_ENABLED (1<<2) 206#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
207#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
204 208
205#define MSR_IA32_APICBASE 0x0000001b 209#define MSR_IA32_APICBASE 0x0000001b
206#define MSR_IA32_APICBASE_BSP (1<<8) 210#define MSR_IA32_APICBASE_BSP (1<<8)
@@ -232,6 +236,8 @@
232 236
233#define MSR_IA32_MISC_ENABLE 0x000001a0 237#define MSR_IA32_MISC_ENABLE 0x000001a0
234 238
239#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
240
235/* MISC_ENABLE bits: architectural */ 241/* MISC_ENABLE bits: architectural */
236#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0) 242#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
237#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1) 243#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1)
@@ -359,6 +365,8 @@
359#define MSR_P4_U2L_ESCR0 0x000003b0 365#define MSR_P4_U2L_ESCR0 0x000003b0
360#define MSR_P4_U2L_ESCR1 0x000003b1 366#define MSR_P4_U2L_ESCR1 0x000003b1
361 367
368#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
369
362/* Intel Core-based CPU performance counters */ 370/* Intel Core-based CPU performance counters */
363#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 371#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
364#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a 372#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 1a0422348d6d..8d8797eae5d7 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -83,7 +83,7 @@ struct irq_routing_table {
83 83
84extern unsigned int pcibios_irq_mask; 84extern unsigned int pcibios_irq_mask;
85 85
86extern spinlock_t pci_config_lock; 86extern raw_spinlock_t pci_config_lock;
87 87
88extern int (*pcibios_enable_irq)(struct pci_dev *dev); 88extern int (*pcibios_enable_irq)(struct pci_dev *dev);
89extern void (*pcibios_disable_irq)(struct pci_dev *dev); 89extern void (*pcibios_disable_irq)(struct pci_dev *dev);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 66a272dfd8b8..0797e748d280 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -105,7 +105,7 @@ do { \
105 105
106/* 106/*
107 * Generate a percpu add to memory instruction and optimize code 107 * Generate a percpu add to memory instruction and optimize code
108 * if a one is added or subtracted. 108 * if one is added or subtracted.
109 */ 109 */
110#define percpu_add_op(var, val) \ 110#define percpu_add_op(var, val) \
111do { \ 111do { \
@@ -190,6 +190,29 @@ do { \
190 pfo_ret__; \ 190 pfo_ret__; \
191}) 191})
192 192
193#define percpu_unary_op(op, var) \
194({ \
195 switch (sizeof(var)) { \
196 case 1: \
197 asm(op "b "__percpu_arg(0) \
198 : "+m" (var)); \
199 break; \
200 case 2: \
201 asm(op "w "__percpu_arg(0) \
202 : "+m" (var)); \
203 break; \
204 case 4: \
205 asm(op "l "__percpu_arg(0) \
206 : "+m" (var)); \
207 break; \
208 case 8: \
209 asm(op "q "__percpu_arg(0) \
210 : "+m" (var)); \
211 break; \
212 default: __bad_percpu_size(); \
213 } \
214})
215
193/* 216/*
194 * percpu_read() makes gcc load the percpu variable every time it is 217 * percpu_read() makes gcc load the percpu variable every time it is
195 * accessed while percpu_read_stable() allows the value to be cached. 218 * accessed while percpu_read_stable() allows the value to be cached.
@@ -207,6 +230,7 @@ do { \
207#define percpu_and(var, val) percpu_to_op("and", var, val) 230#define percpu_and(var, val) percpu_to_op("and", var, val)
208#define percpu_or(var, val) percpu_to_op("or", var, val) 231#define percpu_or(var, val) percpu_to_op("or", var, val)
209#define percpu_xor(var, val) percpu_to_op("xor", var, val) 232#define percpu_xor(var, val) percpu_to_op("xor", var, val)
233#define percpu_inc(var) percpu_unary_op("inc", var)
210 234
211#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 235#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
212#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 236#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
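
percpu_inc() compiles down to a single incb/incw/incl/incq against the per-cpu address instead of a C-level load-modify-store; a usage sketch with a made-up counter:

	#include <linux/percpu.h>

	DEFINE_PER_CPU(unsigned int, demo_event_count);	/* illustrative variable */

	static void demo_count_event(void)
	{
		percpu_inc(demo_event_count);	/* one instruction, no RMW in C */
	}
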
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index db6109a885a7..254883d0c7e0 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,7 +5,7 @@
5 * Performance event hw details: 5 * Performance event hw details:
6 */ 6 */
7 7
8#define X86_PMC_MAX_GENERIC 8 8#define X86_PMC_MAX_GENERIC 32
9#define X86_PMC_MAX_FIXED 3 9#define X86_PMC_MAX_FIXED 3
10 10
11#define X86_PMC_IDX_GENERIC 0 11#define X86_PMC_IDX_GENERIC 0
@@ -18,39 +18,31 @@
18#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 18#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
19#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 19#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
20 20
21#define ARCH_PERFMON_EVENTSEL_ENABLE (1 << 22) 21#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
22#define ARCH_PERFMON_EVENTSEL_ANY (1 << 21) 22#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
23#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) 23#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
24#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) 24#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
25#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) 25#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
26 26#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
27/* 27#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
28 * Includes eventsel and unit mask as well: 28#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
29 */ 29#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
30 30#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
31 31
32#define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL 32#define AMD64_EVENTSEL_EVENT \
33#define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL 33 (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
34#define INTEL_ARCH_EDGE_MASK 0x00040000ULL 34#define INTEL_ARCH_EVENT_MASK \
35#define INTEL_ARCH_INV_MASK 0x00800000ULL 35 (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
36#define INTEL_ARCH_CNT_MASK 0xFF000000ULL 36
37#define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK) 37#define X86_RAW_EVENT_MASK \
38 38 (ARCH_PERFMON_EVENTSEL_EVENT | \
39/* 39 ARCH_PERFMON_EVENTSEL_UMASK | \
40 * filter mask to validate fixed counter events. 40 ARCH_PERFMON_EVENTSEL_EDGE | \
41 * the following filters disqualify for fixed counters: 41 ARCH_PERFMON_EVENTSEL_INV | \
42 * - inv 42 ARCH_PERFMON_EVENTSEL_CMASK)
43 * - edge 43#define AMD64_RAW_EVENT_MASK \
44 * - cnt-mask 44 (X86_RAW_EVENT_MASK | \
45 * The other filters are supported by fixed counters. 45 AMD64_EVENTSEL_EVENT)
46 * The any-thread option is supported starting with v3.
47 */
48#define INTEL_ARCH_FIXED_MASK \
49 (INTEL_ARCH_CNT_MASK| \
50 INTEL_ARCH_INV_MASK| \
51 INTEL_ARCH_EDGE_MASK|\
52 INTEL_ARCH_UNIT_MASK|\
53 INTEL_ARCH_EVENT_MASK)
54 46
55#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c 47#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
56#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) 48#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
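
As a worked example, the architectural unhalted-core-cycles event counted in both rings stays entirely within X86_RAW_EVENT_MASK plus the control bits (a sketch using only the constants above):

	#include <linux/types.h>

	/* Sketch: event 0x3c, umask 0x00, user+kernel, counter enabled. */
	static const u64 cycles_config =
		ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |
		ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |
		ARCH_PERFMON_EVENTSEL_USR |
		ARCH_PERFMON_EVENTSEL_OS |
		ARCH_PERFMON_EVENTSEL_ENABLE;
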
@@ -67,7 +59,7 @@
67union cpuid10_eax { 59union cpuid10_eax {
68 struct { 60 struct {
69 unsigned int version_id:8; 61 unsigned int version_id:8;
70 unsigned int num_events:8; 62 unsigned int num_counters:8;
71 unsigned int bit_width:8; 63 unsigned int bit_width:8;
72 unsigned int mask_length:8; 64 unsigned int mask_length:8;
73 } split; 65 } split;
@@ -76,7 +68,7 @@ union cpuid10_eax {
76 68
77union cpuid10_edx { 69union cpuid10_edx {
78 struct { 70 struct {
79 unsigned int num_events_fixed:4; 71 unsigned int num_counters_fixed:4;
80 unsigned int reserved:28; 72 unsigned int reserved:28;
81 } split; 73 } split;
82 unsigned int full; 74 unsigned int full;
@@ -136,6 +128,18 @@ extern void perf_events_lapic_init(void);
136 128
137#define PERF_EVENT_INDEX_OFFSET 0 129#define PERF_EVENT_INDEX_OFFSET 0
138 130
131/*
132 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
133 * This flag is otherwise unused and ABI specified to be 0, so nobody should
134 * care what we do with it.
135 */
136#define PERF_EFLAGS_EXACT (1UL << 3)
137
138struct pt_regs;
139extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
140extern unsigned long perf_misc_flags(struct pt_regs *regs);
141#define perf_misc_flags(regs) perf_misc_flags(regs)
142
139#else 143#else
140static inline void init_hw_perf_events(void) { } 144static inline void init_hw_perf_events(void) { }
141static inline void perf_events_lapic_init(void) { } 145static inline void perf_events_lapic_init(void) { }
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
new file mode 100644
index 000000000000..64a8ebff06fc
--- /dev/null
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -0,0 +1,795 @@
1/*
2 * Netburst Performance Events (P4, old Xeon)
3 */
4
5#ifndef PERF_EVENT_P4_H
6#define PERF_EVENT_P4_H
7
8#include <linux/cpu.h>
9#include <linux/bitops.h>
10
11/*
12 * NetBurst has performance MSRs shared between
13 * threads if HT is turned on, i.e. for both logical
14 * processors (note: on Atom with HT support, by
15 * contrast, perf-MSRs are not shared and every
16 * thread has its own set of perf-MSRs)
17 */
18#define ARCH_P4_TOTAL_ESCR (46)
19#define ARCH_P4_RESERVED_ESCR (2) /* IQ_ESCR(0,1) not always present */
20#define ARCH_P4_MAX_ESCR (ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
21#define ARCH_P4_MAX_CCCR (18)
22#define ARCH_P4_MAX_COUNTER (ARCH_P4_MAX_CCCR / 2)
23
24#define P4_ESCR_EVENT_MASK 0x7e000000U
25#define P4_ESCR_EVENT_SHIFT 25
26#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
27#define P4_ESCR_EVENTMASK_SHIFT 9
28#define P4_ESCR_TAG_MASK 0x000001e0U
29#define P4_ESCR_TAG_SHIFT 5
30#define P4_ESCR_TAG_ENABLE 0x00000010U
31#define P4_ESCR_T0_OS 0x00000008U
32#define P4_ESCR_T0_USR 0x00000004U
33#define P4_ESCR_T1_OS 0x00000002U
34#define P4_ESCR_T1_USR 0x00000001U
35
36#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
37#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
38#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
39
40/* Non-HT mask */
41#define P4_ESCR_MASK \
42 (P4_ESCR_EVENT_MASK | \
43 P4_ESCR_EVENTMASK_MASK | \
44 P4_ESCR_TAG_MASK | \
45 P4_ESCR_TAG_ENABLE | \
46 P4_ESCR_T0_OS | \
47 P4_ESCR_T0_USR)
48
49/* HT mask */
50#define P4_ESCR_MASK_HT \
51 (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
52
53#define P4_CCCR_OVF 0x80000000U
54#define P4_CCCR_CASCADE 0x40000000U
55#define P4_CCCR_OVF_PMI_T0 0x04000000U
56#define P4_CCCR_OVF_PMI_T1 0x08000000U
57#define P4_CCCR_FORCE_OVF 0x02000000U
58#define P4_CCCR_EDGE 0x01000000U
59#define P4_CCCR_THRESHOLD_MASK 0x00f00000U
60#define P4_CCCR_THRESHOLD_SHIFT 20
61#define P4_CCCR_COMPLEMENT 0x00080000U
62#define P4_CCCR_COMPARE 0x00040000U
63#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U
64#define P4_CCCR_ESCR_SELECT_SHIFT 13
65#define P4_CCCR_ENABLE 0x00001000U
66#define P4_CCCR_THREAD_SINGLE 0x00010000U
67#define P4_CCCR_THREAD_BOTH 0x00020000U
68#define P4_CCCR_THREAD_ANY 0x00030000U
69#define P4_CCCR_RESERVED 0x00000fffU
70
71#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
72#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
73
74/* Custom bits in reserved CCCR area */
75#define P4_CCCR_CACHE_OPS_MASK 0x0000003fU
76
77
78/* Non-HT mask */
79#define P4_CCCR_MASK \
80 (P4_CCCR_OVF | \
81 P4_CCCR_CASCADE | \
82 P4_CCCR_OVF_PMI_T0 | \
83 P4_CCCR_FORCE_OVF | \
84 P4_CCCR_EDGE | \
85 P4_CCCR_THRESHOLD_MASK | \
86 P4_CCCR_COMPLEMENT | \
87 P4_CCCR_COMPARE | \
88 P4_CCCR_ESCR_SELECT_MASK | \
89 P4_CCCR_ENABLE)
90
91/* HT mask */
92#define P4_CCCR_MASK_HT \
93 (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
94
95#define P4_GEN_ESCR_EMASK(class, name, bit) \
96 class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
97#define P4_ESCR_EMASK_BIT(class, name) class##__##name
98
99/*
100 * the config field is 64 bits wide and consists of
101 * HT << 63 | ESCR << 32 | CCCR
102 * where HT is the HyperThreading bit (since ESCR
103 * has it reserved we may use it for our own purpose)
104 *
105 * note that this is NOT the addresses of the respective
106 * ESCR and CCCR but a packed value, which should
107 * be unpacked and written to the proper addresses
108 *
109 * the basic idea is to pack as much info as
110 * possible
111 */
112#define p4_config_pack_escr(v) (((u64)(v)) << 32)
113#define p4_config_pack_cccr(v) (((u64)(v)) & 0xffffffffULL)
114#define p4_config_unpack_escr(v) (((u64)(v)) >> 32)
115#define p4_config_unpack_cccr(v) (((u64)(v)) & 0xffffffffULL)
116
117#define p4_config_unpack_emask(v) \
118 ({ \
119 u32 t = p4_config_unpack_escr((v)); \
120 t = t & P4_ESCR_EVENTMASK_MASK; \
121 t = t >> P4_ESCR_EVENTMASK_SHIFT; \
122 t; \
123 })
124
125#define p4_config_unpack_event(v) \
126 ({ \
127 u32 t = p4_config_unpack_escr((v)); \
128 t = t & P4_ESCR_EVENT_MASK; \
129 t = t >> P4_ESCR_EVENT_SHIFT; \
130 t; \
131 })
132
133#define p4_config_unpack_cache_event(v) (((u64)(v)) & P4_CCCR_CACHE_OPS_MASK)
134
135#define P4_CONFIG_HT_SHIFT 63
136#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
137
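
A round-trip through the pack/unpack macros, with arbitrary illustrative values (a sketch, not part of the header):

	/* Sketch: pack an ESCR/CCCR pair, then recover the event mask. */
	static u32 p4_pack_unpack_demo(void)
	{
		u64 config = p4_config_pack_escr(P4_ESCR_EVENT(0x3) |
						 P4_ESCR_EMASK(0x1)) |
			     p4_config_pack_cccr(P4_CCCR_ENABLE |
						 P4_CCCR_ESEL(4));

		return p4_config_unpack_emask(config);	/* yields 0x1 */
	}
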
138static inline bool p4_is_event_cascaded(u64 config)
139{
140 u32 cccr = p4_config_unpack_cccr(config);
141 return !!(cccr & P4_CCCR_CASCADE);
142}
143
144static inline int p4_ht_config_thread(u64 config)
145{
146 return !!(config & P4_CONFIG_HT);
147}
148
149static inline u64 p4_set_ht_bit(u64 config)
150{
151 return config | P4_CONFIG_HT;
152}
153
154static inline u64 p4_clear_ht_bit(u64 config)
155{
156 return config & ~P4_CONFIG_HT;
157}
158
159static inline int p4_ht_active(void)
160{
161#ifdef CONFIG_SMP
162 return smp_num_siblings > 1;
163#endif
164 return 0;
165}
166
167static inline int p4_ht_thread(int cpu)
168{
169#ifdef CONFIG_SMP
170 if (smp_num_siblings == 2)
171 return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
172#endif
173 return 0;
174}
175
176static inline int p4_should_swap_ts(u64 config, int cpu)
177{
178 return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
179}
180
181static inline u32 p4_default_cccr_conf(int cpu)
182{
183 /*
184 * Note that P4_CCCR_THREAD_ANY is "required" on
185 * non-HT machines (on HT machines we count TS events
186 * regardless of the state of the second logical processor)
187 */
188 u32 cccr = P4_CCCR_THREAD_ANY;
189
190 if (!p4_ht_thread(cpu))
191 cccr |= P4_CCCR_OVF_PMI_T0;
192 else
193 cccr |= P4_CCCR_OVF_PMI_T1;
194
195 return cccr;
196}
197
198static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
199{
200 u32 escr = 0;
201
202 if (!p4_ht_thread(cpu)) {
203 if (!exclude_os)
204 escr |= P4_ESCR_T0_OS;
205 if (!exclude_usr)
206 escr |= P4_ESCR_T0_USR;
207 } else {
208 if (!exclude_os)
209 escr |= P4_ESCR_T1_OS;
210 if (!exclude_usr)
211 escr |= P4_ESCR_T1_USR;
212 }
213
214 return escr;
215}
216
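
Putting the helpers together, a complete per-cpu packed config might be assembled as in this sketch (counting both OS and USR, i.e. no exclusions):

	/* Sketch: default ESCR+CCCR for one cpu, packed into one u64. */
	static u64 p4_example_config(int cpu)
	{
		u32 escr = p4_default_escr_conf(cpu, 0, 0);
		u32 cccr = p4_default_cccr_conf(cpu);

		return p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);
	}
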
217enum P4_EVENTS {
218 P4_EVENT_TC_DELIVER_MODE,
219 P4_EVENT_BPU_FETCH_REQUEST,
220 P4_EVENT_ITLB_REFERENCE,
221 P4_EVENT_MEMORY_CANCEL,
222 P4_EVENT_MEMORY_COMPLETE,
223 P4_EVENT_LOAD_PORT_REPLAY,
224 P4_EVENT_STORE_PORT_REPLAY,
225 P4_EVENT_MOB_LOAD_REPLAY,
226 P4_EVENT_PAGE_WALK_TYPE,
227 P4_EVENT_BSQ_CACHE_REFERENCE,
228 P4_EVENT_IOQ_ALLOCATION,
229 P4_EVENT_IOQ_ACTIVE_ENTRIES,
230 P4_EVENT_FSB_DATA_ACTIVITY,
231 P4_EVENT_BSQ_ALLOCATION,
232 P4_EVENT_BSQ_ACTIVE_ENTRIES,
233 P4_EVENT_SSE_INPUT_ASSIST,
234 P4_EVENT_PACKED_SP_UOP,
235 P4_EVENT_PACKED_DP_UOP,
236 P4_EVENT_SCALAR_SP_UOP,
237 P4_EVENT_SCALAR_DP_UOP,
238 P4_EVENT_64BIT_MMX_UOP,
239 P4_EVENT_128BIT_MMX_UOP,
240 P4_EVENT_X87_FP_UOP,
241 P4_EVENT_TC_MISC,
242 P4_EVENT_GLOBAL_POWER_EVENTS,
243 P4_EVENT_TC_MS_XFER,
244 P4_EVENT_UOP_QUEUE_WRITES,
245 P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE,
246 P4_EVENT_RETIRED_BRANCH_TYPE,
247 P4_EVENT_RESOURCE_STALL,
248 P4_EVENT_WC_BUFFER,
249 P4_EVENT_B2B_CYCLES,
250 P4_EVENT_BNR,
251 P4_EVENT_SNOOP,
252 P4_EVENT_RESPONSE,
253 P4_EVENT_FRONT_END_EVENT,
254 P4_EVENT_EXECUTION_EVENT,
255 P4_EVENT_REPLAY_EVENT,
256 P4_EVENT_INSTR_RETIRED,
257 P4_EVENT_UOPS_RETIRED,
258 P4_EVENT_UOP_TYPE,
259 P4_EVENT_BRANCH_RETIRED,
260 P4_EVENT_MISPRED_BRANCH_RETIRED,
261 P4_EVENT_X87_ASSIST,
262 P4_EVENT_MACHINE_CLEAR,
263 P4_EVENT_INSTR_COMPLETED,
264};
265
266#define P4_OPCODE(event) event##_OPCODE
267#define P4_OPCODE_ESEL(opcode) ((opcode & 0x00ff) >> 0)
268#define P4_OPCODE_EVNT(opcode) ((opcode & 0xff00) >> 8)
269#define P4_OPCODE_PACK(event, sel) (((event) << 8) | sel)
270
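Worked example: for BSQ_CACHE_REFERENCE below, P4_OPCODE_PACK(0x0c, 0x07)
yields 0x0c07, from which P4_OPCODE_EVNT() recovers the event code 0x0c and
P4_OPCODE_ESEL() the ESCR select 0x07. A compile-time round-trip check, as a
sketch (BUILD_BUG_ON() is the usual kernel assert; the function is
hypothetical):

	static inline void p4_opcode_pack_selftest(void)
	{
		BUILD_BUG_ON(P4_OPCODE_PACK(0x0c, 0x07) != 0x0c07);
		BUILD_BUG_ON(P4_OPCODE_EVNT(0x0c07) != 0x0c);
		BUILD_BUG_ON(P4_OPCODE_ESEL(0x0c07) != 0x07);
	}
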
271/*
272 * Comments below each event list the ESCR restriction
273 * for that event and the counter indices per ESCR
274 *
275 * MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
276 * processor builds (family 0FH, models 01H-02H). These MSRs
277 * are not available on later versions, so we don't use
278 * them at all
279 *
280 * Also note that CCCR1 does not have the P4_CCCR_ENABLE bit
281 * working properly, so we should not use this CCCR and its
282 * respective counter as a result
283 */
284enum P4_EVENT_OPCODES {
285 P4_OPCODE(P4_EVENT_TC_DELIVER_MODE) = P4_OPCODE_PACK(0x01, 0x01),
286 /*
287 * MSR_P4_TC_ESCR0: 4, 5
288 * MSR_P4_TC_ESCR1: 6, 7
289 */
290
291 P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST) = P4_OPCODE_PACK(0x03, 0x00),
292 /*
293 * MSR_P4_BPU_ESCR0: 0, 1
294 * MSR_P4_BPU_ESCR1: 2, 3
295 */
296
297 P4_OPCODE(P4_EVENT_ITLB_REFERENCE) = P4_OPCODE_PACK(0x18, 0x03),
298 /*
299 * MSR_P4_ITLB_ESCR0: 0, 1
300 * MSR_P4_ITLB_ESCR1: 2, 3
301 */
302
303 P4_OPCODE(P4_EVENT_MEMORY_CANCEL) = P4_OPCODE_PACK(0x02, 0x05),
304 /*
305 * MSR_P4_DAC_ESCR0: 8, 9
306 * MSR_P4_DAC_ESCR1: 10, 11
307 */
308
309 P4_OPCODE(P4_EVENT_MEMORY_COMPLETE) = P4_OPCODE_PACK(0x08, 0x02),
310 /*
311 * MSR_P4_SAAT_ESCR0: 8, 9
312 * MSR_P4_SAAT_ESCR1: 10, 11
313 */
314
315 P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY) = P4_OPCODE_PACK(0x04, 0x02),
316 /*
317 * MSR_P4_SAAT_ESCR0: 8, 9
318 * MSR_P4_SAAT_ESCR1: 10, 11
319 */
320
321 P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY) = P4_OPCODE_PACK(0x05, 0x02),
322 /*
323 * MSR_P4_SAAT_ESCR0: 8, 9
324 * MSR_P4_SAAT_ESCR1: 10, 11
325 */
326
327 P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY) = P4_OPCODE_PACK(0x03, 0x02),
328 /*
329 * MSR_P4_MOB_ESCR0: 0, 1
330 * MSR_P4_MOB_ESCR1: 2, 3
331 */
332
333 P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE) = P4_OPCODE_PACK(0x01, 0x04),
334 /*
335 * MSR_P4_PMH_ESCR0: 0, 1
336 * MSR_P4_PMH_ESCR1: 2, 3
337 */
338
339 P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE) = P4_OPCODE_PACK(0x0c, 0x07),
340 /*
341 * MSR_P4_BSU_ESCR0: 0, 1
342 * MSR_P4_BSU_ESCR1: 2, 3
343 */
344
345 P4_OPCODE(P4_EVENT_IOQ_ALLOCATION) = P4_OPCODE_PACK(0x03, 0x06),
346 /*
347 * MSR_P4_FSB_ESCR0: 0, 1
348 * MSR_P4_FSB_ESCR1: 2, 3
349 */
350
351 P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x1a, 0x06),
352 /*
353 * MSR_P4_FSB_ESCR1: 2, 3
354 */
355
356 P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY) = P4_OPCODE_PACK(0x17, 0x06),
357 /*
358 * MSR_P4_FSB_ESCR0: 0, 1
359 * MSR_P4_FSB_ESCR1: 2, 3
360 */
361
362 P4_OPCODE(P4_EVENT_BSQ_ALLOCATION) = P4_OPCODE_PACK(0x05, 0x07),
363 /*
364 * MSR_P4_BSU_ESCR0: 0, 1
365 */
366
367 P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x06, 0x07),
368 /*
369 * NOTE: no ESCR name in docs, it's guessed
370 * MSR_P4_BSU_ESCR1: 2, 3
371 */
372
373 P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST) = P4_OPCODE_PACK(0x34, 0x01),
374 /*
375 * MSR_P4_FIRM_ESCR0: 8, 9
376 * MSR_P4_FIRM_ESCR1: 10, 11
377 */
378
379 P4_OPCODE(P4_EVENT_PACKED_SP_UOP) = P4_OPCODE_PACK(0x08, 0x01),
380 /*
381 * MSR_P4_FIRM_ESCR0: 8, 9
382 * MSR_P4_FIRM_ESCR1: 10, 11
383 */
384
385 P4_OPCODE(P4_EVENT_PACKED_DP_UOP) = P4_OPCODE_PACK(0x0c, 0x01),
386 /*
387 * MSR_P4_FIRM_ESCR0: 8, 9
388 * MSR_P4_FIRM_ESCR1: 10, 11
389 */
390
391 P4_OPCODE(P4_EVENT_SCALAR_SP_UOP) = P4_OPCODE_PACK(0x0a, 0x01),
392 /*
393 * MSR_P4_FIRM_ESCR0: 8, 9
394 * MSR_P4_FIRM_ESCR1: 10, 11
395 */
396
397 P4_OPCODE(P4_EVENT_SCALAR_DP_UOP) = P4_OPCODE_PACK(0x0e, 0x01),
398 /*
399 * MSR_P4_FIRM_ESCR0: 8, 9
400 * MSR_P4_FIRM_ESCR1: 10, 11
401 */
402
403 P4_OPCODE(P4_EVENT_64BIT_MMX_UOP) = P4_OPCODE_PACK(0x02, 0x01),
404 /*
405 * MSR_P4_FIRM_ESCR0: 8, 9
406 * MSR_P4_FIRM_ESCR1: 10, 11
407 */
408
409 P4_OPCODE(P4_EVENT_128BIT_MMX_UOP) = P4_OPCODE_PACK(0x1a, 0x01),
410 /*
411 * MSR_P4_FIRM_ESCR0: 8, 9
412 * MSR_P4_FIRM_ESCR1: 10, 11
413 */
414
415 P4_OPCODE(P4_EVENT_X87_FP_UOP) = P4_OPCODE_PACK(0x04, 0x01),
416 /*
417 * MSR_P4_FIRM_ESCR0: 8, 9
418 * MSR_P4_FIRM_ESCR1: 10, 11
419 */
420
421 P4_OPCODE(P4_EVENT_TC_MISC) = P4_OPCODE_PACK(0x06, 0x01),
422 /*
423 * MSR_P4_TC_ESCR0: 4, 5
424 * MSR_P4_TC_ESCR1: 6, 7
425 */
426
427 P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS) = P4_OPCODE_PACK(0x13, 0x06),
428 /*
429 * MSR_P4_FSB_ESCR0: 0, 1
430 * MSR_P4_FSB_ESCR1: 2, 3
431 */
432
433 P4_OPCODE(P4_EVENT_TC_MS_XFER) = P4_OPCODE_PACK(0x05, 0x00),
434 /*
435 * MSR_P4_MS_ESCR0: 4, 5
436 * MSR_P4_MS_ESCR1: 6, 7
437 */
438
439 P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES) = P4_OPCODE_PACK(0x09, 0x00),
440 /*
441 * MSR_P4_MS_ESCR0: 4, 5
442 * MSR_P4_MS_ESCR1: 6, 7
443 */
444
445 P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x05, 0x02),
446 /*
447 * MSR_P4_TBPU_ESCR0: 4, 5
448 * MSR_P4_TBPU_ESCR1: 6, 7
449 */
450
451 P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x04, 0x02),
452 /*
453 * MSR_P4_TBPU_ESCR0: 4, 5
454 * MSR_P4_TBPU_ESCR1: 6, 7
455 */
456
457 P4_OPCODE(P4_EVENT_RESOURCE_STALL) = P4_OPCODE_PACK(0x01, 0x01),
458 /*
459 * MSR_P4_ALF_ESCR0: 12, 13, 16
460 * MSR_P4_ALF_ESCR1: 14, 15, 17
461 */
462
463 P4_OPCODE(P4_EVENT_WC_BUFFER) = P4_OPCODE_PACK(0x05, 0x05),
464 /*
465 * MSR_P4_DAC_ESCR0: 8, 9
466 * MSR_P4_DAC_ESCR1: 10, 11
467 */
468
469 P4_OPCODE(P4_EVENT_B2B_CYCLES) = P4_OPCODE_PACK(0x16, 0x03),
470 /*
471 * MSR_P4_FSB_ESCR0: 0, 1
472 * MSR_P4_FSB_ESCR1: 2, 3
473 */
474
475 P4_OPCODE(P4_EVENT_BNR) = P4_OPCODE_PACK(0x08, 0x03),
476 /*
477 * MSR_P4_FSB_ESCR0: 0, 1
478 * MSR_P4_FSB_ESCR1: 2, 3
479 */
480
481 P4_OPCODE(P4_EVENT_SNOOP) = P4_OPCODE_PACK(0x06, 0x03),
482 /*
483 * MSR_P4_FSB_ESCR0: 0, 1
484 * MSR_P4_FSB_ESCR1: 2, 3
485 */
486
487 P4_OPCODE(P4_EVENT_RESPONSE) = P4_OPCODE_PACK(0x04, 0x03),
488 /*
489 * MSR_P4_FSB_ESCR0: 0, 1
490 * MSR_P4_FSB_ESCR1: 2, 3
491 */
492
493 P4_OPCODE(P4_EVENT_FRONT_END_EVENT) = P4_OPCODE_PACK(0x08, 0x05),
494 /*
495 * MSR_P4_CRU_ESCR2: 12, 13, 16
496 * MSR_P4_CRU_ESCR3: 14, 15, 17
497 */
498
499 P4_OPCODE(P4_EVENT_EXECUTION_EVENT) = P4_OPCODE_PACK(0x0c, 0x05),
500 /*
501 * MSR_P4_CRU_ESCR2: 12, 13, 16
502 * MSR_P4_CRU_ESCR3: 14, 15, 17
503 */
504
505 P4_OPCODE(P4_EVENT_REPLAY_EVENT) = P4_OPCODE_PACK(0x09, 0x05),
506 /*
507 * MSR_P4_CRU_ESCR2: 12, 13, 16
508 * MSR_P4_CRU_ESCR3: 14, 15, 17
509 */
510
511 P4_OPCODE(P4_EVENT_INSTR_RETIRED) = P4_OPCODE_PACK(0x02, 0x04),
512 /*
513 * MSR_P4_CRU_ESCR0: 12, 13, 16
514 * MSR_P4_CRU_ESCR1: 14, 15, 17
515 */
516
517 P4_OPCODE(P4_EVENT_UOPS_RETIRED) = P4_OPCODE_PACK(0x01, 0x04),
518 /*
519 * MSR_P4_CRU_ESCR0: 12, 13, 16
520 * MSR_P4_CRU_ESCR1: 14, 15, 17
521 */
522
523 P4_OPCODE(P4_EVENT_UOP_TYPE) = P4_OPCODE_PACK(0x02, 0x02),
524 /*
525 * MSR_P4_RAT_ESCR0: 12, 13, 16
526 * MSR_P4_RAT_ESCR1: 14, 15, 17
527 */
528
529 P4_OPCODE(P4_EVENT_BRANCH_RETIRED) = P4_OPCODE_PACK(0x06, 0x05),
530 /*
531 * MSR_P4_CRU_ESCR2: 12, 13, 16
532 * MSR_P4_CRU_ESCR3: 14, 15, 17
533 */
534
535 P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED) = P4_OPCODE_PACK(0x03, 0x04),
536 /*
537 * MSR_P4_CRU_ESCR0: 12, 13, 16
538 * MSR_P4_CRU_ESCR1: 14, 15, 17
539 */
540
541 P4_OPCODE(P4_EVENT_X87_ASSIST) = P4_OPCODE_PACK(0x03, 0x05),
542 /*
543 * MSR_P4_CRU_ESCR2: 12, 13, 16
544 * MSR_P4_CRU_ESCR3: 14, 15, 17
545 */
546
547 P4_OPCODE(P4_EVENT_MACHINE_CLEAR) = P4_OPCODE_PACK(0x02, 0x05),
548 /*
549 * MSR_P4_CRU_ESCR2: 12, 13, 16
550 * MSR_P4_CRU_ESCR3: 14, 15, 17
551 */
552
553 P4_OPCODE(P4_EVENT_INSTR_COMPLETED) = P4_OPCODE_PACK(0x07, 0x04),
554 /*
555 * MSR_P4_CRU_ESCR0: 12, 13, 16
556 * MSR_P4_CRU_ESCR1: 14, 15, 17
557 */
558};
559
560/*
561 * a caller should use the P4_ESCR_EMASK_NAME helper to
562 * pick the EventMask needed, for example:
563 *
564 * P4_ESCR_EMASK_NAME(P4_EVENT_TC_DELIVER_MODE, DD)
565 */
566enum P4_ESCR_EMASKS {
567 P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0),
568 P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DB, 1),
569 P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DI, 2),
570 P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BD, 3),
571 P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BB, 4),
572 P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BI, 5),
573 P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, ID, 6),
574
575 P4_GEN_ESCR_EMASK(P4_EVENT_BPU_FETCH_REQUEST, TCMISS, 0),
576
577 P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT, 0),
578 P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, MISS, 1),
579 P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT_UK, 2),
580
581 P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL, 2),
582 P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, 64K_CONF, 3),
583
584 P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, LSC, 0),
585 P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, SSC, 1),
586
587 P4_GEN_ESCR_EMASK(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD, 1),
588
589 P4_GEN_ESCR_EMASK(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST, 1),
590
591 P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STA, 1),
592 P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STD, 3),
593 P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA, 4),
594 P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR, 5),
595
596 P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, DTMISS, 0),
597 P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, ITMISS, 1),
598
599 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS, 0),
600 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE, 1),
601 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM, 2),
602 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS, 3),
603 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE, 4),
604 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM, 5),
605 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS, 8),
606 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS, 9),
607 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS, 10),
608
609 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, DEFAULT, 0),
610 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_READ, 5),
611 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE, 6),
612 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_UC, 7),
613 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WC, 8),
614 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WT, 9),
615 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WP, 10),
616 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WB, 11),
617 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OWN, 13),
618 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OTHER, 14),
619 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, PREFETCH, 15),
620
621 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT, 0),
622 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ, 5),
623 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE, 6),
624 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC, 7),
625 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC, 8),
626 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT, 9),
627 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP, 10),
628 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB, 11),
629 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN, 13),
630 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER, 14),
631 P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH, 15),
632
633 P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV, 0),
634 P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN, 1),
635 P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER, 2),
636 P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV, 3),
637 P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN, 4),
638 P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER, 5),
639
640 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0, 0),
641 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1, 1),
642 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0, 2),
643 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1, 3),
644 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE, 5),
645 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE, 6),
646 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE, 7),
647 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE, 8),
648 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE, 9),
649 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE, 10),
650 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0, 11),
651 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1, 12),
652 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2, 13),
653
654 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0, 0),
655 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1, 1),
656 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0, 2),
657 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1, 3),
658 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE, 5),
659 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE, 6),
660 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE, 7),
661 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE, 8),
662 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE, 9),
663 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE, 10),
664 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0, 11),
665 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1, 12),
666 P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2, 13),
667
668 P4_GEN_ESCR_EMASK(P4_EVENT_SSE_INPUT_ASSIST, ALL, 15),
669
670 P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_SP_UOP, ALL, 15),
671
672 P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_DP_UOP, ALL, 15),
673
674 P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_SP_UOP, ALL, 15),
675
676 P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_DP_UOP, ALL, 15),
677
678 P4_GEN_ESCR_EMASK(P4_EVENT_64BIT_MMX_UOP, ALL, 15),
679
680 P4_GEN_ESCR_EMASK(P4_EVENT_128BIT_MMX_UOP, ALL, 15),
681
682 P4_GEN_ESCR_EMASK(P4_EVENT_X87_FP_UOP, ALL, 15),
683
684 P4_GEN_ESCR_EMASK(P4_EVENT_TC_MISC, FLUSH, 4),
685
686 P4_GEN_ESCR_EMASK(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING, 0),
687
688 P4_GEN_ESCR_EMASK(P4_EVENT_TC_MS_XFER, CISC, 0),
689
690 P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD, 0),
691 P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER, 1),
692 P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM, 2),
693
694 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL, 1),
695 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL, 2),
696 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN, 3),
697 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT, 4),
698
699 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL, 1),
700 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CALL, 2),
701 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN, 3),
702 P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT, 4),
703
704 P4_GEN_ESCR_EMASK(P4_EVENT_RESOURCE_STALL, SBFULL, 5),
705
706 P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_EVICTS, 0),
707 P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS, 1),
708
709 P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, NBOGUS, 0),
710 P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, BOGUS, 1),
711
712 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS0, 0),
713 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS1, 1),
714 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS2, 2),
715 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS3, 3),
716 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS0, 4),
717 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS1, 5),
718 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS2, 6),
719 P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS3, 7),
720
721 P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, NBOGUS, 0),
722 P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, BOGUS, 1),
723
724 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG, 0),
725 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSTAG, 1),
726 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSNTAG, 2),
727 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSTAG, 3),
728
729 P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, NBOGUS, 0),
730 P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, BOGUS, 1),
731
732 P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGLOADS, 1),
733 P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGSTORES, 2),
734
735 P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNP, 0),
736 P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNM, 1),
737 P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTP, 2),
738 P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTM, 3),
739
740 P4_GEN_ESCR_EMASK(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS, 0),
741
742 P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSU, 0),
743 P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSO, 1),
744 P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAO, 2),
745 P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAU, 3),
746 P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, PREA, 4),
747
748 P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, CLEAR, 0),
749 P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, MOCLEAR, 1),
750 P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, SMCLEAR, 2),
751
752 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, NBOGUS, 0),
753 P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1),
754};
755
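Per the comment above the enum, a caller ORs the generated mask into its
config; a sketch (assuming P4_ESCR_EMASK_NAME expands to the identifiers
P4_GEN_ESCR_EMASK generates; the function name is ours):

	static inline u64 p4_tc_deliver_dd_mask(void)
	{
		/* select only the DD sub-event of TC_DELIVER_MODE */
		return P4_ESCR_EMASK_NAME(P4_EVENT_TC_DELIVER_MODE, DD);
	}
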
756/* P4 PEBS: has been stale (unused) for a while */
757#define P4_PEBS_METRIC_MASK 0x00001fffU
758#define P4_PEBS_UOB_TAG 0x01000000U
759#define P4_PEBS_ENABLE 0x02000000U
760
761/* Replay metrics for MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT */
762#define P4_PEBS__1stl_cache_load_miss_retired 0x3000001
763#define P4_PEBS__2ndl_cache_load_miss_retired 0x3000002
764#define P4_PEBS__dtlb_load_miss_retired 0x3000004
765#define P4_PEBS__dtlb_store_miss_retired 0x3000004
766#define P4_PEBS__dtlb_all_miss_retired 0x3000004
767#define P4_PEBS__tagged_mispred_branch 0x3018000
768#define P4_PEBS__mob_load_replay_retired 0x3000200
769#define P4_PEBS__split_load_retired 0x3000400
770#define P4_PEBS__split_store_retired 0x3000400
771
772#define P4_VERT__1stl_cache_load_miss_retired 0x0000001
773#define P4_VERT__2ndl_cache_load_miss_retired 0x0000001
774#define P4_VERT__dtlb_load_miss_retired 0x0000001
775#define P4_VERT__dtlb_store_miss_retired 0x0000002
776#define P4_VERT__dtlb_all_miss_retired 0x0000003
777#define P4_VERT__tagged_mispred_branch 0x0000010
778#define P4_VERT__mob_load_replay_retired 0x0000001
779#define P4_VERT__split_load_retired 0x0000001
780#define P4_VERT__split_store_retired 0x0000002
781
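The metrics are programmed as a pair, one value into each of the two MSRs
named in the comment above; a sketch for the DTLB-load-miss metric (wrmsrl()
is the standard kernel MSR writer; error handling omitted):

	static inline void p4_pebs_enable_dtlb_load_miss(void)
	{
		wrmsrl(MSR_IA32_PEBS_ENABLE,    P4_PEBS__dtlb_load_miss_retired);
		wrmsrl(MSR_P4_PEBS_MATRIX_VERT, P4_VERT__dtlb_load_miss_retired);
	}
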
782enum P4_CACHE_EVENTS {
783 P4_CACHE__NONE,
784
785 P4_CACHE__1stl_cache_load_miss_retired,
786 P4_CACHE__2ndl_cache_load_miss_retired,
787 P4_CACHE__dtlb_load_miss_retired,
788 P4_CACHE__dtlb_store_miss_retired,
789 P4_CACHE__itlb_reference_hit,
790 P4_CACHE__itlb_reference_miss,
791
792 P4_CACHE__MAX
793};
794
795#endif /* PERF_EVENT_P4_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index b753ea59703a..7e5c6a60b8ee 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -21,7 +21,6 @@ struct mm_struct;
21#include <asm/msr.h> 21#include <asm/msr.h>
22#include <asm/desc_defs.h> 22#include <asm/desc_defs.h>
23#include <asm/nops.h> 23#include <asm/nops.h>
24#include <asm/ds.h>
25 24
26#include <linux/personality.h> 25#include <linux/personality.h>
27#include <linux/cpumask.h> 26#include <linux/cpumask.h>
@@ -29,6 +28,7 @@ struct mm_struct;
29#include <linux/threads.h> 28#include <linux/threads.h>
30#include <linux/math64.h> 29#include <linux/math64.h>
31#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/err.h>
32 32
33#define HBP_NUM 4 33#define HBP_NUM 4
34/* 34/*
@@ -113,7 +113,6 @@ struct cpuinfo_x86 {
113 /* Index into per_cpu list: */ 113 /* Index into per_cpu list: */
114 u16 cpu_index; 114 u16 cpu_index;
115#endif 115#endif
116 unsigned int x86_hyper_vendor;
117} __attribute__((__aligned__(SMP_CACHE_BYTES))); 116} __attribute__((__aligned__(SMP_CACHE_BYTES)));
118 117
119#define X86_VENDOR_INTEL 0 118#define X86_VENDOR_INTEL 0
@@ -127,9 +126,6 @@ struct cpuinfo_x86 {
127 126
128#define X86_VENDOR_UNKNOWN 0xff 127#define X86_VENDOR_UNKNOWN 0xff
129 128
130#define X86_HYPER_VENDOR_NONE 0
131#define X86_HYPER_VENDOR_VMWARE 1
132
133/* 129/*
134 * capabilities of CPUs 130 * capabilities of CPUs
135 */ 131 */
@@ -380,6 +376,10 @@ union thread_xstate {
380 struct xsave_struct xsave; 376 struct xsave_struct xsave;
381}; 377};
382 378
379struct fpu {
380 union thread_xstate *state;
381};
382
383#ifdef CONFIG_X86_64 383#ifdef CONFIG_X86_64
384DECLARE_PER_CPU(struct orig_ist, orig_ist); 384DECLARE_PER_CPU(struct orig_ist, orig_ist);
385 385
@@ -457,7 +457,7 @@ struct thread_struct {
457 unsigned long trap_no; 457 unsigned long trap_no;
458 unsigned long error_code; 458 unsigned long error_code;
459 /* floating point and extended processor state */ 459 /* floating point and extended processor state */
460 union thread_xstate *xstate; 460 struct fpu fpu;
461#ifdef CONFIG_X86_32 461#ifdef CONFIG_X86_32
462 /* Virtual 86 mode info */ 462 /* Virtual 86 mode info */
463 struct vm86_struct __user *vm86_info; 463 struct vm86_struct __user *vm86_info;
@@ -473,10 +473,6 @@ struct thread_struct {
473 unsigned long iopl; 473 unsigned long iopl;
474 /* Max allowed port in the bitmap, in bytes: */ 474 /* Max allowed port in the bitmap, in bytes: */
475 unsigned io_bitmap_max; 475 unsigned io_bitmap_max;
476/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
477 unsigned long debugctlmsr;
478 /* Debug Store context; see asm/ds.h */
479 struct ds_context *ds_ctx;
480}; 476};
481 477
482static inline unsigned long native_get_debugreg(int regno) 478static inline unsigned long native_get_debugreg(int regno)
@@ -793,6 +789,8 @@ static inline void wbinvd_halt(void)
793extern void enable_sep_cpu(void); 789extern void enable_sep_cpu(void);
794extern int sysenter_setup(void); 790extern int sysenter_setup(void);
795 791
792extern void early_trap_init(void);
793
796/* Defined in head.S */ 794/* Defined in head.S */
797extern struct desc_ptr early_gdt_descr; 795extern struct desc_ptr early_gdt_descr;
798 796
@@ -803,7 +801,7 @@ extern void cpu_init(void);
803 801
804static inline unsigned long get_debugctlmsr(void) 802static inline unsigned long get_debugctlmsr(void)
805{ 803{
806 unsigned long debugctlmsr = 0; 804 unsigned long debugctlmsr = 0;
807 805
808#ifndef CONFIG_X86_DEBUGCTLMSR 806#ifndef CONFIG_X86_DEBUGCTLMSR
809 if (boot_cpu_data.x86 < 6) 807 if (boot_cpu_data.x86 < 6)
@@ -811,21 +809,6 @@ static inline unsigned long get_debugctlmsr(void)
811#endif 809#endif
812 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 810 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
813 811
814 return debugctlmsr;
815}
816
817static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
818{
819 u64 debugctlmsr = 0;
820 u32 val1, val2;
821
822#ifndef CONFIG_X86_DEBUGCTLMSR
823 if (boot_cpu_data.x86 < 6)
824 return 0;
825#endif
826 rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
827 debugctlmsr = val1 | ((u64)val2 << 32);
828
829 return debugctlmsr; 812 return debugctlmsr;
830} 813}
831 814
@@ -838,18 +821,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
838 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 821 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
839} 822}
840 823
841static inline void update_debugctlmsr_on_cpu(int cpu,
842 unsigned long debugctlmsr)
843{
844#ifndef CONFIG_X86_DEBUGCTLMSR
845 if (boot_cpu_data.x86 < 6)
846 return;
847#endif
848 wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
849 (u32)((u64)debugctlmsr),
850 (u32)((u64)debugctlmsr >> 32));
851}
852
853/* 824/*
854 * from system description table in BIOS. Mostly for MCA use, but 825 * from system description table in BIOS. Mostly for MCA use, but
855 * others may find it useful: 826 * others may find it useful:
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 86723035a515..52b098a6eebb 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -82,61 +82,6 @@
82 82
83#ifndef __ASSEMBLY__ 83#ifndef __ASSEMBLY__
84#include <linux/types.h> 84#include <linux/types.h>
85 85#endif
86/* configuration/status structure used in PTRACE_BTS_CONFIG and
87 PTRACE_BTS_STATUS commands.
88*/
89struct ptrace_bts_config {
90 /* requested or actual size of BTS buffer in bytes */
91 __u32 size;
92 /* bitmask of below flags */
93 __u32 flags;
94 /* buffer overflow signal */
95 __u32 signal;
96 /* actual size of bts_struct in bytes */
97 __u32 bts_size;
98};
99#endif /* __ASSEMBLY__ */
100
101#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
102#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
103#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
104 instead of wrapping around */
105#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
106
107#define PTRACE_BTS_CONFIG 40
108/* Configure branch trace recording.
109 ADDR points to a struct ptrace_bts_config.
110 DATA gives the size of that buffer.
111 A new buffer is allocated, if requested in the flags.
112 An overflow signal may only be requested for new buffers.
113 Returns the number of bytes read.
114*/
115#define PTRACE_BTS_STATUS 41
116/* Return the current configuration in a struct ptrace_bts_config
117 pointed to by ADDR; DATA gives the size of that buffer.
118 Returns the number of bytes written.
119*/
120#define PTRACE_BTS_SIZE 42
121/* Return the number of available BTS records for draining.
122 DATA and ADDR are ignored.
123*/
124#define PTRACE_BTS_GET 43
125/* Get a single BTS record.
126 DATA defines the index into the BTS array, where 0 is the newest
127 entry, and higher indices refer to older entries.
128 ADDR is pointing to struct bts_struct (see asm/ds.h).
129*/
130#define PTRACE_BTS_CLEAR 44
131/* Clear the BTS buffer.
132 DATA and ADDR are ignored.
133*/
134#define PTRACE_BTS_DRAIN 45
135/* Read all available BTS records and clear the buffer.
136 ADDR points to an array of struct bts_struct.
137 DATA gives the size of that buffer.
138 BTS records are read from oldest to newest.
139 Returns number of BTS records drained.
140*/
141 86
142#endif /* _ASM_X86_PTRACE_ABI_H */ 87#endif /* _ASM_X86_PTRACE_ABI_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 69a686a7dff0..78cd1ea94500 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -289,12 +289,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
289extern int do_set_thread_area(struct task_struct *p, int idx, 289extern int do_set_thread_area(struct task_struct *p, int idx,
290 struct user_desc __user *info, int can_allocate); 290 struct user_desc __user *info, int can_allocate);
291 291
292#ifdef CONFIG_X86_PTRACE_BTS
293extern void ptrace_bts_untrace(struct task_struct *tsk);
294
295#define arch_ptrace_untrace(tsk) ptrace_bts_untrace(tsk)
296#endif /* CONFIG_X86_PTRACE_BTS */
297
298#endif /* __KERNEL__ */ 292#endif /* __KERNEL__ */
299 293
300#endif /* !__ASSEMBLY__ */ 294#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 6d93508f2626..35f2d1948ada 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -29,7 +29,8 @@ struct pvclock_vcpu_time_info {
29 u64 system_time; 29 u64 system_time;
30 u32 tsc_to_system_mul; 30 u32 tsc_to_system_mul;
31 s8 tsc_shift; 31 s8 tsc_shift;
32 u8 pad[3]; 32 u8 flags;
33 u8 pad[2];
33} __attribute__((__packed__)); /* 32 bytes */ 34} __attribute__((__packed__)); /* 32 bytes */
34 35
35struct pvclock_wall_clock { 36struct pvclock_wall_clock {
@@ -38,5 +39,6 @@ struct pvclock_wall_clock {
38 u32 nsec; 39 u32 nsec;
39} __attribute__((__packed__)); 40} __attribute__((__packed__));
40 41
42#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
41#endif /* __ASSEMBLY__ */ 43#endif /* __ASSEMBLY__ */
42#endif /* _ASM_X86_PVCLOCK_ABI_H */ 44#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 53235fd5f8ce..cd02f324aa6b 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -6,6 +6,7 @@
6 6
7/* some helper functions for xen and kvm pv clock sources */ 7/* some helper functions for xen and kvm pv clock sources */
8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); 8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
9void pvclock_set_flags(u8 flags);
9unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); 10unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
10void pvclock_read_wallclock(struct pvclock_wall_clock *wall, 11void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
11 struct pvclock_vcpu_time_info *vcpu, 12 struct pvclock_vcpu_time_info *vcpu,
diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
deleted file mode 100644
index c8e9c8bed3d0..000000000000
--- a/arch/x86/include/asm/rdc321x_defs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#define PFX "rdc321x: "
2
3/* General purpose configuration and data registers */
4#define RDC3210_CFGREG_ADDR 0x0CF8
5#define RDC3210_CFGREG_DATA 0x0CFC
6
7#define RDC321X_GPIO_CTRL_REG1 0x48
8#define RDC321X_GPIO_CTRL_REG2 0x84
9#define RDC321X_GPIO_DATA_REG1 0x4c
10#define RDC321X_GPIO_DATA_REG2 0x88
11
12#define RDC321X_MAX_GPIO 58
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 75af592677ec..fb0b1874396f 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef _ASM_X86_SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define _ASM_X86_SCATTERLIST_H
3 3
4#define ISA_DMA_THRESHOLD (0x00ffffff)
5
6#include <asm-generic/scatterlist.h> 4#include <asm-generic/scatterlist.h>
7 5
6#define ISA_DMA_THRESHOLD (0x00ffffff)
7#define ARCH_HAS_SG_CHAIN
8
8#endif /* _ASM_X86_SCATTERLIST_H */ 9#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 38638cd2fa4c..0e831059ac5a 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
81 u32 event_inj_err; 81 u32 event_inj_err;
82 u64 nested_cr3; 82 u64 nested_cr3;
83 u64 lbr_ctl; 83 u64 lbr_ctl;
84 u8 reserved_5[832]; 84 u64 reserved_5;
85 u64 next_rip;
86 u8 reserved_6[816];
85}; 87};
86 88
87 89
@@ -115,6 +117,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
115#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT) 117#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
116#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT) 118#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
117 119
120#define SVM_VM_CR_VALID_MASK 0x001fULL
121#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
122#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
123
118struct __attribute__ ((__packed__)) vmcb_seg { 124struct __attribute__ ((__packed__)) vmcb_seg {
119 u16 selector; 125 u16 selector;
120 u16 attrib; 126 u16 attrib;
@@ -238,6 +244,7 @@ struct __attribute__ ((__packed__)) vmcb {
238 244
239#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36 245#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
240#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38 246#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
247#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
241 248
242#define SVM_EXIT_READ_CR0 0x000 249#define SVM_EXIT_READ_CR0 0x000
243#define SVM_EXIT_READ_CR3 0x003 250#define SVM_EXIT_READ_CR3 0x003
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e0d28901e969..f0b6e5dbc5a0 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -87,13 +87,12 @@ struct thread_info {
87#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 87#define TIF_NOTSC 16 /* TSC is not accessible in userland */
88#define TIF_IA32 17 /* 32bit process */ 88#define TIF_IA32 17 /* 32bit process */
89#define TIF_FORK 18 /* ret_from_fork */ 89#define TIF_FORK 18 /* ret_from_fork */
90#define TIF_MEMDIE 20 90#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
91#define TIF_DEBUG 21 /* uses debug registers */ 91#define TIF_DEBUG 21 /* uses debug registers */
92#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ 92#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
93#define TIF_FREEZE 23 /* is freezing for suspend */ 93#define TIF_FREEZE 23 /* is freezing for suspend */
94#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ 94#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
95#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ 95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
96#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
97#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ 96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
98#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ 97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
99 98
@@ -115,8 +114,7 @@ struct thread_info {
115#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) 114#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
116#define _TIF_FREEZE (1 << TIF_FREEZE) 115#define _TIF_FREEZE (1 << TIF_FREEZE)
117#define _TIF_FORCED_TF (1 << TIF_FORCED_TF) 116#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
118#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) 117#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
119#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
120#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) 118#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
121#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 119#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
122 120
@@ -147,7 +145,7 @@ struct thread_info {
147 145
148/* flags to check in __switch_to() */ 146/* flags to check in __switch_to() */
149#define _TIF_WORK_CTXSW \ 147#define _TIF_WORK_CTXSW \
150 (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) 148 (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
151 149
152#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) 150#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
153#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) 151#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
@@ -241,10 +239,9 @@ static inline struct thread_info *current_thread_info(void)
241#define TS_USEDFPU 0x0001 /* FPU was used by this task 239#define TS_USEDFPU 0x0001 /* FPU was used by this task
242 this quantum (SMP) */ 240 this quantum (SMP) */
243#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ 241#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
244#define TS_POLLING 0x0004 /* true if in idle loop 242#define TS_POLLING 0x0004 /* idle task polling need_resched,
245 and not sleeping */ 243 skip sending interrupt */
246#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ 244#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
247#define TS_XSAVE 0x0010 /* Use xsave/xrstor */
248 245
249#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) 246#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
250 247
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c5087d796587..21899cc31e52 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -53,33 +53,29 @@
53extern int cpu_to_node_map[]; 53extern int cpu_to_node_map[];
54 54
55/* Returns the number of the node containing CPU 'cpu' */ 55/* Returns the number of the node containing CPU 'cpu' */
56static inline int cpu_to_node(int cpu) 56static inline int __cpu_to_node(int cpu)
57{ 57{
58 return cpu_to_node_map[cpu]; 58 return cpu_to_node_map[cpu];
59} 59}
60#define early_cpu_to_node(cpu) cpu_to_node(cpu) 60#define early_cpu_to_node __cpu_to_node
61#define cpu_to_node __cpu_to_node
61 62
62#else /* CONFIG_X86_64 */ 63#else /* CONFIG_X86_64 */
63 64
64/* Mappings between logical cpu number and node number */ 65/* Mappings between logical cpu number and node number */
65DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); 66DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
66 67
67/* Returns the number of the current Node. */
68DECLARE_PER_CPU(int, node_number);
69#define numa_node_id() percpu_read(node_number)
70
71#ifdef CONFIG_DEBUG_PER_CPU_MAPS 68#ifdef CONFIG_DEBUG_PER_CPU_MAPS
72extern int cpu_to_node(int cpu); 69/*
70 * override generic percpu implementation of cpu_to_node
71 */
72extern int __cpu_to_node(int cpu);
73#define cpu_to_node __cpu_to_node
74
73extern int early_cpu_to_node(int cpu); 75extern int early_cpu_to_node(int cpu);
74 76
75#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 77#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
76 78
77/* Returns the number of the node containing CPU 'cpu' */
78static inline int cpu_to_node(int cpu)
79{
80 return per_cpu(x86_cpu_to_node_map, cpu);
81}
82
83/* Same function but used if called before per_cpu areas are setup */ 79/* Same function but used if called before per_cpu areas are setup */
84static inline int early_cpu_to_node(int cpu) 80static inline int early_cpu_to_node(int cpu)
85{ 81{
@@ -170,6 +166,10 @@ static inline int numa_node_id(void)
170{ 166{
171 return 0; 167 return 0;
172} 168}
169/*
170 * indicate override:
171 */
172#define numa_node_id numa_node_id
173 173
174static inline int early_cpu_to_node(int cpu) 174static inline int early_cpu_to_node(int cpu)
175{ 175{
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4da91ad69e0d..f66cda56781d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition)
79 79
80extern int panic_on_unrecovered_nmi; 80extern int panic_on_unrecovered_nmi;
81 81
82void math_error(void __user *); 82void math_error(struct pt_regs *, int, int);
83void math_emulate(struct math_emu_info *); 83void math_emulate(struct math_emu_info *);
84#ifndef CONFIG_X86_32 84#ifndef CONFIG_X86_32
85asmlinkage void smp_thermal_interrupt(void); 85asmlinkage void smp_thermal_interrupt(void);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index b414d2b401f6..aa558ac0306e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -27,13 +27,14 @@
27 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on. 27 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
28 * 28 *
29 * We will use 31 sets, one for sending BAU messages from each of the 32 29 * We will use 31 sets, one for sending BAU messages from each of the 32
30 * cpu's on the node. 30 * cpu's on the uvhub.
31 * 31 *
32 * TLB shootdown will use the first of the 8 descriptors of each set. 32 * TLB shootdown will use the first of the 8 descriptors of each set.
33 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set). 33 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
34 */ 34 */
35 35
36#define UV_ITEMS_PER_DESCRIPTOR 8 36#define UV_ITEMS_PER_DESCRIPTOR 8
37#define MAX_BAU_CONCURRENT 3
37#define UV_CPUS_PER_ACT_STATUS 32 38#define UV_CPUS_PER_ACT_STATUS 32
38#define UV_ACT_STATUS_MASK 0x3 39#define UV_ACT_STATUS_MASK 0x3
39#define UV_ACT_STATUS_SIZE 2 40#define UV_ACT_STATUS_SIZE 2
@@ -45,6 +46,9 @@
45#define UV_PAYLOADQ_PNODE_SHIFT 49 46#define UV_PAYLOADQ_PNODE_SHIFT 49
46#define UV_PTC_BASENAME "sgi_uv/ptc_statistics" 47#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
47#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask)) 48#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
49#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
50#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
51#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
48 52
49/* 53/*
50 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 54 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
@@ -55,15 +59,29 @@
55#define DESC_STATUS_SOURCE_TIMEOUT 3 59#define DESC_STATUS_SOURCE_TIMEOUT 3
56 60
57/* 61/*
58 * source side thresholds at which message retries print a warning 62 * source side thresholds at which message retries print a warning
59 */ 63 */
60#define SOURCE_TIMEOUT_LIMIT 20 64#define SOURCE_TIMEOUT_LIMIT 20
61#define DESTINATION_TIMEOUT_LIMIT 20 65#define DESTINATION_TIMEOUT_LIMIT 20
62 66
63/* 67/*
68 * misc. delays, in microseconds
69 */
70#define THROTTLE_DELAY 10
71#define TIMEOUT_DELAY 10
72#define BIOS_TO 1000
73/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
74
75/*
76 * thresholds at which to use IPI to free resources
77 */
78#define PLUGSB4RESET 100
79#define TIMEOUTSB4RESET 100
80
81/*
64 * number of entries in the destination side payload queue 82 * number of entries in the destination side payload queue
65 */ 83 */
66#define DEST_Q_SIZE 17 84#define DEST_Q_SIZE 20
67/* 85/*
68 * number of destination side software ack resources 86 * number of destination side software ack resources
69 */ 87 */
@@ -72,9 +90,10 @@
72/* 90/*
73 * completion statuses for sending a TLB flush message 91 * completion statuses for sending a TLB flush message
74 */ 92 */
75#define FLUSH_RETRY 1 93#define FLUSH_RETRY_PLUGGED 1
76#define FLUSH_GIVEUP 2 94#define FLUSH_RETRY_TIMEOUT 2
77#define FLUSH_COMPLETE 3 95#define FLUSH_GIVEUP 3
96#define FLUSH_COMPLETE 4
78 97
79/* 98/*
80 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor) 99 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -86,14 +105,14 @@
86 * 'base_dest_nodeid' field of the header corresponds to the 105 * 'base_dest_nodeid' field of the header corresponds to the
87 * destination nodeID associated with that specified bit. 106 * destination nodeID associated with that specified bit.
88 */ 107 */
89struct bau_target_nodemask { 108struct bau_target_uvhubmask {
90 unsigned long bits[BITS_TO_LONGS(256)]; 109 unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
91}; 110};
92 111
93/* 112/*
94 * mask of cpu's on a node 113 * mask of cpu's on a uvhub
95 * (during initialization we need to check that unsigned long has 114 * (during initialization we need to check that unsigned long has
96 * enough bits for max. cpu's per node) 115 * enough bits for max. cpu's per uvhub)
97 */ 116 */
98struct bau_local_cpumask { 117struct bau_local_cpumask {
99 unsigned long bits; 118 unsigned long bits;
@@ -135,8 +154,8 @@ struct bau_msg_payload {
135struct bau_msg_header { 154struct bau_msg_header {
136 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 155 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
137 /* bits 5:0 */ 156 /* bits 5:0 */
138 unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ 157 unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
139 /* bits 20:6 */ /* first bit in node_map */ 158 /* bits 20:6 */ /* first bit in uvhub map */
140 unsigned int command:8; /* message type */ 159 unsigned int command:8; /* message type */
141 /* bits 28:21 */ 160 /* bits 28:21 */
142 /* 0x38: SN3net EndPoint Message */ 161 /* 0x38: SN3net EndPoint Message */
@@ -146,26 +165,38 @@ struct bau_msg_header {
146 unsigned int rsvd_2:9; /* must be zero */ 165 unsigned int rsvd_2:9; /* must be zero */
147 /* bits 40:32 */ 166 /* bits 40:32 */
148 /* Suppl_A is 56-41 */ 167 /* Suppl_A is 56-41 */
149 unsigned int payload_2a:8;/* becomes byte 16 of msg */ 168 unsigned int sequence:16;/* message sequence number */
150 /* bits 48:41 */ /* not currently using */ 169 /* bits 56:41 */ /* becomes bytes 16-17 of msg */
151 unsigned int payload_2b:8;/* becomes byte 17 of msg */
152 /* bits 56:49 */ /* not currently using */
153 /* Address field (96:57) is never used as an 170 /* Address field (96:57) is never used as an
154 address (these are address bits 42:3) */ 171 address (these are address bits 42:3) */
172
155 unsigned int rsvd_3:1; /* must be zero */ 173 unsigned int rsvd_3:1; /* must be zero */
156 /* bit 57 */ 174 /* bit 57 */
157 /* address bits 27:4 are payload */ 175 /* address bits 27:4 are payload */
158 /* these 24 bits become bytes 12-14 of msg */ 176 /* these next 24 (58-81) bits become bytes 12-14 of msg */
177
178 /* bits 65:58 land in byte 12 */
159 unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */ 179 unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
160 /* bit 58 */ 180 /* bit 58 */
161 181 unsigned int msg_type:3; /* software type of the message*/
162 unsigned int payload_1a:5;/* not currently used */ 182 /* bits 61:59 */
163 /* bits 63:59 */ 183 unsigned int canceled:1; /* message canceled, resource to be freed*/
164 unsigned int payload_1b:8;/* not currently used */ 184 /* bit 62 */
165 /* bits 71:64 */ 185 unsigned int payload_1a:1;/* not currently used */
166 unsigned int payload_1c:8;/* not currently used */ 186 /* bit 63 */
167 /* bits 79:72 */ 187 unsigned int payload_1b:2;/* not currently used */
168 unsigned int payload_1d:2;/* not currently used */ 188 /* bits 65:64 */
189
190 /* bits 73:66 land in byte 13 */
191 unsigned int payload_1ca:6;/* not currently used */
192 /* bits 71:66 */
193 unsigned int payload_1c:2;/* not currently used */
194 /* bits 73:72 */
195
196 /* bits 81:74 land in byte 14 */
197 unsigned int payload_1d:6;/* not currently used */
198 /* bits 79:74 */
199 unsigned int payload_1e:2;/* not currently used */
169 /* bits 81:80 */ 200 /* bits 81:80 */
170 201
171 unsigned int rsvd_4:7; /* must be zero */ 202 unsigned int rsvd_4:7; /* must be zero */
@@ -178,7 +209,7 @@ struct bau_msg_header {
178 /* bits 95:90 */ 209 /* bits 95:90 */
179 unsigned int rsvd_6:5; /* must be zero */ 210 unsigned int rsvd_6:5; /* must be zero */
180 /* bits 100:96 */ 211 /* bits 100:96 */
181 unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */ 212 unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
182 /* bit 101*/ 213 /* bit 101*/
183 unsigned int fairness:3;/* usually zero */ 214 unsigned int fairness:3;/* usually zero */
184 /* bits 104:102 */ 215 /* bits 104:102 */
@@ -191,13 +222,18 @@ struct bau_msg_header {
191 /* bits 127:107 */ 222 /* bits 127:107 */
192}; 223};
193 224
225/* see msg_type: */
226#define MSG_NOOP 0
227#define MSG_REGULAR 1
228#define MSG_RETRY 2
229
194/* 230/*
195 * The activation descriptor: 231 * The activation descriptor:
196 * The format of the message to send, plus all accompanying control 232 * The format of the message to send, plus all accompanying control
197 * Should be 64 bytes 233 * Should be 64 bytes
198 */ 234 */
199struct bau_desc { 235struct bau_desc {
200 struct bau_target_nodemask distribution; 236 struct bau_target_uvhubmask distribution;
201 /* 237 /*
202 * message template, consisting of header and payload: 238 * message template, consisting of header and payload:
203 */ 239 */
@@ -237,19 +273,25 @@ struct bau_payload_queue_entry {
237 unsigned short acknowledge_count; /* filled in by destination */ 273 unsigned short acknowledge_count; /* filled in by destination */
238 /* 16 bits, bytes 10-11 */ 274 /* 16 bits, bytes 10-11 */
239 275
240 unsigned short replied_to:1; /* sent as 0 by the source */ 276 /* these next 3 bytes come from bits 58-81 of the message header */
241 /* 1 bit */ 277 unsigned short replied_to:1; /* sent as 0 by the source */
242 unsigned short unused1:7; /* not currently using */ 278 unsigned short msg_type:3; /* software message type */
243 /* 7 bits: byte 12) */ 279 unsigned short canceled:1; /* sent as 0 by the source */
280 unsigned short unused1:3; /* not currently using */
281 /* byte 12 */
244 282
245 unsigned char unused2[2]; /* not currently using */ 283 unsigned char unused2a; /* not currently using */
246 /* bytes 13-14 */ 284 /* byte 13 */
285 unsigned char unused2; /* not currently using */
286 /* byte 14 */
247 287
248 unsigned char sw_ack_vector; /* filled in by the hardware */ 288 unsigned char sw_ack_vector; /* filled in by the hardware */
249 /* byte 15 (bits 127:120) */ 289 /* byte 15 (bits 127:120) */
250 290
251 unsigned char unused4[3]; /* not currently using bytes 17-19 */ 291 unsigned short sequence; /* message sequence number */
252 /* bytes 17-19 */ 292 /* bytes 16-17 */
293 unsigned char unused4[2]; /* not currently using bytes 18-19 */
294 /* bytes 18-19 */
253 295
254 int number_of_cpus; /* filled in at destination */ 296 int number_of_cpus; /* filled in at destination */
255 /* 32 bits, bytes 20-23 (aligned) */ 297 /* 32 bits, bytes 20-23 (aligned) */
@@ -259,63 +301,93 @@ struct bau_payload_queue_entry {
259}; 301};
260 302
261/* 303/*
262 * one for every slot in the destination payload queue 304 * one per-cpu; to locate the software tables
263 */
264struct bau_msg_status {
265 struct bau_local_cpumask seen_by; /* map of cpu's */
266};
267
268/*
269 * one for every slot in the destination software ack resources
270 */
271struct bau_sw_ack_status {
272 struct bau_payload_queue_entry *msg; /* associated message */
273 int watcher; /* cpu monitoring, or -1 */
274};
275
276/*
277 * one on every node and per-cpu; to locate the software tables
278 */ 305 */
279struct bau_control { 306struct bau_control {
280 struct bau_desc *descriptor_base; 307 struct bau_desc *descriptor_base;
281 struct bau_payload_queue_entry *bau_msg_head;
282 struct bau_payload_queue_entry *va_queue_first; 308 struct bau_payload_queue_entry *va_queue_first;
283 struct bau_payload_queue_entry *va_queue_last; 309 struct bau_payload_queue_entry *va_queue_last;
284 struct bau_msg_status *msg_statuses; 310 struct bau_payload_queue_entry *bau_msg_head;
285 int *watching; /* pointer to array */ 311 struct bau_control *uvhub_master;
312 struct bau_control *socket_master;
313 unsigned long timeout_interval;
314 atomic_t active_descriptor_count;
315 int max_concurrent;
316 int max_concurrent_constant;
317 int retry_message_scans;
318 int plugged_tries;
319 int timeout_tries;
320 int ipi_attempts;
321 int conseccompletes;
322 short cpu;
323 short uvhub_cpu;
324 short uvhub;
325 short cpus_in_socket;
326 short cpus_in_uvhub;
327 unsigned short message_number;
328 unsigned short uvhub_quiesce;
329 short socket_acknowledge_count[DEST_Q_SIZE];
330 cycles_t send_message;
331 spinlock_t masks_lock;
332 spinlock_t uvhub_lock;
333 spinlock_t queue_lock;
286}; 334};
287 335
288/* 336/*
289 * This structure is allocated per_cpu for UV TLB shootdown statistics. 337 * This structure is allocated per_cpu for UV TLB shootdown statistics.
290 */ 338 */
291struct ptc_stats { 339struct ptc_stats {
292 unsigned long ptc_i; /* number of IPI-style flushes */ 340 /* sender statistics */
293 unsigned long requestor; /* number of nodes this cpu sent to */ 341 unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
294 unsigned long requestee; /* times cpu was remotely requested */ 342 unsigned long s_requestor; /* number of shootdown requests */
295 unsigned long alltlb; /* times all tlb's on this cpu were flushed */ 343 unsigned long s_stimeout; /* source side timeouts */
296 unsigned long onetlb; /* times just one tlb on this cpu was flushed */ 344 unsigned long s_dtimeout; /* destination side timeouts */
297 unsigned long s_retry; /* retries on source side timeouts */ 345 unsigned long s_time; /* time spent in sending side */
298 unsigned long d_retry; /* retries on destination side timeouts */ 346 unsigned long s_retriesok; /* successful retries */
299 unsigned long sflush; /* cycles spent in uv_flush_tlb_others */ 347 unsigned long s_ntargcpu; /* number of cpus targeted */
300 unsigned long dflush; /* cycles spent on destination side */ 348 unsigned long s_ntarguvhub; /* number of uvhubs targeted */
301 unsigned long retriesok; /* successes on retries */ 349 unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */
302 unsigned long nomsg; /* interrupts with no message */ 350 unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */
303 unsigned long multmsg; /* interrupts with multiple messages */ 351 unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */
304 unsigned long ntargeted;/* nodes targeted */ 352 unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */
353 unsigned long s_ntarguvhub1; /* number of times == 1 target hub */
354 unsigned long s_resets_plug; /* ipi-style resets from plug state */
355 unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
356 unsigned long s_busy; /* status stayed busy past s/w timer */
357 unsigned long s_throttles; /* waits in throttle */
358 unsigned long s_retry_messages; /* retry broadcasts */
359 /* destination statistics */
360 unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
361 unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
362 unsigned long d_multmsg; /* interrupts with multiple messages */
363 unsigned long d_nomsg; /* interrupts with no message */
364 unsigned long d_time; /* time spent on destination side */
365 unsigned long d_requestee; /* number of messages processed */
366 unsigned long d_retries; /* number of retry messages processed */
367 unsigned long d_canceled; /* number of messages canceled by retries */
368 unsigned long d_nocanceled; /* retries that found nothing to cancel */
369 unsigned long d_resets; /* number of ipi-style requests processed */
370 unsigned long d_rcanceled; /* number of messages canceled by resets */
305}; 371};
306 372
307static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp) 373static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
308{ 374{
309 return constant_test_bit(node, &dstp->bits[0]); 375 return constant_test_bit(uvhub, &dstp->bits[0]);
310} 376}
311static inline void bau_node_set(int node, struct bau_target_nodemask *dstp) 377static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
312{ 378{
313 __set_bit(node, &dstp->bits[0]); 379 __set_bit(uvhub, &dstp->bits[0]);
314} 380}
315static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits) 381static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
382 int nbits)
316{ 383{
317 bitmap_zero(&dstp->bits[0], nbits); 384 bitmap_zero(&dstp->bits[0], nbits);
318} 385}
386static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
387{
388 return bitmap_weight((unsigned long *)&dstp->bits[0],
389 UV_DISTRIBUTION_SIZE);
390}
319 391
320static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits) 392static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
321{ 393{
@@ -328,4 +400,35 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
328extern void uv_bau_message_intr1(void); 400extern void uv_bau_message_intr1(void);
329extern void uv_bau_timeout_intr1(void); 401extern void uv_bau_timeout_intr1(void);
330 402
403struct atomic_short {
404 short counter;
405};
406
407/**
408 * atomic_read_short - read a short atomic variable
409 * @v: pointer of type atomic_short
410 *
411 * Atomically reads the value of @v.
412 */
413static inline int atomic_read_short(const struct atomic_short *v)
414{
415 return v->counter;
416}
417
418/**
419 * atomic_add_short_return - add and return a short int
420 * @i: short value to add
421 * @v: pointer of type atomic_short
422 *
423 * Atomically adds @i to @v and returns @i + @v
424 */
425static inline int atomic_add_short_return(short i, struct atomic_short *v)
426{
427 short __i = i;
428 asm volatile(LOCK_PREFIX "xaddw %0, %1"
429 : "+r" (i), "+m" (v->counter)
430 : : "memory");
431 return i + __i;
432}
433
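A usage sketch for the helpers above (the function and counter are
hypothetical): cpus arriving at a uvhub-wide rendezvous bump a shared short
counter and learn their arrival order from the post-add value:

	static inline int uv_enter_quiesce(struct atomic_short *quiesce)
	{
		/* returns the counter value after this cpu's increment */
		return atomic_add_short_return(1, quiesce);
	}
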
331#endif /* _ASM_X86_UV_UV_BAU_H */ 434#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 14cc74ba5d23..bf6b88ef8eeb 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -307,7 +307,7 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset
307 * Access Global MMR space using the MMR space located at the top of physical 307 * Access Global MMR space using the MMR space located at the top of physical
308 * memory. 308 * memory.
309 */ 309 */
310static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset) 310static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
311{ 311{
312 return __va(UV_GLOBAL_MMR64_BASE | 312 return __va(UV_GLOBAL_MMR64_BASE |
313 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); 313 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 2cae46c7c8a2..b2f2d2e05cec 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -1,4 +1,3 @@
1
2/* 1/*
3 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
@@ -15,13 +14,25 @@
15#define UV_MMR_ENABLE (1UL << 63) 14#define UV_MMR_ENABLE (1UL << 63)
16 15
17/* ========================================================================= */ 16/* ========================================================================= */
17/* UVH_BAU_DATA_BROADCAST */
18/* ========================================================================= */
19#define UVH_BAU_DATA_BROADCAST 0x61688UL
20#define UVH_BAU_DATA_BROADCAST_32 0x0440
21
22#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
23#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
24
25union uvh_bau_data_broadcast_u {
26 unsigned long v;
27 struct uvh_bau_data_broadcast_s {
28 unsigned long enable : 1; /* RW */
29 unsigned long rsvd_1_63: 63; /* */
30 } s;
31};
32
33/* ========================================================================= */
18/* UVH_BAU_DATA_CONFIG */ 34/* UVH_BAU_DATA_CONFIG */
19/* ========================================================================= */ 35/* ========================================================================= */
20#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
21#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
22#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
23#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
24/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
25#define UVH_BAU_DATA_CONFIG 0x61680UL 36#define UVH_BAU_DATA_CONFIG 0x61680UL
26#define UVH_BAU_DATA_CONFIG_32 0x0438 37#define UVH_BAU_DATA_CONFIG_32 0x0438
27 38
@@ -604,6 +615,68 @@ union uvh_lb_bau_intd_software_acknowledge_u {
604#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70 615#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
605 616
606/* ========================================================================= */ 617/* ========================================================================= */
618/* UVH_LB_BAU_MISC_CONTROL */
619/* ========================================================================= */
620#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
621#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
622
623#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
624#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
625#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
626#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
627#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
628#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
629#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
630#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
631#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
632#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
633#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
634#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
635#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
636#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
637#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
638#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
639#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
640#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
641#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
642#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
643#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
644#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
645#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
646#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
647#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
648#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
649#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
650#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
651#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
652#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
653#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
654#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
655
656union uvh_lb_bau_misc_control_u {
657 unsigned long v;
658 struct uvh_lb_bau_misc_control_s {
659 unsigned long rejection_delay : 8; /* RW */
660 unsigned long apic_mode : 1; /* RW */
661 unsigned long force_broadcast : 1; /* RW */
662 unsigned long force_lock_nop : 1; /* RW */
663 unsigned long csi_agent_presence_vector : 3; /* RW */
664 unsigned long descriptor_fetch_mode : 1; /* RW */
665 unsigned long enable_intd_soft_ack_mode : 1; /* RW */
666 unsigned long intd_soft_ack_timeout_period : 4; /* RW */
667 unsigned long enable_dual_mapping_mode : 1; /* RW */
668 unsigned long vga_io_port_decode_enable : 1; /* RW */
669 unsigned long vga_io_port_16_bit_decode : 1; /* RW */
670 unsigned long suppress_dest_registration : 1; /* RW */
671 unsigned long programmed_initial_priority : 3; /* RW */
672 unsigned long use_incoming_priority : 1; /* RW */
673 unsigned long enable_programmed_initial_priority : 1; /* RW */
674 unsigned long rsvd_29_47 : 19; /* */
675 unsigned long fun : 16; /* RW */
676 } s;
677};
678
679/* ========================================================================= */
607/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ 680/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
608/* ========================================================================= */ 681/* ========================================================================= */
609#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL 682#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
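
With the full UVH_LB_BAU_MISC_CONTROL description in place, the ad-hoc UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT constants deleted earlier in this file become redundant: a read-modify-write can go through the generated union instead. A sketch under that assumption (0xb is the timeout value the removed comment documented as roughly 500ms):

static void uv_enable_intd_soft_ack(int pnode)
{
	union uvh_lb_bau_misc_control_u misc;

	misc.v = uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
	misc.s.enable_intd_soft_ack_mode = 1;
	misc.s.intd_soft_ack_timeout_period = 0xb; /* ~500ms per old comment */
	uv_write_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL, misc.v);
}
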
@@ -681,334 +754,6 @@ union uvh_lb_bau_sb_descriptor_base_u {
681}; 754};
682 755
683/* ========================================================================= */ 756/* ========================================================================= */
684/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
685/* ========================================================================= */
686#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
687
688#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
689#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
690#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
691#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
692#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
693#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
694#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
695#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
696#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
697#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
698#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
699#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
700#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
701#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
702#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
703#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
704#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
705#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
706#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
707#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
708#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
709#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
710#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
711#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
712#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
713#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
714#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
715#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
716#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
717#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
718#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
719#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
720#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
721#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
722#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
723#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
724#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
725#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
726#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
727#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
728#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
729#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
730#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
731#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
732#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
733#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
734#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
735#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
736#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
737#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
738#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
739#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
740#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
741#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
742#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
743#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
744#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
745#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
746#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
747#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
748#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
749#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
750#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
751#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
752#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
753#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
754#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
755#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
756#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
757#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
758#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
759#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
760#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
761#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
762#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
763#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
764#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
765#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
766#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
767#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
768#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
769#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
770#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
771#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
772#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
773#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
774
775union uvh_lb_mcast_aoerr0_rpt_enable_u {
776 unsigned long v;
777 struct uvh_lb_mcast_aoerr0_rpt_enable_s {
778 unsigned long mcast_obese_msg : 1; /* RW */
779 unsigned long mcast_data_sb_err : 1; /* RW */
780 unsigned long mcast_nack_buff_parity : 1; /* RW */
781 unsigned long mcast_timeout : 1; /* RW */
782 unsigned long mcast_inactive_reply : 1; /* RW */
783 unsigned long mcast_upgrade_error : 1; /* RW */
784 unsigned long mcast_reg_count_underflow : 1; /* RW */
785 unsigned long mcast_rep_obese_msg : 1; /* RW */
786 unsigned long ucache_req_runt_msg : 1; /* RW */
787 unsigned long ucache_req_obese_msg : 1; /* RW */
788 unsigned long ucache_req_data_sb_err : 1; /* RW */
789 unsigned long ucache_rep_runt_msg : 1; /* RW */
790 unsigned long ucache_rep_obese_msg : 1; /* RW */
791 unsigned long ucache_rep_data_sb_err : 1; /* RW */
792 unsigned long ucache_rep_command_err : 1; /* RW */
793 unsigned long ucache_pend_timeout : 1; /* RW */
794 unsigned long macc_req_runt_msg : 1; /* RW */
795 unsigned long macc_req_obese_msg : 1; /* RW */
796 unsigned long macc_req_data_sb_err : 1; /* RW */
797 unsigned long macc_rep_runt_msg : 1; /* RW */
798 unsigned long macc_rep_obese_msg : 1; /* RW */
799 unsigned long macc_rep_data_sb_err : 1; /* RW */
800 unsigned long macc_amo_timeout : 1; /* RW */
801 unsigned long macc_put_timeout : 1; /* RW */
802 unsigned long macc_spurious_event : 1; /* RW */
803 unsigned long ioh_destination_table_parity : 1; /* RW */
804 unsigned long get_had_error_reply : 1; /* RW */
805 unsigned long get_timeout : 1; /* RW */
806 unsigned long lock_manager_had_error_reply : 1; /* RW */
807 unsigned long put_had_error_reply : 1; /* RW */
808 unsigned long put_timeout : 1; /* RW */
809 unsigned long sb_activation_overrun : 1; /* RW */
810 unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
811 unsigned long completed_gb_activation_timeout : 1; /* RW */
812 unsigned long descriptor_buffer_0_parity : 1; /* RW */
813 unsigned long descriptor_buffer_1_parity : 1; /* RW */
814 unsigned long socket_destination_table_parity : 1; /* RW */
815 unsigned long bau_reply_payload_corruption : 1; /* RW */
816 unsigned long io_port_destination_table_parity : 1; /* RW */
817 unsigned long intd_soft_ack_timeout : 1; /* RW */
818 unsigned long int_rep_obese_msg : 1; /* RW */
819 unsigned long int_rep_command_err : 1; /* RW */
820 unsigned long int_timeout : 1; /* RW */
821 unsigned long rsvd_43_63 : 21; /* */
822 } s;
823};
824
825/* ========================================================================= */
826/* UVH_LOCAL_INT0_CONFIG */
827/* ========================================================================= */
828#define UVH_LOCAL_INT0_CONFIG 0x61000UL
829
830#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
831#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
832#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
833#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
834#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
835#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
836#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
837#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
838#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
839#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
840#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
841#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
842#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
843#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
844#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
845#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
846
847union uvh_local_int0_config_u {
848 unsigned long v;
849 struct uvh_local_int0_config_s {
850 unsigned long vector_ : 8; /* RW */
851 unsigned long dm : 3; /* RW */
852 unsigned long destmode : 1; /* RW */
853 unsigned long status : 1; /* RO */
854 unsigned long p : 1; /* RO */
855 unsigned long rsvd_14 : 1; /* */
856 unsigned long t : 1; /* RO */
857 unsigned long m : 1; /* RW */
858 unsigned long rsvd_17_31: 15; /* */
859 unsigned long apic_id : 32; /* RW */
860 } s;
861};
862
863/* ========================================================================= */
864/* UVH_LOCAL_INT0_ENABLE */
865/* ========================================================================= */
866#define UVH_LOCAL_INT0_ENABLE 0x65000UL
867
868#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
869#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
870#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
871#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
872#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
873#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
874#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
875#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
876#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
877#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
878#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
879#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
880#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
881#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
882#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
883#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
884#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
885#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
886#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
887#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
888#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
889#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
890#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
891#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
892#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
893#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
894#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
895#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
896#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
897#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
898#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
899#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
900#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
901#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
902#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
903#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
904#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
905#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
906#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
907#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
908#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
909#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
910#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
911#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
912#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
913#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
914#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
915#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
916#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
917#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
918#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
919#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
920#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
921#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
922#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
923#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
924#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
925#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
926#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
927#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
928#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
929#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
930#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
931#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
932#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
933#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
934#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
935#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
936#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
937#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
938#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
939#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
940#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
941#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
942#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
943#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
944#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
945#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
946#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
947#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
948#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
949#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
950#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
951#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
952#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
953#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
954#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
955#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
956#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
957#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
958
959union uvh_local_int0_enable_u {
960 unsigned long v;
961 struct uvh_local_int0_enable_s {
962 unsigned long lb_hcerr : 1; /* RW */
963 unsigned long gr0_hcerr : 1; /* RW */
964 unsigned long gr1_hcerr : 1; /* RW */
965 unsigned long lh_hcerr : 1; /* RW */
966 unsigned long rh_hcerr : 1; /* RW */
967 unsigned long xn_hcerr : 1; /* RW */
968 unsigned long si_hcerr : 1; /* RW */
969 unsigned long lb_aoerr0 : 1; /* RW */
970 unsigned long gr0_aoerr0 : 1; /* RW */
971 unsigned long gr1_aoerr0 : 1; /* RW */
972 unsigned long lh_aoerr0 : 1; /* RW */
973 unsigned long rh_aoerr0 : 1; /* RW */
974 unsigned long xn_aoerr0 : 1; /* RW */
975 unsigned long si_aoerr0 : 1; /* RW */
976 unsigned long lb_aoerr1 : 1; /* RW */
977 unsigned long gr0_aoerr1 : 1; /* RW */
978 unsigned long gr1_aoerr1 : 1; /* RW */
979 unsigned long lh_aoerr1 : 1; /* RW */
980 unsigned long rh_aoerr1 : 1; /* RW */
981 unsigned long xn_aoerr1 : 1; /* RW */
982 unsigned long si_aoerr1 : 1; /* RW */
983 unsigned long rh_vpi_int : 1; /* RW */
984 unsigned long system_shutdown_int : 1; /* RW */
985 unsigned long lb_irq_int_0 : 1; /* RW */
986 unsigned long lb_irq_int_1 : 1; /* RW */
987 unsigned long lb_irq_int_2 : 1; /* RW */
988 unsigned long lb_irq_int_3 : 1; /* RW */
989 unsigned long lb_irq_int_4 : 1; /* RW */
990 unsigned long lb_irq_int_5 : 1; /* RW */
991 unsigned long lb_irq_int_6 : 1; /* RW */
992 unsigned long lb_irq_int_7 : 1; /* RW */
993 unsigned long lb_irq_int_8 : 1; /* RW */
994 unsigned long lb_irq_int_9 : 1; /* RW */
995 unsigned long lb_irq_int_10 : 1; /* RW */
996 unsigned long lb_irq_int_11 : 1; /* RW */
997 unsigned long lb_irq_int_12 : 1; /* RW */
998 unsigned long lb_irq_int_13 : 1; /* RW */
999 unsigned long lb_irq_int_14 : 1; /* RW */
1000 unsigned long lb_irq_int_15 : 1; /* RW */
1001 unsigned long l1_nmi_int : 1; /* RW */
1002 unsigned long stop_clock : 1; /* RW */
1003 unsigned long asic_to_l1 : 1; /* RW */
1004 unsigned long l1_to_asic : 1; /* RW */
1005 unsigned long ltc_int : 1; /* RW */
1006 unsigned long la_seq_trigger : 1; /* RW */
1007 unsigned long rsvd_45_63 : 19; /* */
1008 } s;
1009};
1010
1011/* ========================================================================= */
1012/* UVH_NODE_ID */ 757/* UVH_NODE_ID */
1013/* ========================================================================= */ 758/* ========================================================================= */
1014#define UVH_NODE_ID 0x0UL 759#define UVH_NODE_ID 0x0UL
@@ -1112,26 +857,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
1112}; 857};
1113 858
1114/* ========================================================================= */ 859/* ========================================================================= */
1115/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
1116/* ========================================================================= */
1117#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
1118
1119#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1120#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1121#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1122#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1123
1124union uvh_rh_gam_cfg_overlay_config_mmr_u {
1125 unsigned long v;
1126 struct uvh_rh_gam_cfg_overlay_config_mmr_s {
1127 unsigned long rsvd_0_25: 26; /* */
1128 unsigned long base : 20; /* RW */
1129 unsigned long rsvd_46_62: 17; /* */
1130 unsigned long enable : 1; /* RW */
1131 } s;
1132};
1133
1134/* ========================================================================= */
1135/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 860/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
1136/* ========================================================================= */ 861/* ========================================================================= */
1137#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 862#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -1263,101 +988,6 @@ union uvh_rtc1_int_config_u {
1263}; 988};
1264 989
1265/* ========================================================================= */ 990/* ========================================================================= */
1266/* UVH_RTC2_INT_CONFIG */
1267/* ========================================================================= */
1268#define UVH_RTC2_INT_CONFIG 0x61600UL
1269
1270#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
1271#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1272#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
1273#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
1274#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
1275#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1276#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
1277#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1278#define UVH_RTC2_INT_CONFIG_P_SHFT 13
1279#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
1280#define UVH_RTC2_INT_CONFIG_T_SHFT 15
1281#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
1282#define UVH_RTC2_INT_CONFIG_M_SHFT 16
1283#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
1284#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
1285#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1286
1287union uvh_rtc2_int_config_u {
1288 unsigned long v;
1289 struct uvh_rtc2_int_config_s {
1290 unsigned long vector_ : 8; /* RW */
1291 unsigned long dm : 3; /* RW */
1292 unsigned long destmode : 1; /* RW */
1293 unsigned long status : 1; /* RO */
1294 unsigned long p : 1; /* RO */
1295 unsigned long rsvd_14 : 1; /* */
1296 unsigned long t : 1; /* RO */
1297 unsigned long m : 1; /* RW */
1298 unsigned long rsvd_17_31: 15; /* */
1299 unsigned long apic_id : 32; /* RW */
1300 } s;
1301};
1302
1303/* ========================================================================= */
1304/* UVH_RTC3_INT_CONFIG */
1305/* ========================================================================= */
1306#define UVH_RTC3_INT_CONFIG 0x61640UL
1307
1308#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
1309#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1310#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
1311#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
1312#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
1313#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1314#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
1315#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1316#define UVH_RTC3_INT_CONFIG_P_SHFT 13
1317#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
1318#define UVH_RTC3_INT_CONFIG_T_SHFT 15
1319#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
1320#define UVH_RTC3_INT_CONFIG_M_SHFT 16
1321#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
1322#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
1323#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1324
1325union uvh_rtc3_int_config_u {
1326 unsigned long v;
1327 struct uvh_rtc3_int_config_s {
1328 unsigned long vector_ : 8; /* RW */
1329 unsigned long dm : 3; /* RW */
1330 unsigned long destmode : 1; /* RW */
1331 unsigned long status : 1; /* RO */
1332 unsigned long p : 1; /* RO */
1333 unsigned long rsvd_14 : 1; /* */
1334 unsigned long t : 1; /* RO */
1335 unsigned long m : 1; /* RW */
1336 unsigned long rsvd_17_31: 15; /* */
1337 unsigned long apic_id : 32; /* RW */
1338 } s;
1339};
1340
1341/* ========================================================================= */
1342/* UVH_RTC_INC_RATIO */
1343/* ========================================================================= */
1344#define UVH_RTC_INC_RATIO 0x350000UL
1345
1346#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
1347#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
1348#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
1349#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
1350
1351union uvh_rtc_inc_ratio_u {
1352 unsigned long v;
1353 struct uvh_rtc_inc_ratio_s {
1354 unsigned long fraction : 20; /* RW */
1355 unsigned long ratio : 3; /* RW */
1356 unsigned long rsvd_23_63: 41; /* */
1357 } s;
1358};
1359
1360/* ========================================================================= */
1361/* UVH_SI_ADDR_MAP_CONFIG */ 991/* UVH_SI_ADDR_MAP_CONFIG */
1362/* ========================================================================= */ 992/* ========================================================================= */
1363#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL 993#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
deleted file mode 100644
index e49ed6d2fd4e..000000000000
--- a/arch/x86/include/asm/vmware.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (C) 2008, VMware, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more
13 * details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20#ifndef ASM_X86__VMWARE_H
21#define ASM_X86__VMWARE_H
22
23extern void vmware_platform_setup(void);
24extern int vmware_platform(void);
25extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
26
27#endif
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index fb9a080740ec..9e6779f7cf2d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/types.h>
29
28/* 30/*
29 * Definitions of Primary Processor-Based VM-Execution Controls. 31 * Definitions of Primary Processor-Based VM-Execution Controls.
30 */ 32 */
@@ -120,6 +122,8 @@ enum vmcs_field {
120 GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, 122 GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
121 GUEST_IA32_PAT = 0x00002804, 123 GUEST_IA32_PAT = 0x00002804,
122 GUEST_IA32_PAT_HIGH = 0x00002805, 124 GUEST_IA32_PAT_HIGH = 0x00002805,
125 GUEST_IA32_EFER = 0x00002806,
126 GUEST_IA32_EFER_HIGH = 0x00002807,
123 GUEST_PDPTR0 = 0x0000280a, 127 GUEST_PDPTR0 = 0x0000280a,
124 GUEST_PDPTR0_HIGH = 0x0000280b, 128 GUEST_PDPTR0_HIGH = 0x0000280b,
125 GUEST_PDPTR1 = 0x0000280c, 129 GUEST_PDPTR1 = 0x0000280c,
@@ -130,6 +134,8 @@ enum vmcs_field {
130 GUEST_PDPTR3_HIGH = 0x00002811, 134 GUEST_PDPTR3_HIGH = 0x00002811,
131 HOST_IA32_PAT = 0x00002c00, 135 HOST_IA32_PAT = 0x00002c00,
132 HOST_IA32_PAT_HIGH = 0x00002c01, 136 HOST_IA32_PAT_HIGH = 0x00002c01,
137 HOST_IA32_EFER = 0x00002c02,
138 HOST_IA32_EFER_HIGH = 0x00002c03,
133 PIN_BASED_VM_EXEC_CONTROL = 0x00004000, 139 PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
134 CPU_BASED_VM_EXEC_CONTROL = 0x00004002, 140 CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
135 EXCEPTION_BITMAP = 0x00004004, 141 EXCEPTION_BITMAP = 0x00004004,
@@ -394,6 +400,10 @@ enum vmcs_field {
394#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" 400#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
395#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" 401#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
396 402
397 403struct vmx_msr_entry {
404 u32 index;
405 u32 reserved;
406 u64 value;
407} __aligned(16);
398 408
399#endif 409#endif
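
struct vmx_msr_entry matches the hardware layout of one slot in the VMX MSR-load/store areas, hence the reserved word and the 16-byte alignment. An illustrative sketch, not KVM's actual bookkeeping: filling one host-load slot for EFER, which pairs with the new HOST_IA32_EFER/GUEST_IA32_EFER VMCS fields added above:

static struct vmx_msr_entry host_msr_load[1] __aligned(16);

static void setup_host_efer_load(u64 host_efer)
{
	host_msr_load[0] = (struct vmx_msr_entry) {
		.index = MSR_EFER,	/* 0xc0000080 */
		.value = host_efer,
	};
	/*
	 * The physical address of host_msr_load[] would then be written
	 * to the VM_EXIT_MSR_LOAD_ADDR field of the VMCS.
	 */
}
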
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index ddc04ccad03b..2c4390cae228 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
37 void __user *fpstate, 37 void __user *fpstate,
38 struct _fpx_sw_bytes *sw); 38 struct _fpx_sw_bytes *sw);
39 39
40static inline int xrstor_checking(struct xsave_struct *fx) 40static inline int fpu_xrstor_checking(struct fpu *fpu)
41{ 41{
42 struct xsave_struct *fx = &fpu->state->xsave;
42 int err; 43 int err;
43 44
44 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" 45 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
110 : "memory"); 111 : "memory");
111} 112}
112 113
113static inline void xsave(struct task_struct *tsk) 114static inline void fpu_xsave(struct fpu *fpu)
114{ 115{
115 /* This, however, we can work around by forcing the compiler to select 116 /* This, however, we can work around by forcing the compiler to select
116 an addressing mode that doesn't require extended registers. */ 117 an addressing mode that doesn't require extended registers. */
117 __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27" 118 __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
118 : : "D" (&(tsk->thread.xstate->xsave)), 119 : : "D" (&(fpu->state->xsave)),
119 "a" (-1), "d"(-1) : "memory"); 120 "a" (-1), "d"(-1) : "memory");
120} 121}
121#endif 122#endif
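
The xsave.h changes follow the pattern of the rest of this series: the FPU helpers take a struct fpu rather than digging the xsave area out of a task_struct themselves, so the state no longer has to live inside a task. A sketch of the implied call-site change, assuming the series adds an fpu member to thread_struct:

	/* before */
	xsave(tsk);

	/* after */
	fpu_xsave(&tsk->thread.fpu);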