Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/bios_ebda.h       | 17
-rw-r--r--  include/asm-x86/boot.h            |  2
-rw-r--r--  include/asm-x86/desc.h            | 15
-rw-r--r--  include/asm-x86/microcode.h       | 47
-rw-r--r--  include/asm-x86/mmzone_64.h       |  3
-rw-r--r--  include/asm-x86/page_32.h         |  5
-rw-r--r--  include/asm-x86/paravirt.h        | 20
-rw-r--r--  include/asm-x86/processor.h       | 35
-rw-r--r--  include/asm-x86/ptrace.h          |  6
-rw-r--r--  include/asm-x86/smp.h             | 34
-rw-r--r--  include/asm-x86/spinlock.h        | 67
-rw-r--r--  include/asm-x86/tlbflush.h        | 10
-rw-r--r--  include/asm-x86/traps.h           | 12
-rw-r--r--  include/asm-x86/xen/hypervisor.h  | 14
14 files changed, 198 insertions(+), 89 deletions(-)
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index ec42ed874591..79b4b88505d7 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -16,4 +16,21 @@ static inline unsigned int get_bios_ebda(void)
 
 void reserve_ebda_region(void);
 
+#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
+/*
+ * This is obviously not a great place for this, but we want to be
+ * able to scatter it around anywhere in the kernel.
+ */
+void check_for_bios_corruption(void);
+void start_periodic_check_for_corruption(void);
+#else
+static inline void check_for_bios_corruption(void)
+{
+}
+
+static inline void start_periodic_check_for_corruption(void)
+{
+}
+#endif
+
 #endif /* ASM_X86__BIOS_EBDA_H */
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h
index 825de5dc867c..1d63bd5d5946 100644
--- a/include/asm-x86/boot.h
+++ b/include/asm-x86/boot.h
@@ -2,9 +2,7 @@
 #define ASM_X86__BOOT_H
 
 /* Don't touch these, unless you really know what you're doing. */
-#define DEF_INITSEG	0x9000
 #define DEF_SYSSEG	0x1000
-#define DEF_SETUPSEG	0x9020
 #define DEF_SYSSIZE	0x7F00
 
 /* Internal svga startup constants */
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index b73fea54def2..ebc307817e98 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -24,6 +24,11 @@ static inline void fill_ldt(struct desc_struct *desc,
 	desc->d = info->seg_32bit;
 	desc->g = info->limit_in_pages;
 	desc->base2 = (info->base_addr & 0xff000000) >> 24;
+	/*
+	 * Don't allow setting of the lm bit. It is useless anyway
+	 * because 64bit system calls require __USER_CS:
+	 */
+	desc->l = 0;
 }
 
 extern struct desc_ptr idt_descr;
@@ -97,7 +102,15 @@ static inline int desc_empty(const void *ptr)
 	native_write_gdt_entry(dt, entry, desc, type)
 #define write_idt_entry(dt, entry, g) \
 	native_write_idt_entry(dt, entry, g)
-#endif
+
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+#endif /* CONFIG_PARAVIRT */
 
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
 					  const gate_desc *gate)
diff --git a/include/asm-x86/microcode.h b/include/asm-x86/microcode.h
new file mode 100644
index 000000000000..62c793bb70ca
--- /dev/null
+++ b/include/asm-x86/microcode.h
@@ -0,0 +1,47 @@
+#ifndef ASM_X86__MICROCODE_H
+#define ASM_X86__MICROCODE_H
+
+struct cpu_signature {
+	unsigned int sig;
+	unsigned int pf;
+	unsigned int rev;
+};
+
+struct device;
+
+struct microcode_ops {
+	int (*request_microcode_user) (int cpu, const void __user *buf, size_t size);
+	int (*request_microcode_fw) (int cpu, struct device *device);
+
+	void (*apply_microcode) (int cpu);
+
+	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
+	void (*microcode_fini_cpu) (int cpu);
+};
+
+struct ucode_cpu_info {
+	struct cpu_signature cpu_sig;
+	int valid;
+	void *mc;
+};
+extern struct ucode_cpu_info ucode_cpu_info[];
+
+#ifdef CONFIG_MICROCODE_INTEL
+extern struct microcode_ops * __init init_intel_microcode(void);
+#else
+static inline struct microcode_ops * __init init_intel_microcode(void)
+{
+	return NULL;
+}
+#endif /* CONFIG_MICROCODE_INTEL */
+
+#ifdef CONFIG_MICROCODE_AMD
+extern struct microcode_ops * __init init_amd_microcode(void);
+#else
+static inline struct microcode_ops * __init init_amd_microcode(void)
+{
+	return NULL;
+}
+#endif
+
+#endif /* ASM_X86__MICROCODE_H */
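
The new header reduces microcode loading to a vendor-neutral core that calls through a microcode_ops table supplied by init_intel_microcode() or init_amd_microcode(). A minimal userspace sketch of that dispatch pattern; the stub driver and the signature values it reports are hypothetical, for illustration only:

#include <stdio.h>

/* Trimmed-down stand-ins for the kernel types above (illustration only). */
struct cpu_signature { unsigned int sig, pf, rev; };

struct microcode_ops {
	int  (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
	void (*apply_microcode)(int cpu);
};

/* Hypothetical per-vendor stubs standing in for the Intel/AMD drivers. */
static int intel_collect(int cpu, struct cpu_signature *csig)
{
	csig->sig = 0x106a5;	/* made-up signature */
	csig->rev = 0x11;
	return 0;
}

static void intel_apply(int cpu)
{
	printf("applying microcode on cpu %d\n", cpu);
}

static struct microcode_ops intel_ops = {
	.collect_cpu_info = intel_collect,
	.apply_microcode  = intel_apply,
};

/* The core picks one ops table at init and never checks the vendor again. */
int main(void)
{
	struct microcode_ops *ops = &intel_ops;	/* init_intel_microcode() analogue */
	struct cpu_signature sig;

	if (ops->collect_cpu_info(0, &sig) == 0) {
		printf("cpu 0: sig=0x%x rev=0x%x\n", sig.sig, sig.rev);
		ops->apply_microcode(0);
	}
	return 0;
}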
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
index 626b03a14875..6480f3333b2a 100644
--- a/include/asm-x86/mmzone_64.h
+++ b/include/asm-x86/mmzone_64.h
@@ -7,7 +7,7 @@
 
 #ifdef CONFIG_NUMA
 
-#define VIRTUAL_BUG_ON(x)
+#include <linux/mmdebug.h>
 
 #include <asm/smp.h>
 
@@ -29,7 +29,6 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 {
 	unsigned nid;
 	VIRTUAL_BUG_ON(!memnodemap);
-	VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize);
 	nid = memnodemap[addr >> memnode_shift];
 	VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
 	return nid;
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 72f7305682c6..9c5a737a9af9 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -73,7 +73,12 @@ typedef struct page *pgtable_t;
 #endif
 
 #ifndef __ASSEMBLY__
+#define __phys_addr_const(x)	((x) - PAGE_OFFSET)
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __phys_addr(unsigned long);
+#else
 #define __phys_addr(x)		((x) - PAGE_OFFSET)
+#endif
 #define __phys_reloc_hide(x)	RELOC_HIDE((x), 0)
 
 #ifdef CONFIG_FLATMEM
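
With CONFIG_DEBUG_VIRTUAL enabled, __phys_addr() becomes a real function so it can validate its argument instead of blindly subtracting PAGE_OFFSET. A userspace model of the idea, assuming a conventional 3G/1G split and ignoring the highmem cases the kernel version must also handle:

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL	/* typical 32-bit lowmem split */

/* Out-of-line variant modelling the CONFIG_DEBUG_VIRTUAL idea: verify the
 * address really is a lowmem virtual address before translating it. */
unsigned long __phys_addr(unsigned long x)
{
	assert(x >= PAGE_OFFSET);	/* catch bogus virt_to_phys() callers */
	return x - PAGE_OFFSET;
}

int main(void)
{
	printf("0x%lx\n", __phys_addr(0xc1000000UL));	/* prints 0x1000000 */
	return 0;
}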
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index d7d358a43996..8d6ae2f760d0 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -124,6 +124,9 @@ struct pv_cpu_ops {
 			  int entrynum, const void *desc, int size);
 	void (*write_idt_entry)(gate_desc *,
 				int entrynum, const gate_desc *gate);
+	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
+	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
+
 	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
@@ -325,6 +328,7 @@ struct pv_lock_ops {
 	int (*spin_is_locked)(struct raw_spinlock *lock);
 	int (*spin_is_contended)(struct raw_spinlock *lock);
 	void (*spin_lock)(struct raw_spinlock *lock);
+	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
 	int (*spin_trylock)(struct raw_spinlock *lock);
 	void (*spin_unlock)(struct raw_spinlock *lock);
 };
@@ -830,6 +834,16 @@ do { \
 	(aux) = __aux;			\
 } while (0)
 
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+}
+
 static inline void load_TR_desc(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
@@ -1394,6 +1408,12 @@ static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
+static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+						  unsigned long flags)
+{
+	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
+}
+
 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index c7d35464a4bb..ee7cbb30773a 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -586,41 +586,6 @@ static inline void clear_in_cr4(unsigned long mask)
 	write_cr4(cr4);
 }
 
-struct microcode_header {
-	unsigned int hdrver;
-	unsigned int rev;
-	unsigned int date;
-	unsigned int sig;
-	unsigned int cksum;
-	unsigned int ldrver;
-	unsigned int pf;
-	unsigned int datasize;
-	unsigned int totalsize;
-	unsigned int reserved[3];
-};
-
-struct microcode {
-	struct microcode_header hdr;
-	unsigned int bits[0];
-};
-
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
-	unsigned int sig;
-	unsigned int pf;
-	unsigned int cksum;
-};
-
-struct extended_sigtable {
-	unsigned int count;
-	unsigned int cksum;
-	unsigned int reserved[3];
-	struct extended_signature sigs[0];
-};
-
 typedef struct {
 	unsigned long seg;
 } mm_segment_t;
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index d64a61097165..ac578f11c1c5 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -177,11 +177,11 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
 
 #ifdef CONFIG_X86_32
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
-			 int error_code);
-#else
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+			 int error_code, int si_code);
 #endif
 
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
 extern long syscall_trace_enter(struct pt_regs *);
 extern void syscall_trace_leave(struct pt_regs *);
 
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 29324c103341..6df2615f9138 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -50,12 +50,16 @@ extern struct {
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
 	void (*smp_prepare_cpus)(unsigned max_cpus);
-	int (*cpu_up)(unsigned cpu);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
 
+	int (*cpu_up)(unsigned cpu);
+	int (*cpu_disable)(void);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
+
 	void (*send_call_func_ipi)(cpumask_t mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
@@ -94,6 +98,21 @@ static inline int __cpu_up(unsigned int cpu)
 	return smp_ops.cpu_up(cpu);
 }
 
+static inline int __cpu_disable(void)
+{
+	return smp_ops.cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	smp_ops.cpu_die(cpu);
+}
+
+static inline void play_dead(void)
+{
+	smp_ops.play_dead();
+}
+
 static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
@@ -109,16 +128,19 @@ static inline void arch_send_call_function_ipi(cpumask_t mask)
 	smp_ops.send_call_func_ipi(mask);
 }
 
+void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+int native_cpu_disable(void);
+void native_cpu_die(unsigned int cpu);
+void native_play_dead(void);
+void play_dead_common(void);
+
 void native_send_call_func_ipi(cpumask_t mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
 
@@ -205,9 +227,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_uninit(void);
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* ASM_X86__SMP_H */
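
CPU offlining moves behind smp_ops here: generic hotplug code keeps calling __cpu_disable()/__cpu_die(), and a paravirtualized guest can override them by swapping the ops table instead of overriding the symbols. A compressed model of that indirection; the printouts stand in for the real native implementations:

#include <stdio.h>

/* Reduced model of the smp_ops indirection: generic code calls the
 * double-underscore wrappers; native or paravirt code fills in the table. */
struct smp_ops {
	int  (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);
};

static int native_cpu_disable(void) { printf("native_cpu_disable\n"); return 0; }
static void native_cpu_die(unsigned int cpu) { printf("native_cpu_die(%u)\n", cpu); }
static void native_play_dead(void) { printf("native_play_dead\n"); }

static struct smp_ops smp_ops = {
	.cpu_disable = native_cpu_disable,
	.cpu_die     = native_cpu_die,
	.play_dead   = native_play_dead,
};

/* The wrappers generic hotplug code uses; a hypervisor port only swaps the table. */
static inline int __cpu_disable(void) { return smp_ops.cpu_disable(); }
static inline void __cpu_die(unsigned int cpu) { smp_ops.cpu_die(cpu); }

int main(void)
{
	if (__cpu_disable() == 0)
		__cpu_die(1);
	return 0;
}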
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 93adae338ac6..157ff7fab97a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -21,8 +21,10 @@
 
 #ifdef CONFIG_X86_32
 # define LOCK_PTR_REG "a"
+# define REG_PTR_MODE "k"
 #else
 # define LOCK_PTR_REG "D"
+# define REG_PTR_MODE "q"
 #endif
 
 #if defined(CONFIG_X86_32) && \
@@ -54,19 +56,7 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 8) - tmp) & 0xff) > 1;
-}
+#define TICKET_SHIFT 8
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
@@ -89,19 +79,17 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	int tmp;
-	short new;
+	int tmp, new;
 
-	asm volatile("movw %2,%w0\n\t"
+	asm volatile("movzwl %2, %0\n\t"
 		     "cmpb %h0,%b0\n\t"
+		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
 		     "jne 1f\n\t"
-		     "movw %w0,%w1\n\t"
-		     "incb %h1\n\t"
 		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
 
@@ -116,19 +104,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
116 : "memory", "cc"); 104 : "memory", "cc");
117} 105}
118#else 106#else
119static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) 107#define TICKET_SHIFT 16
120{
121 int tmp = ACCESS_ONCE(lock->slock);
122
123 return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
124}
125
126static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
127{
128 int tmp = ACCESS_ONCE(lock->slock);
129
130 return (((tmp >> 16) - tmp) & 0xffff) > 1;
131}
132 108
133static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 109static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
134{ 110{
@@ -146,7 +122,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     /* don't need lfence here, because loads are in-order */
 		     "jmp 1b\n"
 		     "2:"
-		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
 		     :
 		     : "memory", "cc");
 }
@@ -160,13 +136,13 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
160 "movl %0,%1\n\t" 136 "movl %0,%1\n\t"
161 "roll $16, %0\n\t" 137 "roll $16, %0\n\t"
162 "cmpl %0,%1\n\t" 138 "cmpl %0,%1\n\t"
139 "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
163 "jne 1f\n\t" 140 "jne 1f\n\t"
164 "addl $0x00010000, %1\n\t"
165 LOCK_PREFIX "cmpxchgl %1,%2\n\t" 141 LOCK_PREFIX "cmpxchgl %1,%2\n\t"
166 "1:" 142 "1:"
167 "sete %b1\n\t" 143 "sete %b1\n\t"
168 "movzbl %b1,%0\n\t" 144 "movzbl %b1,%0\n\t"
169 : "=&a" (tmp), "=r" (new), "+m" (lock->slock) 145 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
170 : 146 :
171 : "memory", "cc"); 147 : "memory", "cc");
172 148
@@ -182,7 +158,19 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+	int tmp = ACCESS_ONCE(lock->slock);
+
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+	int tmp = ACCESS_ONCE(lock->slock);
+
+	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+}
 
 #ifdef CONFIG_PARAVIRT
 /*
@@ -272,6 +260,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+						  unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
 #endif	/* CONFIG_PARAVIRT */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
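
For reference, the ticket protocol behind these asm sequences, written as a portable C11 sketch: the low TICKET_SHIFT bits hold the head (current owner) and the next TICKET_SHIFT bits the tail (next free ticket). This models the algorithm only; the kernel uses the hand-written locked instructions above.

#include <stdatomic.h>
#include <stdio.h>

#define TICKET_SHIFT 8	/* NR_CPUS < 256 case: 8-bit head and tail */
#define TICKET_MASK  ((1 << TICKET_SHIFT) - 1)

/* head (owner) in the low byte, tail (next free ticket) in the high byte */
typedef struct { _Atomic unsigned int slock; } raw_spinlock_t;

static void ticket_spin_lock(raw_spinlock_t *lock)
{
	/* take a ticket: atomically bump the tail, keeping the old value */
	unsigned int old = atomic_fetch_add(&lock->slock, 1 << TICKET_SHIFT);
	unsigned int my_ticket = (old >> TICKET_SHIFT) & TICKET_MASK;

	/* spin until the head catches up to our ticket */
	while ((atomic_load(&lock->slock) & TICKET_MASK) != my_ticket)
		;
}

static void ticket_spin_unlock(raw_spinlock_t *lock)
{
	unsigned int old, new;

	/* advance the head, wrapping within its byte like the kernel's incb */
	do {
		old = atomic_load(&lock->slock);
		new = (old & ~TICKET_MASK) | ((old + 1) & TICKET_MASK);
	} while (!atomic_compare_exchange_weak(&lock->slock, &old, new));
}

static int ticket_spin_is_locked(raw_spinlock_t *lock)
{
	unsigned int tmp = atomic_load(&lock->slock);

	/* locked iff head != tail, same test as above */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

int main(void)
{
	raw_spinlock_t lock = { 0 };

	ticket_spin_lock(&lock);
	printf("locked: %d\n", ticket_spin_is_locked(&lock));	/* 1 */
	ticket_spin_unlock(&lock);
	printf("locked: %d\n", ticket_spin_is_locked(&lock));	/* 0 */
	return 0;
}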
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index ef68b76dc3c5..3cdd08b5bdb7 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -119,6 +119,10 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,
 {
 }
 
+static inline void reset_lazy_tlbstate(void)
+{
+}
+
 #else	/* SMP */
 
 #include <asm/smp.h>
@@ -151,6 +155,12 @@ struct tlb_state {
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
+void reset_lazy_tlbstate(void);
+#else
+static inline void reset_lazy_tlbstate(void)
+{
+}
 #endif
 
 #endif /* SMP */
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h
index 2ccebc6fb0b0..7a692baa51ae 100644
--- a/include/asm-x86/traps.h
+++ b/include/asm-x86/traps.h
@@ -1,6 +1,8 @@
 #ifndef ASM_X86__TRAPS_H
 #define ASM_X86__TRAPS_H
 
+#include <asm/debugreg.h>
+
 /* Common in X86_32 and X86_64 */
 asmlinkage void divide_error(void);
 asmlinkage void debug(void);
@@ -36,6 +38,16 @@ void do_invalid_op(struct pt_regs *, long);
 void do_general_protection(struct pt_regs *, long);
 void do_nmi(struct pt_regs *, long);
 
+static inline int get_si_code(unsigned long condition)
+{
+	if (condition & DR_STEP)
+		return TRAP_TRACE;
+	else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3))
+		return TRAP_HWBKPT;
+	else
+		return TRAP_BRKPT;
+}
+
 extern int panic_on_unrecovered_nmi;
 extern int kstack_depth_to_print;
 
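
get_si_code() folds the DR6 debug-status bits into the si_code delivered with SIGTRAP: DR_STEP means a single-step trap, any DR_TRAPn bit a hardware breakpoint, and everything else a software breakpoint. A standalone check of the mapping, with the constants copied from the kernel's debugreg.h and siginfo definitions:

#include <stdio.h>

/* DR6 status bits (<asm/debugreg.h>) and si_codes (<asm-generic/siginfo.h>) */
#define DR_TRAP0	0x1	/* hardware breakpoint 0 hit */
#define DR_TRAP1	0x2
#define DR_TRAP2	0x4
#define DR_TRAP3	0x8
#define DR_STEP		0x4000	/* single-step trap */

#define TRAP_BRKPT	1	/* process breakpoint */
#define TRAP_TRACE	2	/* process trace trap */
#define TRAP_HWBKPT	4	/* hardware breakpoint/watchpoint */

/* Same logic as the new get_si_code() helper above. */
static int get_si_code(unsigned long condition)
{
	if (condition & DR_STEP)
		return TRAP_TRACE;
	else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3))
		return TRAP_HWBKPT;
	else
		return TRAP_BRKPT;
}

int main(void)
{
	printf("%d\n", get_si_code(DR_STEP));	/* 2: single-step */
	printf("%d\n", get_si_code(DR_TRAP2));	/* 4: hw breakpoint */
	printf("%d\n", get_si_code(0));		/* 1: sw breakpoint */
	return 0;
}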
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h
index 0ef3a88b869d..445a24759560 100644
--- a/include/asm-x86/xen/hypervisor.h
+++ b/include/asm-x86/xen/hypervisor.h
@@ -54,7 +54,6 @@
 /* arch/i386/kernel/setup.c */
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
-#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
 
 /* arch/i386/mach-xen/evtchn.c */
 /* Force a proper event-channel callback from Xen. */
@@ -67,6 +66,17 @@ u64 jiffies_to_st(unsigned long jiffies);
 #define MULTI_UVMFLAGS_INDEX 3
 #define MULTI_UVMDOMID_INDEX 4
 
-#define is_running_on_xen() (xen_start_info ? 1 : 0)
+enum xen_domain_type {
+	XEN_NATIVE,
+	XEN_PV_DOMAIN,
+	XEN_HVM_DOMAIN,
+};
+
+extern enum xen_domain_type xen_domain_type;
+
+#define xen_domain()		(xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
+#define xen_initial_domain()	(xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
+#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
 
 #endif /* ASM_X86__XEN__HYPERVISOR_H */
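
The enum replaces the old boolean is_running_on_xen() test and lets callers distinguish PV from HVM guests through the macros rather than by reading the variable directly. A minimal sketch of how the predicates compose; the assignment in main() stands in for what a guest would do early in boot:

#include <stdio.h>

/* Mirror of the new classification: one enum instead of ad-hoc
 * is_running_on_xen()/is_initial_xendomain() checks. */
enum xen_domain_type {
	XEN_NATIVE,
	XEN_PV_DOMAIN,
	XEN_HVM_DOMAIN,
};

enum xen_domain_type xen_domain_type = XEN_NATIVE;

#define xen_domain()		(xen_domain_type != XEN_NATIVE)
#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)

int main(void)
{
	/* A PV guest would set this in its boot entry point. */
	xen_domain_type = XEN_PV_DOMAIN;

	if (xen_domain())
		printf("running under Xen (%s)\n",
		       xen_pv_domain() ? "paravirtual" : "HVM");
	return 0;
}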