Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/atomic.h                                  29
-rw-r--r--  arch/x86/include/asm/atomic64_64.h                             28
-rw-r--r--  arch/x86/include/asm/bitops.h                                  24
-rw-r--r--  arch/x86/include/asm/calling.h                                 50
-rw-r--r--  arch/x86/include/asm/efi.h                                      2
-rw-r--r--  arch/x86/include/asm/fpu-internal.h                            10
-rw-r--r--  arch/x86/include/asm/intel-mid.h                              113
-rw-r--r--  arch/x86/include/asm/intel_mid_vrtc.h (renamed from arch/x86/include/asm/mrst-vrtc.h)    4
-rw-r--r--  arch/x86/include/asm/local.h                                   28
-rw-r--r--  arch/x86/include/asm/mce.h                                      1
-rw-r--r--  arch/x86/include/asm/misc.h                                     6
-rw-r--r--  arch/x86/include/asm/mrst.h                                    81
-rw-r--r--  arch/x86/include/asm/percpu.h                                  11
-rw-r--r--  arch/x86/include/asm/preempt.h                                100
-rw-r--r--  arch/x86/include/asm/processor.h                                9
-rw-r--r--  arch/x86/include/asm/prom.h                                     5
-rw-r--r--  arch/x86/include/asm/rmwcc.h                                   41
-rw-r--r--  arch/x86/include/asm/setup.h                                    4
-rw-r--r--  arch/x86/include/asm/thread_info.h                              5
-rw-r--r--  arch/x86/include/asm/uaccess.h                                 98
-rw-r--r--  arch/x86/include/asm/uaccess_32.h                              29
-rw-r--r--  arch/x86/include/asm/uaccess_64.h                              52
-rw-r--r--  arch/x86/include/asm/uprobes.h                                 12
-rw-r--r--  arch/x86/include/asm/uv/uv.h                                    2
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h                               57
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h                              31
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h                           2
-rw-r--r--  arch/x86/include/uapi/asm/hyperv.h                             19
28 files changed, 580 insertions(+), 273 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 722aa3b04624..da31c8b8a92d 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -6,6 +6,7 @@
 #include <asm/processor.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
+#include <asm/rmwcc.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -76,12 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
  */
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-                     : "+m" (v->counter), "=qm" (c)
-                     : "ir" (i) : "memory");
-        return c;
+        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
 }
 
 /**
@@ -118,12 +114,7 @@ static inline void atomic_dec(atomic_t *v)
  */
 static inline int atomic_dec_and_test(atomic_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "decl %0; sete %1"
-                     : "+m" (v->counter), "=qm" (c)
-                     : : "memory");
-        return c != 0;
+        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
 }
 
 /**
@@ -136,12 +127,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
  */
 static inline int atomic_inc_and_test(atomic_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "incl %0; sete %1"
-                     : "+m" (v->counter), "=qm" (c)
-                     : : "memory");
-        return c != 0;
+        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
 
 /**
@@ -155,12 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
  */
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-                     : "+m" (v->counter), "=qm" (c)
-                     : "ir" (i) : "memory");
-        return c;
+        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
 }
 
 /**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 0e1cbfc8ee06..3f065c985aee 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -72,12 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  */
 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
-                     : "=m" (v->counter), "=qm" (c)
-                     : "er" (i), "m" (v->counter) : "memory");
-        return c;
+        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
 }
 
 /**
@@ -116,12 +111,7 @@ static inline void atomic64_dec(atomic64_t *v)
  */
 static inline int atomic64_dec_and_test(atomic64_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "decq %0; sete %1"
-                     : "=m" (v->counter), "=qm" (c)
-                     : "m" (v->counter) : "memory");
-        return c != 0;
+        GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
 }
 
 /**
@@ -134,12 +124,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
  */
 static inline int atomic64_inc_and_test(atomic64_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "incq %0; sete %1"
-                     : "=m" (v->counter), "=qm" (c)
-                     : "m" (v->counter) : "memory");
-        return c != 0;
+        GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
 }
 
 /**
@@ -153,12 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
  */
 static inline int atomic64_add_negative(long i, atomic64_t *v)
 {
-        unsigned char c;
-
-        asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
-                     : "=m" (v->counter), "=qm" (c)
-                     : "er" (i), "m" (v->counter) : "memory");
-        return c;
+        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
 }
 
 /**
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 41639ce8fd63..6d76d0935989 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -14,6 +14,7 @@
 
 #include <linux/compiler.h>
 #include <asm/alternative.h>
+#include <asm/rmwcc.h>
 
 #if BITS_PER_LONG == 32
 # define _BITOPS_LONG_SHIFT 5
@@ -204,12 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-        int oldbit;
-
-        asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
-                     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-        return oldbit;
+        GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
 }
 
 /**
@@ -255,13 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-        int oldbit;
-
-        asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
-                     "sbb %0,%0"
-                     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-        return oldbit;
+        GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
 }
 
 /**
@@ -314,13 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-        int oldbit;
-
-        asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
-                     "sbb %0,%0"
-                     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-        return oldbit;
+        GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
 }
 
 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 0fa675033912..cb4c73bfeb48 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -48,6 +48,8 @@ For 32-bit we have the following conventions - kernel is built with
 
 #include <asm/dwarf2.h>
 
+#ifdef CONFIG_X86_64
+
 /*
  * 64-bit system call stack frame layout defines and helpers,
  * for assembly code:
@@ -192,3 +194,51 @@ For 32-bit we have the following conventions - kernel is built with
         .macro icebp
         .byte 0xf1
         .endm
+
+#else /* CONFIG_X86_64 */
+
+/*
+ * For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
+ * are different from the entry_32.S versions in not changing the segment
+ * registers. So only suitable for in kernel use, not when transitioning
+ * from or to user space. The resulting stack frame is not a standard
+ * pt_regs frame. The main use case is calling C code from assembler
+ * when all the registers need to be preserved.
+ */
+
+        .macro SAVE_ALL
+        pushl_cfi %eax
+        CFI_REL_OFFSET eax, 0
+        pushl_cfi %ebp
+        CFI_REL_OFFSET ebp, 0
+        pushl_cfi %edi
+        CFI_REL_OFFSET edi, 0
+        pushl_cfi %esi
+        CFI_REL_OFFSET esi, 0
+        pushl_cfi %edx
+        CFI_REL_OFFSET edx, 0
+        pushl_cfi %ecx
+        CFI_REL_OFFSET ecx, 0
+        pushl_cfi %ebx
+        CFI_REL_OFFSET ebx, 0
+        .endm
+
+        .macro RESTORE_ALL
+        popl_cfi %ebx
+        CFI_RESTORE ebx
+        popl_cfi %ecx
+        CFI_RESTORE ecx
+        popl_cfi %edx
+        CFI_RESTORE edx
+        popl_cfi %esi
+        CFI_RESTORE esi
+        popl_cfi %edi
+        CFI_RESTORE edi
+        popl_cfi %ebp
+        CFI_RESTORE ebp
+        popl_cfi %eax
+        CFI_RESTORE eax
+        .endm
+
+#endif /* CONFIG_X86_64 */
+
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 0062a0125041..65c6e6e3a552 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -109,6 +109,8 @@ static inline bool efi_is_native(void)
         return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
 }
 
+extern struct console early_efi_console;
+
 #else
 /*
  * If EFI is not configured, have the EFI calls return -ENOSYS.
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4d0bda7b11e3..c49a613c6452 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -365,7 +365,7 @@ static inline void drop_fpu(struct task_struct *tsk)
          * Forget coprocessor state..
          */
         preempt_disable();
-        tsk->fpu_counter = 0;
+        tsk->thread.fpu_counter = 0;
         __drop_fpu(tsk);
         clear_used_math();
         preempt_enable();
@@ -424,7 +424,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
          * or if the past 5 consecutive context-switches used math.
          */
         fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
-                                             new->fpu_counter > 5);
+                                             new->thread.fpu_counter > 5);
         if (__thread_has_fpu(old)) {
                 if (!__save_init_fpu(old))
                         cpu = ~0;
@@ -433,16 +433,16 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
                 /* Don't change CR0.TS if we just switch! */
                 if (fpu.preload) {
-                        new->fpu_counter++;
+                        new->thread.fpu_counter++;
                         __thread_set_has_fpu(new);
                         prefetch(new->thread.fpu.state);
                 } else if (!use_eager_fpu())
                         stts();
         } else {
-                old->fpu_counter = 0;
+                old->thread.fpu_counter = 0;
                 old->thread.fpu.last_cpu = ~0;
                 if (fpu.preload) {
-                        new->fpu_counter++;
+                        new->thread.fpu_counter++;
                         if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
                                 fpu.preload = 0;
                         else
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
new file mode 100644
index 000000000000..459769d39263
--- /dev/null
+++ b/arch/x86/include/asm/intel-mid.h
@@ -0,0 +1,113 @@
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+
+extern int intel_mid_pci_init(void);
+extern int get_gpio_by_name(const char *name);
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+
+/*
+ * Here we define the array of device platform data that IAFW exports
+ * through the SFI "DEVS" table; name and type are used to match a device
+ * with its platform data.
+ */
+struct devs_id {
+        char name[SFI_NAME_LEN + 1];
+        u8 type;
+        u8 delay;
+        void *(*get_platform_data)(void *info);
+        /* Custom handler for devices */
+        void (*device_handler)(struct sfi_device_table_entry *pentry,
+                               struct devs_id *dev);
+};
+
+#define sfi_device(i) \
+        static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
+        __attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
+
+/*
+ * Medfield is the follow-up of Moorestown; it combines the two-chip
+ * solution into one. Other than that, it also adds always-on, constant
+ * TSC and LAPIC timers. Medfield is the platform name, the chip name is
+ * Penwell, and we treat Medfield/Penwell as a variant of Moorestown.
+ * Penwell can be identified via MSRs.
+ */
+enum intel_mid_cpu_type {
+        /* 1 was Moorestown */
+        INTEL_MID_CPU_CHIP_PENWELL = 2,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
+static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
+{
+        return __intel_mid_cpu_chip;
+}
+
+static inline bool intel_mid_has_msic(void)
+{
+        return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
+}
+
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define intel_mid_identify_cpu()        (0)
+#define intel_mid_has_msic()            (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
+enum intel_mid_timer_options {
+        INTEL_MID_TIMER_DEFAULT,
+        INTEL_MID_TIMER_APBT_ONLY,
+        INTEL_MID_TIMER_LAPIC_APBT,
+};
+
+extern enum intel_mid_timer_options intel_mid_timer_options;
+
+/*
+ * Penwell uses a spread spectrum clock, so the frequency number is not
+ * exactly the same as the MSR-based number described in the SDM.
+ */
+#define PENWELL_FSB_FREQ_83SKU  83200
+#define PENWELL_FSB_FREQ_100SKU 99840
+
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX     8
+
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(const char *);
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ        (1024)
+/*#define MRST_VRTC_PGOFFSET    (0xc00) */
+
+extern void intel_mid_rtc_init(void);
+
+/* the offset for the mapping of global gpio pin to irq */
+#define INTEL_MID_IRQ_OFFSET 0x100
+
+#endif /* _ASM_X86_INTEL_MID_H */
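
For a sense of how the new sfi_device() hook is used, here is a hypothetical device entry; the device name, the GPIO name, and the example_pdata struct are invented for illustration, while devs_id, sfi_device(), get_gpio_by_name() and SFI_DEV_TYPE_IPC come from the headers involved:

/* A sketch of a device_libs-style registration, assuming intel-mid.h above. */
#include <asm/intel-mid.h>

struct example_pdata {                  /* hypothetical platform data */
        int gpio_irq;
};

static void *example_platform_data(void *info)
{
        static struct example_pdata pdata;

        /* "example_int" is a made-up GPIO name from the SFI GPIO table */
        pdata.gpio_irq = get_gpio_by_name("example_int");
        return &pdata;
}

static const struct devs_id example_dev_id = {
        .name = "example_dev",
        .type = SFI_DEV_TYPE_IPC,
        .delay = 1,
        .get_platform_data = &example_platform_data,
};

/* Drops a pointer to the entry into the .x86_intel_mid_dev.init section,
 * where the SFI "DEVS" parser scans for a matching name/type. */
sfi_device(example_dev_id);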
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h
index 1e69a75412a4..86ff4685c409 100644
--- a/arch/x86/include/asm/mrst-vrtc.h
+++ b/arch/x86/include/asm/intel_mid_vrtc.h
@@ -1,5 +1,5 @@
-#ifndef _MRST_VRTC_H
-#define _MRST_VRTC_H
+#ifndef _INTEL_MID_VRTC_H
+#define _INTEL_MID_VRTC_H
 
 extern unsigned char vrtc_cmos_read(unsigned char reg);
 extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 2d89e3980cbd..5b23e605e707 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -52,12 +52,7 @@ static inline void local_sub(long i, local_t *l)
  */
 static inline int local_sub_and_test(long i, local_t *l)
 {
-        unsigned char c;
-
-        asm volatile(_ASM_SUB "%2,%0; sete %1"
-                     : "+m" (l->a.counter), "=qm" (c)
-                     : "ir" (i) : "memory");
-        return c;
+        GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
 }
 
 /**
@@ -70,12 +65,7 @@ static inline int local_sub_and_test(long i, local_t *l)
  */
 static inline int local_dec_and_test(local_t *l)
 {
-        unsigned char c;
-
-        asm volatile(_ASM_DEC "%0; sete %1"
-                     : "+m" (l->a.counter), "=qm" (c)
-                     : : "memory");
-        return c != 0;
+        GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
 }
 
 /**
@@ -88,12 +78,7 @@ static inline int local_dec_and_test(local_t *l)
  */
 static inline int local_inc_and_test(local_t *l)
 {
-        unsigned char c;
-
-        asm volatile(_ASM_INC "%0; sete %1"
-                     : "+m" (l->a.counter), "=qm" (c)
-                     : : "memory");
-        return c != 0;
+        GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
 }
 
 /**
@@ -107,12 +92,7 @@ static inline int local_inc_and_test(local_t *l)
  */
 static inline int local_add_negative(long i, local_t *l)
 {
-        unsigned char c;
-
-        asm volatile(_ASM_ADD "%2,%0; sets %1"
-                     : "+m" (l->a.counter), "=qm" (c)
-                     : "ir" (i) : "memory");
-        return c;
+        GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
 }
 
 /**
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index cbe6b9e404ce..c696a8687567 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -16,6 +16,7 @@
 #define MCG_EXT_CNT_SHIFT       16
 #define MCG_EXT_CNT(c)          (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
 #define MCG_SER_P               (1ULL<<24)   /* MCA recovery/new status bits */
+#define MCG_ELOG_P              (1ULL<<26)   /* Extended error log supported */
 
 /* MCG_STATUS register defines */
 #define MCG_STATUS_RIPV         (1ULL<<0)    /* restart ip valid */
diff --git a/arch/x86/include/asm/misc.h b/arch/x86/include/asm/misc.h
new file mode 100644
index 000000000000..475f5bbc7f53
--- /dev/null
+++ b/arch/x86/include/asm/misc.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_MISC_H
+#define _ASM_X86_MISC_H
+
+int num_digits(int val);
+
+#endif /* _ASM_X86_MISC_H */
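
The header only carries the prototype; a minimal sketch of a matching implementation (the in-tree version lives in arch/x86/lib/misc.c) could look like the following. Note the sketch counts the minus sign as a digit and does not handle INT_MIN:

/* Count decimal digits of val, including a possible leading '-'. */
int num_digits(int val)
{
        int m = 10;
        int d = 1;

        if (val < 0) {
                d++;            /* account for the sign */
                val = -val;
        }

        while (val >= m) {
                m *= 10;
                d++;
        }
        return d;
}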
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
deleted file mode 100644
index fc18bf3ce7c8..000000000000
--- a/arch/x86/include/asm/mrst.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * mrst.h: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-#ifndef _ASM_X86_MRST_H
-#define _ASM_X86_MRST_H
-
-#include <linux/sfi.h>
-
-extern int pci_mrst_init(void);
-extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
-extern int sfi_mrtc_num;
-extern struct sfi_rtc_table_entry sfi_mrtc_array[];
-
-/*
- * Medfield is the follow-up of Moorestown, it combines two chip solution into
- * one. Other than that it also added always-on and constant tsc and lapic
- * timers. Medfield is the platform name, and the chip name is called Penwell
- * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
- * identified via MSRs.
- */
-enum mrst_cpu_type {
-        /* 1 was Moorestown */
-        MRST_CPU_CHIP_PENWELL = 2,
-};
-
-extern enum mrst_cpu_type __mrst_cpu_chip;
-
-#ifdef CONFIG_X86_INTEL_MID
-
-static inline enum mrst_cpu_type mrst_identify_cpu(void)
-{
-        return __mrst_cpu_chip;
-}
-
-#else /* !CONFIG_X86_INTEL_MID */
-
-#define mrst_identify_cpu()     (0)
-
-#endif /* !CONFIG_X86_INTEL_MID */
-
-enum mrst_timer_options {
-        MRST_TIMER_DEFAULT,
-        MRST_TIMER_APBT_ONLY,
-        MRST_TIMER_LAPIC_APBT,
-};
-
-extern enum mrst_timer_options mrst_timer_options;
-
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define PENWELL_FSB_FREQ_83SKU  83200
-#define PENWELL_FSB_FREQ_100SKU 99840
-
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX 8
-
-extern struct console early_mrst_console;
-extern void mrst_early_console_init(void);
-
-extern struct console early_hsu_console;
-extern void hsu_early_console_init(const char *);
-
-extern void intel_scu_devices_create(void);
-extern void intel_scu_devices_destroy(void);
-
-/* VRTC timer */
-#define MRST_VRTC_MAP_SZ        (1024)
-/*#define MRST_VRTC_PGOFFSET    (0xc00) */
-
-extern void mrst_rtc_init(void);
-
-#endif /* _ASM_X86_MRST_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0da5200ee79d..94220d14d5cc 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -128,7 +128,8 @@ do { \
 do {                                                                    \
         typedef typeof(var) pao_T__;                                    \
         const int pao_ID__ = (__builtin_constant_p(val) &&              \
-                              ((val) == 1 || (val) == -1)) ? (val) : 0; \
+                              ((val) == 1 || (val) == -1)) ?            \
+                                (int)(val) : 0;                         \
         if (0) {                                                        \
                 pao_T__ pao_tmp__;                                      \
                 pao_tmp__ = (val);                                      \
@@ -377,9 +378,6 @@ do { \
 #define __this_cpu_or_1(pcp, val)       percpu_to_op("or", (pcp), val)
 #define __this_cpu_or_2(pcp, val)       percpu_to_op("or", (pcp), val)
 #define __this_cpu_or_4(pcp, val)       percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_1(pcp, val)      percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_2(pcp, val)      percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_4(pcp, val)      percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xchg_1(pcp, val)     percpu_xchg_op(pcp, val)
 #define __this_cpu_xchg_2(pcp, val)     percpu_xchg_op(pcp, val)
 #define __this_cpu_xchg_4(pcp, val)     percpu_xchg_op(pcp, val)
@@ -399,9 +397,6 @@ do { \
 #define this_cpu_or_1(pcp, val)         percpu_to_op("or", (pcp), val)
 #define this_cpu_or_2(pcp, val)         percpu_to_op("or", (pcp), val)
 #define this_cpu_or_4(pcp, val)         percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_1(pcp, val)        percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_2(pcp, val)        percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_4(pcp, val)        percpu_to_op("xor", (pcp), val)
 #define this_cpu_xchg_1(pcp, nval)      percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)      percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)      percpu_xchg_op(pcp, nval)
@@ -446,7 +441,6 @@ do { \
 #define __this_cpu_add_8(pcp, val)      percpu_add_op((pcp), val)
 #define __this_cpu_and_8(pcp, val)      percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)       percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_8(pcp, val)      percpu_to_op("xor", (pcp), val)
 #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 #define __this_cpu_xchg_8(pcp, nval)    percpu_xchg_op(pcp, nval)
 #define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
@@ -456,7 +450,6 @@ do { \
 #define this_cpu_add_8(pcp, val)        percpu_add_op((pcp), val)
 #define this_cpu_and_8(pcp, val)        percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)         percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_8(pcp, val)        percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 #define this_cpu_xchg_8(pcp, nval)      percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
new file mode 100644
index 000000000000..8729723636fd
--- /dev/null
+++ b/arch/x86/include/asm/preempt.h
@@ -0,0 +1,100 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/rmwcc.h>
+#include <asm/percpu.h>
+#include <linux/thread_info.h>
+
+DECLARE_PER_CPU(int, __preempt_count);
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
+{
+        return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+        __this_cpu_write_4(__preempt_count, pc);
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define task_preempt_count(p) \
+        (task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)
+
+#define init_task_preempt_count(p) do { \
+        task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+        task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
+        per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+} while (0)
+
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+        __this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+        __this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+        return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+        __this_cpu_add_4(__preempt_count, val);
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+        __this_cpu_add_4(__preempt_count, -val);
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+        GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+        return unlikely(!__this_cpu_read_4(__preempt_count));
+}
+
+#ifdef CONFIG_PREEMPT
+  extern asmlinkage void ___preempt_schedule(void);
+# define __preempt_schedule() asm ("call ___preempt_schedule")
+  extern asmlinkage void preempt_schedule(void);
+# ifdef CONFIG_CONTEXT_TRACKING
+  extern asmlinkage void ___preempt_schedule_context(void);
+# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
+# endif
+#endif
+
+#endif /* __ASM_PREEMPT_H */
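
The inverted-bit folding is easier to see with concrete values. Below is a minimal userspace sketch (not part of the patch), assuming the kernel's PREEMPT_NEED_RESCHED value of 0x80000000: because the bit is stored inverted, the single decrement in preempt_enable() reaches zero exactly when the preempt count is gone AND a reschedule is pending, so one ZF test covers both conditions.

/* fold_demo.c -- build with: gcc -O2 fold_demo.c && ./a.out */
#include <stdio.h>

#define PREEMPT_NEED_RESCHED    0x80000000u

int main(void)
{
        /* PREEMPT_ENABLED state: count 0, inverted NEED_RESCHED bit set */
        unsigned int count = PREEMPT_NEED_RESCHED;

        count += 1;                             /* preempt_disable() */
        count &= ~PREEMPT_NEED_RESCHED;         /* set_preempt_need_resched() */
        count -= 1;                             /* the decl in preempt_enable()... */
        if (count == 0)                         /* ...tests both conditions at once */
                printf("count hit 0: may preempt AND need resched\n");
        return 0;
}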
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 987c75ecc334..7b034a4057f9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -488,6 +488,15 @@ struct thread_struct {
         unsigned long           iopl;
         /* Max allowed port in the bitmap, in bytes: */
         unsigned                io_bitmap_max;
+        /*
+         * fpu_counter contains the number of consecutive context switches
+         * during which the FPU is used. If this is over a threshold, the
+         * lazy FPU saving becomes unlazy to save the trap. This is an
+         * unsigned char so that after 256 iterations the counter wraps and
+         * the behavior turns lazy again; this deals with bursty apps that
+         * only use the FPU for a short time.
+         */
+        unsigned char fpu_counter;
 };
 
 /*
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index bade6ac3b14f..fbeb06ed0eaa 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -39,10 +39,5 @@ static inline void x86_dtb_init(void) { }
 
 extern char cmd_line[COMMAND_LINE_SIZE];
 
-#define pci_address_to_pio pci_address_to_pio
-unsigned long pci_address_to_pio(phys_addr_t addr);
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
new file mode 100644
index 000000000000..1ff990f1de8e
--- /dev/null
+++ b/arch/x86/include/asm/rmwcc.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_RMWcc
+#define _ASM_X86_RMWcc
+
+#ifdef CC_HAVE_ASM_GOTO
+
+#define __GEN_RMWcc(fullop, var, cc, ...)                               \
+do {                                                                    \
+        asm_volatile_goto (fullop "; j" cc " %l[cc_label]"              \
+                        : : "m" (var), ## __VA_ARGS__                   \
+                        : "memory" : cc_label);                         \
+        return 0;                                                       \
+cc_label:                                                               \
+        return 1;                                                       \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc)                              \
+        __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, val, arg0, cc)                        \
+        __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val))
+
+#else /* !CC_HAVE_ASM_GOTO */
+
+#define __GEN_RMWcc(fullop, var, cc, ...)                               \
+do {                                                                    \
+        char c;                                                         \
+        asm volatile (fullop "; set" cc " %1"                           \
+                        : "+m" (var), "=qm" (c)                         \
+                        : __VA_ARGS__ : "memory");                      \
+        return c != 0;                                                  \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc)                              \
+        __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, val, arg0, cc)                        \
+        __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val))
+
+#endif /* CC_HAVE_ASM_GOTO */
+
+#endif /* _ASM_X86_RMWcc */
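
The two expansion strategies above are worth seeing side by side outside the kernel. Here is a minimal userspace sketch (not part of the patch) of a dec-and-test in both forms, assuming gcc 4.5+ for "asm goto" (the kernel picks between them via CC_HAVE_ASM_GOTO). The asm-goto form branches directly on the flags and never materializes a boolean in a register, which is exactly what this series buys for atomic_*, bitops, local_t and preempt_count:

/* rmwcc_demo.c -- build with: gcc -O2 rmwcc_demo.c && ./a.out */
#include <stdio.h>

/* Expansion in the spirit of GEN_UNARY_RMWcc("decl", var, "%0", "e")
 * with asm goto: the conditional jump consumes ZF directly. */
static int dec_and_test_asmgoto(int *v)
{
        asm goto("decl %0; je %l[cc_label]"
                 : /* no outputs allowed with asm goto */
                 : "m" (*v)
                 : "memory"
                 : cc_label);
        return 0;
cc_label:
        return 1;
}

/* The fallback expansion: materialize ZF into a byte with sete. */
static int dec_and_test_setcc(int *v)
{
        char c;

        asm volatile("decl %0; sete %1"
                     : "+m" (*v), "=qm" (c) : : "memory");
        return c != 0;
}

int main(void)
{
        int a = 2, b = 2;
        int r1 = dec_and_test_asmgoto(&a);      /* 2 -> 1: returns 0 */
        int r2 = dec_and_test_asmgoto(&a);      /* 1 -> 0: returns 1 */
        int r3 = dec_and_test_setcc(&b);
        int r4 = dec_and_test_setcc(&b);

        printf("asm goto: %d %d\nsetcc:    %d %d\n", r1, r2, r3, r4);
        return 0;
}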
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 347555492dad..59bcf4e22418 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -51,9 +51,9 @@ extern void i386_reserve_resources(void);
 extern void setup_default_timer_irq(void);
 
 #ifdef CONFIG_X86_INTEL_MID
-extern void x86_mrst_early_setup(void);
+extern void x86_intel_mid_early_setup(void);
 #else
-static inline void x86_mrst_early_setup(void) { }
+static inline void x86_intel_mid_early_setup(void) { }
 #endif
 
 #ifdef CONFIG_X86_INTEL_CE
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 27811190cbd7..c46a46be1ec6 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -28,8 +28,7 @@ struct thread_info {
         __u32                   flags;          /* low level flags */
         __u32                   status;         /* thread synchronous flags */
         __u32                   cpu;            /* current CPU */
-        int                     preempt_count;  /* 0 => preemptable,
-                                                   <0 => BUG */
+        int                     saved_preempt_count;
         mm_segment_t            addr_limit;
         struct restart_block    restart_block;
         void __user             *sysenter_return;
@@ -49,7 +48,7 @@ struct thread_info {
         .exec_domain    = &default_exec_domain, \
         .flags          = 0,                    \
         .cpu            = 0,                    \
-        .preempt_count  = INIT_PREEMPT_COUNT,   \
+        .saved_preempt_count = INIT_PREEMPT_COUNT,      \
         .addr_limit     = KERNEL_DS,            \
         .restart_block  = {                     \
                 .fn = do_no_restart_syscall,    \
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5838fa911aa0..8ec57c07b125 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -542,5 +542,103 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
+unsigned long __must_check _copy_from_user(void *to, const void __user *from,
+                                           unsigned n);
+unsigned long __must_check _copy_to_user(void __user *to, const void *from,
+                                         unsigned n);
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+# define copy_user_diag __compiletime_error
+#else
+# define copy_user_diag __compiletime_warning
+#endif
+
+extern void copy_user_diag("copy_from_user() buffer size is too small")
+copy_from_user_overflow(void);
+extern void copy_user_diag("copy_to_user() buffer size is too small")
+copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+
+#undef copy_user_diag
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+
+extern void
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
+
+extern void
+__compiletime_warning("copy_to_user() buffer size is not provably correct")
+__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
+
+#else
+
+static inline void
+__copy_from_user_overflow(int size, unsigned long count)
+{
+        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+#define __copy_to_user_overflow __copy_from_user_overflow
+
+#endif
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+        int sz = __compiletime_object_size(to);
+
+        might_fault();
+
+        /*
+         * While we would like to have the compiler do the checking for us
+         * even in the non-constant size case, any false positives there are
+         * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
+         * without - the [hopefully] dangerous looking nature of the warning
+         * would make people go look at the respective call sites over and
+         * over again just to find that there's no problem).
+         *
+         * And there are cases where it's just not realistic for the compiler
+         * to prove the count to be in range. For example when multiple call
+         * sites of a helper function - perhaps in different source files -
+         * all do proper range checking, yet the helper function itself does
+         * not.
+         *
+         * Therefore limit the compile time checking to the constant size
+         * case, and do only runtime checking for non-constant sizes.
+         */
+
+        if (likely(sz < 0 || sz >= n))
+                n = _copy_from_user(to, from, n);
+        else if (__builtin_constant_p(n))
+                copy_from_user_overflow();
+        else
+                __copy_from_user_overflow(sz, n);
+
+        return n;
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+        int sz = __compiletime_object_size(from);
+
+        might_fault();
+
+        /* See the comment in copy_from_user() above. */
+        if (likely(sz < 0 || sz >= n))
+                n = _copy_to_user(to, from, n);
+        else if (__builtin_constant_p(n))
+                copy_to_user_overflow();
+        else
+                __copy_to_user_overflow(sz, n);
+
+        return n;
+}
+
+#undef __copy_from_user_overflow
+#undef __copy_to_user_overflow
+
 #endif /* _ASM_X86_UACCESS_H */
 
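
The "sz < 0" fast path works because __compiletime_object_size() boils down to __builtin_object_size(ptr, 0) cast to int: the object's size when the compiler can prove it, -1 when it cannot. A small userspace sketch of the same check (not part of the patch; the builtin only folds when optimizing, so build with -O2):

/* objsize_demo.c -- build with: gcc -O2 objsize_demo.c && ./a.out */
#include <stdio.h>

/* A macro so the builtin sees the object at the call site. */
#define check_copy(to, n) do {                                          \
        long sz = (long)__builtin_object_size(to, 0);                   \
        if (sz < 0 || sz >= (long)(n))                                  \
                printf("ok:     sz=%ld n=%lu\n", sz, (unsigned long)(n)); \
        else                                                            \
                printf("caught: buffer %ld < copy %lu\n", sz,           \
                       (unsigned long)(n));                             \
} while (0)

int main(void)
{
        char buf[16];

        check_copy(buf, 8UL);   /* fits: sz=16 */
        check_copy(buf, 32UL);  /* would overflow: 16 < 32 */
        return 0;
}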
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 7f760a9f1f61..3c03a5de64d3 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -184,33 +184,4 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
         return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
-unsigned long __must_check copy_to_user(void __user *to,
-                                        const void *from, unsigned long n);
-unsigned long __must_check _copy_from_user(void *to,
-                                           const void __user *from,
-                                           unsigned long n);
-
-
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-        __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
-        __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
-static inline unsigned long __must_check copy_from_user(void *to,
-                                          const void __user *from,
-                                          unsigned long n)
-{
-        int sz = __compiletime_object_size(to);
-
-        if (likely(sz == -1 || sz >= n))
-                n = _copy_from_user(to, from, n);
-        else
-                copy_from_user_overflow();
-
-        return n;
-}
-
 #endif /* _ASM_X86_UACCESS_32_H */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 4f7923dd0007..190413d0de57 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -46,42 +46,13 @@ copy_user_generic(void *to, const void *from, unsigned len)
 }
 
 __must_check unsigned long
-_copy_to_user(void __user *to, const void *from, unsigned len);
-__must_check unsigned long
-_copy_from_user(void *to, const void __user *from, unsigned len);
-__must_check unsigned long
 copy_in_user(void __user *to, const void __user *from, unsigned len);
 
-static inline unsigned long __must_check copy_from_user(void *to,
-                                          const void __user *from,
-                                          unsigned long n)
-{
-        int sz = __compiletime_object_size(to);
-
-        might_fault();
-        if (likely(sz == -1 || sz >= n))
-                n = _copy_from_user(to, from, n);
-#ifdef CONFIG_DEBUG_VM
-        else
-                WARN(1, "Buffer overflow detected!\n");
-#endif
-        return n;
-}
-
 static __always_inline __must_check
-int copy_to_user(void __user *dst, const void *src, unsigned size)
-{
-        might_fault();
-
-        return _copy_to_user(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
         int ret = 0;
 
-        might_fault();
         if (!__builtin_constant_p(size))
                 return copy_user_generic(dst, (__force void *)src, size);
         switch (size) {
@@ -121,11 +92,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+        might_fault();
+        return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
         int ret = 0;
 
-        might_fault();
         if (!__builtin_constant_p(size))
                 return copy_user_generic((__force void *)dst, src, size);
         switch (size) {
165} 142}
166 143
167static __always_inline __must_check 144static __always_inline __must_check
145int __copy_to_user(void __user *dst, const void *src, unsigned size)
146{
147 might_fault();
148 return __copy_to_user_nocheck(dst, src, size);
149}
150
151static __always_inline __must_check
168int __copy_in_user(void __user *dst, const void __user *src, unsigned size) 152int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
169{ 153{
170 int ret = 0; 154 int ret = 0;
@@ -220,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-        return copy_user_generic(dst, (__force const void *)src, size);
+        return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-        return copy_user_generic((__force void *)dst, src, size);
+        return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 6e5197910fd8..3087ea9c5f2e 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -35,7 +35,10 @@ typedef u8 uprobe_opcode_t;
 
 struct arch_uprobe {
         u16                     fixups;
-        u8                      insn[MAX_UINSN_BYTES];
+        union {
+                u8              insn[MAX_UINSN_BYTES];
+                u8              ixol[MAX_UINSN_BYTES];
+        };
 #ifdef CONFIG_X86_64
         unsigned long           rip_rela_target_address;
 #endif
@@ -49,11 +52,4 @@ struct arch_uprobe_task {
         unsigned int            saved_tf;
 };
 
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
-extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
-extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
 #endif  /* _ASM_UPROBES_H */
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 062921ef34e9..6b964a0b86d1 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,6 +12,7 @@ extern enum uv_system_type get_uv_system_type(void);
 extern int is_uv_system(void);
 extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
+extern void uv_register_nmi_notifier(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                                  struct mm_struct *mm,
@@ -25,6 +26,7 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
 static inline int is_uv_system(void)    { return 0; }
 static inline void uv_cpu_init(void)    { }
 static inline void uv_system_init(void) { }
+static inline void uv_register_nmi_notifier(void) { }
 static inline const struct cpumask *
 uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
                     unsigned long start, unsigned long end, unsigned int cpu)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 2c32df95bb78..a30836c8ac4d 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -502,8 +502,8 @@ struct uv_blade_info {
         unsigned short  nr_online_cpus;
         unsigned short  pnode;
         short           memory_nid;
-        spinlock_t      nmi_lock;
-        unsigned long   nmi_count;
+        spinlock_t      nmi_lock;       /* obsolete, see uv_hub_nmi */
+        unsigned long   nmi_count;      /* obsolete, see uv_hub_nmi */
 };
 extern struct uv_blade_info *uv_blade_info;
 extern short *uv_node_to_blade;
@@ -576,6 +576,59 @@ static inline int uv_num_possible_blades(void)
         return uv_possible_blades;
 }
 
+/* Per Hub NMI support */
+extern void uv_nmi_setup(void);
+
+/* BMC sets a bit in this MMR non-zero before sending an NMI */
+#define UVH_NMI_MMR             UVH_SCRATCH5
+#define UVH_NMI_MMR_CLEAR       UVH_SCRATCH5_ALIAS
+#define UVH_NMI_MMR_SHIFT       63
+#define UVH_NMI_MMR_TYPE        "SCRATCH5"
+
+/* Newer SMM NMI handler, not present in all systems */
+#define UVH_NMI_MMRX            UVH_EVENT_OCCURRED0
+#define UVH_NMI_MMRX_CLEAR      UVH_EVENT_OCCURRED0_ALIAS
+#define UVH_NMI_MMRX_SHIFT      (is_uv1_hub() ? \
+                                        UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
+                                        UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
+#define UVH_NMI_MMRX_TYPE       "EXTIO_INT0"
+
+/* Non-zero indicates newer SMM NMI handler present */
+#define UVH_NMI_MMRX_SUPPORTED  UVH_EXTIO_INT0_BROADCAST
+
+/* Indicates to BIOS that we want to use the newer SMM NMI handler */
+#define UVH_NMI_MMRX_REQ        UVH_SCRATCH5_ALIAS_2
+#define UVH_NMI_MMRX_REQ_SHIFT  62
+
+struct uv_hub_nmi_s {
+        raw_spinlock_t  nmi_lock;
+        atomic_t        in_nmi;         /* flag this node in UV NMI IRQ */
+        atomic_t        cpu_owner;      /* last locker of this struct */
+        atomic_t        read_mmr_count; /* count of MMR reads */
+        atomic_t        nmi_count;      /* count of true UV NMIs */
+        unsigned long   nmi_value;      /* last value read from NMI MMR */
+};
+
+struct uv_cpu_nmi_s {
+        struct uv_hub_nmi_s     *hub;
+        atomic_t                state;
+        atomic_t                pinging;
+        int                     queries;
+        int                     pings;
+};
+
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
+#define uv_cpu_nmi              (__get_cpu_var(__uv_cpu_nmi))
+#define uv_hub_nmi              (uv_cpu_nmi.hub)
+#define uv_cpu_nmi_per(cpu)     (per_cpu(__uv_cpu_nmi, cpu))
+#define uv_hub_nmi_per(cpu)     (uv_cpu_nmi_per(cpu).hub)
+
+/* uv_cpu_nmi_states */
+#define UV_NMI_STATE_OUT        0
+#define UV_NMI_STATE_IN         1
+#define UV_NMI_STATE_DUMP       2
+#define UV_NMI_STATE_DUMP_DONE  3
+
 /* Update SCIR state */
 static inline void uv_set_scir_bits(unsigned char value)
 {
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index bd5f80e58a23..e42249bcf7e1 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -461,6 +461,23 @@ union uvh_event_occurred0_u {
 
 
 /* ========================================================================= */
+/*                          UVH_EXTIO_INT0_BROADCAST                         */
+/* ========================================================================= */
+#define UVH_EXTIO_INT0_BROADCAST 0x61448UL
+#define UVH_EXTIO_INT0_BROADCAST_32 0x3f0
+
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT    0
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK    0x0000000000000001UL
+
+union uvh_extio_int0_broadcast_u {
+        unsigned long   v;
+        struct uvh_extio_int0_broadcast_s {
+                unsigned long   enable:1;       /* RW */
+                unsigned long   rsvd_1_63:63;
+        } s;
+};
+
+/* ========================================================================= */
 /*                          UVH_GR0_TLB_INT0_CONFIG                          */
 /* ========================================================================= */
 #define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
@@ -2606,6 +2623,20 @@ union uvh_scratch5_u {
 };
 
 /* ========================================================================= */
+/*                             UVH_SCRATCH5_ALIAS                            */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS 0x2d0208UL
+#define UVH_SCRATCH5_ALIAS_32 0x780
+
+
+/* ========================================================================= */
+/*                            UVH_SCRATCH5_ALIAS_2                           */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UVH_SCRATCH5_ALIAS_2_32 0x788
+
+
+/* ========================================================================= */
 /*                            UVXH_EVENT_OCCURRED2                           */
 /* ========================================================================= */
 #define UVXH_EVENT_OCCURRED2 0x70100UL
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf90710..9c3733c5f8f7 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -158,7 +158,7 @@ enum {
         X86_SUBARCH_PC = 0,
         X86_SUBARCH_LGUEST,
         X86_SUBARCH_XEN,
-        X86_SUBARCH_MRST,
+        X86_SUBARCH_INTEL_MID,
         X86_SUBARCH_CE4100,
         X86_NR_SUBARCHS,
 };
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index b80420bcd09d..b8f1c0176cbc 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -27,6 +27,19 @@
 #define HV_X64_MSR_VP_RUNTIME_AVAILABLE         (1 << 0)
 /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available */
 #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE     (1 << 1)
+
+/*
+ * There is a single feature flag that signifies the presence of the MSR
+ * that can be used to retrieve both the local APIC Timer frequency as
+ * well as the TSC frequency.
+ */
+
+/* Local APIC timer frequency MSR (HV_X64_MSR_APIC_FREQUENCY) is available */
+#define HV_X64_MSR_APIC_FREQUENCY_AVAILABLE (1 << 11)
+
+/* TSC frequency MSR (HV_X64_MSR_TSC_FREQUENCY) is available */
+#define HV_X64_MSR_TSC_FREQUENCY_AVAILABLE (1 << 11)
+
 /*
  * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
  * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
@@ -136,6 +149,12 @@
 /* MSR used to read the per-partition time reference counter */
 #define HV_X64_MSR_TIME_REF_COUNT               0x40000020
 
+/* MSR used to retrieve the TSC frequency */
+#define HV_X64_MSR_TSC_FREQUENCY                0x40000022
+
+/* MSR used to retrieve the local APIC timer frequency */
+#define HV_X64_MSR_APIC_FREQUENCY               0x40000023
+
 /* Define the virtual APIC registers */
 #define HV_X64_MSR_EOI                          0x40000070
 #define HV_X64_MSR_ICR                          0x40000071
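
A hypothetical guest-side use of the new definitions, sketched for kernel context (asm/msr.h, asm/div64.h). The assumptions here: "features" is EAX of the Hyper-V features CPUID leaf (0x40000003), and the MSR reports the frequency in Hz; rdmsrl() and do_div() are the standard kernel helpers:

/* Sketch only -- not from this patch. */
static unsigned long hv_tsc_khz(u32 features)
{
        u64 hz = 0;

        if (features & HV_X64_MSR_TSC_FREQUENCY_AVAILABLE) {
                rdmsrl(HV_X64_MSR_TSC_FREQUENCY, hz);
                do_div(hz, 1000);               /* Hz -> kHz */
        }
        return (unsigned long)hz;
}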