Diffstat (limited to 'arch/x86/include/asm')
57 files changed, 790 insertions, 459 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 4ef949c1972e..42f2f8377422 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -75,7 +75,7 @@ static inline void default_inquire_remote_apic(int apicid)
 #define setup_secondary_clock setup_secondary_APIC_clock
 #endif
 
-#ifdef CONFIG_X86_VSMP
+#ifdef CONFIG_X86_64
 extern int is_vsmp_box(void);
 #else
 static inline int is_vsmp_box(void)
@@ -107,7 +107,20 @@ extern u32 native_safe_apic_wait_icr_idle(void);
 extern void native_apic_icr_write(u32 low, u32 id);
 extern u64 native_apic_icr_read(void);
 
+#define EIM_8BIT_APIC_ID	0
+#define EIM_32BIT_APIC_ID	1
+
 #ifdef CONFIG_X86_X2APIC
+/*
+ * Make previous memory operations globally visible before
+ * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+ * mfence for this.
+ */
+static inline void x2apic_wrmsr_fence(void)
+{
+	asm volatile("mfence" : : : "memory");
+}
+
 static inline void native_apic_msr_write(u32 reg, u32 v)
 {
 	if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -184,6 +197,9 @@ static inline int x2apic_enabled(void)
 {
 	return 0;
 }
+
+#define	x2apic	0
+
 #endif
 
 extern int get_physical_broadcast(void);
@@ -379,6 +395,7 @@ static inline u32 safe_apic_wait_icr_idle(void)
 
 static inline void ack_APIC_irq(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	/*
 	 * ack_APIC_irq() actually gets compiled as a single instruction
 	 * ... yummie.
@@ -386,6 +403,7 @@ static inline void ack_APIC_irq(void)
 
 	/* Docs say use 0 for future compatibility */
 	apic_write(APIC_EOI, 0);
+#endif
 }
 
 static inline unsigned default_get_apic_id(unsigned long x)
@@ -474,10 +492,19 @@ static inline int default_apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+extern int default_apicid_to_node(int logical_apicid);
+
+#endif
+
 static inline unsigned int
 default_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpumask_bits(cpumask)[0];
+	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
 }
 
 static inline unsigned int
@@ -491,15 +518,6 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	return (unsigned int)(mask1 & mask2 & mask3);
 }
 
-static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
-{
-	return cpuid_apic >> index_msb;
-}
-
-extern int default_apicid_to_node(int logical_apicid);
-
-#endif
-
 static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
 {
 	return physid_isset(apicid, bitmap);
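Note on the x2apic_wrmsr_fence() helper added above: a WRMSR to the x2APIC ICR does not by itself order earlier stores, so IPI senders are expected to issue the fence first. A minimal sketch of a caller, assuming the native_x2apic_icr_write() helper and APIC_DM_FIXED from these headers; the function name example_x2apic_send_ipi is hypothetical and not part of this patch:

static inline void example_x2apic_send_ipi(u32 apicid, u8 vector)
{
	/* Make earlier memory writes globally visible before the IPI wrmsr. */
	x2apic_wrmsr_fence();
	/* Fixed-delivery IPI to the destination APIC ID via the x2APIC ICR. */
	native_x2apic_icr_write(APIC_DM_FIXED | vector, apicid);
}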
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 63134e31e8b9..bc9514fb3b13 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -53,6 +53,7 @@
 #define APIC_ESR_SENDILL	0x00020
 #define APIC_ESR_RECVILL	0x00040
 #define APIC_ESR_ILLREGA	0x00080
+#define APIC_LVTCMCI		0x2f0
 #define APIC_ICR		0x300
 #define APIC_DEST_SELF		0x40000
 #define APIC_DEST_ALLINC	0x80000
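The new APIC_LVTCMCI offset names the local vector table entry used for corrected machine-check interrupts (CMCI). Purely as an illustration of programming it with apic_write(); the vector value and the example_* function name are placeholders, not part of this patch:

static void example_enable_cmci(u8 vector)
{
	/* Route CMCI delivery to a fixed interrupt vector. */
	apic_write(APIC_LVTCMCI, APIC_DM_FIXED | vector);
}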
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 6526cf08b0e4..6ba23dd9fc92 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -1,10 +1,6 @@
 #ifndef _ASM_X86_BOOT_H
 #define _ASM_X86_BOOT_H
 
-/* Don't touch these, unless you really know what you're doing. */
-#define DEF_SYSSEG	0x1000
-#define DEF_SYSSIZE	0x7F00
-
 /* Internal svga startup constants */
 #define NORMAL_VGA	0xffff		/* 80x25 mode */
 #define EXTENDED_VGA	0xfffe		/* 80x50 mode */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 5b301b7ff5f4..e55dfc1ad453 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -90,6 +90,9 @@ int set_memory_4k(unsigned long addr, int numpages);
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+
 /*
  * For legacy compatibility with the old APIs, a few functions
  * are provided that work on a "struct page".
@@ -123,6 +126,11 @@ void clflush_cache_range(void *addr, unsigned int size);
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
 #endif
 
 #ifdef CONFIG_DEBUG_RODATA_TEST
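set_pages_array_uc()/set_pages_array_wb() operate on an array of struct page pointers rather than a contiguous virtual range. A rough usage sketch under that assumption; the example_* name is hypothetical:

static int example_map_pages_uncached(struct page **pages, int count)
{
	int err;

	err = set_pages_array_uc(pages, count);	/* switch caching to UC */
	if (err)
		return err;
	/* ... hand the pages to a device that needs uncached access ... */
	return set_pages_array_wb(pages, count);	/* restore write-back */
}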
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
new file mode 100755
index 000000000000..222802029fa6
--- /dev/null
+++ b/arch/x86/include/asm/cpu_debug.h
@@ -0,0 +1,226 @@
+#ifndef _ASM_X86_CPU_DEBUG_H
+#define _ASM_X86_CPU_DEBUG_H
+
+/*
+ * CPU x86 architecture debug
+ *
+ * Copyright(C) 2009 Jaswinder Singh Rajput
+ */
+
+/* Register flags */
+enum cpu_debug_bit {
+/* Model Specific Registers (MSRs) */
+	CPU_MC_BIT,		/* Machine Check		*/
+	CPU_MONITOR_BIT,	/* Monitor			*/
+	CPU_TIME_BIT,		/* Time				*/
+	CPU_PMC_BIT,		/* Performance Monitor		*/
+	CPU_PLATFORM_BIT,	/* Platform			*/
+	CPU_APIC_BIT,		/* APIC				*/
+	CPU_POWERON_BIT,	/* Power-on			*/
+	CPU_CONTROL_BIT,	/* Control			*/
+	CPU_FEATURES_BIT,	/* Features control		*/
+	CPU_LBRANCH_BIT,	/* Last Branch			*/
+	CPU_BIOS_BIT,		/* BIOS				*/
+	CPU_FREQ_BIT,		/* Frequency			*/
+	CPU_MTTR_BIT,		/* MTRR				*/
+	CPU_PERF_BIT,		/* Performance			*/
+	CPU_CACHE_BIT,		/* Cache			*/
+	CPU_SYSENTER_BIT,	/* Sysenter			*/
+	CPU_THERM_BIT,		/* Thermal			*/
+	CPU_MISC_BIT,		/* Miscellaneous		*/
+	CPU_DEBUG_BIT,		/* Debug			*/
+	CPU_PAT_BIT,		/* PAT				*/
+	CPU_VMX_BIT,		/* VMX				*/
+	CPU_CALL_BIT,		/* System Call			*/
+	CPU_BASE_BIT,		/* BASE Address			*/
+	CPU_VER_BIT,		/* Version ID			*/
+	CPU_CONF_BIT,		/* Configuration		*/
+	CPU_SMM_BIT,		/* System mgmt mode		*/
+	CPU_SVM_BIT,		/* Secure Virtual Machine	*/
+	CPU_OSVM_BIT,		/* OS-Visible Workaround	*/
+/* Standard Registers */
+	CPU_TSS_BIT,		/* Task Stack Segment		*/
+	CPU_CR_BIT,		/* Control Registers		*/
+	CPU_DT_BIT,		/* Descriptor Table		*/
+/* End of Registers flags */
+	CPU_REG_ALL_BIT,	/* Select all Registers		*/
+};
+
+#define CPU_REG_ALL		(~0)	/* Select all Registers	*/
+
+#define CPU_MC			(1 << CPU_MC_BIT)
+#define CPU_MONITOR		(1 << CPU_MONITOR_BIT)
+#define CPU_TIME		(1 << CPU_TIME_BIT)
+#define CPU_PMC			(1 << CPU_PMC_BIT)
+#define CPU_PLATFORM		(1 << CPU_PLATFORM_BIT)
+#define CPU_APIC		(1 << CPU_APIC_BIT)
+#define CPU_POWERON		(1 << CPU_POWERON_BIT)
+#define CPU_CONTROL		(1 << CPU_CONTROL_BIT)
+#define CPU_FEATURES		(1 << CPU_FEATURES_BIT)
+#define CPU_LBRANCH		(1 << CPU_LBRANCH_BIT)
+#define CPU_BIOS		(1 << CPU_BIOS_BIT)
+#define CPU_FREQ		(1 << CPU_FREQ_BIT)
+#define CPU_MTRR		(1 << CPU_MTTR_BIT)
+#define CPU_PERF		(1 << CPU_PERF_BIT)
+#define CPU_CACHE		(1 << CPU_CACHE_BIT)
+#define CPU_SYSENTER		(1 << CPU_SYSENTER_BIT)
+#define CPU_THERM		(1 << CPU_THERM_BIT)
+#define CPU_MISC		(1 << CPU_MISC_BIT)
+#define CPU_DEBUG		(1 << CPU_DEBUG_BIT)
+#define CPU_PAT			(1 << CPU_PAT_BIT)
+#define CPU_VMX			(1 << CPU_VMX_BIT)
+#define CPU_CALL		(1 << CPU_CALL_BIT)
+#define CPU_BASE		(1 << CPU_BASE_BIT)
+#define CPU_VER			(1 << CPU_VER_BIT)
+#define CPU_CONF		(1 << CPU_CONF_BIT)
+#define CPU_SMM			(1 << CPU_SMM_BIT)
+#define CPU_SVM			(1 << CPU_SVM_BIT)
+#define CPU_OSVM		(1 << CPU_OSVM_BIT)
+#define CPU_TSS			(1 << CPU_TSS_BIT)
+#define CPU_CR			(1 << CPU_CR_BIT)
+#define CPU_DT			(1 << CPU_DT_BIT)
+
+/* Register file flags */
+enum cpu_file_bit {
+	CPU_INDEX_BIT,		/* index	*/
+	CPU_VALUE_BIT,		/* value	*/
+};
+
+#define CPU_FILE_VALUE		(1 << CPU_VALUE_BIT)
+
+/*
+ * DisplayFamily_DisplayModel	Processor Families/Processor Number Series
+ * --------------------------	------------------------------------------
+ * 05_01, 05_02, 05_04		Pentium, Pentium with MMX
+ *
+ * 06_01			Pentium Pro
+ * 06_03, 06_05			Pentium II Xeon, Pentium II
+ * 06_07, 06_08, 06_0A, 06_0B	Pentium III Xeon, Pentum III
+ *
+ * 06_09, 060D			Pentium M
+ *
+ * 06_0E			Core Duo, Core Solo
+ *
+ * 06_0F			Xeon 3000, 3200, 5100, 5300, 7300 series,
+ *				Core 2 Quad, Core 2 Extreme, Core 2 Duo,
+ *				Pentium dual-core
+ * 06_17			Xeon 5200, 5400 series, Core 2 Quad Q9650
+ *
+ * 06_1C			Atom
+ *
+ * 0F_00, 0F_01, 0F_02		Xeon, Xeon MP, Pentium 4
+ * 0F_03, 0F_04			Xeon, Xeon MP, Pentium 4, Pentium D
+ *
+ * 0F_06			Xeon 7100, 5000 Series, Xeon MP,
+ *				Pentium 4, Pentium D
+ */
+
+/* Register processors bits */
+enum cpu_processor_bit {
+	CPU_NONE,
+/* Intel */
+	CPU_INTEL_PENTIUM_BIT,
+	CPU_INTEL_P6_BIT,
+	CPU_INTEL_PENTIUM_M_BIT,
+	CPU_INTEL_CORE_BIT,
+	CPU_INTEL_CORE2_BIT,
+	CPU_INTEL_ATOM_BIT,
+	CPU_INTEL_XEON_P4_BIT,
+	CPU_INTEL_XEON_MP_BIT,
+/* AMD */
+	CPU_AMD_K6_BIT,
+	CPU_AMD_K7_BIT,
+	CPU_AMD_K8_BIT,
+	CPU_AMD_0F_BIT,
+	CPU_AMD_10_BIT,
+	CPU_AMD_11_BIT,
+};
+
+#define CPU_INTEL_PENTIUM	(1 << CPU_INTEL_PENTIUM_BIT)
+#define CPU_INTEL_P6		(1 << CPU_INTEL_P6_BIT)
+#define CPU_INTEL_PENTIUM_M	(1 << CPU_INTEL_PENTIUM_M_BIT)
+#define CPU_INTEL_CORE		(1 << CPU_INTEL_CORE_BIT)
+#define CPU_INTEL_CORE2		(1 << CPU_INTEL_CORE2_BIT)
+#define CPU_INTEL_ATOM		(1 << CPU_INTEL_ATOM_BIT)
+#define CPU_INTEL_XEON_P4	(1 << CPU_INTEL_XEON_P4_BIT)
+#define CPU_INTEL_XEON_MP	(1 << CPU_INTEL_XEON_MP_BIT)
+
+#define CPU_INTEL_PX		(CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
+#define CPU_INTEL_COREX		(CPU_INTEL_CORE | CPU_INTEL_CORE2)
+#define CPU_INTEL_XEON		(CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
+#define CPU_CO_AT		(CPU_INTEL_CORE | CPU_INTEL_ATOM)
+#define CPU_C2_AT		(CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
+#define CPU_CX_AT		(CPU_INTEL_COREX | CPU_INTEL_ATOM)
+#define CPU_CX_XE		(CPU_INTEL_COREX | CPU_INTEL_XEON)
+#define CPU_P6_XE		(CPU_INTEL_P6 | CPU_INTEL_XEON)
+#define CPU_PM_CO_AT		(CPU_INTEL_PENTIUM_M | CPU_CO_AT)
+#define CPU_C2_AT_XE		(CPU_C2_AT | CPU_INTEL_XEON)
+#define CPU_CX_AT_XE		(CPU_CX_AT | CPU_INTEL_XEON)
+#define CPU_P6_CX_AT		(CPU_INTEL_P6 | CPU_CX_AT)
+#define CPU_P6_CX_XE		(CPU_P6_XE | CPU_INTEL_COREX)
+#define CPU_P6_CX_AT_XE		(CPU_INTEL_P6 | CPU_CX_AT_XE)
+#define CPU_PM_CX_AT_XE		(CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
+#define CPU_PM_CX_AT		(CPU_INTEL_PENTIUM_M | CPU_CX_AT)
+#define CPU_PM_CX_XE		(CPU_INTEL_PENTIUM_M | CPU_CX_XE)
+#define CPU_PX_CX_AT		(CPU_INTEL_PX | CPU_CX_AT)
+#define CPU_PX_CX_AT_XE		(CPU_INTEL_PX | CPU_CX_AT_XE)
+
+/* Select all supported Intel CPUs */
+#define CPU_INTEL_ALL		(CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)
+
+#define CPU_AMD_K6		(1 << CPU_AMD_K6_BIT)
+#define CPU_AMD_K7		(1 << CPU_AMD_K7_BIT)
+#define CPU_AMD_K8		(1 << CPU_AMD_K8_BIT)
+#define CPU_AMD_0F		(1 << CPU_AMD_0F_BIT)
+#define CPU_AMD_10		(1 << CPU_AMD_10_BIT)
+#define CPU_AMD_11		(1 << CPU_AMD_11_BIT)
+
+#define CPU_K10_PLUS		(CPU_AMD_10 | CPU_AMD_11)
+#define CPU_K0F_PLUS		(CPU_AMD_0F | CPU_K10_PLUS)
+#define CPU_K8_PLUS		(CPU_AMD_K8 | CPU_K0F_PLUS)
+#define CPU_K7_PLUS		(CPU_AMD_K7 | CPU_K8_PLUS)
+
+/* Select all supported AMD CPUs */
+#define CPU_AMD_ALL		(CPU_AMD_K6 | CPU_K7_PLUS)
+
+/* Select all supported CPUs */
+#define CPU_ALL			(CPU_INTEL_ALL | CPU_AMD_ALL)
+
+#define MAX_CPU_FILES		512
+
+struct cpu_private {
+	unsigned		cpu;
+	unsigned		type;
+	unsigned		reg;
+	unsigned		file;
+};
+
+struct cpu_debug_base {
+	char			*name;		/* Register name	*/
+	unsigned		flag;		/* Register flag	*/
+	unsigned		write;		/* Register write flag	*/
+};
+
+/*
+ * Currently it looks similar to cpu_debug_base but once we add more files
+ * cpu_file_base will go in different direction
+ */
+struct cpu_file_base {
+	char			*name;		/* Register file name	*/
+	unsigned		flag;		/* Register file flag	*/
+	unsigned		write;		/* Register write flag	*/
+};
+
+struct cpu_cpuX_base {
+	struct dentry		*dentry;	/* Register dentry	*/
+	int			init;		/* Register index file	*/
+};
+
+struct cpu_debug_range {
+	unsigned		min;		/* Register range min	*/
+	unsigned		max;		/* Register range max	*/
+	unsigned		flag;		/* Supported flags	*/
+	unsigned		model;		/* Supported models	*/
+};
+
+#endif /* _ASM_X86_CPU_DEBUG_H */
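The flag and model masks in the new header are meant to be combined into cpu_debug_range tables describing which MSR windows are exposed for which CPU families. An illustrative entry, just to show how the pieces fit together; the register window and flag combination below are made up, not taken from this patch:

static struct cpu_debug_range example_range = {
	.min	= 0x00000000,		/* first MSR of the window */
	.max	= 0x00000017,		/* last MSR of the window */
	.flag	= CPU_MC | CPU_TIME,	/* register groups it covers */
	.model	= CPU_INTEL_ALL,	/* CPU models it applies to */
};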
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index a7f3c75f8ad7..61c852fa346b 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -3,8 +3,6 @@
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 
-#ifdef CONFIG_X86_64
-
 extern cpumask_var_t cpu_callin_mask;
 extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
@@ -12,21 +10,5 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
-
-static inline void setup_cpu_local_masks(void) { }
-
-#endif /* CONFIG_X86_32 */
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index dc27705f5443..5623c50d67b2 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -91,7 +91,6 @@ static inline int desc_empty(const void *ptr)
 #define store_gdt(dtr)		native_store_gdt(dtr)
 #define store_idt(dtr)		native_store_idt(dtr)
 #define store_tr(tr)		(tr = native_store_tr())
-#define store_ldt(ldt)		asm("sldt %0":"=m" (ldt))
 
 #define load_TLS(t, cpu)	native_load_tls(t, cpu)
 #define set_ldt	native_set_ldt
@@ -112,6 +111,8 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 }
 #endif	/* CONFIG_PARAVIRT */
 
+#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
+
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
 					  const gate_desc *gate)
 {
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 3c034f48fdb0..4994a20acbcb 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,7 +6,7 @@ struct dev_archdata {
 	void	*acpi_handle;
 #endif
 #ifdef CONFIG_X86_64
-	struct dma_mapping_ops *dma_ops;
+	struct dma_map_ops *dma_ops;
 #endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 132a134d12f2..f82fdc412c64 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,6 +7,8 @@
  */
 
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
@@ -16,47 +18,9 @@ extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void		*(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
-				size_t size, int direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int		(*dma_supported)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_X86_32
 	return dma_ops;
@@ -71,7 +35,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
@@ -90,137 +54,167 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
+	       enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
+	dma_addr_t addr;
+
+	BUG_ON(!valid_dma_direction(dir));
+	addr = ops->map_page(hwdev, virt_to_page(ptr),
+			     (unsigned long)ptr & ~PAGE_MASK, size,
+			     dir, NULL);
+	debug_dma_map_page(hwdev, virt_to_page(ptr),
+			   (unsigned long)ptr & ~PAGE_MASK, size,
+			   dir, addr, true);
+	return addr;
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 int direction)
+		 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->unmap_single)
-		ops->unmap_single(dev, addr, size, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, int direction)
+	   int nents, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
+	int ents;
+
+	BUG_ON(!valid_dma_direction(dir));
+	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
+	debug_dma_map_sg(hwdev, sg, nents, ents, dir);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_sg(hwdev, sg, nents, direction);
+	return ents;
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
+	     enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(hwdev, sg, nents, dir);
 	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, direction);
+		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
+			size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
+	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
+			   size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
+	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_cpu)
 		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, direction);
+					       size, dir);
+	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
+					    offset, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
-				 int direction)
+				 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_device)
 		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, direction);
+						  offset, size, dir);
+	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
+					       offset, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
+		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
+	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
+		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
+	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
 
 	flush_write_buffers();
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction)
+				      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(dev, page_to_phys(page) + offset,
-			       size, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+	return addr;
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, int direction)
+				  size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, addr, size, direction);
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	debug_dma_unmap_page(dev, addr, size, dir, false);
 }
 
 static inline void
@@ -244,7 +238,7 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 	dma_mask = dev->coherent_dma_mask;
 	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
 
 	return dma_mask;
 }
@@ -253,10 +247,10 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
-	if (dma_mask <= DMA_24BIT_MASK)
+	if (dma_mask <= DMA_BIT_MASK(24))
 		gfp |= GFP_DMA;
 #ifdef CONFIG_X86_64
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
 	return gfp;
@@ -266,7 +260,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -285,20 +279,24 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (!ops->alloc_coherent)
 		return NULL;
 
-	return ops->alloc_coherent(dev, size, dma_handle,
+	memory = ops->alloc_coherent(dev, size, dma_handle,
 				   dma_alloc_coherent_gfp_flags(dev, gfp));
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+	return memory;
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t bus)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	WARN_ON(irqs_disabled());	/* for portability */
 
 	if (dma_release_from_coherent(dev, get_order(size), vaddr))
 		return;
 
+	debug_dma_free_coherent(dev, size, vaddr, bus);
 	if (ops->free_coherent)
 		ops->free_coherent(dev, size, vaddr, bus);
 }
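From a driver's point of view the wrappers above keep their old shape; the visible changes are that directions are now enum dma_data_direction values and every call is mirrored into the dma-debug hooks. A short usage sketch against the updated signatures; the example_* name and the chosen error codes are hypothetical:

static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map the buffer for device reads and check the result. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}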
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index bc68212c6bc0..fd8f9e2ca35f 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -1,22 +1,15 @@
 #ifndef _ASM_X86_DMI_H
 #define _ASM_X86_DMI_H
 
-#include <asm/io.h>
-
-#define DMI_MAX_DATA 2048
+#include <linux/compiler.h>
+#include <linux/init.h>
 
-extern int dmi_alloc_index;
-extern char dmi_alloc_data[DMI_MAX_DATA];
+#include <asm/io.h>
+#include <asm/setup.h>
 
-/* This is so early that there is no good way to allocate dynamic memory.
-   Allocate data in an BSS array. */
-static inline void *dmi_alloc(unsigned len)
+static __always_inline __init void *dmi_alloc(unsigned len)
 {
-	int idx = dmi_alloc_index;
-	if ((dmi_alloc_index + len) > DMI_MAX_DATA)
-		return NULL;
-	dmi_alloc_index += len;
-	return dmi_alloc_data + idx;
+	return extend_brk(len, sizeof(int));
 }
 
 /* Use early IO mappings for DMI because it's initialized early */
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 00d41ce4c844..7ecba4d85089 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -72,7 +72,7 @@ extern int e820_all_mapped(u64 start, u64 end, unsigned type);
 extern void e820_add_region(u64 start, u64 size, int type);
 extern void e820_print_map(char *who);
 extern int
-sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, int *pnr_map);
+sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
 extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
 			     unsigned new_type);
 extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 854d538ae857..c2e6bedaf258 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -33,6 +33,8 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
 		 smp_invalidate_interrupt)
 #endif
 
+BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
+
 /*
  * every pentium local APIC has two 'local interrupts', with a
  * soft-definable vector attached to both interrupts, one of
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 63a79c77d220..81937a5dc77c 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -111,6 +111,8 @@ enum fixed_addresses {
 #ifdef CONFIG_PARAVIRT
 	FIX_PARAVIRT_BOOTMAP,
 #endif
+	FIX_TEXT_POKE0,	/* reserve 2 pages for text_poke() */
+	FIX_TEXT_POKE1,
 	__end_of_permanent_fixed_addresses,
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
 	FIX_OHCI1394_BASE,
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b55b4a7fbefd..bd2c6511c887 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,6 +28,13 @@
 
 #endif
 
+/* FIXME: I don't want to stay hardcoded */
+#ifdef CONFIG_X86_64
+# define FTRACE_SYSCALL_MAX	296
+#else
+# define FTRACE_SYSCALL_MAX	333
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
@@ -55,29 +62,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-	unsigned long ret;
-	unsigned long func;
-	unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 176f058e7159..039db6aa8e02 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -12,6 +12,7 @@ typedef struct {
 	unsigned int apic_timer_irqs;	/* arch dependent */
 	unsigned int irq_spurious_count;
 #endif
+	unsigned int generic_irqs;	/* arch dependent */
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index bf9276bea660..014c2b85ae45 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -63,6 +63,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 370e1c83bb49..b762ea49bd70 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -27,6 +27,7 @@
 
 /* Interrupt handlers registered during init_IRQ */
 extern void apic_timer_interrupt(void);
+extern void generic_interrupt(void);
 extern void error_interrupt(void);
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
new file mode 100644
index 000000000000..36fb1a6a5109
--- /dev/null
+++ b/arch/x86/include/asm/init.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_X86_INIT_32_H
+#define _ASM_X86_INIT_32_H
+
+#ifdef CONFIG_X86_32
+extern void __init early_ioremap_page_table_range_init(void);
+#endif
+
+extern unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+			     unsigned long end,
+			     unsigned long page_size_mask);
+
+
+extern unsigned long __initdata e820_table_start;
+extern unsigned long __meminitdata e820_table_end;
+extern unsigned long __meminitdata e820_table_top;
+
+#endif /* _ASM_X86_INIT_32_H */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 59cb4a1317b7..9d826e436010 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -162,9 +162,13 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 
 #ifdef CONFIG_X86_64
-extern int save_mask_IO_APIC_setup(void);
-extern void restore_IO_APIC_setup(void);
-extern void reinit_intr_remapped_IO_APIC(int);
+extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
+extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
+extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
+extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
+extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
+extern void reinit_intr_remapped_IO_APIC(int intr_remapping,
+	struct IO_APIC_route_entry **ioapic_entries);
 #endif
 
 extern void probe_nr_irqs_gsi(void);
@@ -172,7 +176,7 @@ extern void probe_nr_irqs_gsi(void);
 extern int setup_ioapic_entry(int apic, int irq,
 			      struct IO_APIC_route_entry *entry,
 			      unsigned int destination, int trigger,
-			      int polarity, int vector);
+			      int polarity, int vector, int pin);
 extern void ioapic_write_entry(int apic, int pin,
 			       struct IO_APIC_route_entry e);
 #else /* !CONFIG_X86_IO_APIC */
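The reworked save/restore interface hands ownership of the saved routing entries to the caller. A hedged sketch of the intended call order around an interrupt-remapping reconfiguration; the example_* name and the exact return-value conventions are illustrative, not taken from this patch:

static int example_reconfigure_with_saved_ioapics(void)
{
	struct IO_APIC_route_entry **entries;

	entries = alloc_ioapic_entries();
	if (!entries)
		return -ENOMEM;

	if (save_IO_APIC_setup(entries)) {
		free_ioapic_entries(entries);
		return -EIO;
	}

	mask_IO_APIC_setup(entries);	/* quiet the IO-APICs while switching */
	/* ... enable or disable interrupt remapping here ... */
	restore_IO_APIC_setup(entries);

	free_ioapic_entries(entries);
	return 0;
}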
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index a6ee9e6f530f..af326a2975b5 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -3,7 +3,7 @@
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
-extern struct dma_mapping_ops nommu_dma_ops;
+extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 107eb2196691..f38481bcd455 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,6 +36,7 @@ static inline int irq_canonicalize(int irq)
 extern void fixup_irqs(void);
 #endif
 
+extern void (*generic_interrupt_extension)(void);
 extern void init_IRQ(void);
 extern void native_init_IRQ(void);
 extern bool handle_irq(unsigned irq, struct pt_regs *regs);
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 20e1fd588dbf..0396760fccb8 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,8 +1,6 @@
 #ifndef _ASM_X86_IRQ_REMAPPING_H
 #define _ASM_X86_IRQ_REMAPPING_H
 
-extern int x2apic;
-
 #define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
 
 #endif	/* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 8a285f356f8a..3cbd79bbb47c 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -112,6 +112,11 @@
 #define LOCAL_PERF_VECTOR	0xee
 
 /*
+ * Generic system vector for platform specific use
+ */
+#define GENERIC_INTERRUPT_VECTOR	0xed
+
+/*
  * First APIC vector available to drivers: (vectors 0x30-0xee) we
  * start at 0x31(0x41) to spread out vectors evenly between priority
  * levels. (0x80 is the syscall vector)
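GENERIC_INTERRUPT_VECTOR pairs with the generic_interrupt() entry point added in hw_irq.h and the generic_interrupt_extension hook added in irq.h: a platform installs a callback and then receives whatever it signals on vector 0xed. A minimal registration sketch; the example_* handler names are hypothetical:

static void example_platform_event_handler(void)
{
	/* Called from the generic_interrupt vector; core code handles EOI. */
}

static void example_register_platform_vector(void)
{
	generic_interrupt_extension = example_platform_event_handler;
}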
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 0ceb6d19ed30..317ff1703d0b 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -9,13 +9,13 @@
 # define PAGES_NR		4
 #else
 # define PA_CONTROL_PAGE	0
-# define PA_TABLE_PAGE		1
-# define PAGES_NR		2
+# define VA_CONTROL_PAGE	1
+# define PA_TABLE_PAGE		2
+# define PA_SWAP_PAGE		3
+# define PAGES_NR		4
 #endif
 
-#ifdef CONFIG_X86_32
 # define KEXEC_CONTROL_CODE_MAX_SIZE	2048
-#endif
 
 #ifndef __ASSEMBLY__
 
@@ -136,10 +136,11 @@ relocate_kernel(unsigned long indirection_page,
 				unsigned int has_pae,
 				unsigned int preserve_context);
 #else
-NORET_TYPE void
+unsigned long
 relocate_kernel(unsigned long indirection_page,
 		unsigned long page_list,
-		unsigned long start_address) ATTRIB_NORET;
+		unsigned long start_address,
+		unsigned int preserve_context);
 #endif
 
 #define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index 43894428c3c2..0f4ee7148afe 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h | |||
@@ -26,36 +26,20 @@ | |||
26 | 26 | ||
27 | #ifndef __ASSEMBLY__ | 27 | #ifndef __ASSEMBLY__ |
28 | #include <asm/hw_irq.h> | 28 | #include <asm/hw_irq.h> |
29 | #include <asm/kvm_para.h> | ||
29 | 30 | ||
30 | /*G:031 But first, how does our Guest contact the Host to ask for privileged | 31 | /*G:031 But first, how does our Guest contact the Host to ask for privileged |
31 | * operations? There are two ways: the direct way is to make a "hypercall", | 32 | * operations? There are two ways: the direct way is to make a "hypercall", |
32 | * to make requests of the Host Itself. | 33 | * to make requests of the Host Itself. |
33 | * | 34 | * |
34 | * Our hypercall mechanism uses the highest unused trap code (traps 32 and | 35 | * We use the KVM hypercall mechanism. Eighteen hypercalls are |
35 | * above are used by real hardware interrupts). Fifteen hypercalls are | ||
36 | * available: the hypercall number is put in the %eax register, and the | 36 | * available: the hypercall number is put in the %eax register, and the |
37 | * arguments (when required) are placed in %edx, %ebx and %ecx. If a return | 37 | * arguments (when required) are placed in %ebx, %ecx and %edx. If a return |
38 | * value makes sense, it's returned in %eax. | 38 | * value makes sense, it's returned in %eax. |
39 | * | 39 | * |
40 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful | 40 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful |
41 | * Host, rather than returning failure. This reflects Winston Churchill's | 41 | * Host, rather than returning failure. This reflects Winston Churchill's |
42 | * definition of a gentleman: "someone who is only rude intentionally". */ | 42 | * definition of a gentleman: "someone who is only rude intentionally". */ |
43 | static inline unsigned long | ||
44 | hcall(unsigned long call, | ||
45 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
46 | { | ||
47 | /* "int" is the Intel instruction to trigger a trap. */ | ||
48 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | ||
49 | /* The call in %eax (aka "a") might be overwritten */ | ||
50 | : "=a"(call) | ||
51 | /* The arguments are in %eax, %edx, %ebx & %ecx */ | ||
52 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) | ||
53 | /* "memory" means this might write somewhere in memory. | ||
54 | * This isn't true for all calls, but it's safe to tell | ||
55 | * gcc that it might happen so it doesn't get clever. */ | ||
56 | : "memory"); | ||
57 | return call; | ||
58 | } | ||
59 | /*:*/ | 43 | /*:*/ |
60 | 44 | ||
61 | /* Can't use our min() macro here: needs to be a constant */ | 45 | /* Can't use our min() macro here: needs to be a constant */ |
@@ -64,7 +48,7 @@ hcall(unsigned long call, | |||
64 | #define LHCALL_RING_SIZE 64 | 48 | #define LHCALL_RING_SIZE 64 |
65 | struct hcall_args { | 49 | struct hcall_args { |
66 | /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ | 50 | /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ |
67 | unsigned long arg0, arg2, arg3, arg1; | 51 | unsigned long arg0, arg1, arg2, arg3; |
68 | }; | 52 | }; |
69 | 53 | ||
70 | #endif /* !__ASSEMBLY__ */ | 54 | #endif /* !__ASSEMBLY__ */ |
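The rewritten comment above describes the switch from lguest's private trap-based hypercall to the KVM hypercall convention: call number in %eax, arguments in %ebx, %ecx and %edx, result returned in %eax. As a hedged sketch (not the kernel's actual helper), a guest-side wrapper built on kvm_hypercall3() from the newly included <asm/kvm_para.h> could look like this:

    /* Illustrative only: issue an lguest hypercall through the KVM
     * hypercall convention described above.  kvm_hypercall3() puts the
     * call number in %eax and the arguments in %ebx/%ecx/%edx. */
    #include <asm/kvm_para.h>

    static unsigned long example_hcall(unsigned long call, unsigned long arg1,
                                       unsigned long arg2, unsigned long arg3)
    {
            return kvm_hypercall3(call, arg1, arg2, arg3);
    }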
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 9320e2a8a26a..12d55e773eb6 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h | |||
@@ -1,14 +1,11 @@ | |||
1 | #ifndef _ASM_X86_LINKAGE_H | 1 | #ifndef _ASM_X86_LINKAGE_H |
2 | #define _ASM_X86_LINKAGE_H | 2 | #define _ASM_X86_LINKAGE_H |
3 | 3 | ||
4 | #include <linux/stringify.h> | ||
5 | |||
4 | #undef notrace | 6 | #undef notrace |
5 | #define notrace __attribute__((no_instrument_function)) | 7 | #define notrace __attribute__((no_instrument_function)) |
6 | 8 | ||
7 | #ifdef CONFIG_X86_64 | ||
8 | #define __ALIGN .p2align 4,,15 | ||
9 | #define __ALIGN_STR ".p2align 4,,15" | ||
10 | #endif | ||
11 | |||
12 | #ifdef CONFIG_X86_32 | 9 | #ifdef CONFIG_X86_32 |
13 | #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) | 10 | #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) |
14 | /* | 11 | /* |
@@ -50,16 +47,20 @@ | |||
50 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ | 47 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ |
51 | "g" (arg4), "g" (arg5), "g" (arg6)) | 48 | "g" (arg4), "g" (arg5), "g" (arg6)) |
52 | 49 | ||
53 | #endif | 50 | #endif /* CONFIG_X86_32 */ |
51 | |||
52 | #ifdef __ASSEMBLY__ | ||
54 | 53 | ||
55 | #define GLOBAL(name) \ | 54 | #define GLOBAL(name) \ |
56 | .globl name; \ | 55 | .globl name; \ |
57 | name: | 56 | name: |
58 | 57 | ||
59 | #ifdef CONFIG_X86_ALIGNMENT_16 | 58 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16) |
60 | #define __ALIGN .align 16,0x90 | 59 | #define __ALIGN .p2align 4, 0x90 |
61 | #define __ALIGN_STR ".align 16,0x90" | 60 | #define __ALIGN_STR __stringify(__ALIGN) |
62 | #endif | 61 | #endif |
63 | 62 | ||
63 | #endif /* __ASSEMBLY__ */ | ||
64 | |||
64 | #endif /* _ASM_X86_LINKAGE_H */ | 65 | #endif /* _ASM_X86_LINKAGE_H */ |
65 | 66 | ||
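With the hunk above, 64-bit and CONFIG_X86_ALIGNMENT_16 builds share one alignment definition, and the string form is derived from it instead of being duplicated. A minimal standalone illustration of the __stringify() step (local names; assumes <linux/stringify.h>, which this header itself no longer includes):

    /* What the derivation above does: __stringify() turns the assembler
     * directive into the literal string ".p2align 4, 0x90" that inline-asm
     * users of __ALIGN_STR expect. */
    #include <linux/stringify.h>

    #define EXAMPLE_ALIGN      .p2align 4, 0x90
    #define EXAMPLE_ALIGN_STR  __stringify(EXAMPLE_ALIGN)

    static const char *example_align_str = EXAMPLE_ALIGN_STR;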
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 32c6e17b960b..563933e06a35 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -11,6 +11,8 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ | 13 | #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ |
14 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | ||
15 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | ||
14 | 16 | ||
15 | #define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ | 17 | #define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ |
16 | #define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */ | 18 | #define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */ |
@@ -90,14 +92,29 @@ extern int mce_disabled; | |||
90 | 92 | ||
91 | #include <asm/atomic.h> | 93 | #include <asm/atomic.h> |
92 | 94 | ||
95 | void mce_setup(struct mce *m); | ||
93 | void mce_log(struct mce *m); | 96 | void mce_log(struct mce *m); |
94 | DECLARE_PER_CPU(struct sys_device, device_mce); | 97 | DECLARE_PER_CPU(struct sys_device, device_mce); |
95 | extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); | 98 | extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); |
96 | 99 | ||
100 | /* | ||
101 | * To support more than 128 would need to escape the predefined | ||
102 | * Linux defined extended banks first. | ||
103 | */ | ||
104 | #define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1) | ||
105 | |||
97 | #ifdef CONFIG_X86_MCE_INTEL | 106 | #ifdef CONFIG_X86_MCE_INTEL |
98 | void mce_intel_feature_init(struct cpuinfo_x86 *c); | 107 | void mce_intel_feature_init(struct cpuinfo_x86 *c); |
108 | void cmci_clear(void); | ||
109 | void cmci_reenable(void); | ||
110 | void cmci_rediscover(int dying); | ||
111 | void cmci_recheck(void); | ||
99 | #else | 112 | #else |
100 | static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } | 113 | static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } |
114 | static inline void cmci_clear(void) {} | ||
115 | static inline void cmci_reenable(void) {} | ||
116 | static inline void cmci_rediscover(int dying) {} | ||
117 | static inline void cmci_recheck(void) {} | ||
101 | #endif | 118 | #endif |
102 | 119 | ||
103 | #ifdef CONFIG_X86_MCE_AMD | 120 | #ifdef CONFIG_X86_MCE_AMD |
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c); | |||
106 | static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } | 123 | static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } |
107 | #endif | 124 | #endif |
108 | 125 | ||
109 | void mce_log_therm_throt_event(unsigned int cpu, __u64 status); | 126 | extern int mce_available(struct cpuinfo_x86 *c); |
127 | |||
128 | void mce_log_therm_throt_event(__u64 status); | ||
110 | 129 | ||
111 | extern atomic_t mce_entry; | 130 | extern atomic_t mce_entry; |
112 | 131 | ||
113 | extern void do_machine_check(struct pt_regs *, long); | 132 | extern void do_machine_check(struct pt_regs *, long); |
133 | |||
134 | typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS); | ||
135 | DECLARE_PER_CPU(mce_banks_t, mce_poll_banks); | ||
136 | |||
137 | enum mcp_flags { | ||
138 | MCP_TIMESTAMP = (1 << 0), /* log time stamp */ | ||
139 | MCP_UC = (1 << 1), /* log uncorrected errors */ | ||
140 | }; | ||
141 | extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b); | ||
142 | |||
114 | extern int mce_notify_user(void); | 143 | extern int mce_notify_user(void); |
115 | 144 | ||
116 | #endif /* !CONFIG_X86_32 */ | 145 | #endif /* !CONFIG_X86_32 */ |
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c); | |||
120 | #else | 149 | #else |
121 | #define mcheck_init(c) do { } while (0) | 150 | #define mcheck_init(c) do { } while (0) |
122 | #endif | 151 | #endif |
123 | extern void stop_mce(void); | 152 | |
124 | extern void restart_mce(void); | 153 | extern void (*mce_threshold_vector)(void); |
125 | 154 | ||
126 | #endif /* __KERNEL__ */ | 155 | #endif /* __KERNEL__ */ |
127 | #endif /* _ASM_X86_MCE_H */ | 156 | #endif /* _ASM_X86_MCE_H */ |
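The new machine_check_poll() interface takes a flag word plus a per-bank bitmap, so a poller can restrict itself to the banks this CPU owns and choose whether to log time stamps or uncorrected errors. A sketch of a caller, assuming only the declarations added above:

    /* Poll the banks owned by this CPU for corrected errors, attaching a
     * time stamp to anything logged.  mce_poll_banks is the per-cpu
     * ownership bitmap declared above; callers are expected to run pinned
     * to the CPU, e.g. from a per-cpu timer. */
    #include <asm/mce.h>

    static void example_poll_this_cpu(void)
    {
            machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
    }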
diff --git a/arch/x86/include/asm/msidef.h b/arch/x86/include/asm/msidef.h index 6706b3006f13..4cc48af23fef 100644 --- a/arch/x86/include/asm/msidef.h +++ b/arch/x86/include/asm/msidef.h | |||
@@ -47,6 +47,7 @@ | |||
47 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 | 47 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 |
48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ | 48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ |
49 | MSI_ADDR_DEST_ID_MASK) | 49 | MSI_ADDR_DEST_ID_MASK) |
50 | #define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00) | ||
50 | 51 | ||
51 | #define MSI_ADDR_IR_EXT_INT (1 << 4) | 52 | #define MSI_ADDR_IR_EXT_INT (1 << 4) |
52 | #define MSI_ADDR_IR_SHV (1 << 3) | 53 | #define MSI_ADDR_IR_SHV (1 << 3) |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index f4e505f286bc..ec41fc16c167 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -81,6 +81,11 @@ | |||
81 | #define MSR_IA32_MC0_ADDR 0x00000402 | 81 | #define MSR_IA32_MC0_ADDR 0x00000402 |
82 | #define MSR_IA32_MC0_MISC 0x00000403 | 82 | #define MSR_IA32_MC0_MISC 0x00000403 |
83 | 83 | ||
84 | /* These are consecutive and not in the normal 4-register MCE bank block */ | ||
85 | #define MSR_IA32_MC0_CTL2 0x00000280 | ||
86 | #define CMCI_EN (1ULL << 30) | ||
87 | #define CMCI_THRESHOLD_MASK 0xffffULL | ||
88 | |||
84 | #define MSR_P6_PERFCTR0 0x000000c1 | 89 | #define MSR_P6_PERFCTR0 0x000000c1 |
85 | #define MSR_P6_PERFCTR1 0x000000c2 | 90 | #define MSR_P6_PERFCTR1 0x000000c2 |
86 | #define MSR_P6_EVNTSEL0 0x00000186 | 91 | #define MSR_P6_EVNTSEL0 0x00000186 |
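The CMCI control bits live in the per-bank MSR_IA32_MCx_CTL2 registers, which occupy their own consecutive range starting at 0x280 rather than the usual four-register bank blocks. A hedged sketch of how the enable bit and threshold combine, assuming the rdmsrl()/wrmsrl() helpers from <asm/msr.h>:

    /* Enable CMCI on bank 0 with an error-count threshold of 1.  For bank N
     * the register is MSR_IA32_MC0_CTL2 + N, since the CTL2 registers are
     * consecutive. */
    #include <asm/msr.h>

    static void example_enable_cmci_bank0(void)
    {
            u64 val;

            rdmsrl(MSR_IA32_MC0_CTL2, val);
            val &= ~CMCI_THRESHOLD_MASK;    /* drop any stale threshold */
            val |= CMCI_EN | 1;             /* enable, threshold = 1 */
            wrmsrl(MSR_IA32_MC0_CTL2, val);
    }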
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index f1e4a79a6e41..0f915ae649a7 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h | |||
@@ -39,6 +39,11 @@ | |||
39 | #define __VIRTUAL_MASK_SHIFT 32 | 39 | #define __VIRTUAL_MASK_SHIFT 32 |
40 | #endif /* CONFIG_X86_PAE */ | 40 | #endif /* CONFIG_X86_PAE */ |
41 | 41 | ||
42 | /* | ||
43 | * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S) | ||
44 | */ | ||
45 | #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) | ||
46 | |||
42 | #ifndef __ASSEMBLY__ | 47 | #ifndef __ASSEMBLY__ |
43 | 48 | ||
44 | /* | 49 | /* |
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 2d625da6603c..826ad37006ab 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -40,14 +40,8 @@ | |||
40 | 40 | ||
41 | #ifndef __ASSEMBLY__ | 41 | #ifndef __ASSEMBLY__ |
42 | 42 | ||
43 | struct pgprot; | ||
44 | |||
45 | extern int page_is_ram(unsigned long pagenr); | 43 | extern int page_is_ram(unsigned long pagenr); |
46 | extern int devmem_is_allowed(unsigned long pagenr); | 44 | extern int devmem_is_allowed(unsigned long pagenr); |
47 | extern void map_devmem(unsigned long pfn, unsigned long size, | ||
48 | struct pgprot vma_prot); | ||
49 | extern void unmap_devmem(unsigned long pfn, unsigned long size, | ||
50 | struct pgprot vma_prot); | ||
51 | 45 | ||
52 | extern unsigned long max_low_pfn_mapped; | 46 | extern unsigned long max_low_pfn_mapped; |
53 | extern unsigned long max_pfn_mapped; | 47 | extern unsigned long max_pfn_mapped; |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index dfdee0ca57d3..bc384be6aa44 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -319,8 +319,6 @@ struct pv_mmu_ops { | |||
319 | #if PAGETABLE_LEVELS >= 3 | 319 | #if PAGETABLE_LEVELS >= 3 |
320 | #ifdef CONFIG_X86_PAE | 320 | #ifdef CONFIG_X86_PAE |
321 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); | 321 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); |
322 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, | ||
323 | pte_t *ptep, pte_t pte); | ||
324 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, | 322 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, |
325 | pte_t *ptep); | 323 | pte_t *ptep); |
326 | void (*pmd_clear)(pmd_t *pmdp); | 324 | void (*pmd_clear)(pmd_t *pmdp); |
@@ -391,7 +389,7 @@ extern struct pv_lock_ops pv_lock_ops; | |||
391 | 389 | ||
392 | #define paravirt_type(op) \ | 390 | #define paravirt_type(op) \ |
393 | [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ | 391 | [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ |
394 | [paravirt_opptr] "m" (op) | 392 | [paravirt_opptr] "i" (&(op)) |
395 | #define paravirt_clobber(clobber) \ | 393 | #define paravirt_clobber(clobber) \ |
396 | [paravirt_clobber] "i" (clobber) | 394 | [paravirt_clobber] "i" (clobber) |
397 | 395 | ||
@@ -445,7 +443,7 @@ int paravirt_disable_iospace(void); | |||
445 | * offset into the paravirt_patch_template structure, and can therefore be | 443 | * offset into the paravirt_patch_template structure, and can therefore be |
446 | * freely converted back into a structure offset. | 444 | * freely converted back into a structure offset. |
447 | */ | 445 | */ |
448 | #define PARAVIRT_CALL "call *%[paravirt_opptr];" | 446 | #define PARAVIRT_CALL "call *%c[paravirt_opptr];" |
449 | 447 | ||
450 | /* | 448 | /* |
451 | * These macros are intended to wrap calls through one of the paravirt | 449 | * These macros are intended to wrap calls through one of the paravirt |
@@ -1367,13 +1365,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte) | |||
1367 | pte.pte, pte.pte >> 32); | 1365 | pte.pte, pte.pte >> 32); |
1368 | } | 1366 | } |
1369 | 1367 | ||
1370 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, | ||
1371 | pte_t *ptep, pte_t pte) | ||
1372 | { | ||
1373 | /* 5 arg words */ | ||
1374 | pv_mmu_ops.set_pte_present(mm, addr, ptep, pte); | ||
1375 | } | ||
1376 | |||
1377 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | 1368 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
1378 | pte_t *ptep) | 1369 | pte_t *ptep) |
1379 | { | 1370 | { |
@@ -1390,12 +1381,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte) | |||
1390 | set_pte(ptep, pte); | 1381 | set_pte(ptep, pte); |
1391 | } | 1382 | } |
1392 | 1383 | ||
1393 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, | ||
1394 | pte_t *ptep, pte_t pte) | ||
1395 | { | ||
1396 | set_pte(ptep, pte); | ||
1397 | } | ||
1398 | |||
1399 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | 1384 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
1400 | pte_t *ptep) | 1385 | pte_t *ptep) |
1401 | { | 1386 | { |
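The patch-site operand changes from a memory reference ("m" (op)) to the constant address of the op slot ("i" (&(op))), and PARAVIRT_CALL gains the %c operand modifier so that address is printed bare rather than as an immediate. A standalone illustration of the modifier, assuming a non-PIC kernel build where the address of a static object is a link-time constant (names below are hypothetical):

    /* "%c[ptr]" prints the operand without the '$' immediate prefix, so the
     * instruction assembles as a load from the fixed slot
     * ("mov example_slot, %reg") instead of loading its address.  The
     * paravirt macros use the same trick to emit "call *pv_..._ops+off". */
    static unsigned long example_slot = 42;

    static unsigned long example_read_slot(void)
    {
            unsigned long ret;

            asm("mov %c[ptr], %0" : "=r" (ret) : [ptr] "i" (&example_slot));
            return ret;
    }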
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index b0e70056838e..2cd07b9422f4 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_X86_PAT_H | 2 | #define _ASM_X86_PAT_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/pgtable_types.h> | ||
5 | 6 | ||
6 | #ifdef CONFIG_X86_PAT | 7 | #ifdef CONFIG_X86_PAT |
7 | extern int pat_enabled; | 8 | extern int pat_enabled; |
@@ -17,5 +18,9 @@ extern int free_memtype(u64 start, u64 end); | |||
17 | 18 | ||
18 | extern int kernel_map_sync_memtype(u64 base, unsigned long size, | 19 | extern int kernel_map_sync_memtype(u64 base, unsigned long size, |
19 | unsigned long flag); | 20 | unsigned long flag); |
21 | extern void map_devmem(unsigned long pfn, unsigned long size, | ||
22 | struct pgprot vma_prot); | ||
23 | extern void unmap_devmem(unsigned long pfn, unsigned long size, | ||
24 | struct pgprot vma_prot); | ||
20 | 25 | ||
21 | #endif /* _ASM_X86_PAT_H */ | 26 | #endif /* _ASM_X86_PAT_H */ |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index a977de23cb4d..b51a1e8b0baf 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -86,12 +86,43 @@ static inline void early_quirks(void) { } | |||
86 | 86 | ||
87 | extern void pci_iommu_alloc(void); | 87 | extern void pci_iommu_alloc(void); |
88 | 88 | ||
89 | #endif /* __KERNEL__ */ | 89 | /* MSI arch hook */ |
90 | #define arch_setup_msi_irqs arch_setup_msi_irqs | ||
91 | |||
92 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) | ||
93 | |||
94 | #if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG) | ||
95 | |||
96 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
97 | dma_addr_t ADDR_NAME; | ||
98 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
99 | __u32 LEN_NAME; | ||
100 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
101 | ((PTR)->ADDR_NAME) | ||
102 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
103 | (((PTR)->ADDR_NAME) = (VAL)) | ||
104 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
105 | ((PTR)->LEN_NAME) | ||
106 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
107 | (((PTR)->LEN_NAME) = (VAL)) | ||
90 | 108 | ||
91 | #ifdef CONFIG_X86_32 | ||
92 | # include "pci_32.h" | ||
93 | #else | 109 | #else |
94 | # include "pci_64.h" | 110 | |
111 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0]; | ||
112 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0]; | ||
113 | #define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME) | ||
114 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
115 | do { break; } while (pci_unmap_addr(PTR, ADDR_NAME)) | ||
116 | #define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME) | ||
117 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
118 | do { break; } while (pci_unmap_len(PTR, LEN_NAME)) | ||
119 | |||
120 | #endif | ||
121 | |||
122 | #endif /* __KERNEL__ */ | ||
123 | |||
124 | #ifdef CONFIG_X86_64 | ||
125 | #include "pci_64.h" | ||
95 | #endif | 126 | #endif |
96 | 127 | ||
97 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 128 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
@@ -109,11 +140,6 @@ static inline int __pcibus_to_node(const struct pci_bus *bus) | |||
109 | return sd->node; | 140 | return sd->node; |
110 | } | 141 | } |
111 | 142 | ||
112 | static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) | ||
113 | { | ||
114 | return node_to_cpumask(__pcibus_to_node(bus)); | ||
115 | } | ||
116 | |||
117 | static inline const struct cpumask * | 143 | static inline const struct cpumask * |
118 | cpumask_of_pcibus(const struct pci_bus *bus) | 144 | cpumask_of_pcibus(const struct pci_bus *bus) |
119 | { | 145 | { |
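The unmap bookkeeping macros are now defined once in pci.h: real dma_addr_t/__u32 fields are kept whenever unmap information is actually needed (64-bit or DMA API debugging) and collapse to zero-sized placeholders otherwise. A sketch of the driver-side pattern they support (structure and names are hypothetical):

    #include <linux/pci.h>

    /* Per-buffer bookkeeping: on configs where unmapping is a nop, the two
     * DECLARE_* lines add no storage and the setters compile away. */
    struct example_rx_buffer {
            void *cpu_addr;
            DECLARE_PCI_UNMAP_ADDR(mapping)
            DECLARE_PCI_UNMAP_LEN(len)
    };

    static void example_save_mapping(struct example_rx_buffer *buf,
                                     dma_addr_t dma, __u32 size)
    {
            pci_unmap_addr_set(buf, mapping, dma);
            pci_unmap_len_set(buf, len, size);
    }

    static void example_unmap(struct pci_dev *pdev, struct example_rx_buffer *buf)
    {
            pci_unmap_single(pdev, pci_unmap_addr(buf, mapping),
                             pci_unmap_len(buf, len), PCI_DMA_FROMDEVICE);
    }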
diff --git a/arch/x86/include/asm/pci_32.h b/arch/x86/include/asm/pci_32.h deleted file mode 100644 index 6f1213a6ef4f..000000000000 --- a/arch/x86/include/asm/pci_32.h +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | #ifndef _ASM_X86_PCI_32_H | ||
2 | #define _ASM_X86_PCI_32_H | ||
3 | |||
4 | |||
5 | #ifdef __KERNEL__ | ||
6 | |||
7 | |||
8 | /* Dynamic DMA mapping stuff. | ||
9 | * i386 has everything mapped statically. | ||
10 | */ | ||
11 | |||
12 | struct pci_dev; | ||
13 | |||
14 | /* The PCI address space does equal the physical memory | ||
15 | * address space. The networking and block device layers use | ||
16 | * this boolean for bounce buffer decisions. | ||
17 | */ | ||
18 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
19 | |||
20 | /* pci_unmap_{page,single} is a nop so... */ | ||
21 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0]; | ||
22 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0]; | ||
23 | #define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME) | ||
24 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
25 | do { break; } while (pci_unmap_addr(PTR, ADDR_NAME)) | ||
26 | #define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME) | ||
27 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
28 | do { break; } while (pci_unmap_len(PTR, LEN_NAME)) | ||
29 | |||
30 | |||
31 | #endif /* __KERNEL__ */ | ||
32 | |||
33 | |||
34 | #endif /* _ASM_X86_PCI_32_H */ | ||
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h index 4da207982777..ae5e40f67daf 100644 --- a/arch/x86/include/asm/pci_64.h +++ b/arch/x86/include/asm/pci_64.h | |||
@@ -24,28 +24,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn, | |||
24 | 24 | ||
25 | extern void dma32_reserve_bootmem(void); | 25 | extern void dma32_reserve_bootmem(void); |
26 | 26 | ||
27 | /* The PCI address space does equal the physical memory | ||
28 | * address space. The networking and block device layers use | ||
29 | * this boolean for bounce buffer decisions | ||
30 | * | ||
31 | * On AMD64 it mostly equals, but we set it to zero if a hardware | ||
32 | * IOMMU (gart) or software IOMMU (swiotlb) is available. | ||
33 | */ | ||
34 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) | ||
35 | |||
36 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
37 | dma_addr_t ADDR_NAME; | ||
38 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
39 | __u32 LEN_NAME; | ||
40 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
41 | ((PTR)->ADDR_NAME) | ||
42 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
43 | (((PTR)->ADDR_NAME) = (VAL)) | ||
44 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
45 | ((PTR)->LEN_NAME) | ||
46 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
47 | (((PTR)->LEN_NAME) = (VAL)) | ||
48 | |||
49 | #endif /* __KERNEL__ */ | 27 | #endif /* __KERNEL__ */ |
50 | 28 | ||
51 | #endif /* _ASM_X86_PCI_64_H */ | 29 | #endif /* _ASM_X86_PCI_64_H */ |
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index c1774ac9da7a..2334982b339e 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -26,13 +26,6 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | |||
26 | native_set_pte(ptep, pte); | 26 | native_set_pte(ptep, pte); |
27 | } | 27 | } |
28 | 28 | ||
29 | static inline void native_set_pte_present(struct mm_struct *mm, | ||
30 | unsigned long addr, | ||
31 | pte_t *ptep, pte_t pte) | ||
32 | { | ||
33 | native_set_pte(ptep, pte); | ||
34 | } | ||
35 | |||
36 | static inline void native_pmd_clear(pmd_t *pmdp) | 29 | static inline void native_pmd_clear(pmd_t *pmdp) |
37 | { | 30 | { |
38 | native_set_pmd(pmdp, __pmd(0)); | 31 | native_set_pmd(pmdp, __pmd(0)); |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 3f13cdf61156..177b0165ea01 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -31,23 +31,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) | |||
31 | ptep->pte_low = pte.pte_low; | 31 | ptep->pte_low = pte.pte_low; |
32 | } | 32 | } |
33 | 33 | ||
34 | /* | ||
35 | * Since this is only called on user PTEs, and the page fault handler | ||
36 | * must handle the already racy situation of simultaneous page faults, | ||
37 | * we are justified in merely clearing the PTE present bit, followed | ||
38 | * by a set. The ordering here is important. | ||
39 | */ | ||
40 | static inline void native_set_pte_present(struct mm_struct *mm, | ||
41 | unsigned long addr, | ||
42 | pte_t *ptep, pte_t pte) | ||
43 | { | ||
44 | ptep->pte_low = 0; | ||
45 | smp_wmb(); | ||
46 | ptep->pte_high = pte.pte_high; | ||
47 | smp_wmb(); | ||
48 | ptep->pte_low = pte.pte_low; | ||
49 | } | ||
50 | |||
51 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | 34 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
52 | { | 35 | { |
53 | set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); | 36 | set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 24e42836e921..b27c4f29b5e0 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -31,8 +31,6 @@ extern struct list_head pgd_list; | |||
31 | #define set_pte(ptep, pte) native_set_pte(ptep, pte) | 31 | #define set_pte(ptep, pte) native_set_pte(ptep, pte) |
32 | #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) | 32 | #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) |
33 | 33 | ||
34 | #define set_pte_present(mm, addr, ptep, pte) \ | ||
35 | native_set_pte_present(mm, addr, ptep, pte) | ||
36 | #define set_pte_atomic(ptep, pte) \ | 34 | #define set_pte_atomic(ptep, pte) \ |
37 | native_set_pte_atomic(ptep, pte) | 35 | native_set_pte_atomic(ptep, pte) |
38 | 36 | ||
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 97612fc7632f..31bd120cf2a2 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -42,9 +42,6 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); | |||
42 | */ | 42 | */ |
43 | #undef TEST_ACCESS_OK | 43 | #undef TEST_ACCESS_OK |
44 | 44 | ||
45 | /* The boot page tables (all created as a single array) */ | ||
46 | extern unsigned long pg0[]; | ||
47 | |||
48 | #ifdef CONFIG_X86_PAE | 45 | #ifdef CONFIG_X86_PAE |
49 | # include <asm/pgtable-3level.h> | 46 | # include <asm/pgtable-3level.h> |
50 | #else | 47 | #else |
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index bd8df3b2fe04..2733fad45f98 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h | |||
@@ -25,6 +25,11 @@ | |||
25 | * area for the same reason. ;) | 25 | * area for the same reason. ;) |
26 | */ | 26 | */ |
27 | #define VMALLOC_OFFSET (8 * 1024 * 1024) | 27 | #define VMALLOC_OFFSET (8 * 1024 * 1024) |
28 | |||
29 | #ifndef __ASSEMBLER__ | ||
30 | extern bool __vmalloc_start_set; /* set once high_memory is set */ | ||
31 | #endif | ||
32 | |||
28 | #define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) | 33 | #define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) |
29 | #ifdef CONFIG_X86_PAE | 34 | #ifdef CONFIG_X86_PAE |
30 | #define LAST_PKMAP 512 | 35 | #define LAST_PKMAP 512 |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 4d258ad76a0f..b8238dc8786d 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -273,6 +273,7 @@ typedef struct page *pgtable_t; | |||
273 | 273 | ||
274 | extern pteval_t __supported_pte_mask; | 274 | extern pteval_t __supported_pte_mask; |
275 | extern int nx_enabled; | 275 | extern int nx_enabled; |
276 | extern void set_nx(void); | ||
276 | 277 | ||
277 | #define pgprot_writecombine pgprot_writecombine | 278 | #define pgprot_writecombine pgprot_writecombine |
278 | extern pgprot_t pgprot_writecombine(pgprot_t prot); | 279 | extern pgprot_t pgprot_writecombine(pgprot_t prot); |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 76139506c3e4..34c52370f2fe 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -75,9 +75,9 @@ struct cpuinfo_x86 { | |||
75 | #else | 75 | #else |
76 | /* Number of 4K pages in DTLB/ITLB combined(in pages): */ | 76 | /* Number of 4K pages in DTLB/ITLB combined(in pages): */ |
77 | int x86_tlbsize; | 77 | int x86_tlbsize; |
78 | #endif | ||
78 | __u8 x86_virt_bits; | 79 | __u8 x86_virt_bits; |
79 | __u8 x86_phys_bits; | 80 | __u8 x86_phys_bits; |
80 | #endif | ||
81 | /* CPUID returned core id bits: */ | 81 | /* CPUID returned core id bits: */ |
82 | __u8 x86_coreid_bits; | 82 | __u8 x86_coreid_bits; |
83 | /* Max extended CPUID function supported: */ | 83 | /* Max extended CPUID function supported: */ |
@@ -94,7 +94,7 @@ struct cpuinfo_x86 { | |||
94 | unsigned long loops_per_jiffy; | 94 | unsigned long loops_per_jiffy; |
95 | #ifdef CONFIG_SMP | 95 | #ifdef CONFIG_SMP |
96 | /* cpus sharing the last level cache: */ | 96 | /* cpus sharing the last level cache: */ |
97 | cpumask_t llc_shared_map; | 97 | cpumask_var_t llc_shared_map; |
98 | #endif | 98 | #endif |
99 | /* cpuid returned max cores value: */ | 99 | /* cpuid returned max cores value: */ |
100 | u16 x86_max_cores; | 100 | u16 x86_max_cores; |
@@ -391,6 +391,9 @@ DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); | |||
391 | DECLARE_INIT_PER_CPU(irq_stack_union); | 391 | DECLARE_INIT_PER_CPU(irq_stack_union); |
392 | 392 | ||
393 | DECLARE_PER_CPU(char *, irq_stack_ptr); | 393 | DECLARE_PER_CPU(char *, irq_stack_ptr); |
394 | DECLARE_PER_CPU(unsigned int, irq_count); | ||
395 | extern unsigned long kernel_eflags; | ||
396 | extern asmlinkage void ignore_sysret(void); | ||
394 | #else /* X86_64 */ | 397 | #else /* X86_64 */ |
395 | #ifdef CONFIG_CC_STACKPROTECTOR | 398 | #ifdef CONFIG_CC_STACKPROTECTOR |
396 | DECLARE_PER_CPU(unsigned long, stack_canary); | 399 | DECLARE_PER_CPU(unsigned long, stack_canary); |
@@ -733,6 +736,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | |||
733 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | 736 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); |
734 | 737 | ||
735 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 738 | extern void select_idle_routine(const struct cpuinfo_x86 *c); |
739 | extern void init_c1e_mask(void); | ||
736 | 740 | ||
737 | extern unsigned long boot_option_idle_override; | 741 | extern unsigned long boot_option_idle_override; |
738 | extern unsigned long idle_halt; | 742 | extern unsigned long idle_halt; |
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h index 8e0f8d199e05..86723035a515 100644 --- a/arch/x86/include/asm/ptrace-abi.h +++ b/arch/x86/include/asm/ptrace-abi.h | |||
@@ -80,8 +80,6 @@ | |||
80 | 80 | ||
81 | #define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ | 81 | #define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ |
82 | 82 | ||
83 | #ifdef CONFIG_X86_PTRACE_BTS | ||
84 | |||
85 | #ifndef __ASSEMBLY__ | 83 | #ifndef __ASSEMBLY__ |
86 | #include <linux/types.h> | 84 | #include <linux/types.h> |
87 | 85 | ||
@@ -140,6 +138,5 @@ struct ptrace_bts_config { | |||
140 | BTS records are read from oldest to newest. | 138 | BTS records are read from oldest to newest. |
141 | Returns number of BTS records drained. | 139 | Returns number of BTS records drained. |
142 | */ | 140 | */ |
143 | #endif /* CONFIG_X86_PTRACE_BTS */ | ||
144 | 141 | ||
145 | #endif /* _ASM_X86_PTRACE_ABI_H */ | 142 | #endif /* _ASM_X86_PTRACE_ABI_H */ |
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h index 2b8c5160388f..1b7ee5d673c2 100644 --- a/arch/x86/include/asm/sections.h +++ b/arch/x86/include/asm/sections.h | |||
@@ -1 +1,8 @@ | |||
1 | #ifndef _ASM_X86_SECTIONS_H | ||
2 | #define _ASM_X86_SECTIONS_H | ||
3 | |||
1 | #include <asm-generic/sections.h> | 4 | #include <asm-generic/sections.h> |
5 | |||
6 | extern char __brk_base[], __brk_limit[]; | ||
7 | |||
8 | #endif /* _ASM_X86_SECTIONS_H */ | ||
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 05c6f6b11fd5..bdc2ada05ae0 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -64,7 +64,7 @@ extern void x86_quirk_time_init(void); | |||
64 | #include <asm/bootparam.h> | 64 | #include <asm/bootparam.h> |
65 | 65 | ||
66 | /* Interrupt control for vSMPowered x86_64 systems */ | 66 | /* Interrupt control for vSMPowered x86_64 systems */ |
67 | #ifdef CONFIG_X86_VSMP | 67 | #ifdef CONFIG_X86_64 |
68 | void vsmp_init(void); | 68 | void vsmp_init(void); |
69 | #else | 69 | #else |
70 | static inline void vsmp_init(void) { } | 70 | static inline void vsmp_init(void) { } |
@@ -100,20 +100,51 @@ extern struct boot_params boot_params; | |||
100 | */ | 100 | */ |
101 | #define LOWMEMSIZE() (0x9f000) | 101 | #define LOWMEMSIZE() (0x9f000) |
102 | 102 | ||
103 | /* exceedingly early brk-like allocator */ | ||
104 | extern unsigned long _brk_end; | ||
105 | void *extend_brk(size_t size, size_t align); | ||
106 | |||
107 | /* | ||
108 | * Reserve space in the brk section. The name must be unique within | ||
109 | * the file, and somewhat descriptive. The size is in bytes. Must be | ||
110 | * used at file scope. | ||
111 | * | ||
112 | * (This uses a temp function to wrap the asm so we can pass it the | ||
113 | * size parameter; otherwise we wouldn't be able to. We can't use a | ||
114 | * "section" attribute on a normal variable because it always ends up | ||
115 | * being @progbits, which ends up allocating space in the vmlinux | ||
116 | * executable.) | ||
117 | */ | ||
118 | #define RESERVE_BRK(name,sz) \ | ||
119 | static void __section(.discard) __used \ | ||
120 | __brk_reservation_fn_##name##__(void) { \ | ||
121 | asm volatile ( \ | ||
122 | ".pushsection .brk_reservation,\"aw\",@nobits;" \ | ||
123 | ".brk." #name ":" \ | ||
124 | " 1:.skip %c0;" \ | ||
125 | " .size .brk." #name ", . - 1b;" \ | ||
126 | " .popsection" \ | ||
127 | : : "i" (sz)); \ | ||
128 | } | ||
129 | |||
103 | #ifdef __i386__ | 130 | #ifdef __i386__ |
104 | 131 | ||
105 | void __init i386_start_kernel(void); | 132 | void __init i386_start_kernel(void); |
106 | extern void probe_roms(void); | 133 | extern void probe_roms(void); |
107 | 134 | ||
108 | extern unsigned long init_pg_tables_start; | ||
109 | extern unsigned long init_pg_tables_end; | ||
110 | |||
111 | #else | 135 | #else |
112 | void __init x86_64_start_kernel(char *real_mode); | 136 | void __init x86_64_start_kernel(char *real_mode); |
113 | void __init x86_64_start_reservations(char *real_mode_data); | 137 | void __init x86_64_start_reservations(char *real_mode_data); |
114 | 138 | ||
115 | #endif /* __i386__ */ | 139 | #endif /* __i386__ */ |
116 | #endif /* _SETUP */ | 140 | #endif /* _SETUP */ |
141 | #else | ||
142 | #define RESERVE_BRK(name,sz) \ | ||
143 | .pushsection .brk_reservation,"aw",@nobits; \ | ||
144 | .brk.name: \ | ||
145 | 1: .skip sz; \ | ||
146 | .size .brk.name,.-1b; \ | ||
147 | .popsection | ||
117 | #endif /* __ASSEMBLY__ */ | 148 | #endif /* __ASSEMBLY__ */ |
118 | #endif /* __KERNEL__ */ | 149 | #endif /* __KERNEL__ */ |
119 | 150 | ||
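The comment above explains why the reservation has to go through an asm wrapper (a plain section attribute would allocate @progbits space in vmlinux). A sketch of the intended usage, with hypothetical names, pairing a file-scope RESERVE_BRK() with early extend_brk() allocations:

    #include <linux/init.h>
    #include <asm/setup.h>

    /* Reserve 16 KB of .brk space for this file's early allocations; the
     * reservation is a worst-case upper bound, not an allocation. */
    RESERVE_BRK(example_early_pool, 16 * 1024);

    static void * __init example_early_alloc(size_t size)
    {
            /* Carve a long-aligned chunk out of the brk area; only valid
             * very early in boot, before the brk is finalized. */
            return extend_brk(size, sizeof(long));
    }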
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 47d0e21f2b9e..19e0d88b966d 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -21,19 +21,19 @@ | |||
21 | extern int smp_num_siblings; | 21 | extern int smp_num_siblings; |
22 | extern unsigned int num_processors; | 22 | extern unsigned int num_processors; |
23 | 23 | ||
24 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | 24 | DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); |
25 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | 25 | DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); |
26 | DECLARE_PER_CPU(u16, cpu_llc_id); | 26 | DECLARE_PER_CPU(u16, cpu_llc_id); |
27 | DECLARE_PER_CPU(int, cpu_number); | 27 | DECLARE_PER_CPU(int, cpu_number); |
28 | 28 | ||
29 | static inline struct cpumask *cpu_sibling_mask(int cpu) | 29 | static inline struct cpumask *cpu_sibling_mask(int cpu) |
30 | { | 30 | { |
31 | return &per_cpu(cpu_sibling_map, cpu); | 31 | return per_cpu(cpu_sibling_map, cpu); |
32 | } | 32 | } |
33 | 33 | ||
34 | static inline struct cpumask *cpu_core_mask(int cpu) | 34 | static inline struct cpumask *cpu_core_mask(int cpu) |
35 | { | 35 | { |
36 | return &per_cpu(cpu_core_map, cpu); | 36 | return per_cpu(cpu_core_map, cpu); |
37 | } | 37 | } |
38 | 38 | ||
39 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); | 39 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); |
@@ -121,9 +121,10 @@ static inline void arch_send_call_function_single_ipi(int cpu) | |||
121 | smp_ops.send_call_func_single_ipi(cpu); | 121 | smp_ops.send_call_func_single_ipi(cpu); |
122 | } | 122 | } |
123 | 123 | ||
124 | static inline void arch_send_call_function_ipi(cpumask_t mask) | 124 | #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask |
125 | static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
125 | { | 126 | { |
126 | smp_ops.send_call_func_ipi(&mask); | 127 | smp_ops.send_call_func_ipi(mask); |
127 | } | 128 | } |
128 | 129 | ||
129 | void cpu_disable_common(void); | 130 | void cpu_disable_common(void); |
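With the sibling and core maps converted to cpumask_var_t, callers can no longer take the address of a per-cpu cpumask_t; they go through the cpu_sibling_mask()/cpu_core_mask() accessors instead. A minimal sketch of a consumer:

    #include <linux/cpumask.h>
    #include <asm/smp.h>

    /* Count the hardware threads sharing a core with 'cpu', including
     * 'cpu' itself. */
    static int example_count_siblings(int cpu)
    {
            int sibling, count = 0;

            for_each_cpu(sibling, cpu_sibling_mask(cpu))
                    count++;
            return count;
    }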
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 3a5696656680..e5e6caffec87 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -295,6 +295,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); | 295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); |
296 | } | 296 | } |
297 | 297 | ||
298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
300 | |||
298 | #define _raw_spin_relax(lock) cpu_relax() | 301 | #define _raw_spin_relax(lock) cpu_relax() |
299 | #define _raw_read_relax(lock) cpu_relax() | 302 | #define _raw_read_relax(lock) cpu_relax() |
300 | #define _raw_write_relax(lock) cpu_relax() | 303 | #define _raw_write_relax(lock) cpu_relax() |
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index a5074bd0f8be..48dcfa62ea07 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h | |||
@@ -24,28 +24,4 @@ struct saved_context { | |||
24 | unsigned long return_address; | 24 | unsigned long return_address; |
25 | } __attribute__((packed)); | 25 | } __attribute__((packed)); |
26 | 26 | ||
27 | #ifdef CONFIG_ACPI | ||
28 | extern unsigned long saved_eip; | ||
29 | extern unsigned long saved_esp; | ||
30 | extern unsigned long saved_ebp; | ||
31 | extern unsigned long saved_ebx; | ||
32 | extern unsigned long saved_esi; | ||
33 | extern unsigned long saved_edi; | ||
34 | |||
35 | static inline void acpi_save_register_state(unsigned long return_point) | ||
36 | { | ||
37 | saved_eip = return_point; | ||
38 | asm volatile("movl %%esp,%0" : "=m" (saved_esp)); | ||
39 | asm volatile("movl %%ebp,%0" : "=m" (saved_ebp)); | ||
40 | asm volatile("movl %%ebx,%0" : "=m" (saved_ebx)); | ||
41 | asm volatile("movl %%edi,%0" : "=m" (saved_edi)); | ||
42 | asm volatile("movl %%esi,%0" : "=m" (saved_esi)); | ||
43 | } | ||
44 | |||
45 | #define acpi_restore_register_state() do {} while (0) | ||
46 | |||
47 | /* routines for saving/restoring kernel state */ | ||
48 | extern int acpi_save_state_mem(void); | ||
49 | #endif | ||
50 | |||
51 | #endif /* _ASM_X86_SUSPEND_32_H */ | 27 | #endif /* _ASM_X86_SUSPEND_32_H */ |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 2f34d643b567..602c769fc98c 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -95,6 +95,7 @@ struct thread_info { | |||
95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | 95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ |
96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | 96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ |
97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ | 97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ |
98 | #define TIF_SYSCALL_FTRACE 28 /* for ftrace syscall instrumentation */ | ||
98 | 99 | ||
99 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
100 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
@@ -117,15 +118,17 @@ struct thread_info { | |||
117 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | 118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
118 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | 119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
119 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) | 120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) |
121 | #define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) | ||
120 | 122 | ||
121 | /* work to do in syscall_trace_enter() */ | 123 | /* work to do in syscall_trace_enter() */ |
122 | #define _TIF_WORK_SYSCALL_ENTRY \ | 124 | #define _TIF_WORK_SYSCALL_ENTRY \ |
123 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \ | 125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \ |
124 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) | 126 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) |
125 | 127 | ||
126 | /* work to do in syscall_trace_leave() */ | 128 | /* work to do in syscall_trace_leave() */ |
127 | #define _TIF_WORK_SYSCALL_EXIT \ | 129 | #define _TIF_WORK_SYSCALL_EXIT \ |
128 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP) | 130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ |
131 | _TIF_SYSCALL_FTRACE) | ||
129 | 132 | ||
130 | /* work to do on interrupt/exception return */ | 133 | /* work to do on interrupt/exception return */ |
131 | #define _TIF_WORK_MASK \ | 134 | #define _TIF_WORK_MASK \ |
@@ -134,7 +137,7 @@ struct thread_info { | |||
134 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) | 137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) |
135 | 138 | ||
136 | /* work to do on any return to user space */ | 139 | /* work to do on any return to user space */ |
137 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 140 | #define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE) |
138 | 141 | ||
139 | /* Only used for 64 bit */ | 142 | /* Only used for 64 bit */ |
140 | #define _TIF_DO_NOTIFY_MASK \ | 143 | #define _TIF_DO_NOTIFY_MASK \ |
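Because _TIF_SYSCALL_FTRACE is folded into _TIF_WORK_SYSCALL_ENTRY, _TIF_WORK_SYSCALL_EXIT and _TIF_ALLWORK_MASK above, setting the flag on a task forces its syscalls through the slow entry/exit paths where the ftrace hooks run. A sketch of toggling it (hypothetical helper):

    #include <linux/sched.h>

    static void example_toggle_syscall_ftrace(struct task_struct *task, int on)
    {
            if (on)
                    set_tsk_thread_flag(task, TIF_SYSCALL_FTRACE);
            else
                    clear_tsk_thread_flag(task, TIF_SYSCALL_FTRACE);
    }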
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index a81195eaa2b3..bd37ed444a21 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h | |||
@@ -12,9 +12,9 @@ unsigned long native_calibrate_tsc(void); | |||
12 | 12 | ||
13 | #ifdef CONFIG_X86_32 | 13 | #ifdef CONFIG_X86_32 |
14 | extern int timer_ack; | 14 | extern int timer_ack; |
15 | extern int recalibrate_cpu_khz(void); | ||
16 | extern irqreturn_t timer_interrupt(int irq, void *dev_id); | 15 | extern irqreturn_t timer_interrupt(int irq, void *dev_id); |
17 | #endif /* CONFIG_X86_32 */ | 16 | #endif /* CONFIG_X86_32 */ |
17 | extern int recalibrate_cpu_khz(void); | ||
18 | 18 | ||
19 | extern int no_timer_check; | 19 | extern int no_timer_check; |
20 | 20 | ||
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 77cfb2cfb386..892b119dba6f 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -44,9 +44,6 @@ | |||
44 | 44 | ||
45 | #ifdef CONFIG_X86_32 | 45 | #ifdef CONFIG_X86_32 |
46 | 46 | ||
47 | /* Mappings between node number and cpus on that node. */ | ||
48 | extern cpumask_t node_to_cpumask_map[]; | ||
49 | |||
50 | /* Mappings between logical cpu number and node number */ | 47 | /* Mappings between logical cpu number and node number */ |
51 | extern int cpu_to_node_map[]; | 48 | extern int cpu_to_node_map[]; |
52 | 49 | ||
@@ -57,30 +54,8 @@ static inline int cpu_to_node(int cpu) | |||
57 | } | 54 | } |
58 | #define early_cpu_to_node(cpu) cpu_to_node(cpu) | 55 | #define early_cpu_to_node(cpu) cpu_to_node(cpu) |
59 | 56 | ||
60 | /* Returns a bitmask of CPUs on Node 'node'. | ||
61 | * | ||
62 | * Side note: this function creates the returned cpumask on the stack | ||
63 | * so with a high NR_CPUS count, excessive stack space is used. The | ||
64 | * cpumask_of_node function should be used whenever possible. | ||
65 | */ | ||
66 | static inline cpumask_t node_to_cpumask(int node) | ||
67 | { | ||
68 | return node_to_cpumask_map[node]; | ||
69 | } | ||
70 | |||
71 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
72 | static inline const struct cpumask *cpumask_of_node(int node) | ||
73 | { | ||
74 | return &node_to_cpumask_map[node]; | ||
75 | } | ||
76 | |||
77 | static inline void setup_node_to_cpumask_map(void) { } | ||
78 | |||
79 | #else /* CONFIG_X86_64 */ | 57 | #else /* CONFIG_X86_64 */ |
80 | 58 | ||
81 | /* Mappings between node number and cpus on that node. */ | ||
82 | extern cpumask_t *node_to_cpumask_map; | ||
83 | |||
84 | /* Mappings between logical cpu number and node number */ | 59 | /* Mappings between logical cpu number and node number */ |
85 | DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); | 60 | DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); |
86 | 61 | ||
@@ -91,8 +66,6 @@ DECLARE_PER_CPU(int, node_number); | |||
91 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 66 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
92 | extern int cpu_to_node(int cpu); | 67 | extern int cpu_to_node(int cpu); |
93 | extern int early_cpu_to_node(int cpu); | 68 | extern int early_cpu_to_node(int cpu); |
94 | extern const cpumask_t *cpumask_of_node(int node); | ||
95 | extern cpumask_t node_to_cpumask(int node); | ||
96 | 69 | ||
97 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 70 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
98 | 71 | ||
@@ -108,42 +81,32 @@ static inline int early_cpu_to_node(int cpu) | |||
108 | return early_per_cpu(x86_cpu_to_node_map, cpu); | 81 | return early_per_cpu(x86_cpu_to_node_map, cpu); |
109 | } | 82 | } |
110 | 83 | ||
111 | /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ | 84 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
112 | static inline const cpumask_t *cpumask_of_node(int node) | 85 | |
113 | { | 86 | #endif /* CONFIG_X86_64 */ |
114 | return &node_to_cpumask_map[node]; | ||
115 | } | ||
116 | 87 | ||
117 | /* Returns a bitmask of CPUs on Node 'node'. */ | 88 | /* Mappings between node number and cpus on that node. */ |
118 | static inline cpumask_t node_to_cpumask(int node) | 89 | extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
90 | |||
91 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
92 | extern const struct cpumask *cpumask_of_node(int node); | ||
93 | #else | ||
94 | /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ | ||
95 | static inline const struct cpumask *cpumask_of_node(int node) | ||
119 | { | 96 | { |
120 | return node_to_cpumask_map[node]; | 97 | return node_to_cpumask_map[node]; |
121 | } | 98 | } |
122 | 99 | #endif | |
123 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | ||
124 | 100 | ||
125 | extern void setup_node_to_cpumask_map(void); | 101 | extern void setup_node_to_cpumask_map(void); |
126 | 102 | ||
127 | /* | 103 | /* |
128 | * Replace default node_to_cpumask_ptr with optimized version | ||
129 | * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" | ||
130 | */ | ||
131 | #define node_to_cpumask_ptr(v, node) \ | ||
132 | const cpumask_t *v = cpumask_of_node(node) | ||
133 | |||
134 | #define node_to_cpumask_ptr_next(v, node) \ | ||
135 | v = cpumask_of_node(node) | ||
136 | |||
137 | #endif /* CONFIG_X86_64 */ | ||
138 | |||
139 | /* | ||
140 | * Returns the number of the node containing Node 'node'. This | 104 | * Returns the number of the node containing Node 'node'. This |
141 | * architecture is flat, so it is a pretty simple function! | 105 | * architecture is flat, so it is a pretty simple function! |
142 | */ | 106 | */ |
143 | #define parent_node(node) (node) | 107 | #define parent_node(node) (node) |
144 | 108 | ||
145 | #define pcibus_to_node(bus) __pcibus_to_node(bus) | 109 | #define pcibus_to_node(bus) __pcibus_to_node(bus) |
146 | #define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus) | ||
147 | 110 | ||
148 | #ifdef CONFIG_X86_32 | 111 | #ifdef CONFIG_X86_32 |
149 | extern unsigned long node_start_pfn[]; | 112 | extern unsigned long node_start_pfn[]; |
@@ -209,52 +172,24 @@ static inline int early_cpu_to_node(int cpu) | |||
209 | return 0; | 172 | return 0; |
210 | } | 173 | } |
211 | 174 | ||
212 | static inline const cpumask_t *cpumask_of_node(int node) | 175 | static inline const struct cpumask *cpumask_of_node(int node) |
213 | { | ||
214 | return &cpu_online_map; | ||
215 | } | ||
216 | static inline cpumask_t node_to_cpumask(int node) | ||
217 | { | ||
218 | return cpu_online_map; | ||
219 | } | ||
220 | static inline int node_to_first_cpu(int node) | ||
221 | { | 176 | { |
222 | return first_cpu(cpu_online_map); | 177 | return cpu_online_mask; |
223 | } | 178 | } |
224 | 179 | ||
225 | static inline void setup_node_to_cpumask_map(void) { } | 180 | static inline void setup_node_to_cpumask_map(void) { } |
226 | 181 | ||
227 | /* | ||
228 | * Replace default node_to_cpumask_ptr with optimized version | ||
229 | * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" | ||
230 | */ | ||
231 | #define node_to_cpumask_ptr(v, node) \ | ||
232 | const cpumask_t *v = cpumask_of_node(node) | ||
233 | |||
234 | #define node_to_cpumask_ptr_next(v, node) \ | ||
235 | v = cpumask_of_node(node) | ||
236 | #endif | 182 | #endif |
237 | 183 | ||
238 | #include <asm-generic/topology.h> | 184 | #include <asm-generic/topology.h> |
239 | 185 | ||
240 | #ifdef CONFIG_NUMA | ||
241 | /* Returns the number of the first CPU on Node 'node'. */ | ||
242 | static inline int node_to_first_cpu(int node) | ||
243 | { | ||
244 | return cpumask_first(cpumask_of_node(node)); | ||
245 | } | ||
246 | #endif | ||
247 | |||
248 | extern cpumask_t cpu_coregroup_map(int cpu); | ||
249 | extern const struct cpumask *cpu_coregroup_mask(int cpu); | 186 | extern const struct cpumask *cpu_coregroup_mask(int cpu); |
250 | 187 | ||
251 | #ifdef ENABLE_TOPO_DEFINES | 188 | #ifdef ENABLE_TOPO_DEFINES |
252 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) | 189 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) |
253 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | 190 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) |
254 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) | 191 | #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) |
255 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) | 192 | #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) |
256 | #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) | ||
257 | #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) | ||
258 | 193 | ||
259 | /* indicates that pointers to the topology cpumask_t maps are valid */ | 194 | /* indicates that pointers to the topology cpumask_t maps are valid */ |
260 | #define arch_provides_topology_pointers yes | 195 | #define arch_provides_topology_pointers yes |
@@ -268,7 +203,7 @@ struct pci_bus; | |||
268 | void set_pci_bus_resources_arch_default(struct pci_bus *b); | 203 | void set_pci_bus_resources_arch_default(struct pci_bus *b); |
269 | 204 | ||
270 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
271 | #define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids) | 206 | #define mc_capable() (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids) |
272 | #define smt_capable() (smp_num_siblings > 1) | 207 | #define smt_capable() (smp_num_siblings > 1) |
273 | #endif | 208 | #endif |
274 | 209 | ||
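After this cleanup there is a single node_to_cpumask_map[] of cpumask_var_t and one pointer-returning cpumask_of_node() for all configurations; the stack-copying node_to_cpumask() and the node_to_cpumask_ptr() wrappers are gone. A sketch of the replacement idiom the removed comment recommended:

    #include <linux/cpumask.h>
    #include <asm/topology.h>

    /* Equivalent of the removed node_to_first_cpu(): first CPU recorded
     * for 'node', via the pointer-returning accessor. */
    static int example_first_cpu_on_node(int node)
    {
            const struct cpumask *mask = cpumask_of_node(node);

            return cpumask_first(mask);
    }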
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index f2bba78430a4..6e72d74cf8dc 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -338,6 +338,8 @@ | |||
338 | #define __NR_dup3 330 | 338 | #define __NR_dup3 330 |
339 | #define __NR_pipe2 331 | 339 | #define __NR_pipe2 331 |
340 | #define __NR_inotify_init1 332 | 340 | #define __NR_inotify_init1 332 |
341 | #define __NR_preadv 333 | ||
342 | #define __NR_pwritev 334 | ||
341 | 343 | ||
342 | #ifdef __KERNEL__ | 344 | #ifdef __KERNEL__ |
343 | 345 | ||
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index d2e415e6666f..f81829462325 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -653,6 +653,10 @@ __SYSCALL(__NR_dup3, sys_dup3) | |||
653 | __SYSCALL(__NR_pipe2, sys_pipe2) | 653 | __SYSCALL(__NR_pipe2, sys_pipe2) |
654 | #define __NR_inotify_init1 294 | 654 | #define __NR_inotify_init1 294 |
655 | __SYSCALL(__NR_inotify_init1, sys_inotify_init1) | 655 | __SYSCALL(__NR_inotify_init1, sys_inotify_init1) |
656 | #define __NR_preadv 295 | ||
657 | __SYSCALL(__NR_preadv, sys_preadv) | ||
658 | #define __NR_pwritev 296 | ||
659 | __SYSCALL(__NR_pwritev, sys_pwritev) | ||
656 | 660 | ||
657 | 661 | ||
658 | #ifndef __NO_STUBS | 662 | #ifndef __NO_STUBS |
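The two new syscall numbers wire up the vectored positional reads and writes. A user-space sketch of invoking preadv directly through syscall(2); the kernel entry point takes the file offset split into low and high halves, which glibc's eventual preadv() wrapper hides:

    #include <sys/syscall.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Read into 'iov' at file offset 0; assumes __NR_preadv is visible
     * through <sys/syscall.h> (otherwise use the numbers defined above). */
    static ssize_t example_preadv0(int fd, const struct iovec *iov, int iovcnt)
    {
            return syscall(__NR_preadv, fd, iov, iovcnt, 0L, 0L);
    }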
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 777327ef05c1..d3a98ea1062e 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -11,11 +11,13 @@ | |||
11 | #ifndef _ASM_X86_UV_UV_HUB_H | 11 | #ifndef _ASM_X86_UV_UV_HUB_H |
12 | #define _ASM_X86_UV_UV_HUB_H | 12 | #define _ASM_X86_UV_UV_HUB_H |
13 | 13 | ||
14 | #ifdef CONFIG_X86_64 | ||
14 | #include <linux/numa.h> | 15 | #include <linux/numa.h> |
15 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
16 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
17 | #include <asm/types.h> | 18 | #include <asm/types.h> |
18 | #include <asm/percpu.h> | 19 | #include <asm/percpu.h> |
20 | #include <asm/uv/uv_mmrs.h> | ||
19 | 21 | ||
20 | 22 | ||
21 | /* | 23 | /* |
@@ -199,6 +201,10 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | |||
199 | #define SCIR_CPU_ACTIVITY 0x02 /* not idle */ | 201 | #define SCIR_CPU_ACTIVITY 0x02 /* not idle */ |
200 | #define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */ | 202 | #define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */ |
201 | 203 | ||
204 | /* Loop through all installed blades */ | ||
205 | #define for_each_possible_blade(bid) \ | ||
206 | for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++) | ||
207 | |||
202 | /* | 208 | /* |
203 | * Macros for converting between kernel virtual addresses, socket local physical | 209 | * Macros for converting between kernel virtual addresses, socket local physical |
204 | * addresses, and UV global physical addresses. | 210 | * addresses, and UV global physical addresses. |
@@ -393,6 +399,7 @@ static inline void uv_set_scir_bits(unsigned char value) | |||
393 | uv_write_local_mmr8(uv_hub_info->scir.offset, value); | 399 | uv_write_local_mmr8(uv_hub_info->scir.offset, value); |
394 | } | 400 | } |
395 | } | 401 | } |
402 | |||
396 | static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) | 403 | static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) |
397 | { | 404 | { |
398 | if (uv_cpu_hub_info(cpu)->scir.state != value) { | 405 | if (uv_cpu_hub_info(cpu)->scir.state != value) { |
@@ -401,4 +408,15 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) | |||
401 | } | 408 | } |
402 | } | 409 | } |
403 | 410 | ||
411 | static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) | ||
412 | { | ||
413 | unsigned long val; | ||
414 | |||
415 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | ||
416 | ((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) | | ||
417 | (vector << UVH_IPI_INT_VECTOR_SHFT); | ||
418 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | ||
419 | } | ||
420 | |||
421 | #endif /* CONFIG_X86_64 */ | ||
404 | #endif /* _ASM_X86_UV_UV_HUB_H */ | 422 | #endif /* _ASM_X86_UV_UV_HUB_H */ |
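The new for_each_possible_blade() iterator and uv_hub_send_ipi() helper combine naturally. A hedged sketch (hypothetical function), assuming the existing uv_blade_to_pnode() and uv_blade_nr_possible_cpus() accessors elsewhere in this header:

    #include <asm/uv/uv_hub.h>

    /* Send 'vector' to 'apicid' once per populated blade, addressing the
     * IPI MMR on each blade's pnode. */
    static void example_ipi_each_blade(int apicid, int vector)
    {
            int bid;

            for_each_possible_blade(bid) {
                    if (!uv_blade_nr_possible_cpus(bid))
                            continue;
                    uv_hub_send_ipi(uv_blade_to_pnode(bid), apicid, vector);
            }
    }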
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index dd627793a234..db68ac8a5ac2 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -1,3 +1,4 @@ | |||
1 | |||
1 | /* | 2 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 3 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 4 | * License. See the file "COPYING" in the main directory of this archive |
@@ -243,6 +244,158 @@ union uvh_event_occurred0_u { | |||
243 | #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0 | 244 | #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0 |
244 | 245 | ||
245 | /* ========================================================================= */ | 246 | /* ========================================================================= */ |
247 | /* UVH_GR0_TLB_INT0_CONFIG */ | ||
248 | /* ========================================================================= */ | ||
249 | #define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL | ||
250 | |||
251 | #define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 | ||
252 | #define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
253 | #define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 | ||
254 | #define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL | ||
255 | #define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11 | ||
256 | #define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
257 | #define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12 | ||
258 | #define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
259 | #define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13 | ||
260 | #define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL | ||
261 | #define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15 | ||
262 | #define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL | ||
263 | #define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16 | ||
264 | #define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL | ||
265 | #define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32 | ||
266 | #define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
267 | |||
268 | union uvh_gr0_tlb_int0_config_u { | ||
269 | unsigned long v; | ||
270 | struct uvh_gr0_tlb_int0_config_s { | ||
271 | unsigned long vector_ : 8; /* RW */ | ||
272 | unsigned long dm : 3; /* RW */ | ||
273 | unsigned long destmode : 1; /* RW */ | ||
274 | unsigned long status : 1; /* RO */ | ||
275 | unsigned long p : 1; /* RO */ | ||
276 | unsigned long rsvd_14 : 1; /* */ | ||
277 | unsigned long t : 1; /* RO */ | ||
278 | unsigned long m : 1; /* RW */ | ||
279 | unsigned long rsvd_17_31: 15; /* */ | ||
280 | unsigned long apic_id : 32; /* RW */ | ||
281 | } s; | ||
282 | }; | ||
283 | |||
284 | /* ========================================================================= */ | ||
285 | /* UVH_GR0_TLB_INT1_CONFIG */ | ||
286 | /* ========================================================================= */ | ||
287 | #define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL | ||
288 | |||
289 | #define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 | ||
290 | #define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
291 | #define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 | ||
292 | #define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL | ||
293 | #define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11 | ||
294 | #define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
295 | #define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12 | ||
296 | #define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
297 | #define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13 | ||
298 | #define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL | ||
299 | #define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15 | ||
300 | #define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL | ||
301 | #define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16 | ||
302 | #define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL | ||
303 | #define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32 | ||
304 | #define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
305 | |||
306 | union uvh_gr0_tlb_int1_config_u { | ||
307 | unsigned long v; | ||
308 | struct uvh_gr0_tlb_int1_config_s { | ||
309 | unsigned long vector_ : 8; /* RW */ | ||
310 | unsigned long dm : 3; /* RW */ | ||
311 | unsigned long destmode : 1; /* RW */ | ||
312 | unsigned long status : 1; /* RO */ | ||
313 | unsigned long p : 1; /* RO */ | ||
314 | unsigned long rsvd_14 : 1; /* */ | ||
315 | unsigned long t : 1; /* RO */ | ||
316 | unsigned long m : 1; /* RW */ | ||
317 | unsigned long rsvd_17_31: 15; /* */ | ||
318 | unsigned long apic_id : 32; /* RW */ | ||
319 | } s; | ||
320 | }; | ||
321 | |||
322 | /* ========================================================================= */ | ||
323 | /* UVH_GR1_TLB_INT0_CONFIG */ | ||
324 | /* ========================================================================= */ | ||
325 | #define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL | ||
326 | |||
327 | #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 | ||
328 | #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
329 | #define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 | ||
330 | #define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL | ||
331 | #define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11 | ||
332 | #define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
333 | #define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12 | ||
334 | #define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
335 | #define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13 | ||
336 | #define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL | ||
337 | #define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15 | ||
338 | #define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL | ||
339 | #define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16 | ||
340 | #define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL | ||
341 | #define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32 | ||
342 | #define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
343 | |||
344 | union uvh_gr1_tlb_int0_config_u { | ||
345 | unsigned long v; | ||
346 | struct uvh_gr1_tlb_int0_config_s { | ||
347 | unsigned long vector_ : 8; /* RW */ | ||
348 | unsigned long dm : 3; /* RW */ | ||
349 | unsigned long destmode : 1; /* RW */ | ||
350 | unsigned long status : 1; /* RO */ | ||
351 | unsigned long p : 1; /* RO */ | ||
352 | unsigned long rsvd_14 : 1; /* */ | ||
353 | unsigned long t : 1; /* RO */ | ||
354 | unsigned long m : 1; /* RW */ | ||
355 | unsigned long rsvd_17_31: 15; /* */ | ||
356 | unsigned long apic_id : 32; /* RW */ | ||
357 | } s; | ||
358 | }; | ||
359 | |||
360 | /* ========================================================================= */ | ||
361 | /* UVH_GR1_TLB_INT1_CONFIG */ | ||
362 | /* ========================================================================= */ | ||
363 | #define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL | ||
364 | |||
365 | #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 | ||
366 | #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
367 | #define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 | ||
368 | #define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL | ||
369 | #define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11 | ||
370 | #define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
371 | #define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12 | ||
372 | #define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
373 | #define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13 | ||
374 | #define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL | ||
375 | #define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15 | ||
376 | #define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL | ||
377 | #define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16 | ||
378 | #define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL | ||
379 | #define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32 | ||
380 | #define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
381 | |||
382 | union uvh_gr1_tlb_int1_config_u { | ||
383 | unsigned long v; | ||
384 | struct uvh_gr1_tlb_int1_config_s { | ||
385 | unsigned long vector_ : 8; /* RW */ | ||
386 | unsigned long dm : 3; /* RW */ | ||
387 | unsigned long destmode : 1; /* RW */ | ||
388 | unsigned long status : 1; /* RO */ | ||
389 | unsigned long p : 1; /* RO */ | ||
390 | unsigned long rsvd_14 : 1; /* */ | ||
391 | unsigned long t : 1; /* RO */ | ||
392 | unsigned long m : 1; /* RW */ | ||
393 | unsigned long rsvd_17_31: 15; /* */ | ||
394 | unsigned long apic_id : 32; /* RW */ | ||
395 | } s; | ||
396 | }; | ||
397 | |||
398 | /* ========================================================================= */ | ||
246 | /* UVH_INT_CMPB */ | 399 | /* UVH_INT_CMPB */ |
247 | /* ========================================================================= */ | 400 | /* ========================================================================= */ |
248 | #define UVH_INT_CMPB 0x22080UL | 401 | #define UVH_INT_CMPB 0x22080UL |
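Side note on the hunk above: each new UVH_GR*_TLB_INT*_CONFIG register follows the usual uv_mmrs.h pattern of an MMR offset, per-field SHFT/MASK macros, and a bitfield union overlaying the same 64-bit value, so a field can be encoded either through the union or with the macros. The sketch below (not part of the patch) illustrates that equivalence; it assumes the header above is included, and the vector and APIC ID values are made-up placeholders:

/*
 * Sketch only: compose a UVH_GR0_TLB_INT0_CONFIG value two equivalent
 * ways and check that the encodings agree.
 */
static int uvh_gr0_tlb_int0_encoding_check(void)
{
	union uvh_gr0_tlb_int0_config_u cfg = { .v = 0 };
	unsigned long raw;

	cfg.s.vector_  = 0xf0;	/* placeholder interrupt vector */
	cfg.s.dm       = 0;	/* delivery mode: fixed */
	cfg.s.destmode = 0;	/* physical destination mode */
	cfg.s.apic_id  = 0x12;	/* placeholder destination APIC ID */

	/* The same encoding built from the SHFT/MASK macros: */
	raw = ((0xf0UL << UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT) &
			  UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK) |
	      ((0x12UL << UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT) &
			  UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK);

	return cfg.v == raw;	/* 1: both paths produce the same bits */
}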
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 5e79ca694326..9c371e4a9fa6 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -296,6 +296,8 @@ HYPERVISOR_get_debugreg(int reg) | |||
296 | static inline int | 296 | static inline int |
297 | HYPERVISOR_update_descriptor(u64 ma, u64 desc) | 297 | HYPERVISOR_update_descriptor(u64 ma, u64 desc) |
298 | { | 298 | { |
299 | if (sizeof(u64) == sizeof(long)) | ||
300 | return _hypercall2(int, update_descriptor, ma, desc); | ||
299 | return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); | 301 | return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); |
300 | } | 302 | } |
301 | 303 | ||
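Side note on the HYPERVISOR_update_descriptor() hunk above: when sizeof(u64) == sizeof(long) (a 64-bit build), the machine address and the descriptor each fit in a single hypercall argument, so the two-argument form is used and the compiler drops the dead branch; only 32-bit builds still split each u64 into low and high halves for the four-argument form. A hedged, self-contained sketch of that argument packing, using a hypothetical print helper instead of the real _hypercallN machinery:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: show how the arguments would be packed. */
static void show_update_descriptor_args(uint64_t ma, uint64_t desc)
{
	if (sizeof(uint64_t) == sizeof(long)) {
		/* 64-bit guest: each u64 fits in one argument register. */
		printf("hypercall2(update_descriptor, %#llx, %#llx)\n",
		       (unsigned long long)ma, (unsigned long long)desc);
	} else {
		/* 32-bit guest: pass each u64 as low/high 32-bit halves. */
		printf("hypercall4(update_descriptor, %#x, %#x, %#x, %#x)\n",
		       (uint32_t)ma, (uint32_t)(ma >> 32),
		       (uint32_t)desc, (uint32_t)(desc >> 32));
	}
}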