author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /arch/x86/include/asm
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/x86/include/asm')
125 files changed, 3387 insertions, 2655 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 4a8e80cdcfa5..493092efaa3b 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -10,6 +10,8 @@ header-y += ptrace-abi.h
 header-y += sigcontext32.h
 header-y += ucontext.h
 header-y += processor-flags.h
+header-y += hw_breakpoint.h
+header-y += hyperv.h
 
 unifdef-y += e820.h
 unifdef-y += ist.h
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h
index bb70e397aa84..7a15588e45d4 100644
--- a/arch/x86/include/asm/a.out-core.h
+++ b/arch/x86/include/asm/a.out-core.h
@@ -17,6 +17,7 @@
 
 #include <linux/user.h>
 #include <linux/elfcore.h>
+#include <asm/debugreg.h>
 
 /*
  * fill in the user structure for an a.out core dump
@@ -32,14 +33,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
                        >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
-       dump->u_debugreg[0] = current->thread.debugreg0;
-       dump->u_debugreg[1] = current->thread.debugreg1;
-       dump->u_debugreg[2] = current->thread.debugreg2;
-       dump->u_debugreg[3] = current->thread.debugreg3;
-       dump->u_debugreg[4] = 0;
-       dump->u_debugreg[5] = 0;
-       dump->u_debugreg[6] = current->thread.debugreg6;
-       dump->u_debugreg[7] = current->thread.debugreg7;
+       aout_dump_debugregs(dump);
 
        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 4518dc500903..56f462cf22d2 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -118,7 +118,7 @@ extern void acpi_restore_state_mem(void);
 extern unsigned long acpi_wakeup_address;
 
 /* early initialization routine */
-extern void acpi_reserve_bootmem(void);
+extern void acpi_reserve_wakeup_memory(void);
 
 /*
  * Check if the CPU can handle C2 and deeper
@@ -142,6 +142,32 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
        return max_cstate;
 }
 
+static inline bool arch_has_acpi_pdc(void)
+{
+       struct cpuinfo_x86 *c = &cpu_data(0);
+       return (c->x86_vendor == X86_VENDOR_INTEL ||
+               c->x86_vendor == X86_VENDOR_CENTAUR);
+}
+
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+       struct cpuinfo_x86 *c = &cpu_data(0);
+
+       buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;
+
+       if (cpu_has(c, X86_FEATURE_EST))
+               buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
+
+       if (cpu_has(c, X86_FEATURE_ACPI))
+               buf[2] |= ACPI_PDC_T_FFH;
+
+       /*
+        * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
+        */
+       if (!cpu_has(c, X86_FEATURE_MWAIT))
+               buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
+}
+
 #else /* !CONFIG_ACPI */
 
 #define acpi_lapic 0
@@ -158,6 +184,7 @@ struct bootnode;
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
+extern int acpi_get_nodes(struct bootnode *physnodes);
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
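For context: the new arch_acpi_set_pdc_bits() builds the _PDC capabilities word by ORing flags into buf[2], then masks FFH C-state support back out when MONITOR/MWAIT is absent. The stand-alone sketch below replays that flag arithmetic; the ACPI_PDC_* values are assumptions recalled from the include/acpi/processor.h of this era, not quoted from this tree.

#include <stdio.h>
#include <stdint.h>

/* Assumed flag values (see lead-in; verify against include/acpi/processor.h). */
#define ACPI_PDC_P_FFH                  0x0001
#define ACPI_PDC_C_C1_HALT              0x0002
#define ACPI_PDC_SMP_C1PT               0x0008
#define ACPI_PDC_SMP_C2C3               0x0010
#define ACPI_PDC_SMP_P_SWCOORD          0x0020
#define ACPI_PDC_C_C1_FFH               0x0100
#define ACPI_PDC_C_C2C3_FFH             0x0200

#define ACPI_PDC_C_CAPABILITY_SMP       (ACPI_PDC_SMP_C2C3 | ACPI_PDC_SMP_C1PT | \
                                         ACPI_PDC_C_C1_HALT | ACPI_PDC_C_C1_FFH | \
                                         ACPI_PDC_C_C2C3_FFH)
#define ACPI_PDC_EST_CAPABILITY_SWSMP   (ACPI_PDC_SMP_C1PT | ACPI_PDC_C_C1_HALT | \
                                         ACPI_PDC_P_FFH | ACPI_PDC_SMP_P_SWCOORD)

int main(void)
{
        uint32_t buf2 = 0;
        int has_est = 1, has_mwait = 0;         /* pretend CPUID feature bits */

        buf2 |= ACPI_PDC_C_CAPABILITY_SMP;
        if (has_est)
                buf2 |= ACPI_PDC_EST_CAPABILITY_SWSMP;
        if (!has_mwait)                         /* no MONITOR/MWAIT: drop FFH C2/C3 */
                buf2 &= ~ACPI_PDC_C_C2C3_FFH;

        printf("_PDC capability word: %#x\n", buf2);
        return 0;
}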
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e2077d343c33..b97f786a48d5 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,17 +1,13 @@
 #ifdef __ASSEMBLY__
 
-#ifdef CONFIG_X86_32
-# define X86_ALIGN .long
-#else
-# define X86_ALIGN .quad
-#endif
+#include <asm/asm.h>
 
 #ifdef CONFIG_SMP
        .macro LOCK_PREFIX
 1:     lock
        .section .smp_locks,"a"
-       .align 4
-       X86_ALIGN 1b
+       _ASM_ALIGN
+       _ASM_PTR 1b
        .previous
        .endm
 #else
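The removed X86_ALIGN duplicated logic that <asm/asm.h> already centralizes: _ASM_PTR emits .long on 32-bit and .quad on 64-bit, and _ASM_ALIGN picks the matching alignment. A sketch of the relevant definitions, reconstructed from memory of the era's <asm/asm.h> (treat the details as approximate, not a verbatim quote):

/* Approximate shape of <asm/asm.h> selectors. */
#ifdef __ASSEMBLY__
# define __ASM_FORM(x)  x
#else
# define __ASM_FORM(x)  " " #x " "
#endif

#ifdef CONFIG_X86_32
# define __ASM_SEL(a, b) __ASM_FORM(a)  /* 32-bit flavour */
#else
# define __ASM_SEL(a, b) __ASM_FORM(b)  /* 64-bit flavour */
#endif

#define _ASM_ALIGN      __ASM_SEL(.balign 4, .balign 8)
#define _ASM_PTR        __ASM_SEL(.long, .quad)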
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index c240efc74e00..b09ec55650b3 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -65,12 +65,17 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
                                        void *text, void *text_end);
 extern void alternatives_smp_module_del(struct module *mod);
 extern void alternatives_smp_switch(int smp);
+extern int alternatives_text_reserved(void *start, void *end);
 #else
 static inline void alternatives_smp_module_add(struct module *mod, char *name,
                                               void *locks, void *locks_end,
                                               void *text, void *text_end) {}
 static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
+static inline int alternatives_text_reserved(void *start, void *end)
+{
+       return 0;
+}
 #endif /* CONFIG_SMP */
 
 /* alternative assembly primitive: */
@@ -84,6 +89,7 @@ static inline void alternatives_smp_switch(int smp) {}
       " .byte " __stringify(feature) "\n"      /* feature bit */       \
       " .byte 662b-661b\n"                     /* sourcelen */         \
       " .byte 664f-663f\n"                     /* replacementlen */    \
+      " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */   \
       ".previous\n"                                                    \
       ".section .altinstr_replacement, \"ax\"\n"                       \
       "663:\n\t" newinstr "\n664:\n"           /* replacement */       \
@@ -124,11 +130,16 @@ static inline void alternatives_smp_switch(int smp) {}
        asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)          \
                : output : "i" (0), ## input)
 
+/* Like alternative_io, but for replacing a direct call with another one. */
+#define alternative_call(oldfunc, newfunc, feature, output, input...)  \
+       asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
+               : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+
 /*
  * use this macro(s) if you need more than one output parameter
  * in alternative_io
  */
-#define ASM_OUTPUT2(a, b) a, b
+#define ASM_OUTPUT2(a...) a
 
 struct paravirt_patch_site;
 #ifdef CONFIG_PARAVIRT
@@ -154,10 +165,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
  * invalid instruction possible) or if the instructions are changed from a
  * consistent state to another consistent state atomically.
  * More care must be taken when modifying code in the SMP case because of
- * Intel's errata.
+ * Intel's errata. text_poke_smp() takes care that errata, but still
+ * doesn't support NMI/MCE handler code modifying.
  * On the local CPU you need to be protected again NMI or MCE handlers seeing an
  * inconsistent instruction while you patch.
  */
 extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
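For context, how these macros get used: the first example is the pattern the 32-bit kernel of this period used to implement smp_rmb(), patching a locked add into LFENCE where SSE2 exists. The second shows the shape of the new alternative_call(); both target functions are made up, and a real caller must keep the two calling conventions identical (only the CALL target byte sequence is patched) and must also list every caller-clobbered register, which rides along at the end of the variadic input list.

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/* smp_rmb()-style use of alternative() (matches the era's barrier code). */
static inline void rmb_demo(void)
{
        alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2);
}

/* Hypothetical alternative_call() user: both functions are invented. */
extern void clear_page_slow(void *page);
extern void clear_page_fast(void *page);

static inline void clear_page_demo(void *page)
{
        alternative_call(clear_page_slow, clear_page_fast,
                         X86_FEATURE_REP_GOOD,
                         ASM_OUTPUT2("=D" (page)),
                         "0" (page) : "memory");  /* plus caller-saved regs */
}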
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 4b180897e6b5..5af2982133b5 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -23,19 +23,13 @@
 #include <linux/irqreturn.h>
 
 #ifdef CONFIG_AMD_IOMMU
-extern int amd_iommu_init(void);
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
+
 extern void amd_iommu_detect(void);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_flush_all_domains(void);
-extern void amd_iommu_flush_all_devices(void);
-extern void amd_iommu_shutdown(void);
-extern void amd_iommu_apply_erratum_63(u16 devid);
+
 #else
-static inline int amd_iommu_init(void) { return -ENODEV; }
+
 static inline void amd_iommu_detect(void) { }
-static inline void amd_iommu_shutdown(void) { }
+
 #endif
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
new file mode 100644
index 000000000000..d2544f1d705d
--- /dev/null
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2009 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
+#define _ASM_X86_AMD_IOMMU_PROTO_H
+
+struct amd_iommu;
+
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_flush_all_domains(void);
+extern void amd_iommu_flush_all_devices(void);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
+extern void amd_iommu_init_api(void);
+#ifndef CONFIG_AMD_IOMMU_STATS
+
+static inline void amd_iommu_stats_init(void) { }
+
+#endif /* !CONFIG_AMD_IOMMU_STATS */
+
+#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 2a2cc7a78a81..86a0ff0aeac7 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -21,10 +21,16 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
 /*
+ * Maximum number of IOMMUs supported
+ */
+#define MAX_IOMMUS 32
+
+/*
  * some size calculation constants
  */
 #define DEV_TABLE_ENTRY_SIZE 32
@@ -135,6 +141,7 @@
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE 8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -206,6 +213,9 @@ extern bool amd_iommu_dump;
                printk(KERN_INFO "AMD-Vi: " format, ## arg);    \
        } while(0);
 
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+
 /*
  * Make iterating over all IOMMUs easier
  */
@@ -226,14 +236,30 @@ extern bool amd_iommu_dump;
  * independent of their use.
  */
 struct protection_domain {
+       struct list_head list;  /* for list of all protection domains */
+       struct list_head dev_list; /* List of all devices in this domain */
        spinlock_t lock;        /* mostly used to lock the page table*/
+       struct mutex api_lock;  /* protect page tables in the iommu-api path */
        u16 id;                 /* the domain id written to the device table */
        int mode;               /* paging mode (0-6 levels) */
        u64 *pt_root;           /* page table root pointer */
        unsigned long flags;    /* flags to find out type of domain */
        bool updated;           /* complete domain flush required */
        unsigned dev_cnt;       /* devices assigned to this domain */
+       unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
        void *priv;             /* private data */
+
+};
+
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+       struct list_head list;  /* For domain->dev_list */
+       struct device *dev;     /* Device this data belong to */
+       struct device *alias;   /* The Alias Device */
+       struct protection_domain *domain; /* Domain the device is bound to */
+       atomic_t bind;          /* Domain attach reverent count */
 };
 
 /*
@@ -291,6 +317,9 @@ struct dma_ops_domain {
 struct amd_iommu {
        struct list_head list;
 
+       /* Index within the IOMMU array */
+       int index;
+
        /* locks the accesses to the hardware */
        spinlock_t lock;
 
@@ -357,6 +386,21 @@ struct amd_iommu {
 extern struct list_head amd_iommu_list;
 
 /*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+extern int amd_iommus_present;
+
+/*
+ * Declarations for the global list of all protection domains
+ */
+extern spinlock_t amd_iommu_pd_lock;
+extern struct list_head amd_iommu_pd_list;
+
+/*
  * Structure defining one entry in the device table
  */
 struct dev_table_entry {
@@ -416,15 +460,9 @@ extern unsigned amd_iommu_aperture_order;
 /* largest PCI device id we expect translation requests for */
 extern u16 amd_iommu_last_bdf;
 
-/* data structures for protection domain handling */
-extern struct protection_domain **amd_iommu_pd_table;
-
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/* will be 1 if device isolation is enabled */
-extern bool amd_iommu_isolate;
-
 /*
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
@@ -462,11 +500,6 @@ struct __iommu_counter {
 #define ADD_STATS_COUNTER(name, x)
 #define SUB_STATS_COUNTER(name, x)
 
-static inline void amd_iommu_stats_init(void) { }
-
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
-/* some function prototypes */
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
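The new dev_iommu[] per-IOMMU reference counts exist so domain-wide TLB flushes can be targeted instead of hitting every IOMMU in the system. A minimal sketch of the idea, using only the declarations added above (this is not the driver's actual flush code):

/* Sketch only: skip IOMMUs that hold no devices from this domain. */
static void flush_domain_tlb_sketch(struct protection_domain *domain)
{
        int i;

        for (i = 0; i < amd_iommus_present; ++i) {
                if (!domain->dev_iommu[i])
                        continue;       /* no device of this domain behind IOMMU i */
                /* ...queue an INVALIDATE_IOMMU_PAGES command to amd_iommus[i]... */
        }
}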
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
new file mode 100644
index 000000000000..c74a2eebe570
--- /dev/null
+++ b/arch/x86/include/asm/apb_timer.h
@@ -0,0 +1,70 @@
+/*
+ * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare
+ *
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * Note:
+ */
+
+#ifndef ASM_X86_APBT_H
+#define ASM_X86_APBT_H
+#include <linux/sfi.h>
+
+#ifdef CONFIG_APB_TIMER
+
+/* Langwell DW APB timer registers */
+#define APBTMR_N_LOAD_COUNT 0x00
+#define APBTMR_N_CURRENT_VALUE 0x04
+#define APBTMR_N_CONTROL 0x08
+#define APBTMR_N_EOI 0x0c
+#define APBTMR_N_INT_STATUS 0x10
+
+#define APBTMRS_INT_STATUS 0xa0
+#define APBTMRS_EOI 0xa4
+#define APBTMRS_RAW_INT_STATUS 0xa8
+#define APBTMRS_COMP_VERSION 0xac
+#define APBTMRS_REG_SIZE 0x14
+
+/* register bits */
+#define APBTMR_CONTROL_ENABLE (1<<0)
+#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
+#define APBTMR_CONTROL_INT (1<<2)
+
+/* default memory mapped register base */
+#define LNW_SCU_ADDR 0xFF100000
+#define LNW_EXT_TIMER_OFFSET 0x1B800
+#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
+#define LNW_EXT_TIMER_PGOFFSET 0x800
+
+/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
+#define APBT_MAX_FREQ 50
+#define APBT_MIN_FREQ 1
+#define APBT_MMAP_SIZE 1024
+
+#define APBT_DEV_USED 1
+
+extern void apbt_time_init(void);
+extern struct clock_event_device *global_clock_event;
+extern unsigned long apbt_quick_calibrate(void);
+extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
+extern void apbt_setup_secondary_clock(void);
+extern unsigned int boot_cpu_id;
+extern int disable_apbt_percpu;
+
+extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
+extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
+extern int sfi_mtimer_num;
+
+#else /* CONFIG_APB_TIMER */
+
+static inline unsigned long apbt_quick_calibrate(void) {return 0; }
+static inline void apbt_time_init(void) {return 0; }
+
+#endif
+#endif /* ASM_X86_APBT_H */
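For orientation, the register map above is the standard DesignWare APB timer layout: each channel occupies APBTMRS_REG_SIZE bytes, and periodic mode is programmed by loading a count and setting the control bits. A sketch under those assumptions (the base mapping and the init flow are assumptions; this is not the actual arch/x86/kernel/apb_timer.c code):

#include <linux/io.h>

static void __iomem *apbt_virt_base;  /* assumed: ioremap(APBT_DEFAULT_BASE, APBT_MMAP_SIZE) */

/* Sketch: start channel n in periodic mode with a period of 'ticks'. */
static void apbt_start_periodic_sketch(int n, u32 ticks)
{
        void __iomem *ch = apbt_virt_base + n * APBTMRS_REG_SIZE;
        u32 ctrl = readl(ch + APBTMR_N_CONTROL);

        ctrl &= ~APBTMR_CONTROL_ENABLE;             /* stop before reprogramming */
        writel(ctrl, ch + APBTMR_N_CONTROL);
        writel(ticks, ch + APBTMR_N_LOAD_COUNT);    /* period in timer clock ticks */
        ctrl |= APBTMR_CONTROL_MODE_PERIODIC | APBTMR_CONTROL_ENABLE;
        writel(ctrl, ch + APBTMR_N_CONTROL);
}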
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 474d80d3e6cc..b4ac2cdcb64f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -297,20 +297,20 @@ struct apic {
        int disable_esr;
 
        int dest_logical;
-       unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+       unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
        unsigned long (*check_apicid_present)(int apicid);
 
        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
        void (*init_apic_ldr)(void);
 
-       physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+       void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
 
        void (*setup_apic_routing)(void);
        int (*multi_timer_check)(int apic, int irq);
        int (*apicid_to_node)(int logical_apicid);
        int (*cpu_to_logical_apicid)(int cpu);
        int (*cpu_present_to_apicid)(int mps_cpu);
-       physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+       void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
        void (*setup_portio_remap)(void);
        int (*check_phys_apicid_present)(int phys_apicid);
        void (*enable_apic_mode)(void);
@@ -488,6 +488,8 @@ static inline unsigned int read_apic_id(void)
 
 extern void default_setup_apic_routing(void);
 
+extern struct apic apic_noop;
+
 #ifdef CONFIG_X86_32
 
 extern struct apic apic_default;
@@ -532,9 +534,9 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
        return (unsigned int)(mask1 & mask2 & mask3);
 }
 
-static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
 {
-       return physid_isset(apicid, bitmap);
+       return physid_isset(apicid, *map);
 }
 
 static inline unsigned long default_check_apicid_present(int bit)
@@ -542,9 +544,9 @@ static inline unsigned long default_check_apicid_present(int bit)
        return physid_isset(bit, phys_cpu_present_map);
 }
 
-static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
-       return phys_map;
+       *retmap = *phys_map;
 }
 
 /* Mapping from cpu number to logical apicid */
@@ -583,11 +585,6 @@ extern int default_cpu_present_to_apicid(int mps_cpu);
 extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
-static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
-{
-       return physid_mask_of_physid(phys_apicid);
-}
-
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_32
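The physid ops above switch from by-value to pointer-based results because physid_mask_t is a MAX_LOCAL_APIC-bit bitmap: 32768 bits, i.e. 4KB, on 64-bit, per the MAX_LOCAL_APIC definition visible in the apicnum.h hunk below. Returning one by value therefore copied kilobytes across the stack on every call. Caller-side, the change looks like this (illustrative sketch, not a verbatim kernel quote):

physid_mask_t present_map;

/* old: present_map = apic->ioapic_phys_id_map(phys_cpu_present_map); */
apic->ioapic_phys_id_map(&phys_cpu_present_map, &present_map);

if (apic->check_apicid_used(&present_map, apic_id)) {
        /* ...APIC id already taken, pick another... */
}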
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 3b62da926de9..7fe3b3060f08 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -11,6 +11,12 @@
 #define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
 #define APIC_DEFAULT_PHYS_BASE 0xfee00000
 
+/*
+ * This is the IO-APIC register space as specified
+ * by Intel docs:
+ */
+#define IO_APIC_SLOT_SIZE 1024
+
 #define APIC_ID 0x20
 
 #define APIC_LVR 0x30
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
deleted file mode 100644
index 82f613c607ce..000000000000
--- a/arch/x86/include/asm/apicnum.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_APICNUM_H
-#define _ASM_X86_APICNUM_H
-
-/* define MAX_IO_APICS */
-#ifdef CONFIG_X86_32
-# define MAX_IO_APICS 64
-#else
-# define MAX_IO_APICS 128
-# define MAX_LOCAL_APIC 32768
-#endif
-
-#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/asm-offsets.h b/arch/x86/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/x86/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 4e1b8873c474..8f8217b9bdac 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -1,5 +1,300 @@
+#ifndef _ASM_X86_ATOMIC_H
+#define _ASM_X86_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read(const atomic_t *v)
+{
+       return v->counter;
+}
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+       v->counter = i;
+}
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "addl %1,%0"
+                    : "+m" (v->counter)
+                    : "ir" (i));
+}
+
+/**
+ * atomic_sub - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "subl %1,%0"
+                    : "+m" (v->counter)
+                    : "ir" (i));
+}
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic_sub_and_test(int i, atomic_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+                    : "+m" (v->counter), "=qm" (c)
+                    : "ir" (i) : "memory");
+       return c;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc(atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "incl %0"
+                    : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic_dec(atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "decl %0"
+                    : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "decl %0; sete %1"
+                    : "+m" (v->counter), "=qm" (c)
+                    : : "memory");
+       return c != 0;
+}
+
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "incl %0; sete %1"
+                    : "+m" (v->counter), "=qm" (c)
+                    : : "memory");
+       return c != 0;
+}
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+                    : "+m" (v->counter), "=qm" (c)
+                    : "ir" (i) : "memory");
+       return c;
+}
+
+/**
+ * atomic_add_return - add integer and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       int __i;
+#ifdef CONFIG_M386
+       unsigned long flags;
+       if (unlikely(boot_cpu_data.x86 <= 3))
+               goto no_xadd;
+#endif
+       /* Modern 486+ processor */
+       __i = i;
+       asm volatile(LOCK_PREFIX "xaddl %0, %1"
+                    : "+r" (i), "+m" (v->counter)
+                    : : "memory");
+       return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+       raw_local_irq_save(flags);
+       __i = atomic_read(v);
+       atomic_set(v, i + __i);
+       raw_local_irq_restore(flags);
+       return i + __i;
+#endif
+}
+
+/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       return atomic_add_return(-i, v);
+}
+
+#define atomic_inc_return(v) (atomic_add_return(1, v))
+#define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+       return xchg(&v->counter, new);
+}
+
+/**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_inc_short - increment of a short integer
+ * @v: pointer to type int
+ *
+ * Atomically adds 1 to @v
+ * Returns the new value of @u
+ */
+static inline short int atomic_inc_short(short int *v)
+{
+       asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
+       return *v;
+}
+
+#ifdef CONFIG_X86_64
+/**
+ * atomic_or_long - OR of two long integers
+ * @v1: pointer to type unsigned long
+ * @v2: pointer to type unsigned long
+ *
+ * Atomically ORs @v1 and @v2
+ * Returns the result of the OR
+ */
+static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+{
+       asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
+}
+#endif
+
+/* These are x86-specific, used by some header files */
+#define atomic_clear_mask(mask, addr) \
+       asm volatile(LOCK_PREFIX "andl %0,%1" \
+                    : : "r" (~(mask)), "m" (*(addr)) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+       asm volatile(LOCK_PREFIX "orl %0,%1" \
+                    : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+                    : "memory")
+
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
 #ifdef CONFIG_X86_32
-# include "atomic_32.h"
+# include "atomic64_32.h"
 #else
-# include "atomic_64.h"
+# include "atomic64_64.h"
 #endif
+
+#include <asm-generic/atomic-long.h>
+#endif /* _ASM_X86_ATOMIC_H */
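Usage sketch for the atomic_add_unless()/atomic_inc_not_zero() pair above: the canonical consumer is a "take a reference only while the object is still live" pattern, with atomic_dec_and_test() on the release side. The struct and helpers here are illustrative, not kernel code.

struct obj {
        atomic_t refcount;      /* starts at 1 for the creator */
};

/* Fails (returns 0) once the count has already hit zero. */
static int obj_tryget(struct obj *o)
{
        return atomic_inc_not_zero(&o->refcount);
}

static void obj_put(struct obj *o, void (*release)(struct obj *))
{
        if (atomic_dec_and_test(&o->refcount))
                release(o);     /* last reference dropped */
}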
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
new file mode 100644
index 000000000000..03027bf28de5
--- /dev/null
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -0,0 +1,160 @@
+#ifndef _ASM_X86_ATOMIC64_32_H
+#define _ASM_X86_ATOMIC64_32_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+//#include <asm/cmpxchg.h>
+
+/* An 64bit atomic type */
+
+typedef struct {
+       u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(val)     { (val) }
+
+extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+
+/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @ptr:      pointer to type atomic64_t
+ * @new_val:  value to assign
+ *
+ * Atomically xchgs the value of @ptr to @new_val and returns
+ * the old value.
+ */
+extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @ptr:      pointer to type atomic64_t
+ * @new_val:  value to assign
+ *
+ * Atomically sets the value of @ptr to @new_val.
+ */
+extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @ptr:      pointer to type atomic64_t
+ *
+ * Atomically reads the value of @ptr and returns it.
+ */
+static inline u64 atomic64_read(atomic64_t *ptr)
+{
+       u64 res;
+
+       /*
+        * Note, we inline this atomic64_t primitive because
+        * it only clobbers EAX/EDX and leaves the others
+        * untouched. We also (somewhat subtly) rely on the
+        * fact that cmpxchg8b returns the current 64-bit value
+        * of the memory location we are touching:
+        */
+       asm volatile(
+               "mov %%ebx, %%eax\n\t"
+               "mov %%ecx, %%edx\n\t"
+               LOCK_PREFIX "cmpxchg8b %1\n"
+               : "=&A" (res)
+               : "m" (*ptr)
+               );
+
+       return res;
+}
+
+extern u64 atomic64_read(atomic64_t *ptr);
+
+/**
+ * atomic64_add_return - add and return
+ * @delta: integer value to add
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ */
+extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+
+/*
+ * Other variants with different arithmetic operators:
+ */
+extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
+extern u64 atomic64_inc_return(atomic64_t *ptr);
+extern u64 atomic64_dec_return(atomic64_t *ptr);
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @delta: integer value to add
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr.
+ */
+extern void atomic64_add(u64 delta, atomic64_t *ptr);
+
+/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @delta: integer value to subtract
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr.
+ */
+extern void atomic64_sub(u64 delta, atomic64_t *ptr);
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @delta: integer value to subtract
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1.
+ */
+extern void atomic64_inc(atomic64_t *ptr);
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1.
+ */
+extern void atomic64_dec(atomic64_t *ptr);
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+extern int atomic64_dec_and_test(atomic64_t *ptr);
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+extern int atomic64_inc_and_test(atomic64_t *ptr);
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @delta: integer value to add
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
+
+#endif /* _ASM_X86_ATOMIC64_32_H */
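Most of these 64-bit operations are extern on 32-bit because they need cmpxchg8b retry loops, which live out of line in arch/x86/lib/. A sketch of how such an operation can be built from the atomic64_cmpxchg()/atomic64_read() primitives declared above (illustrative only; the real out-of-line implementation may differ):

static inline void atomic64_add_sketch(u64 delta, atomic64_t *ptr)
{
        u64 old, new;

        do {
                old = atomic64_read(ptr);      /* cmpxchg8b-based 64-bit snapshot */
                new = old + delta;
        } while (atomic64_cmpxchg(ptr, old, new) != old);  /* retry on race */
}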
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
new file mode 100644
index 000000000000..51c5b4056929
--- /dev/null
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -0,0 +1,224 @@
+#ifndef _ASM_X86_ATOMIC64_64_H
+#define _ASM_X86_ATOMIC64_64_H
+
+#include <linux/types.h>
+#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
+
+/* The 64-bit atomic type */
+
+#define ATOMIC64_INIT(i)       { (i) }
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+static inline long atomic64_read(const atomic64_t *v)
+{
+       return v->counter;
+}
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic64_set(atomic64_t *v, long i)
+{
+       v->counter = i;
+}
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "addq %1,%0"
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic64_sub(long i, atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "subq %1,%0"
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "er" (i), "m" (v->counter) : "memory");
+       return c;
+}
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic64_inc(atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "incq %0"
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+}
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic64_dec(atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "decq %0"
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+}
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "decq %0; sete %1"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "m" (v->counter) : "memory");
+       return c != 0;
+}
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "incq %0; sete %1"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "m" (v->counter) : "memory");
+       return c != 0;
+}
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int atomic64_add_negative(long i, atomic64_t *v)
+{
+       unsigned char c;
+
+       asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "er" (i), "m" (v->counter) : "memory");
+       return c;
+}
+
+/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+       long __i = i;
+       asm volatile(LOCK_PREFIX "xaddq %0, %1;"
+                    : "+r" (i), "+m" (v->counter)
+                    : : "memory");
+       return i + __i;
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+       return atomic64_add_return(-i, v);
+}
+
+#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
+#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+{
+       return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long new)
+{
+       return xchg(&v->counter, new);
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#endif /* _ASM_X86_ATOMIC64_64_H */
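On 64-bit the same API maps straight onto lock-prefixed quadword instructions, so atomic64_t is a natural fit for wide counters. A small usage sketch (the counter and helpers are illustrative, not kernel code):

static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

static void account_io_demo(long nbytes)
{
        atomic64_add(nbytes, &bytes_transferred);       /* LOCK ADDQ */
}

static long bytes_transferred_snapshot(void)
{
        return atomic64_read(&bytes_transferred);       /* plain 64-bit load */
}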
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
deleted file mode 100644
index dc5a667ff791..000000000000
--- a/arch/x86/include/asm/atomic_32.h
+++ /dev/null
@@ -1,415 +0,0 @@
-#ifndef _ASM_X86_ATOMIC_32_H
-#define _ASM_X86_ATOMIC_32_H
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/processor.h>
-#include <asm/cmpxchg.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-static inline int atomic_read(const atomic_t *v)
-{
-       return v->counter;
-}
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-static inline void atomic_set(atomic_t *v, int i)
-{
-       v->counter = i;
-}
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-       asm volatile(LOCK_PREFIX "addl %1,%0"
-                    : "+m" (v->counter)
-                    : "ir" (i));
-}
-
-/**
- * atomic_sub - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-       asm volatile(LOCK_PREFIX "subl %1,%0"
-                    : "+m" (v->counter)
-                    : "ir" (i));
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
-{
-       unsigned char c;
-
-       asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-                    : "+m" (v->counter), "=qm" (c)
-                    : "ir" (i) : "memory");
-       return c;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static inline void atomic_inc(atomic_t *v)
-{
-       asm volatile(LOCK_PREFIX "incl %0"
-                    : "+m" (v->counter));
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static inline void atomic_dec(atomic_t *v)
-{
-       asm volatile(LOCK_PREFIX "decl %0"
-                    : "+m" (v->counter));
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-       unsigned char c;
-
-       asm volatile(LOCK_PREFIX "decl %0; sete %1"
-                    : "+m" (v->counter), "=qm" (c)
-                    : : "memory");
-       return c != 0;
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int atomic_inc_and_test(atomic_t *v)
-{
-       unsigned char c;
-
-       asm volatile(LOCK_PREFIX "incl %0; sete %1"
-                    : "+m" (v->counter), "=qm" (c)
-                    : : "memory");
-       return c != 0;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-       unsigned char c;
-
-       asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-                    : "+m" (v->counter), "=qm" (c)
-                    : "ir" (i) : "memory");
-       return c;
-}
-
-/**
- * atomic_add_return - add integer and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-       int __i;
-#ifdef CONFIG_M386
-       unsigned long flags;
-       if (unlikely(boot_cpu_data.x86 <= 3))
-               goto no_xadd;
-#endif
-       /* Modern 486+ processor */
-       __i = i;
-       asm volatile(LOCK_PREFIX "xaddl %0, %1"
-                    : "+r" (i), "+m" (v->counter)
-                    : : "memory");
-       return i + __i;
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-       local_irq_save(flags);
-       __i = atomic_read(v);
-       atomic_set(v, i + __i);
-       local_irq_restore(flags);
-       return i + __i;
-#endif
-}
-
-/**
- * atomic_sub_return - subtract integer and return
- * @v: pointer of type atomic_t
- * @i: integer value to subtract
- *
- * Atomically subtracts @i from @v and returns @v - @i
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-       return atomic_add_return(-i, v);
-}
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-       return cmpxchg(&v->counter, old, new);
212 | } | ||
213 | |||
214 | static inline int atomic_xchg(atomic_t *v, int new) | ||
215 | { | ||
216 | return xchg(&v->counter, new); | ||
217 | } | ||
218 | |||
219 | /** | ||
220 | * atomic_add_unless - add unless the number is already a given value | ||
221 | * @v: pointer of type atomic_t | ||
222 | * @a: the amount to add to v... | ||
223 | * @u: ...unless v is equal to u. | ||
224 | * | ||
225 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
226 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
227 | */ | ||
228 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
229 | { | ||
230 | int c, old; | ||
231 | c = atomic_read(v); | ||
232 | for (;;) { | ||
233 | if (unlikely(c == (u))) | ||
234 | break; | ||
235 | old = atomic_cmpxchg((v), c, c + (a)); | ||
236 | if (likely(old == c)) | ||
237 | break; | ||
238 | c = old; | ||
239 | } | ||
240 | return c != (u); | ||
241 | } | ||
242 | |||
243 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
244 | |||
245 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
246 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
247 | |||
248 | /* These are x86-specific, used by some header files */ | ||
249 | #define atomic_clear_mask(mask, addr) \ | ||
250 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
251 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
252 | |||
253 | #define atomic_set_mask(mask, addr) \ | ||
254 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
255 | : : "r" (mask), "m" (*(addr)) : "memory") | ||
256 | |||
257 | /* Atomic operations are already serializing on x86 */ | ||
258 | #define smp_mb__before_atomic_dec() barrier() | ||
259 | #define smp_mb__after_atomic_dec() barrier() | ||
260 | #define smp_mb__before_atomic_inc() barrier() | ||
261 | #define smp_mb__after_atomic_inc() barrier() | ||
262 | |||
263 | /* A 64-bit atomic type */ | ||
264 | |||
265 | typedef struct { | ||
266 | u64 __aligned(8) counter; | ||
267 | } atomic64_t; | ||
268 | |||
269 | #define ATOMIC64_INIT(val) { (val) } | ||
270 | |||
271 | extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); | ||
272 | |||
273 | /** | ||
274 | * atomic64_xchg - xchg atomic64 variable | ||
275 | * @ptr: pointer to type atomic64_t | ||
276 | * @new_val: value to assign | ||
277 | * | ||
278 | * Atomically xchgs the value of @ptr to @new_val and returns | ||
279 | * the old value. | ||
280 | */ | ||
281 | extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); | ||
282 | |||
283 | /** | ||
284 | * atomic64_set - set atomic64 variable | ||
285 | * @ptr: pointer to type atomic64_t | ||
286 | * @new_val: value to assign | ||
287 | * | ||
288 | * Atomically sets the value of @ptr to @new_val. | ||
289 | */ | ||
290 | extern void atomic64_set(atomic64_t *ptr, u64 new_val); | ||
291 | |||
292 | /** | ||
293 | * atomic64_read - read atomic64 variable | ||
294 | * @ptr: pointer to type atomic64_t | ||
295 | * | ||
296 | * Atomically reads the value of @ptr and returns it. | ||
297 | */ | ||
298 | static inline u64 atomic64_read(atomic64_t *ptr) | ||
299 | { | ||
300 | u64 res; | ||
301 | |||
302 | /* | ||
303 | * Note, we inline this atomic64_t primitive because | ||
304 | * it only clobbers EAX/EDX and leaves the others | ||
305 | * untouched. We also (somewhat subtly) rely on the | ||
306 | * fact that cmpxchg8b returns the current 64-bit value | ||
307 | * of the memory location we are touching: | ||
308 | */ | ||
309 | asm volatile( | ||
310 | "mov %%ebx, %%eax\n\t" | ||
311 | "mov %%ecx, %%edx\n\t" | ||
312 | LOCK_PREFIX "cmpxchg8b %1\n" | ||
313 | : "=&A" (res) | ||
314 | : "m" (*ptr) | ||
315 | ); | ||
316 | |||
317 | return res; | ||
318 | } | ||
319 | |||
320 | extern u64 atomic64_read(atomic64_t *ptr); | ||
321 | |||
322 | /** | ||
323 | * atomic64_add_return - add and return | ||
324 | * @delta: integer value to add | ||
325 | * @ptr: pointer to type atomic64_t | ||
326 | * | ||
327 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | ||
328 | */ | ||
329 | extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); | ||
330 | |||
331 | /* | ||
332 | * Other variants with different arithmetic operators: | ||
333 | */ | ||
334 | extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); | ||
335 | extern u64 atomic64_inc_return(atomic64_t *ptr); | ||
336 | extern u64 atomic64_dec_return(atomic64_t *ptr); | ||
337 | |||
338 | /** | ||
339 | * atomic64_add - add integer to atomic64 variable | ||
340 | * @delta: integer value to add | ||
341 | * @ptr: pointer to type atomic64_t | ||
342 | * | ||
343 | * Atomically adds @delta to @ptr. | ||
344 | */ | ||
345 | extern void atomic64_add(u64 delta, atomic64_t *ptr); | ||
346 | |||
347 | /** | ||
348 | * atomic64_sub - subtract integer from atomic64 variable | ||
349 | * @delta: integer value to subtract | ||
350 | * @ptr: pointer to type atomic64_t | ||
351 | * | ||
352 | * Atomically subtracts @delta from @ptr. | ||
353 | */ | ||
354 | extern void atomic64_sub(u64 delta, atomic64_t *ptr); | ||
355 | |||
356 | /** | ||
357 | * atomic64_sub_and_test - subtract value from variable and test result | ||
358 | * @delta: integer value to subtract | ||
359 | * @ptr: pointer to type atomic64_t | ||
360 | * | ||
361 | * Atomically subtracts @delta from @ptr and returns | ||
362 | * true if the result is zero, or false for all | ||
363 | * other cases. | ||
364 | */ | ||
365 | extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); | ||
366 | |||
367 | /** | ||
368 | * atomic64_inc - increment atomic64 variable | ||
369 | * @ptr: pointer to type atomic64_t | ||
370 | * | ||
371 | * Atomically increments @ptr by 1. | ||
372 | */ | ||
373 | extern void atomic64_inc(atomic64_t *ptr); | ||
374 | |||
375 | /** | ||
376 | * atomic64_dec - decrement atomic64 variable | ||
377 | * @ptr: pointer to type atomic64_t | ||
378 | * | ||
379 | * Atomically decrements @ptr by 1. | ||
380 | */ | ||
381 | extern void atomic64_dec(atomic64_t *ptr); | ||
382 | |||
383 | /** | ||
384 | * atomic64_dec_and_test - decrement and test | ||
385 | * @ptr: pointer to type atomic64_t | ||
386 | * | ||
387 | * Atomically decrements @ptr by 1 and | ||
388 | * returns true if the result is 0, or false for all other | ||
389 | * cases. | ||
390 | */ | ||
391 | extern int atomic64_dec_and_test(atomic64_t *ptr); | ||
392 | |||
393 | /** | ||
394 | * atomic64_inc_and_test - increment and test | ||
395 | * @ptr: pointer to type atomic64_t | ||
396 | * | ||
397 | * Atomically increments @ptr by 1 | ||
398 | * and returns true if the result is zero, or false for all | ||
399 | * other cases. | ||
400 | */ | ||
401 | extern int atomic64_inc_and_test(atomic64_t *ptr); | ||
402 | |||
403 | /** | ||
404 | * atomic64_add_negative - add and test if negative | ||
405 | * @delta: integer value to add | ||
406 | * @ptr: pointer to type atomic64_t | ||
407 | * | ||
408 | * Atomically adds @delta to @ptr and returns true | ||
409 | * if the result is negative, or false when | ||
410 | * result is greater than or equal to zero. | ||
411 | */ | ||
412 | extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); | ||
413 | |||
414 | #include <asm-generic/atomic-long.h> | ||
415 | #endif /* _ASM_X86_ATOMIC_32_H */ | ||
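The inline atomic64_read() in the deleted file leans on a property of cmpxchg8b: the instruction always leaves the current 64-bit contents in EDX:EAX whether or not the compare succeeded, and seeding the comparand from EBX:ECX makes a "successful" compare store the value already there. The portable form of the same trick is a compare-and-swap of a value against itself; the sketch below assumes GCC's __sync_val_compare_and_swap as the CAS primitive:

#include <stdint.h>

/* A CAS with expected == new never changes memory, but always hands back
 * the current contents atomically -- the same trick as seeding the
 * eax:edx and ebx:ecx pairs identically before cmpxchg8b. */
static uint64_t read64_atomic(uint64_t *p)
{
        return __sync_val_compare_and_swap(p, 0, 0);
}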
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h deleted file mode 100644 index d605dc268e79..000000000000 --- a/arch/x86/include/asm/atomic_64.h +++ /dev/null | |||
@@ -1,485 +0,0 @@ | |||
1 | #ifndef _ASM_X86_ATOMIC_64_H | ||
2 | #define _ASM_X86_ATOMIC_64_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/alternative.h> | ||
6 | #include <asm/cmpxchg.h> | ||
7 | |||
8 | /* | ||
9 | * Atomic operations that C can't guarantee us. Useful for | ||
10 | * resource counting etc.. | ||
11 | */ | ||
12 | |||
13 | #define ATOMIC_INIT(i) { (i) } | ||
14 | |||
15 | /** | ||
16 | * atomic_read - read atomic variable | ||
17 | * @v: pointer of type atomic_t | ||
18 | * | ||
19 | * Atomically reads the value of @v. | ||
20 | */ | ||
21 | static inline int atomic_read(const atomic_t *v) | ||
22 | { | ||
23 | return v->counter; | ||
24 | } | ||
25 | |||
26 | /** | ||
27 | * atomic_set - set atomic variable | ||
28 | * @v: pointer of type atomic_t | ||
29 | * @i: required value | ||
30 | * | ||
31 | * Atomically sets the value of @v to @i. | ||
32 | */ | ||
33 | static inline void atomic_set(atomic_t *v, int i) | ||
34 | { | ||
35 | v->counter = i; | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * atomic_add - add integer to atomic variable | ||
40 | * @i: integer value to add | ||
41 | * @v: pointer of type atomic_t | ||
42 | * | ||
43 | * Atomically adds @i to @v. | ||
44 | */ | ||
45 | static inline void atomic_add(int i, atomic_t *v) | ||
46 | { | ||
47 | asm volatile(LOCK_PREFIX "addl %1,%0" | ||
48 | : "=m" (v->counter) | ||
49 | : "ir" (i), "m" (v->counter)); | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * atomic_sub - subtract integer from atomic variable | ||
54 | * @i: integer value to subtract | ||
55 | * @v: pointer of type atomic_t | ||
56 | * | ||
57 | * Atomically subtracts @i from @v. | ||
58 | */ | ||
59 | static inline void atomic_sub(int i, atomic_t *v) | ||
60 | { | ||
61 | asm volatile(LOCK_PREFIX "subl %1,%0" | ||
62 | : "=m" (v->counter) | ||
63 | : "ir" (i), "m" (v->counter)); | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * atomic_sub_and_test - subtract value from variable and test result | ||
68 | * @i: integer value to subtract | ||
69 | * @v: pointer of type atomic_t | ||
70 | * | ||
71 | * Atomically subtracts @i from @v and returns | ||
72 | * true if the result is zero, or false for all | ||
73 | * other cases. | ||
74 | */ | ||
75 | static inline int atomic_sub_and_test(int i, atomic_t *v) | ||
76 | { | ||
77 | unsigned char c; | ||
78 | |||
79 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" | ||
80 | : "=m" (v->counter), "=qm" (c) | ||
81 | : "ir" (i), "m" (v->counter) : "memory"); | ||
82 | return c; | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * atomic_inc - increment atomic variable | ||
87 | * @v: pointer of type atomic_t | ||
88 | * | ||
89 | * Atomically increments @v by 1. | ||
90 | */ | ||
91 | static inline void atomic_inc(atomic_t *v) | ||
92 | { | ||
93 | asm volatile(LOCK_PREFIX "incl %0" | ||
94 | : "=m" (v->counter) | ||
95 | : "m" (v->counter)); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_dec - decrement atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1. | ||
103 | */ | ||
104 | static inline void atomic_dec(atomic_t *v) | ||
105 | { | ||
106 | asm volatile(LOCK_PREFIX "decl %0" | ||
107 | : "=m" (v->counter) | ||
108 | : "m" (v->counter)); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * atomic_dec_and_test - decrement and test | ||
113 | * @v: pointer of type atomic_t | ||
114 | * | ||
115 | * Atomically decrements @v by 1 and | ||
116 | * returns true if the result is 0, or false for all other | ||
117 | * cases. | ||
118 | */ | ||
119 | static inline int atomic_dec_and_test(atomic_t *v) | ||
120 | { | ||
121 | unsigned char c; | ||
122 | |||
123 | asm volatile(LOCK_PREFIX "decl %0; sete %1" | ||
124 | : "=m" (v->counter), "=qm" (c) | ||
125 | : "m" (v->counter) : "memory"); | ||
126 | return c != 0; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * atomic_inc_and_test - increment and test | ||
131 | * @v: pointer of type atomic_t | ||
132 | * | ||
133 | * Atomically increments @v by 1 | ||
134 | * and returns true if the result is zero, or false for all | ||
135 | * other cases. | ||
136 | */ | ||
137 | static inline int atomic_inc_and_test(atomic_t *v) | ||
138 | { | ||
139 | unsigned char c; | ||
140 | |||
141 | asm volatile(LOCK_PREFIX "incl %0; sete %1" | ||
142 | : "=m" (v->counter), "=qm" (c) | ||
143 | : "m" (v->counter) : "memory"); | ||
144 | return c != 0; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * atomic_add_negative - add and test if negative | ||
149 | * @i: integer value to add | ||
150 | * @v: pointer of type atomic_t | ||
151 | * | ||
152 | * Atomically adds @i to @v and returns true | ||
153 | * if the result is negative, or false when | ||
154 | * result is greater than or equal to zero. | ||
155 | */ | ||
156 | static inline int atomic_add_negative(int i, atomic_t *v) | ||
157 | { | ||
158 | unsigned char c; | ||
159 | |||
160 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" | ||
161 | : "=m" (v->counter), "=qm" (c) | ||
162 | : "ir" (i), "m" (v->counter) : "memory"); | ||
163 | return c; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * atomic_add_return - add and return | ||
168 | * @i: integer value to add | ||
169 | * @v: pointer of type atomic_t | ||
170 | * | ||
171 | * Atomically adds @i to @v and returns @i + @v | ||
172 | */ | ||
173 | static inline int atomic_add_return(int i, atomic_t *v) | ||
174 | { | ||
175 | int __i = i; | ||
176 | asm volatile(LOCK_PREFIX "xaddl %0, %1" | ||
177 | : "+r" (i), "+m" (v->counter) | ||
178 | : : "memory"); | ||
179 | return i + __i; | ||
180 | } | ||
181 | |||
182 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
183 | { | ||
184 | return atomic_add_return(-i, v); | ||
185 | } | ||
186 | |||
187 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
188 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
189 | |||
190 | /* The 64-bit atomic type */ | ||
191 | |||
192 | #define ATOMIC64_INIT(i) { (i) } | ||
193 | |||
194 | /** | ||
195 | * atomic64_read - read atomic64 variable | ||
196 | * @v: pointer of type atomic64_t | ||
197 | * | ||
198 | * Atomically reads the value of @v. | ||
199 | * Doesn't imply a read memory barrier. | ||
200 | */ | ||
201 | static inline long atomic64_read(const atomic64_t *v) | ||
202 | { | ||
203 | return v->counter; | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * atomic64_set - set atomic64 variable | ||
208 | * @v: pointer to type atomic64_t | ||
209 | * @i: required value | ||
210 | * | ||
211 | * Atomically sets the value of @v to @i. | ||
212 | */ | ||
213 | static inline void atomic64_set(atomic64_t *v, long i) | ||
214 | { | ||
215 | v->counter = i; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * atomic64_add - add integer to atomic64 variable | ||
220 | * @i: integer value to add | ||
221 | * @v: pointer to type atomic64_t | ||
222 | * | ||
223 | * Atomically adds @i to @v. | ||
224 | */ | ||
225 | static inline void atomic64_add(long i, atomic64_t *v) | ||
226 | { | ||
227 | asm volatile(LOCK_PREFIX "addq %1,%0" | ||
228 | : "=m" (v->counter) | ||
229 | : "er" (i), "m" (v->counter)); | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * atomic64_sub - subtract integer from atomic64 variable | ||
234 | * @i: integer value to subtract | ||
235 | * @v: pointer to type atomic64_t | ||
236 | * | ||
237 | * Atomically subtracts @i from @v. | ||
238 | */ | ||
239 | static inline void atomic64_sub(long i, atomic64_t *v) | ||
240 | { | ||
241 | asm volatile(LOCK_PREFIX "subq %1,%0" | ||
242 | : "=m" (v->counter) | ||
243 | : "er" (i), "m" (v->counter)); | ||
244 | } | ||
245 | |||
246 | /** | ||
247 | * atomic64_sub_and_test - subtract value from variable and test result | ||
248 | * @i: integer value to subtract | ||
249 | * @v: pointer to type atomic64_t | ||
250 | * | ||
251 | * Atomically subtracts @i from @v and returns | ||
252 | * true if the result is zero, or false for all | ||
253 | * other cases. | ||
254 | */ | ||
255 | static inline int atomic64_sub_and_test(long i, atomic64_t *v) | ||
256 | { | ||
257 | unsigned char c; | ||
258 | |||
259 | asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" | ||
260 | : "=m" (v->counter), "=qm" (c) | ||
261 | : "er" (i), "m" (v->counter) : "memory"); | ||
262 | return c; | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * atomic64_inc - increment atomic64 variable | ||
267 | * @v: pointer to type atomic64_t | ||
268 | * | ||
269 | * Atomically increments @v by 1. | ||
270 | */ | ||
271 | static inline void atomic64_inc(atomic64_t *v) | ||
272 | { | ||
273 | asm volatile(LOCK_PREFIX "incq %0" | ||
274 | : "=m" (v->counter) | ||
275 | : "m" (v->counter)); | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * atomic64_dec - decrement atomic64 variable | ||
280 | * @v: pointer to type atomic64_t | ||
281 | * | ||
282 | * Atomically decrements @v by 1. | ||
283 | */ | ||
284 | static inline void atomic64_dec(atomic64_t *v) | ||
285 | { | ||
286 | asm volatile(LOCK_PREFIX "decq %0" | ||
287 | : "=m" (v->counter) | ||
288 | : "m" (v->counter)); | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * atomic64_dec_and_test - decrement and test | ||
293 | * @v: pointer to type atomic64_t | ||
294 | * | ||
295 | * Atomically decrements @v by 1 and | ||
296 | * returns true if the result is 0, or false for all other | ||
297 | * cases. | ||
298 | */ | ||
299 | static inline int atomic64_dec_and_test(atomic64_t *v) | ||
300 | { | ||
301 | unsigned char c; | ||
302 | |||
303 | asm volatile(LOCK_PREFIX "decq %0; sete %1" | ||
304 | : "=m" (v->counter), "=qm" (c) | ||
305 | : "m" (v->counter) : "memory"); | ||
306 | return c != 0; | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * atomic64_inc_and_test - increment and test | ||
311 | * @v: pointer to type atomic64_t | ||
312 | * | ||
313 | * Atomically increments @v by 1 | ||
314 | * and returns true if the result is zero, or false for all | ||
315 | * other cases. | ||
316 | */ | ||
317 | static inline int atomic64_inc_and_test(atomic64_t *v) | ||
318 | { | ||
319 | unsigned char c; | ||
320 | |||
321 | asm volatile(LOCK_PREFIX "incq %0; sete %1" | ||
322 | : "=m" (v->counter), "=qm" (c) | ||
323 | : "m" (v->counter) : "memory"); | ||
324 | return c != 0; | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * atomic64_add_negative - add and test if negative | ||
329 | * @i: integer value to add | ||
330 | * @v: pointer to type atomic64_t | ||
331 | * | ||
332 | * Atomically adds @i to @v and returns true | ||
333 | * if the result is negative, or false when | ||
334 | * result is greater than or equal to zero. | ||
335 | */ | ||
336 | static inline int atomic64_add_negative(long i, atomic64_t *v) | ||
337 | { | ||
338 | unsigned char c; | ||
339 | |||
340 | asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" | ||
341 | : "=m" (v->counter), "=qm" (c) | ||
342 | : "er" (i), "m" (v->counter) : "memory"); | ||
343 | return c; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * atomic64_add_return - add and return | ||
348 | * @i: integer value to add | ||
349 | * @v: pointer to type atomic64_t | ||
350 | * | ||
351 | * Atomically adds @i to @v and returns @i + @v | ||
352 | */ | ||
353 | static inline long atomic64_add_return(long i, atomic64_t *v) | ||
354 | { | ||
355 | long __i = i; | ||
356 | asm volatile(LOCK_PREFIX "xaddq %0, %1;" | ||
357 | : "+r" (i), "+m" (v->counter) | ||
358 | : : "memory"); | ||
359 | return i + __i; | ||
360 | } | ||
361 | |||
362 | static inline long atomic64_sub_return(long i, atomic64_t *v) | ||
363 | { | ||
364 | return atomic64_add_return(-i, v); | ||
365 | } | ||
366 | |||
367 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) | ||
368 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) | ||
369 | |||
370 | static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) | ||
371 | { | ||
372 | return cmpxchg(&v->counter, old, new); | ||
373 | } | ||
374 | |||
375 | static inline long atomic64_xchg(atomic64_t *v, long new) | ||
376 | { | ||
377 | return xchg(&v->counter, new); | ||
378 | } | ||
379 | |||
380 | static inline long atomic_cmpxchg(atomic_t *v, int old, int new) | ||
381 | { | ||
382 | return cmpxchg(&v->counter, old, new); | ||
383 | } | ||
384 | |||
385 | static inline long atomic_xchg(atomic_t *v, int new) | ||
386 | { | ||
387 | return xchg(&v->counter, new); | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * atomic_add_unless - add unless the number is a given value | ||
392 | * @v: pointer of type atomic_t | ||
393 | * @a: the amount to add to v... | ||
394 | * @u: ...unless v is equal to u. | ||
395 | * | ||
396 | * Atomically adds @a to @v, so long as it was not @u. | ||
397 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
398 | */ | ||
399 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
400 | { | ||
401 | int c, old; | ||
402 | c = atomic_read(v); | ||
403 | for (;;) { | ||
404 | if (unlikely(c == (u))) | ||
405 | break; | ||
406 | old = atomic_cmpxchg((v), c, c + (a)); | ||
407 | if (likely(old == c)) | ||
408 | break; | ||
409 | c = old; | ||
410 | } | ||
411 | return c != (u); | ||
412 | } | ||
413 | |||
414 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
415 | |||
416 | /** | ||
417 | * atomic64_add_unless - add unless the number is a given value | ||
418 | * @v: pointer of type atomic64_t | ||
419 | * @a: the amount to add to v... | ||
420 | * @u: ...unless v is equal to u. | ||
421 | * | ||
422 | * Atomically adds @a to @v, so long as it was not @u. | ||
423 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
424 | */ | ||
425 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) | ||
426 | { | ||
427 | long c, old; | ||
428 | c = atomic64_read(v); | ||
429 | for (;;) { | ||
430 | if (unlikely(c == (u))) | ||
431 | break; | ||
432 | old = atomic64_cmpxchg((v), c, c + (a)); | ||
433 | if (likely(old == c)) | ||
434 | break; | ||
435 | c = old; | ||
436 | } | ||
437 | return c != (u); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * atomic_inc_short - increment of a short integer | ||
442 | * @v: pointer to type short int | ||
443 | * | ||
444 | * Atomically adds 1 to @v | ||
445 | * Returns the new value of @v | ||
446 | */ | ||
447 | static inline short int atomic_inc_short(short int *v) | ||
448 | { | ||
449 | asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); | ||
450 | return *v; | ||
451 | } | ||
452 | |||
453 | /** | ||
454 | * atomic_or_long - OR of two long integers | ||
455 | * @v1: pointer to type unsigned long | ||
456 | * @v2: unsigned long value to OR into *@v1 | ||
457 | * | ||
458 | * Atomically ORs @v2 into *@v1 | ||
459 | * Does not return a value | ||
460 | */ | ||
461 | static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | ||
462 | { | ||
463 | asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); | ||
464 | } | ||
465 | |||
466 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
467 | |||
468 | /* These are x86-specific, used by some header files */ | ||
469 | #define atomic_clear_mask(mask, addr) \ | ||
470 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
471 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
472 | |||
473 | #define atomic_set_mask(mask, addr) \ | ||
474 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
475 | : : "r" ((unsigned)(mask)), "m" (*(addr)) \ | ||
476 | : "memory") | ||
477 | |||
478 | /* Atomic operations are already serializing on x86 */ | ||
479 | #define smp_mb__before_atomic_dec() barrier() | ||
480 | #define smp_mb__after_atomic_dec() barrier() | ||
481 | #define smp_mb__before_atomic_inc() barrier() | ||
482 | #define smp_mb__after_atomic_inc() barrier() | ||
483 | |||
484 | #include <asm-generic/atomic-long.h> | ||
485 | #endif /* _ASM_X86_ATOMIC_64_H */ | ||
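atomic_add_return() above works because xadd writes the previous memory contents back into its register operand, so the function reconstructs the post-add value as i + __i. The same semantics in a one-function sketch, using GCC's __sync_fetch_and_add (which compiles to lock xadd on x86) as an assumed stand-in for the inline asm:

static long add_return(long *v, long i)
{
        long old = __sync_fetch_and_add(v, i);  /* lock xadd: yields the old value */
        return old + i;                         /* post-add value, as the asm computes */
}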
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index d9cf1cd156d2..f654d1bb17fb 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h | |||
@@ -22,14 +22,14 @@ do { \ | |||
22 | ".popsection" \ | 22 | ".popsection" \ |
23 | : : "i" (__FILE__), "i" (__LINE__), \ | 23 | : : "i" (__FILE__), "i" (__LINE__), \ |
24 | "i" (sizeof(struct bug_entry))); \ | 24 | "i" (sizeof(struct bug_entry))); \ |
25 | for (;;) ; \ | 25 | unreachable(); \ |
26 | } while (0) | 26 | } while (0) |
27 | 27 | ||
28 | #else | 28 | #else |
29 | #define BUG() \ | 29 | #define BUG() \ |
30 | do { \ | 30 | do { \ |
31 | asm volatile("ud2"); \ | 31 | asm volatile("ud2"); \ |
32 | for (;;) ; \ | 32 | unreachable(); \ |
33 | } while (0) | 33 | } while (0) |
34 | #endif | 34 | #endif |
35 | 35 | ||
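Replacing the dead for (;;) loop after the trapping instruction with unreachable() tells the compiler that control flow ends at the ud2, which avoids both a bogus missing-return warning and the bytes of an infinite loop. A small sketch, assuming unreachable() expands to __builtin_unreachable() as the kernel's compiler headers of this era define it for gcc >= 4.5:

#define my_unreachable() __builtin_unreachable()

static int must_be_positive(int x)
{
        if (x > 0)
                return x;
        asm volatile("ud2");    /* trap, as BUG() does */
        my_unreachable();       /* compiler: no return path past this point */
}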
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h index 549860d3be8f..2f9047cfaaca 100644 --- a/arch/x86/include/asm/cache.h +++ b/arch/x86/include/asm/cache.h | |||
@@ -9,12 +9,13 @@ | |||
9 | 9 | ||
10 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | 10 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) |
11 | 11 | ||
12 | #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT | ||
13 | #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) | ||
14 | |||
12 | #ifdef CONFIG_X86_VSMP | 15 | #ifdef CONFIG_X86_VSMP |
13 | /* vSMP Internode cacheline shift */ | ||
14 | #define INTERNODE_CACHE_SHIFT (12) | ||
15 | #ifdef CONFIG_SMP | 16 | #ifdef CONFIG_SMP |
16 | #define __cacheline_aligned_in_smp \ | 17 | #define __cacheline_aligned_in_smp \ |
17 | __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \ | 18 | __attribute__((__aligned__(INTERNODE_CACHE_BYTES))) \ |
18 | __page_aligned_data | 19 | __page_aligned_data |
19 | #endif | 20 | #endif |
20 | #endif | 21 | #endif |
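The cache.h change replaces an open-coded vSMP shift of 12 with a Kconfig-supplied INTERNODE_CACHE_SHIFT that exists in every configuration, deriving the byte size from it once. A sketch of how the derived constants compose, assuming the vSMP-style value of 12:

#define INTERNODE_CACHE_SHIFT 12                        /* assumed Kconfig value */
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)

/* 4096-byte alignment keeps the structure on its own internode line */
struct stats {
        unsigned long hits;
} __attribute__((__aligned__(INTERNODE_CACHE_BYTES)));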
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index b54f6afe7ec4..634c40a739a6 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h | |||
@@ -12,6 +12,7 @@ static inline void flush_cache_range(struct vm_area_struct *vma, | |||
12 | unsigned long start, unsigned long end) { } | 12 | unsigned long start, unsigned long end) { } |
13 | static inline void flush_cache_page(struct vm_area_struct *vma, | 13 | static inline void flush_cache_page(struct vm_area_struct *vma, |
14 | unsigned long vmaddr, unsigned long pfn) { } | 14 | unsigned long vmaddr, unsigned long pfn) { } |
15 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
15 | static inline void flush_dcache_page(struct page *page) { } | 16 | static inline void flush_dcache_page(struct page *page) { } |
16 | static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } | 17 | static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } |
17 | static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } | 18 | static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } |
@@ -176,6 +177,7 @@ void clflush_cache_range(void *addr, unsigned int size); | |||
176 | #ifdef CONFIG_DEBUG_RODATA | 177 | #ifdef CONFIG_DEBUG_RODATA |
177 | void mark_rodata_ro(void); | 178 | void mark_rodata_ro(void); |
178 | extern const int rodata_test_data; | 179 | extern const int rodata_test_data; |
180 | extern int kernel_set_to_readonly; | ||
179 | void set_kernel_text_rw(void); | 181 | void set_kernel_text_rw(void); |
180 | void set_kernel_text_ro(void); | 182 | void set_kernel_text_ro(void); |
181 | #else | 183 | #else |
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h index b03bedb62aa7..0918654305af 100644 --- a/arch/x86/include/asm/calgary.h +++ b/arch/x86/include/asm/calgary.h | |||
@@ -62,10 +62,8 @@ struct cal_chipset_ops { | |||
62 | extern int use_calgary; | 62 | extern int use_calgary; |
63 | 63 | ||
64 | #ifdef CONFIG_CALGARY_IOMMU | 64 | #ifdef CONFIG_CALGARY_IOMMU |
65 | extern int calgary_iommu_init(void); | ||
66 | extern void detect_calgary(void); | 65 | extern void detect_calgary(void); |
67 | #else | 66 | #else |
68 | static inline int calgary_iommu_init(void) { return 1; } | ||
69 | static inline void detect_calgary(void) { return; } | 67 | static inline void detect_calgary(void) { return; } |
70 | #endif | 68 | #endif |
71 | 69 | ||
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index ee1931be6593..ffb9bb6b6c37 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h | |||
@@ -8,14 +8,50 @@ | |||
8 | * you need to test for the feature in boot_cpu_data. | 8 | * you need to test for the feature in boot_cpu_data. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define xchg(ptr, v) \ | 11 | extern void __xchg_wrong_size(void); |
12 | ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr)))) | 12 | |
13 | /* | ||
14 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | ||
15 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | ||
16 | * but generally the primitive is invalid, *ptr is output argument. --ANK | ||
17 | */ | ||
13 | 18 | ||
14 | struct __xchg_dummy { | 19 | struct __xchg_dummy { |
15 | unsigned long a[100]; | 20 | unsigned long a[100]; |
16 | }; | 21 | }; |
17 | #define __xg(x) ((struct __xchg_dummy *)(x)) | 22 | #define __xg(x) ((struct __xchg_dummy *)(x)) |
18 | 23 | ||
24 | #define __xchg(x, ptr, size) \ | ||
25 | ({ \ | ||
26 | __typeof(*(ptr)) __x = (x); \ | ||
27 | switch (size) { \ | ||
28 | case 1: \ | ||
29 | asm volatile("xchgb %b0,%1" \ | ||
30 | : "=q" (__x) \ | ||
31 | : "m" (*__xg(ptr)), "0" (__x) \ | ||
32 | : "memory"); \ | ||
33 | break; \ | ||
34 | case 2: \ | ||
35 | asm volatile("xchgw %w0,%1" \ | ||
36 | : "=r" (__x) \ | ||
37 | : "m" (*__xg(ptr)), "0" (__x) \ | ||
38 | : "memory"); \ | ||
39 | break; \ | ||
40 | case 4: \ | ||
41 | asm volatile("xchgl %0,%1" \ | ||
42 | : "=r" (__x) \ | ||
43 | : "m" (*__xg(ptr)), "0" (__x) \ | ||
44 | : "memory"); \ | ||
45 | break; \ | ||
46 | default: \ | ||
47 | __xchg_wrong_size(); \ | ||
48 | } \ | ||
49 | __x; \ | ||
50 | }) | ||
51 | |||
52 | #define xchg(ptr, v) \ | ||
53 | __xchg((v), (ptr), sizeof(*ptr)) | ||
54 | |||
19 | /* | 55 | /* |
20 | * The semantics of XCHGCMP8B are a bit strange, this is why | 56 | * The semantics of XCHGCMP8B are a bit strange, this is why |
21 | * there is a loop and the loading of %%eax and %%edx has to | 57 | * there is a loop and the loading of %%eax and %%edx has to |
@@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr, | |||
71 | (unsigned int)((value) >> 32)) \ | 107 | (unsigned int)((value) >> 32)) \ |
72 | : __set_64bit(ptr, ll_low((value)), ll_high((value)))) | 108 | : __set_64bit(ptr, ll_low((value)), ll_high((value)))) |
73 | 109 | ||
74 | /* | 110 | extern void __cmpxchg_wrong_size(void); |
75 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | ||
76 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | ||
77 | * but generally the primitive is invalid, *ptr is output argument. --ANK | ||
78 | */ | ||
79 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | ||
80 | int size) | ||
81 | { | ||
82 | switch (size) { | ||
83 | case 1: | ||
84 | asm volatile("xchgb %b0,%1" | ||
85 | : "=q" (x) | ||
86 | : "m" (*__xg(ptr)), "0" (x) | ||
87 | : "memory"); | ||
88 | break; | ||
89 | case 2: | ||
90 | asm volatile("xchgw %w0,%1" | ||
91 | : "=r" (x) | ||
92 | : "m" (*__xg(ptr)), "0" (x) | ||
93 | : "memory"); | ||
94 | break; | ||
95 | case 4: | ||
96 | asm volatile("xchgl %0,%1" | ||
97 | : "=r" (x) | ||
98 | : "m" (*__xg(ptr)), "0" (x) | ||
99 | : "memory"); | ||
100 | break; | ||
101 | } | ||
102 | return x; | ||
103 | } | ||
104 | 111 | ||
105 | /* | 112 | /* |
106 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | 113 | * Atomic compare and exchange. Compare OLD with MEM, if identical, |
107 | * store NEW in MEM. Return the initial value in MEM. Success is | 114 | * store NEW in MEM. Return the initial value in MEM. Success is |
108 | * indicated by comparing RETURN with OLD. | 115 | * indicated by comparing RETURN with OLD. |
109 | */ | 116 | */ |
117 | #define __raw_cmpxchg(ptr, old, new, size, lock) \ | ||
118 | ({ \ | ||
119 | __typeof__(*(ptr)) __ret; \ | ||
120 | __typeof__(*(ptr)) __old = (old); \ | ||
121 | __typeof__(*(ptr)) __new = (new); \ | ||
122 | switch (size) { \ | ||
123 | case 1: \ | ||
124 | asm volatile(lock "cmpxchgb %b1,%2" \ | ||
125 | : "=a"(__ret) \ | ||
126 | : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
127 | : "memory"); \ | ||
128 | break; \ | ||
129 | case 2: \ | ||
130 | asm volatile(lock "cmpxchgw %w1,%2" \ | ||
131 | : "=a"(__ret) \ | ||
132 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
133 | : "memory"); \ | ||
134 | break; \ | ||
135 | case 4: \ | ||
136 | asm volatile(lock "cmpxchgl %1,%2" \ | ||
137 | : "=a"(__ret) \ | ||
138 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
139 | : "memory"); \ | ||
140 | break; \ | ||
141 | default: \ | ||
142 | __cmpxchg_wrong_size(); \ | ||
143 | } \ | ||
144 | __ret; \ | ||
145 | }) | ||
146 | |||
147 | #define __cmpxchg(ptr, old, new, size) \ | ||
148 | __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) | ||
149 | |||
150 | #define __sync_cmpxchg(ptr, old, new, size) \ | ||
151 | __raw_cmpxchg((ptr), (old), (new), (size), "lock; ") | ||
152 | |||
153 | #define __cmpxchg_local(ptr, old, new, size) \ | ||
154 | __raw_cmpxchg((ptr), (old), (new), (size), "") | ||
110 | 155 | ||
111 | #ifdef CONFIG_X86_CMPXCHG | 156 | #ifdef CONFIG_X86_CMPXCHG |
112 | #define __HAVE_ARCH_CMPXCHG 1 | 157 | #define __HAVE_ARCH_CMPXCHG 1 |
113 | #define cmpxchg(ptr, o, n) \ | 158 | |
114 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | 159 | #define cmpxchg(ptr, old, new) \ |
115 | (unsigned long)(n), \ | 160 | __cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
116 | sizeof(*(ptr)))) | 161 | |
117 | #define sync_cmpxchg(ptr, o, n) \ | 162 | #define sync_cmpxchg(ptr, old, new) \ |
118 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ | 163 | __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
119 | (unsigned long)(n), \ | 164 | |
120 | sizeof(*(ptr)))) | 165 | #define cmpxchg_local(ptr, old, new) \ |
121 | #define cmpxchg_local(ptr, o, n) \ | 166 | __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) |
122 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | ||
123 | (unsigned long)(n), \ | ||
124 | sizeof(*(ptr)))) | ||
125 | #endif | 167 | #endif |
126 | 168 | ||
127 | #ifdef CONFIG_X86_CMPXCHG64 | 169 | #ifdef CONFIG_X86_CMPXCHG64 |
@@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | |||
133 | (unsigned long long)(n))) | 175 | (unsigned long long)(n))) |
134 | #endif | 176 | #endif |
135 | 177 | ||
136 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | ||
137 | unsigned long new, int size) | ||
138 | { | ||
139 | unsigned long prev; | ||
140 | switch (size) { | ||
141 | case 1: | ||
142 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" | ||
143 | : "=a"(prev) | ||
144 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
145 | : "memory"); | ||
146 | return prev; | ||
147 | case 2: | ||
148 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" | ||
149 | : "=a"(prev) | ||
150 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
151 | : "memory"); | ||
152 | return prev; | ||
153 | case 4: | ||
154 | asm volatile(LOCK_PREFIX "cmpxchgl %1,%2" | ||
155 | : "=a"(prev) | ||
156 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
157 | : "memory"); | ||
158 | return prev; | ||
159 | } | ||
160 | return old; | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * Always use locked operations when touching memory shared with a | ||
165 | * hypervisor, since the system may be SMP even if the guest kernel | ||
166 | * isn't. | ||
167 | */ | ||
168 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, | ||
169 | unsigned long old, | ||
170 | unsigned long new, int size) | ||
171 | { | ||
172 | unsigned long prev; | ||
173 | switch (size) { | ||
174 | case 1: | ||
175 | asm volatile("lock; cmpxchgb %b1,%2" | ||
176 | : "=a"(prev) | ||
177 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
178 | : "memory"); | ||
179 | return prev; | ||
180 | case 2: | ||
181 | asm volatile("lock; cmpxchgw %w1,%2" | ||
182 | : "=a"(prev) | ||
183 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
184 | : "memory"); | ||
185 | return prev; | ||
186 | case 4: | ||
187 | asm volatile("lock; cmpxchgl %1,%2" | ||
188 | : "=a"(prev) | ||
189 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
190 | : "memory"); | ||
191 | return prev; | ||
192 | } | ||
193 | return old; | ||
194 | } | ||
195 | |||
196 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | ||
197 | unsigned long old, | ||
198 | unsigned long new, int size) | ||
199 | { | ||
200 | unsigned long prev; | ||
201 | switch (size) { | ||
202 | case 1: | ||
203 | asm volatile("cmpxchgb %b1,%2" | ||
204 | : "=a"(prev) | ||
205 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
206 | : "memory"); | ||
207 | return prev; | ||
208 | case 2: | ||
209 | asm volatile("cmpxchgw %w1,%2" | ||
210 | : "=a"(prev) | ||
211 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
212 | : "memory"); | ||
213 | return prev; | ||
214 | case 4: | ||
215 | asm volatile("cmpxchgl %1,%2" | ||
216 | : "=a"(prev) | ||
217 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
218 | : "memory"); | ||
219 | return prev; | ||
220 | } | ||
221 | return old; | ||
222 | } | ||
223 | |||
224 | static inline unsigned long long __cmpxchg64(volatile void *ptr, | 178 | static inline unsigned long long __cmpxchg64(volatile void *ptr, |
225 | unsigned long long old, | 179 | unsigned long long old, |
226 | unsigned long long new) | 180 | unsigned long long new) |
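The rewritten xchg()/cmpxchg() macros above keep the old per-size asm but add a default: branch that calls a declared-but-never-defined function, turning an unsupported operand size into a link-time failure instead of the old behavior of silently doing nothing. A condensed sketch of the idiom, with __sync_lock_test_and_set assumed as the swap primitive:

extern void __bad_size(void);   /* deliberately has no definition anywhere */

#define my_xchg(ptr, v)                                                 \
({                                                                      \
        __typeof__(*(ptr)) __x = (v);                                   \
        switch (sizeof(*(ptr))) {                                       \
        case 4:                                                         \
        case 8:                                                         \
                __x = __sync_lock_test_and_set((ptr), __x);             \
                break;                                                  \
        default:                                                        \
                __bad_size();   /* unresolved symbol if ever emitted */ \
        }                                                               \
        __x;                                                            \
})

Because sizeof(*(ptr)) is a compile-time constant, the switch folds away entirely; the undefined call only survives into the object file when a caller actually uses a size the macro does not handle.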
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 52de72e0de8c..485ae415faec 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h | |||
@@ -3,9 +3,6 @@ | |||
3 | 3 | ||
4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ | 4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
5 | 5 | ||
6 | #define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \ | ||
7 | (ptr), sizeof(*(ptr)))) | ||
8 | |||
9 | #define __xg(x) ((volatile long *)(x)) | 6 | #define __xg(x) ((volatile long *)(x)) |
10 | 7 | ||
11 | static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | 8 | static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) |
@@ -15,167 +12,118 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | |||
15 | 12 | ||
16 | #define _set_64bit set_64bit | 13 | #define _set_64bit set_64bit |
17 | 14 | ||
15 | extern void __xchg_wrong_size(void); | ||
16 | extern void __cmpxchg_wrong_size(void); | ||
17 | |||
18 | /* | 18 | /* |
19 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | 19 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway |
20 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | 20 | * Note 2: xchg has side effect, so that attribute volatile is necessary, |
21 | * but generally the primitive is invalid, *ptr is output argument. --ANK | 21 | * but generally the primitive is invalid, *ptr is output argument. --ANK |
22 | */ | 22 | */ |
23 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | 23 | #define __xchg(x, ptr, size) \ |
24 | int size) | 24 | ({ \ |
25 | { | 25 | __typeof(*(ptr)) __x = (x); \ |
26 | switch (size) { | 26 | switch (size) { \ |
27 | case 1: | 27 | case 1: \ |
28 | asm volatile("xchgb %b0,%1" | 28 | asm volatile("xchgb %b0,%1" \ |
29 | : "=q" (x) | 29 | : "=q" (__x) \ |
30 | : "m" (*__xg(ptr)), "0" (x) | 30 | : "m" (*__xg(ptr)), "0" (__x) \ |
31 | : "memory"); | 31 | : "memory"); \ |
32 | break; | 32 | break; \ |
33 | case 2: | 33 | case 2: \ |
34 | asm volatile("xchgw %w0,%1" | 34 | asm volatile("xchgw %w0,%1" \ |
35 | : "=r" (x) | 35 | : "=r" (__x) \ |
36 | : "m" (*__xg(ptr)), "0" (x) | 36 | : "m" (*__xg(ptr)), "0" (__x) \ |
37 | : "memory"); | 37 | : "memory"); \ |
38 | break; | 38 | break; \ |
39 | case 4: | 39 | case 4: \ |
40 | asm volatile("xchgl %k0,%1" | 40 | asm volatile("xchgl %k0,%1" \ |
41 | : "=r" (x) | 41 | : "=r" (__x) \ |
42 | : "m" (*__xg(ptr)), "0" (x) | 42 | : "m" (*__xg(ptr)), "0" (__x) \ |
43 | : "memory"); | 43 | : "memory"); \ |
44 | break; | 44 | break; \ |
45 | case 8: | 45 | case 8: \ |
46 | asm volatile("xchgq %0,%1" | 46 | asm volatile("xchgq %0,%1" \ |
47 | : "=r" (x) | 47 | : "=r" (__x) \ |
48 | : "m" (*__xg(ptr)), "0" (x) | 48 | : "m" (*__xg(ptr)), "0" (__x) \ |
49 | : "memory"); | 49 | : "memory"); \ |
50 | break; | 50 | break; \ |
51 | } | 51 | default: \ |
52 | return x; | 52 | __xchg_wrong_size(); \ |
53 | } | 53 | } \ |
54 | __x; \ | ||
55 | }) | ||
56 | |||
57 | #define xchg(ptr, v) \ | ||
58 | __xchg((v), (ptr), sizeof(*ptr)) | ||
59 | |||
60 | #define __HAVE_ARCH_CMPXCHG 1 | ||
54 | 61 | ||
55 | /* | 62 | /* |
56 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | 63 | * Atomic compare and exchange. Compare OLD with MEM, if identical, |
57 | * store NEW in MEM. Return the initial value in MEM. Success is | 64 | * store NEW in MEM. Return the initial value in MEM. Success is |
58 | * indicated by comparing RETURN with OLD. | 65 | * indicated by comparing RETURN with OLD. |
59 | */ | 66 | */ |
67 | #define __raw_cmpxchg(ptr, old, new, size, lock) \ | ||
68 | ({ \ | ||
69 | __typeof__(*(ptr)) __ret; \ | ||
70 | __typeof__(*(ptr)) __old = (old); \ | ||
71 | __typeof__(*(ptr)) __new = (new); \ | ||
72 | switch (size) { \ | ||
73 | case 1: \ | ||
74 | asm volatile(lock "cmpxchgb %b1,%2" \ | ||
75 | : "=a"(__ret) \ | ||
76 | : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
77 | : "memory"); \ | ||
78 | break; \ | ||
79 | case 2: \ | ||
80 | asm volatile(lock "cmpxchgw %w1,%2" \ | ||
81 | : "=a"(__ret) \ | ||
82 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
83 | : "memory"); \ | ||
84 | break; \ | ||
85 | case 4: \ | ||
86 | asm volatile(lock "cmpxchgl %k1,%2" \ | ||
87 | : "=a"(__ret) \ | ||
88 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
89 | : "memory"); \ | ||
90 | break; \ | ||
91 | case 8: \ | ||
92 | asm volatile(lock "cmpxchgq %1,%2" \ | ||
93 | : "=a"(__ret) \ | ||
94 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
95 | : "memory"); \ | ||
96 | break; \ | ||
97 | default: \ | ||
98 | __cmpxchg_wrong_size(); \ | ||
99 | } \ | ||
100 | __ret; \ | ||
101 | }) | ||
60 | 102 | ||
61 | #define __HAVE_ARCH_CMPXCHG 1 | 103 | #define __cmpxchg(ptr, old, new, size) \ |
104 | __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) | ||
62 | 105 | ||
63 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | 106 | #define __sync_cmpxchg(ptr, old, new, size) \ |
64 | unsigned long new, int size) | 107 | __raw_cmpxchg((ptr), (old), (new), (size), "lock; ") |
65 | { | ||
66 | unsigned long prev; | ||
67 | switch (size) { | ||
68 | case 1: | ||
69 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" | ||
70 | : "=a"(prev) | ||
71 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
72 | : "memory"); | ||
73 | return prev; | ||
74 | case 2: | ||
75 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" | ||
76 | : "=a"(prev) | ||
77 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
78 | : "memory"); | ||
79 | return prev; | ||
80 | case 4: | ||
81 | asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2" | ||
82 | : "=a"(prev) | ||
83 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
84 | : "memory"); | ||
85 | return prev; | ||
86 | case 8: | ||
87 | asm volatile(LOCK_PREFIX "cmpxchgq %1,%2" | ||
88 | : "=a"(prev) | ||
89 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
90 | : "memory"); | ||
91 | return prev; | ||
92 | } | ||
93 | return old; | ||
94 | } | ||
95 | 108 | ||
96 | /* | 109 | #define __cmpxchg_local(ptr, old, new, size) \ |
97 | * Always use locked operations when touching memory shared with a | 110 | __raw_cmpxchg((ptr), (old), (new), (size), "") |
98 | * hypervisor, since the system may be SMP even if the guest kernel | ||
99 | * isn't. | ||
100 | */ | ||
101 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, | ||
102 | unsigned long old, | ||
103 | unsigned long new, int size) | ||
104 | { | ||
105 | unsigned long prev; | ||
106 | switch (size) { | ||
107 | case 1: | ||
108 | asm volatile("lock; cmpxchgb %b1,%2" | ||
109 | : "=a"(prev) | ||
110 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
111 | : "memory"); | ||
112 | return prev; | ||
113 | case 2: | ||
114 | asm volatile("lock; cmpxchgw %w1,%2" | ||
115 | : "=a"(prev) | ||
116 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
117 | : "memory"); | ||
118 | return prev; | ||
119 | case 4: | ||
120 | asm volatile("lock; cmpxchgl %1,%2" | ||
121 | : "=a"(prev) | ||
122 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
123 | : "memory"); | ||
124 | return prev; | ||
125 | } | ||
126 | return old; | ||
127 | } | ||
128 | 111 | ||
129 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | 112 | #define cmpxchg(ptr, old, new) \ |
130 | unsigned long old, | 113 | __cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
131 | unsigned long new, int size) | 114 | |
132 | { | 115 | #define sync_cmpxchg(ptr, old, new) \ |
133 | unsigned long prev; | 116 | __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
134 | switch (size) { | 117 | |
135 | case 1: | 118 | #define cmpxchg_local(ptr, old, new) \ |
136 | asm volatile("cmpxchgb %b1,%2" | 119 | __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) |
137 | : "=a"(prev) | ||
138 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
139 | : "memory"); | ||
140 | return prev; | ||
141 | case 2: | ||
142 | asm volatile("cmpxchgw %w1,%2" | ||
143 | : "=a"(prev) | ||
144 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
145 | : "memory"); | ||
146 | return prev; | ||
147 | case 4: | ||
148 | asm volatile("cmpxchgl %k1,%2" | ||
149 | : "=a"(prev) | ||
150 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
151 | : "memory"); | ||
152 | return prev; | ||
153 | case 8: | ||
154 | asm volatile("cmpxchgq %1,%2" | ||
155 | : "=a"(prev) | ||
156 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
157 | : "memory"); | ||
158 | return prev; | ||
159 | } | ||
160 | return old; | ||
161 | } | ||
162 | 120 | ||
163 | #define cmpxchg(ptr, o, n) \ | ||
164 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | ||
165 | (unsigned long)(n), sizeof(*(ptr)))) | ||
166 | #define cmpxchg64(ptr, o, n) \ | 121 | #define cmpxchg64(ptr, o, n) \ |
167 | ({ \ | 122 | ({ \ |
168 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 123 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
169 | cmpxchg((ptr), (o), (n)); \ | 124 | cmpxchg((ptr), (o), (n)); \ |
170 | }) | 125 | }) |
171 | #define cmpxchg_local(ptr, o, n) \ | 126 | |
172 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | ||
173 | (unsigned long)(n), \ | ||
174 | sizeof(*(ptr)))) | ||
175 | #define sync_cmpxchg(ptr, o, n) \ | ||
176 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ | ||
177 | (unsigned long)(n), \ | ||
178 | sizeof(*(ptr)))) | ||
179 | #define cmpxchg64_local(ptr, o, n) \ | 127 | #define cmpxchg64_local(ptr, o, n) \ |
180 | ({ \ | 128 | ({ \ |
181 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 129 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
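With cmpxchg() now a type-preserving macro, callers pass values of the pointee's own type with no casts through unsigned long. A typical caller-side retry pattern, sketched with __sync_val_compare_and_swap standing in for cmpxchg(): a lock-free maximum update.

static void update_max(unsigned long *max, unsigned long val)
{
        unsigned long old = *max;
        while (val > old) {
                unsigned long prev = __sync_val_compare_and_swap(max, old, val);
                if (prev == old)
                        break;          /* our value is installed */
                old = prev;             /* raced: re-test against the new max */
        }
}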
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 9a9c7bdc923d..306160e58b48 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h | |||
@@ -8,7 +8,8 @@ | |||
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <asm/user32.h> | 9 | #include <asm/user32.h> |
10 | 10 | ||
11 | #define COMPAT_USER_HZ 100 | 11 | #define COMPAT_USER_HZ 100 |
12 | #define COMPAT_UTS_MACHINE "i686\0\0" | ||
12 | 13 | ||
13 | typedef u32 compat_size_t; | 14 | typedef u32 compat_size_t; |
14 | typedef s32 compat_ssize_t; | 15 | typedef s32 compat_ssize_t; |
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h deleted file mode 100644 index d96c1ee3a95c..000000000000 --- a/arch/x86/include/asm/cpu_debug.h +++ /dev/null | |||
@@ -1,127 +0,0 @@ | |||
1 | #ifndef _ASM_X86_CPU_DEBUG_H | ||
2 | #define _ASM_X86_CPU_DEBUG_H | ||
3 | |||
4 | /* | ||
5 | * CPU x86 architecture debug | ||
6 | * | ||
7 | * Copyright(C) 2009 Jaswinder Singh Rajput | ||
8 | */ | ||
9 | |||
10 | /* Register flags */ | ||
11 | enum cpu_debug_bit { | ||
12 | /* Model Specific Registers (MSRs) */ | ||
13 | CPU_MC_BIT, /* Machine Check */ | ||
14 | CPU_MONITOR_BIT, /* Monitor */ | ||
15 | CPU_TIME_BIT, /* Time */ | ||
16 | CPU_PMC_BIT, /* Performance Monitor */ | ||
17 | CPU_PLATFORM_BIT, /* Platform */ | ||
18 | CPU_APIC_BIT, /* APIC */ | ||
19 | CPU_POWERON_BIT, /* Power-on */ | ||
20 | CPU_CONTROL_BIT, /* Control */ | ||
21 | CPU_FEATURES_BIT, /* Features control */ | ||
22 | CPU_LBRANCH_BIT, /* Last Branch */ | ||
23 | CPU_BIOS_BIT, /* BIOS */ | ||
24 | CPU_FREQ_BIT, /* Frequency */ | ||
25 | CPU_MTTR_BIT, /* MTRR */ | ||
26 | CPU_PERF_BIT, /* Performance */ | ||
27 | CPU_CACHE_BIT, /* Cache */ | ||
28 | CPU_SYSENTER_BIT, /* Sysenter */ | ||
29 | CPU_THERM_BIT, /* Thermal */ | ||
30 | CPU_MISC_BIT, /* Miscellaneous */ | ||
31 | CPU_DEBUG_BIT, /* Debug */ | ||
32 | CPU_PAT_BIT, /* PAT */ | ||
33 | CPU_VMX_BIT, /* VMX */ | ||
34 | CPU_CALL_BIT, /* System Call */ | ||
35 | CPU_BASE_BIT, /* BASE Address */ | ||
36 | CPU_VER_BIT, /* Version ID */ | ||
37 | CPU_CONF_BIT, /* Configuration */ | ||
38 | CPU_SMM_BIT, /* System mgmt mode */ | ||
39 | CPU_SVM_BIT, /* Secure Virtual Machine */ | ||
40 | CPU_OSVM_BIT, /* OS-Visible Workaround */ | ||
41 | /* Standard Registers */ | ||
42 | CPU_TSS_BIT, /* Task Stack Segment */ | ||
43 | CPU_CR_BIT, /* Control Registers */ | ||
44 | CPU_DT_BIT, /* Descriptor Table */ | ||
45 | /* End of Registers flags */ | ||
46 | CPU_REG_ALL_BIT, /* Select all Registers */ | ||
47 | }; | ||
48 | |||
49 | #define CPU_REG_ALL (~0) /* Select all Registers */ | ||
50 | |||
51 | #define CPU_MC (1 << CPU_MC_BIT) | ||
52 | #define CPU_MONITOR (1 << CPU_MONITOR_BIT) | ||
53 | #define CPU_TIME (1 << CPU_TIME_BIT) | ||
54 | #define CPU_PMC (1 << CPU_PMC_BIT) | ||
55 | #define CPU_PLATFORM (1 << CPU_PLATFORM_BIT) | ||
56 | #define CPU_APIC (1 << CPU_APIC_BIT) | ||
57 | #define CPU_POWERON (1 << CPU_POWERON_BIT) | ||
58 | #define CPU_CONTROL (1 << CPU_CONTROL_BIT) | ||
59 | #define CPU_FEATURES (1 << CPU_FEATURES_BIT) | ||
60 | #define CPU_LBRANCH (1 << CPU_LBRANCH_BIT) | ||
61 | #define CPU_BIOS (1 << CPU_BIOS_BIT) | ||
62 | #define CPU_FREQ (1 << CPU_FREQ_BIT) | ||
63 | #define CPU_MTRR (1 << CPU_MTTR_BIT) | ||
64 | #define CPU_PERF (1 << CPU_PERF_BIT) | ||
65 | #define CPU_CACHE (1 << CPU_CACHE_BIT) | ||
66 | #define CPU_SYSENTER (1 << CPU_SYSENTER_BIT) | ||
67 | #define CPU_THERM (1 << CPU_THERM_BIT) | ||
68 | #define CPU_MISC (1 << CPU_MISC_BIT) | ||
69 | #define CPU_DEBUG (1 << CPU_DEBUG_BIT) | ||
70 | #define CPU_PAT (1 << CPU_PAT_BIT) | ||
71 | #define CPU_VMX (1 << CPU_VMX_BIT) | ||
72 | #define CPU_CALL (1 << CPU_CALL_BIT) | ||
73 | #define CPU_BASE (1 << CPU_BASE_BIT) | ||
74 | #define CPU_VER (1 << CPU_VER_BIT) | ||
75 | #define CPU_CONF (1 << CPU_CONF_BIT) | ||
76 | #define CPU_SMM (1 << CPU_SMM_BIT) | ||
77 | #define CPU_SVM (1 << CPU_SVM_BIT) | ||
78 | #define CPU_OSVM (1 << CPU_OSVM_BIT) | ||
79 | #define CPU_TSS (1 << CPU_TSS_BIT) | ||
80 | #define CPU_CR (1 << CPU_CR_BIT) | ||
81 | #define CPU_DT (1 << CPU_DT_BIT) | ||
82 | |||
83 | /* Register file flags */ | ||
84 | enum cpu_file_bit { | ||
85 | CPU_INDEX_BIT, /* index */ | ||
86 | CPU_VALUE_BIT, /* value */ | ||
87 | }; | ||
88 | |||
89 | #define CPU_FILE_VALUE (1 << CPU_VALUE_BIT) | ||
90 | |||
91 | #define MAX_CPU_FILES 512 | ||
92 | |||
93 | struct cpu_private { | ||
94 | unsigned cpu; | ||
95 | unsigned type; | ||
96 | unsigned reg; | ||
97 | unsigned file; | ||
98 | }; | ||
99 | |||
100 | struct cpu_debug_base { | ||
101 | char *name; /* Register name */ | ||
102 | unsigned flag; /* Register flag */ | ||
103 | unsigned write; /* Register write flag */ | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * Currently it looks similar to cpu_debug_base but once we add more files | ||
108 | * cpu_file_base will go in different direction | ||
109 | */ | ||
110 | struct cpu_file_base { | ||
111 | char *name; /* Register file name */ | ||
112 | unsigned flag; /* Register file flag */ | ||
113 | unsigned write; /* Register write flag */ | ||
114 | }; | ||
115 | |||
116 | struct cpu_cpuX_base { | ||
117 | struct dentry *dentry; /* Register dentry */ | ||
118 | int init; /* Register index file */ | ||
119 | }; | ||
120 | |||
121 | struct cpu_debug_range { | ||
122 | unsigned min; /* Register range min */ | ||
123 | unsigned max; /* Register range max */ | ||
124 | unsigned flag; /* Supported flags */ | ||
125 | }; | ||
126 | |||
127 | #endif /* _ASM_X86_CPU_DEBUG_H */ | ||
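The deleted header follows a common kernel flag scheme: an enum names the bit positions and a parallel block of (1 << BIT) masks turns them into OR-able filters. A toy sketch of the same pattern:

enum flag_bit { FLAG_A_BIT, FLAG_B_BIT, FLAG_ALL_BIT };

#define FLAG_A   (1 << FLAG_A_BIT)
#define FLAG_B   (1 << FLAG_B_BIT)
#define FLAG_ALL (~0)                   /* select everything, as CPU_REG_ALL did */

static int selected(unsigned filter, unsigned mask)
{
        return (filter & mask) != 0;
}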
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 9cfc88b97742..0cd82d068613 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -153,6 +153,7 @@ | |||
153 | #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ | 153 | #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ |
154 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ | 154 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ |
155 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ | 155 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ |
156 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ | ||
156 | 157 | ||
157 | /* | 158 | /* |
158 | * Auxiliary flags: Linux defined - For features scattered in various | 159 | * Auxiliary flags: Linux defined - For features scattered in various |
@@ -167,6 +168,10 @@ | |||
167 | #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ | 168 | #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ |
168 | #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ | 169 | #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ |
169 | #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ | 170 | #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ |
171 | #define X86_FEATURE_NPT (8*32+5) /* AMD Nested Page Table support */ | ||
172 | #define X86_FEATURE_LBRV (8*32+6) /* AMD LBR Virtualization support */ | ||
173 | #define X86_FEATURE_SVML (8*32+7) /* "svm_lock" AMD SVM locking MSR */ | ||
174 | #define X86_FEATURE_NRIPS (8*32+8) /* "nrip_save" AMD SVM next_rip save */ | ||
170 | 175 | ||
171 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 176 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
172 | 177 | ||
@@ -248,6 +253,7 @@ extern const char * const x86_power_flags[32]; | |||
248 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) | 253 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) |
249 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) | 254 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
250 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) | 255 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
256 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) | ||
251 | 257 | ||
252 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | 258 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
253 | # define cpu_has_invlpg 1 | 259 | # define cpu_has_invlpg 1 |
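The feature numbers in this header follow a word*32+bit convention: each X86_FEATURE_* value indexes one bit inside an array of 32-bit capability words. A standalone sketch of that indexing, with demo_has() standing in for the kernel's accessors (the names and word count here are illustrative):

#include <stdio.h>

#define DEMO_NCAPINTS 9                         /* capability words, assumed */
#define DEMO_FEATURE_NODEID_MSR (6*32 + 19)     /* same encoding as above */

/* Test one feature bit: word = feature/32, bit = feature%32. */
static int demo_has(const unsigned caps[DEMO_NCAPINTS], int feature)
{
        return (caps[feature / 32] >> (feature % 32)) & 1;
}

int main(void)
{
        unsigned caps[DEMO_NCAPINTS] = { 0 };

        caps[6] |= 1u << 19;    /* pretend CPUID reported the NodeId MSR */
        printf("NodeId MSR: %d\n", demo_has(caps, DEMO_FEATURE_NODEID_MSR));
        return 0;
}

boot_cpu_has() performs essentially this word-and-bit lookup on the boot CPU's capability array.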
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 3ea6f37be9e2..b81002f23614 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -14,10 +14,14 @@ | |||
14 | which debugging register was responsible for the trap. The other bits | 14 | which debugging register was responsible for the trap. The other bits |
15 | are either reserved or not of interest to us. */ | 15 | are either reserved or not of interest to us. */ |
16 | 16 | ||
17 | /* Define reserved bits in DR6 which are always set to 1 */ | ||
18 | #define DR6_RESERVED (0xFFFF0FF0) | ||
19 | |||
17 | #define DR_TRAP0 (0x1) /* db0 */ | 20 | #define DR_TRAP0 (0x1) /* db0 */ |
18 | #define DR_TRAP1 (0x2) /* db1 */ | 21 | #define DR_TRAP1 (0x2) /* db1 */ |
19 | #define DR_TRAP2 (0x4) /* db2 */ | 22 | #define DR_TRAP2 (0x4) /* db2 */ |
20 | #define DR_TRAP3 (0x8) /* db3 */ | 23 | #define DR_TRAP3 (0x8) /* db3 */ |
24 | #define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3) | ||
21 | 25 | ||
22 | #define DR_STEP (0x4000) /* single-step */ | 26 | #define DR_STEP (0x4000) /* single-step */ |
23 | #define DR_SWITCH (0x8000) /* task switch */ | 27 | #define DR_SWITCH (0x8000) /* task switch */ |
@@ -49,6 +53,8 @@ | |||
49 | 53 | ||
50 | #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ | 54 | #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ |
51 | #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ | 55 | #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ |
56 | #define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */ | ||
57 | #define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */ | ||
52 | #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ | 58 | #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ |
53 | 59 | ||
54 | #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ | 60 | #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ |
@@ -67,4 +73,34 @@ | |||
67 | #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ | 73 | #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ |
68 | #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ | 74 | #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ |
69 | 75 | ||
76 | /* | ||
77 | * HW breakpoint additions | ||
78 | */ | ||
79 | #ifdef __KERNEL__ | ||
80 | |||
81 | DECLARE_PER_CPU(unsigned long, cpu_dr7); | ||
82 | |||
83 | static inline void hw_breakpoint_disable(void) | ||
84 | { | ||
85 | /* Zero the control register for HW Breakpoint */ | ||
86 | set_debugreg(0UL, 7); | ||
87 | |||
88 | /* Zero-out the individual HW breakpoint address registers */ | ||
89 | set_debugreg(0UL, 0); | ||
90 | set_debugreg(0UL, 1); | ||
91 | set_debugreg(0UL, 2); | ||
92 | set_debugreg(0UL, 3); | ||
93 | } | ||
94 | |||
95 | static inline int hw_breakpoint_active(void) | ||
96 | { | ||
97 | return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; | ||
98 | } | ||
99 | |||
100 | extern void aout_dump_debugregs(struct user *dump); | ||
101 | |||
102 | extern void hw_breakpoint_restore(void); | ||
103 | |||
104 | #endif /* __KERNEL__ */ | ||
105 | |||
70 | #endif /* _ASM_X86_DEBUGREG_H */ | 106 | #endif /* _ASM_X86_DEBUGREG_H */ |
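DR7 allocates two enable bits per breakpoint, local then global, so the new DR_LOCAL_ENABLE/DR_GLOBAL_ENABLE values (defined for register 0) reach register n after a shift of n*DR_ENABLE_SIZE. A user-space sketch of that arithmetic:

#include <stdio.h>

#define DR_LOCAL_ENABLE  0x1    /* Local enable for reg 0 */
#define DR_GLOBAL_ENABLE 0x2    /* Global enable for reg 0 */
#define DR_ENABLE_SIZE   2      /* 2 enable bits per register */

/* DR7 local-enable bit for hardware breakpoint n (0..3). */
static unsigned long dr7_local_enable(int n)
{
        return (unsigned long)DR_LOCAL_ENABLE << (n * DR_ENABLE_SIZE);
}

int main(void)
{
        int n;

        for (n = 0; n < 4; n++)
                printf("bp%d local enable: %#lx\n", n, dr7_local_enable(n));
        /* prints 0x1 0x4 0x10 0x40 */
        return 0;
}

OR-ing the four values reproduces DR_LOCAL_ENABLE_MASK (0x55) above; the same walk with DR_GLOBAL_ENABLE yields 0xAA for the global bits.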
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index 9d6684849fd9..278441f39856 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -12,9 +12,9 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * FIXME: Acessing the desc_struct through its fields is more elegant, | 15 | * FIXME: Accessing the desc_struct through its fields is more elegant, |
16 | * and should be the one valid thing to do. However, a lot of open code | 16 | * and should be the one valid thing to do. However, a lot of open code |
17 | * still touches the a and b acessors, and doing this allow us to do it | 17 | * still touches the a and b accessors, and doing this allow us to do it |
18 | * incrementally. We keep the signature as a struct, rather than an union, | 18 | * incrementally. We keep the signature as a struct, rather than an union, |
19 | * so we can get rid of it transparently in the future -- glommer | 19 | * so we can get rid of it transparently in the future -- glommer |
20 | */ | 20 | */ |
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index cee34e9ca45b..029f230ab637 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -8,7 +8,7 @@ struct dev_archdata { | |||
8 | #ifdef CONFIG_X86_64 | 8 | #ifdef CONFIG_X86_64 |
9 | struct dma_map_ops *dma_ops; | 9 | struct dma_map_ops *dma_ops; |
10 | #endif | 10 | #endif |
11 | #ifdef CONFIG_DMAR | 11 | #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU) |
12 | void *iommu; /* hook for IOMMU specific extension */ | 12 | void *iommu; /* hook for IOMMU specific extension */ |
13 | #endif | 13 | #endif |
14 | }; | 14 | }; |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 6a25d5d42836..ac91eed21061 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -20,7 +20,8 @@ | |||
20 | # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) | 20 | # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | extern dma_addr_t bad_dma_address; | 23 | #define DMA_ERROR_CODE 0 |
24 | |||
24 | extern int iommu_merge; | 25 | extern int iommu_merge; |
25 | extern struct device x86_dma_fallback_dev; | 26 | extern struct device x86_dma_fallback_dev; |
26 | extern int panic_on_overflow; | 27 | extern int panic_on_overflow; |
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
48 | if (ops->mapping_error) | 49 | if (ops->mapping_error) |
49 | return ops->mapping_error(dev, dma_addr); | 50 | return ops->mapping_error(dev, dma_addr); |
50 | 51 | ||
51 | return (dma_addr == bad_dma_address); | 52 | return (dma_addr == DMA_ERROR_CODE); |
52 | } | 53 | } |
53 | 54 | ||
54 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 55 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
@@ -66,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | |||
66 | if (!dev->dma_mask) | 67 | if (!dev->dma_mask) |
67 | return 0; | 68 | return 0; |
68 | 69 | ||
69 | return addr + size <= *dev->dma_mask; | 70 | return addr + size - 1 <= *dev->dma_mask; |
70 | } | 71 | } |
71 | 72 | ||
72 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | 73 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
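The dma_capable() change is an off-by-one fix: comparing addr + size against the mask rejects a buffer whose last byte sits exactly at the highest address the mask allows. A user-space sketch contrasting the two checks (the 32-bit mask and the addresses are example values):

#include <stdio.h>
#include <stdint.h>

/* Old and new range checks from dma_capable(), in isolation. */
static int capable_old(uint64_t addr, uint64_t size, uint64_t mask)
{
        return addr + size <= mask;
}

static int capable_new(uint64_t addr, uint64_t size, uint64_t mask)
{
        return addr + size - 1 <= mask;
}

int main(void)
{
        uint64_t mask = 0xffffffffULL;                  /* 32-bit DMA mask */
        uint64_t addr = 0xfffff000ULL, size = 0x1000;   /* last page < 4G */

        /* The buffer's final byte is 0xffffffff, which is addressable. */
        printf("old: %d, new: %d\n",
               capable_old(addr, size, mask),   /* 0: spurious rejection */
               capable_new(addr, size, mask));  /* 1: accepted */
        return 0;
}

The subtraction also keeps addr + size from wrapping to zero for a buffer that ends at the very top of the address space, which would let the old test pass spuriously.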
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 40b4e614fe71..0e22296790d3 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -61,6 +61,12 @@ struct e820map { | |||
61 | struct e820entry map[E820_X_MAX]; | 61 | struct e820entry map[E820_X_MAX]; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | #define ISA_START_ADDRESS 0xa0000 | ||
65 | #define ISA_END_ADDRESS 0x100000 | ||
66 | |||
67 | #define BIOS_BEGIN 0x000a0000 | ||
68 | #define BIOS_END 0x00100000 | ||
69 | |||
64 | #ifdef __KERNEL__ | 70 | #ifdef __KERNEL__ |
65 | /* see comment in arch/x86/kernel/e820.c */ | 71 | /* see comment in arch/x86/kernel/e820.c */ |
66 | extern struct e820map e820; | 72 | extern struct e820map e820; |
@@ -105,11 +111,8 @@ extern unsigned long end_user_pfn; | |||
105 | 111 | ||
106 | extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); | 112 | extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); |
107 | extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); | 113 | extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); |
108 | extern void reserve_early(u64 start, u64 end, char *name); | ||
109 | extern void reserve_early_overlap_ok(u64 start, u64 end, char *name); | ||
110 | extern void free_early(u64 start, u64 end); | ||
111 | extern void early_res_to_bootmem(u64 start, u64 end); | ||
112 | extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); | 114 | extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); |
115 | #include <linux/early_res.h> | ||
113 | 116 | ||
114 | extern unsigned long e820_end_of_ram_pfn(void); | 117 | extern unsigned long e820_end_of_ram_pfn(void); |
115 | extern unsigned long e820_end_of_low_ram_pfn(void); | 118 | extern unsigned long e820_end_of_low_ram_pfn(void); |
@@ -126,15 +129,18 @@ extern void e820_reserve_resources(void); | |||
126 | extern void e820_reserve_resources_late(void); | 129 | extern void e820_reserve_resources_late(void); |
127 | extern void setup_memory_map(void); | 130 | extern void setup_memory_map(void); |
128 | extern char *default_machine_specific_memory_setup(void); | 131 | extern char *default_machine_specific_memory_setup(void); |
129 | #endif /* __KERNEL__ */ | ||
130 | #endif /* __ASSEMBLY__ */ | ||
131 | 132 | ||
132 | #define ISA_START_ADDRESS 0xa0000 | 133 | /* |
133 | #define ISA_END_ADDRESS 0x100000 | 134 | * Returns true iff the specified range [s,e) is completely contained inside |
134 | #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS) | 135 | * the ISA region. |
136 | */ | ||
137 | static inline bool is_ISA_range(u64 s, u64 e) | ||
138 | { | ||
139 | return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS; | ||
140 | } | ||
135 | 141 | ||
136 | #define BIOS_BEGIN 0x000a0000 | 142 | #endif /* __KERNEL__ */ |
137 | #define BIOS_END 0x00100000 | 143 | #endif /* __ASSEMBLY__ */ |
138 | 144 | ||
139 | #ifdef __KERNEL__ | 145 | #ifdef __KERNEL__ |
140 | #include <linux/ioport.h> | 146 | #include <linux/ioport.h> |
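Note that the macro-to-inline conversion above also tightens the types and treats the range as half-open, so an exclusive end equal to ISA_END_ADDRESS is now accepted (the old macro tested e < ISA_END_ADDRESS). A standalone sketch of the new predicate:

#include <stdio.h>
#include <stdint.h>

#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS   0x100000

/* As in the hunk above: [s,e) completely inside the ISA region. */
static int is_ISA_range_demo(uint64_t s, uint64_t e)
{
        return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
}

int main(void)
{
        /* The whole legacy VGA/BIOS window, expressed half-open. */
        printf("%d\n", is_ISA_range_demo(0xa0000, 0x100000));   /* 1 */
        printf("%d\n", is_ISA_range_demo(0x9f000, 0xa1000));    /* 0 */
        return 0;
}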
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 456a304b8172..f2ad2163109d 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -157,19 +157,6 @@ do { \ | |||
157 | 157 | ||
158 | #define compat_elf_check_arch(x) elf_check_arch_ia32(x) | 158 | #define compat_elf_check_arch(x) elf_check_arch_ia32(x) |
159 | 159 | ||
160 | static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp) | ||
161 | { | ||
162 | loadsegment(fs, 0); | ||
163 | loadsegment(ds, __USER32_DS); | ||
164 | loadsegment(es, __USER32_DS); | ||
165 | load_gs_index(0); | ||
166 | regs->ip = ip; | ||
167 | regs->sp = sp; | ||
168 | regs->flags = X86_EFLAGS_IF; | ||
169 | regs->cs = __USER32_CS; | ||
170 | regs->ss = __USER32_DS; | ||
171 | } | ||
172 | |||
173 | static inline void elf_common_init(struct thread_struct *t, | 160 | static inline void elf_common_init(struct thread_struct *t, |
174 | struct pt_regs *regs, const u16 ds) | 161 | struct pt_regs *regs, const u16 ds) |
175 | { | 162 | { |
@@ -183,28 +170,16 @@ static inline void elf_common_init(struct thread_struct *t, | |||
183 | } | 170 | } |
184 | 171 | ||
185 | #define ELF_PLAT_INIT(_r, load_addr) \ | 172 | #define ELF_PLAT_INIT(_r, load_addr) \ |
186 | do { \ | 173 | elf_common_init(¤t->thread, _r, 0) |
187 | elf_common_init(¤t->thread, _r, 0); \ | ||
188 | clear_thread_flag(TIF_IA32); \ | ||
189 | } while (0) | ||
190 | 174 | ||
191 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ | 175 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ |
192 | elf_common_init(¤t->thread, regs, __USER_DS) | 176 | elf_common_init(¤t->thread, regs, __USER_DS) |
193 | 177 | ||
194 | #define compat_start_thread(regs, ip, sp) \ | 178 | void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp); |
195 | do { \ | 179 | #define compat_start_thread start_thread_ia32 |
196 | start_ia32_thread(regs, ip, sp); \ | ||
197 | set_fs(USER_DS); \ | ||
198 | } while (0) | ||
199 | 180 | ||
200 | #define COMPAT_SET_PERSONALITY(ex) \ | 181 | void set_personality_ia32(void); |
201 | do { \ | 182 | #define COMPAT_SET_PERSONALITY(ex) set_personality_ia32() |
202 | if (test_thread_flag(TIF_IA32)) \ | ||
203 | clear_thread_flag(TIF_ABI_PENDING); \ | ||
204 | else \ | ||
205 | set_thread_flag(TIF_ABI_PENDING); \ | ||
206 | current->personality |= force_personality32; \ | ||
207 | } while (0) | ||
208 | 183 | ||
209 | #define COMPAT_ELF_PLATFORM ("i686") | 184 | #define COMPAT_ELF_PLATFORM ("i686") |
210 | 185 | ||
@@ -255,7 +230,6 @@ extern int force_personality32; | |||
255 | #endif /* !CONFIG_X86_32 */ | 230 | #endif /* !CONFIG_X86_32 */ |
256 | 231 | ||
257 | #define CORE_DUMP_USE_REGSET | 232 | #define CORE_DUMP_USE_REGSET |
258 | #define USE_ELF_CORE_DUMP | ||
259 | #define ELF_EXEC_PAGESIZE 4096 | 233 | #define ELF_EXEC_PAGESIZE 4096 |
260 | 234 | ||
261 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | 235 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical |
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 19e22e3784d0..5d07dea2ebb8 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -35,7 +35,7 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7, | |||
35 | smp_invalidate_interrupt) | 35 | smp_invalidate_interrupt) |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR) | 38 | BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR) |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * every pentium local APIC has two 'local interrupts', with a | 41 | * every pentium local APIC has two 'local interrupts', with a |
diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h
index 53018464aea6..2519d0679d99 100644
--- a/arch/x86/include/asm/fb.h
+++ b/arch/x86/include/asm/fb.h
@@ -12,10 +12,6 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | |||
12 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; | 12 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; |
13 | } | 13 | } |
14 | 14 | ||
15 | #ifdef CONFIG_X86_32 | ||
16 | extern int fb_is_primary_device(struct fb_info *info); | 15 | extern int fb_is_primary_device(struct fb_info *info); |
17 | #else | ||
18 | static inline int fb_is_primary_device(struct fb_info *info) { return 0; } | ||
19 | #endif | ||
20 | 16 | ||
21 | #endif /* _ASM_X86_FB_H */ | 17 | #endif /* _ASM_X86_FB_H */ |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 14f9890eb495..d07b44f7d1dc 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -82,6 +82,9 @@ enum fixed_addresses { | |||
82 | #endif | 82 | #endif |
83 | FIX_DBGP_BASE, | 83 | FIX_DBGP_BASE, |
84 | FIX_EARLYCON_MEM_BASE, | 84 | FIX_EARLYCON_MEM_BASE, |
85 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
86 | FIX_OHCI1394_BASE, | ||
87 | #endif | ||
85 | #ifdef CONFIG_X86_LOCAL_APIC | 88 | #ifdef CONFIG_X86_LOCAL_APIC |
86 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | 89 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ |
87 | #endif | 90 | #endif |
@@ -118,17 +121,20 @@ enum fixed_addresses { | |||
118 | * 256 temporary boot-time mappings, used by early_ioremap(), | 121 | * 256 temporary boot-time mappings, used by early_ioremap(), |
119 | * before ioremap() is functional. | 122 | * before ioremap() is functional. |
120 | * | 123 | * |
121 | * We round it up to the next 256 pages boundary so that we | 124 | * If necessary we round it up to the next 256 pages boundary so |
122 | * can have a single pgd entry and a single pte table: | 125 | * that we can have a single pgd entry and a single pte table: |
123 | */ | 126 | */ |
124 | #define NR_FIX_BTMAPS 64 | 127 | #define NR_FIX_BTMAPS 64 |
125 | #define FIX_BTMAPS_SLOTS 4 | 128 | #define FIX_BTMAPS_SLOTS 4 |
126 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - | 129 | #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) |
127 | (__end_of_permanent_fixed_addresses & 255), | 130 | FIX_BTMAP_END = |
128 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, | 131 | (__end_of_permanent_fixed_addresses ^ |
129 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | 132 | (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) & |
130 | FIX_OHCI1394_BASE, | 133 | -PTRS_PER_PTE |
131 | #endif | 134 | ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - |
135 | (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1)) | ||
136 | : __end_of_permanent_fixed_addresses, | ||
137 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, | ||
132 | #ifdef CONFIG_X86_32 | 138 | #ifdef CONFIG_X86_32 |
133 | FIX_WP_TEST, | 139 | FIX_WP_TEST, |
134 | #endif | 140 | #endif |
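The FIX_BTMAP_END expression above only rounds up when the TOTAL_FIX_BTMAPS boot-time slots would otherwise straddle a pte-table boundary: the XOR is nonzero after masking with -PTRS_PER_PTE exactly when the first and last slot land in different pte tables. A sketch of the same test as a plain function, with PTRS_PER_PTE assumed to be 512 for the demo:

#include <stdio.h>

#define PTRS_PER_PTE     512    /* assumed, PAE-style page table */
#define TOTAL_FIX_BTMAPS 256    /* NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS */

/* Mirror of the FIX_BTMAP_END expression, as a function of 'end'. */
static unsigned btmap_end(unsigned end)
{
        /* High bits differ iff end and end+255 use different pte tables. */
        if ((end ^ (end + TOTAL_FIX_BTMAPS - 1)) & -PTRS_PER_PTE)
                return end + TOTAL_FIX_BTMAPS -
                       (end & (TOTAL_FIX_BTMAPS - 1));
        return end;
}

int main(void)
{
        printf("%u\n", btmap_end(100)); /* 100..355 crosses nothing: 100 */
        printf("%u\n", btmap_end(400)); /* 400..655 crosses 512: -> 512 */
        return 0;
}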
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 6cfdafa409d8..4ac5b0f33fc1 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -35,8 +35,7 @@ extern int gart_iommu_aperture_allowed; | |||
35 | extern int gart_iommu_aperture_disabled; | 35 | extern int gart_iommu_aperture_disabled; |
36 | 36 | ||
37 | extern void early_gart_iommu_check(void); | 37 | extern void early_gart_iommu_check(void); |
38 | extern void gart_iommu_init(void); | 38 | extern int gart_iommu_init(void); |
39 | extern void gart_iommu_shutdown(void); | ||
40 | extern void __init gart_parse_options(char *); | 39 | extern void __init gart_parse_options(char *); |
41 | extern void gart_iommu_hole_init(void); | 40 | extern void gart_iommu_hole_init(void); |
42 | 41 | ||
@@ -48,12 +47,6 @@ extern void gart_iommu_hole_init(void); | |||
48 | static inline void early_gart_iommu_check(void) | 47 | static inline void early_gart_iommu_check(void) |
49 | { | 48 | { |
50 | } | 49 | } |
51 | static inline void gart_iommu_init(void) | ||
52 | { | ||
53 | } | ||
54 | static inline void gart_iommu_shutdown(void) | ||
55 | { | ||
56 | } | ||
57 | static inline void gart_parse_options(char *options) | 50 | static inline void gart_parse_options(char *options) |
58 | { | 51 | { |
59 | } | 52 | } |
diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h
index ad3c2ed75481..7cd73552a4e8 100644
--- a/arch/x86/include/asm/geode.h
+++ b/arch/x86/include/asm/geode.h
@@ -12,160 +12,7 @@ | |||
12 | 12 | ||
13 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | 15 | #include <linux/cs5535.h> | |
16 | /* Generic southbridge functions */ | ||
17 | |||
18 | #define GEODE_DEV_PMS 0 | ||
19 | #define GEODE_DEV_ACPI 1 | ||
20 | #define GEODE_DEV_GPIO 2 | ||
21 | #define GEODE_DEV_MFGPT 3 | ||
22 | |||
23 | extern int geode_get_dev_base(unsigned int dev); | ||
24 | |||
25 | /* Useful macros */ | ||
26 | #define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS) | ||
27 | #define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI) | ||
28 | #define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO) | ||
29 | #define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT) | ||
30 | |||
31 | /* MSRS */ | ||
32 | |||
33 | #define MSR_GLIU_P2D_RO0 0x10000029 | ||
34 | |||
35 | #define MSR_LX_GLD_MSR_CONFIG 0x48002001 | ||
36 | #define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data | ||
37 | * sheet has the wrong value */ | ||
38 | #define MSR_GLCP_SYS_RSTPLL 0x4C000014 | ||
39 | #define MSR_GLCP_DOTPLL 0x4C000015 | ||
40 | |||
41 | #define MSR_LBAR_SMB 0x5140000B | ||
42 | #define MSR_LBAR_GPIO 0x5140000C | ||
43 | #define MSR_LBAR_MFGPT 0x5140000D | ||
44 | #define MSR_LBAR_ACPI 0x5140000E | ||
45 | #define MSR_LBAR_PMS 0x5140000F | ||
46 | |||
47 | #define MSR_DIVIL_SOFT_RESET 0x51400017 | ||
48 | |||
49 | #define MSR_PIC_YSEL_LOW 0x51400020 | ||
50 | #define MSR_PIC_YSEL_HIGH 0x51400021 | ||
51 | #define MSR_PIC_ZSEL_LOW 0x51400022 | ||
52 | #define MSR_PIC_ZSEL_HIGH 0x51400023 | ||
53 | #define MSR_PIC_IRQM_LPC 0x51400025 | ||
54 | |||
55 | #define MSR_MFGPT_IRQ 0x51400028 | ||
56 | #define MSR_MFGPT_NR 0x51400029 | ||
57 | #define MSR_MFGPT_SETUP 0x5140002B | ||
58 | |||
59 | #define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ | ||
60 | |||
61 | #define MSR_GX_GLD_MSR_CONFIG 0xC0002001 | ||
62 | #define MSR_GX_MSR_PADSEL 0xC0002011 | ||
63 | |||
64 | /* Resource Sizes */ | ||
65 | |||
66 | #define LBAR_GPIO_SIZE 0xFF | ||
67 | #define LBAR_MFGPT_SIZE 0x40 | ||
68 | #define LBAR_ACPI_SIZE 0x40 | ||
69 | #define LBAR_PMS_SIZE 0x80 | ||
70 | |||
71 | /* ACPI registers (PMS block) */ | ||
72 | |||
73 | /* | ||
74 | * PM1_EN is only valid when VSA is enabled for 16 bit reads. | ||
75 | * When VSA is not enabled, *always* read both PM1_STS and PM1_EN | ||
76 | * with a 32 bit read at offset 0x0 | ||
77 | */ | ||
78 | |||
79 | #define PM1_STS 0x00 | ||
80 | #define PM1_EN 0x02 | ||
81 | #define PM1_CNT 0x08 | ||
82 | #define PM2_CNT 0x0C | ||
83 | #define PM_TMR 0x10 | ||
84 | #define PM_GPE0_STS 0x18 | ||
85 | #define PM_GPE0_EN 0x1C | ||
86 | |||
87 | /* PMC registers (PMS block) */ | ||
88 | |||
89 | #define PM_SSD 0x00 | ||
90 | #define PM_SCXA 0x04 | ||
91 | #define PM_SCYA 0x08 | ||
92 | #define PM_OUT_SLPCTL 0x0C | ||
93 | #define PM_SCLK 0x10 | ||
94 | #define PM_SED 0x14 | ||
95 | #define PM_SCXD 0x18 | ||
96 | #define PM_SCYD 0x1C | ||
97 | #define PM_IN_SLPCTL 0x20 | ||
98 | #define PM_WKD 0x30 | ||
99 | #define PM_WKXD 0x34 | ||
100 | #define PM_RD 0x38 | ||
101 | #define PM_WKXA 0x3C | ||
102 | #define PM_FSD 0x40 | ||
103 | #define PM_TSD 0x44 | ||
104 | #define PM_PSD 0x48 | ||
105 | #define PM_NWKD 0x4C | ||
106 | #define PM_AWKD 0x50 | ||
107 | #define PM_SSC 0x54 | ||
108 | |||
109 | /* VSA2 magic values */ | ||
110 | |||
111 | #define VSA_VRC_INDEX 0xAC1C | ||
112 | #define VSA_VRC_DATA 0xAC1E | ||
113 | #define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ | ||
114 | #define VSA_VR_SIGNATURE 0x0003 | ||
115 | #define VSA_VR_MEM_SIZE 0x0200 | ||
116 | #define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ | ||
117 | #define GSW_VSA_SIG 0x534d /* General Software signature */ | ||
118 | /* GPIO */ | ||
119 | |||
120 | #define GPIO_OUTPUT_VAL 0x00 | ||
121 | #define GPIO_OUTPUT_ENABLE 0x04 | ||
122 | #define GPIO_OUTPUT_OPEN_DRAIN 0x08 | ||
123 | #define GPIO_OUTPUT_INVERT 0x0C | ||
124 | #define GPIO_OUTPUT_AUX1 0x10 | ||
125 | #define GPIO_OUTPUT_AUX2 0x14 | ||
126 | #define GPIO_PULL_UP 0x18 | ||
127 | #define GPIO_PULL_DOWN 0x1C | ||
128 | #define GPIO_INPUT_ENABLE 0x20 | ||
129 | #define GPIO_INPUT_INVERT 0x24 | ||
130 | #define GPIO_INPUT_FILTER 0x28 | ||
131 | #define GPIO_INPUT_EVENT_COUNT 0x2C | ||
132 | #define GPIO_READ_BACK 0x30 | ||
133 | #define GPIO_INPUT_AUX1 0x34 | ||
134 | #define GPIO_EVENTS_ENABLE 0x38 | ||
135 | #define GPIO_LOCK_ENABLE 0x3C | ||
136 | #define GPIO_POSITIVE_EDGE_EN 0x40 | ||
137 | #define GPIO_NEGATIVE_EDGE_EN 0x44 | ||
138 | #define GPIO_POSITIVE_EDGE_STS 0x48 | ||
139 | #define GPIO_NEGATIVE_EDGE_STS 0x4C | ||
140 | |||
141 | #define GPIO_MAP_X 0xE0 | ||
142 | #define GPIO_MAP_Y 0xE4 | ||
143 | #define GPIO_MAP_Z 0xE8 | ||
144 | #define GPIO_MAP_W 0xEC | ||
145 | |||
146 | static inline u32 geode_gpio(unsigned int nr) | ||
147 | { | ||
148 | BUG_ON(nr > 28); | ||
149 | return 1 << nr; | ||
150 | } | ||
151 | |||
152 | extern void geode_gpio_set(u32, unsigned int); | ||
153 | extern void geode_gpio_clear(u32, unsigned int); | ||
154 | extern int geode_gpio_isset(u32, unsigned int); | ||
155 | extern void geode_gpio_setup_event(unsigned int, int, int); | ||
156 | extern void geode_gpio_set_irq(unsigned int, unsigned int); | ||
157 | |||
158 | static inline void geode_gpio_event_irq(unsigned int gpio, int pair) | ||
159 | { | ||
160 | geode_gpio_setup_event(gpio, pair, 0); | ||
161 | } | ||
162 | |||
163 | static inline void geode_gpio_event_pme(unsigned int gpio, int pair) | ||
164 | { | ||
165 | geode_gpio_setup_event(gpio, pair, 1); | ||
166 | } | ||
167 | |||
168 | /* Specific geode tests */ | ||
169 | 16 | ||
170 | static inline int is_geode_gx(void) | 17 | static inline int is_geode_gx(void) |
171 | { | 18 | { |
@@ -186,68 +33,4 @@ static inline int is_geode(void) | |||
186 | return (is_geode_gx() || is_geode_lx()); | 33 | return (is_geode_gx() || is_geode_lx()); |
187 | } | 34 | } |
188 | 35 | ||
189 | #ifdef CONFIG_MGEODE_LX | ||
190 | extern int geode_has_vsa2(void); | ||
191 | #else | ||
192 | static inline int geode_has_vsa2(void) | ||
193 | { | ||
194 | return 0; | ||
195 | } | ||
196 | #endif | ||
197 | |||
198 | /* MFGPTs */ | ||
199 | |||
200 | #define MFGPT_MAX_TIMERS 8 | ||
201 | #define MFGPT_TIMER_ANY (-1) | ||
202 | |||
203 | #define MFGPT_DOMAIN_WORKING 1 | ||
204 | #define MFGPT_DOMAIN_STANDBY 2 | ||
205 | #define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) | ||
206 | |||
207 | #define MFGPT_CMP1 0 | ||
208 | #define MFGPT_CMP2 1 | ||
209 | |||
210 | #define MFGPT_EVENT_IRQ 0 | ||
211 | #define MFGPT_EVENT_NMI 1 | ||
212 | #define MFGPT_EVENT_RESET 3 | ||
213 | |||
214 | #define MFGPT_REG_CMP1 0 | ||
215 | #define MFGPT_REG_CMP2 2 | ||
216 | #define MFGPT_REG_COUNTER 4 | ||
217 | #define MFGPT_REG_SETUP 6 | ||
218 | |||
219 | #define MFGPT_SETUP_CNTEN (1 << 15) | ||
220 | #define MFGPT_SETUP_CMP2 (1 << 14) | ||
221 | #define MFGPT_SETUP_CMP1 (1 << 13) | ||
222 | #define MFGPT_SETUP_SETUP (1 << 12) | ||
223 | #define MFGPT_SETUP_STOPEN (1 << 11) | ||
224 | #define MFGPT_SETUP_EXTEN (1 << 10) | ||
225 | #define MFGPT_SETUP_REVEN (1 << 5) | ||
226 | #define MFGPT_SETUP_CLKSEL (1 << 4) | ||
227 | |||
228 | static inline void geode_mfgpt_write(int timer, u16 reg, u16 value) | ||
229 | { | ||
230 | u32 base = geode_get_dev_base(GEODE_DEV_MFGPT); | ||
231 | outw(value, base + reg + (timer * 8)); | ||
232 | } | ||
233 | |||
234 | static inline u16 geode_mfgpt_read(int timer, u16 reg) | ||
235 | { | ||
236 | u32 base = geode_get_dev_base(GEODE_DEV_MFGPT); | ||
237 | return inw(base + reg + (timer * 8)); | ||
238 | } | ||
239 | |||
240 | extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); | ||
241 | extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable); | ||
242 | extern int geode_mfgpt_alloc_timer(int timer, int domain); | ||
243 | |||
244 | #define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) | ||
245 | #define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0) | ||
246 | |||
247 | #ifdef CONFIG_GEODE_MFGPT_TIMER | ||
248 | extern int __init mfgpt_timer_setup(void); | ||
249 | #else | ||
250 | static inline int mfgpt_timer_setup(void) { return 0; } | ||
251 | #endif | ||
252 | |||
253 | #endif /* _ASM_X86_GEODE_H */ | 36 | #endif /* _ASM_X86_GEODE_H */ |
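The MFGPT accessors removed here (the functionality moves behind linux/cs5535.h) address each timer's registers at base + reg + timer*8, i.e. eight bytes of I/O space per timer. A sketch of just that address math, with a hypothetical base and no real port I/O:

#include <stdio.h>
#include <stdint.h>

#define MFGPT_REG_CMP1    0
#define MFGPT_REG_CMP2    2
#define MFGPT_REG_COUNTER 4
#define MFGPT_REG_SETUP   6

/* Address computation from geode_mfgpt_read/write above. */
static uint32_t mfgpt_port(uint32_t base, int timer, uint16_t reg)
{
        return base + reg + (uint32_t)timer * 8;
}

int main(void)
{
        uint32_t base = 0x6200; /* hypothetical LBAR_MFGPT value, demo only */

        printf("timer0 SETUP:   %#x\n", mfgpt_port(base, 0, MFGPT_REG_SETUP));
        printf("timer3 COUNTER: %#x\n", mfgpt_port(base, 3, MFGPT_REG_COUNTER));
        return 0;
}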
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 82e3e8f01043..0f8576427cfe 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -12,7 +12,7 @@ typedef struct { | |||
12 | unsigned int apic_timer_irqs; /* arch dependent */ | 12 | unsigned int apic_timer_irqs; /* arch dependent */ |
13 | unsigned int irq_spurious_count; | 13 | unsigned int irq_spurious_count; |
14 | #endif | 14 | #endif |
15 | unsigned int generic_irqs; /* arch dependent */ | 15 | unsigned int x86_platform_ipis; /* arch dependent */ |
16 | unsigned int apic_perf_irqs; | 16 | unsigned int apic_perf_irqs; |
17 | unsigned int apic_pending_irqs; | 17 | unsigned int apic_pending_irqs; |
18 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
@@ -20,11 +20,11 @@ typedef struct { | |||
20 | unsigned int irq_call_count; | 20 | unsigned int irq_call_count; |
21 | unsigned int irq_tlb_count; | 21 | unsigned int irq_tlb_count; |
22 | #endif | 22 | #endif |
23 | #ifdef CONFIG_X86_MCE | 23 | #ifdef CONFIG_X86_THERMAL_VECTOR |
24 | unsigned int irq_thermal_count; | 24 | unsigned int irq_thermal_count; |
25 | # ifdef CONFIG_X86_MCE_THRESHOLD | 25 | #endif |
26 | #ifdef CONFIG_X86_MCE_THRESHOLD | ||
26 | unsigned int irq_threshold_count; | 27 | unsigned int irq_threshold_count; |
27 | # endif | ||
28 | #endif | 28 | #endif |
29 | } ____cacheline_aligned irq_cpustat_t; | 29 | } ____cacheline_aligned irq_cpustat_t; |
30 | 30 | ||
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 014c2b85ae45..a726650fc80f 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); | |||
66 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | 66 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); |
67 | struct page *kmap_atomic_to_page(void *ptr); | 67 | struct page *kmap_atomic_to_page(void *ptr); |
68 | 68 | ||
69 | #ifndef CONFIG_PARAVIRT | ||
70 | #define kmap_atomic_pte(page, type) kmap_atomic(page, type) | ||
71 | #endif | ||
72 | |||
73 | #define flush_cache_kmaps() do { } while (0) | 69 | #define flush_cache_kmaps() do { } while (0) |
74 | 70 | ||
75 | extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, | 71 | extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, |
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1c22cb05ad6a..1d5c08a1bdfd 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -65,11 +65,13 @@ | |||
65 | /* hpet memory map physical address */ | 65 | /* hpet memory map physical address */ |
66 | extern unsigned long hpet_address; | 66 | extern unsigned long hpet_address; |
67 | extern unsigned long force_hpet_address; | 67 | extern unsigned long force_hpet_address; |
68 | extern u8 hpet_blockid; | ||
68 | extern int hpet_force_user; | 69 | extern int hpet_force_user; |
70 | extern u8 hpet_msi_disable; | ||
69 | extern int is_hpet_enabled(void); | 71 | extern int is_hpet_enabled(void); |
70 | extern int hpet_enable(void); | 72 | extern int hpet_enable(void); |
71 | extern void hpet_disable(void); | 73 | extern void hpet_disable(void); |
72 | extern unsigned long hpet_readl(unsigned long a); | 74 | extern unsigned int hpet_readl(unsigned int a); |
73 | extern void force_hpet_resume(void); | 75 | extern void force_hpet_resume(void); |
74 | 76 | ||
75 | extern void hpet_msi_unmask(unsigned int irq); | 77 | extern void hpet_msi_unmask(unsigned int irq); |
@@ -78,9 +80,9 @@ extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); | |||
78 | extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); | 80 | extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); |
79 | 81 | ||
80 | #ifdef CONFIG_PCI_MSI | 82 | #ifdef CONFIG_PCI_MSI |
81 | extern int arch_setup_hpet_msi(unsigned int irq); | 83 | extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); |
82 | #else | 84 | #else |
83 | static inline int arch_setup_hpet_msi(unsigned int irq) | 85 | static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id) |
84 | { | 86 | { |
85 | return -EINVAL; | 87 | return -EINVAL; |
86 | } | 88 | } |
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..2a1bd8f4f23a
--- /dev/null
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -0,0 +1,72 @@ | |||
1 | #ifndef _I386_HW_BREAKPOINT_H | ||
2 | #define _I386_HW_BREAKPOINT_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #define __ARCH_HW_BREAKPOINT_H | ||
6 | |||
7 | /* | ||
8 | * The naming should probably be dealt with at | ||
9 | * a higher level, when interacting with the user | ||
10 | * (display/resolving). | ||
11 | */ | ||
12 | struct arch_hw_breakpoint { | ||
13 | unsigned long address; | ||
14 | u8 len; | ||
15 | u8 type; | ||
16 | }; | ||
17 | |||
18 | #include <linux/kdebug.h> | ||
19 | #include <linux/percpu.h> | ||
20 | #include <linux/list.h> | ||
21 | |||
22 | /* Available HW breakpoint length encodings */ | ||
23 | #define X86_BREAKPOINT_LEN_1 0x40 | ||
24 | #define X86_BREAKPOINT_LEN_2 0x44 | ||
25 | #define X86_BREAKPOINT_LEN_4 0x4c | ||
26 | #define X86_BREAKPOINT_LEN_EXECUTE 0x40 | ||
27 | |||
28 | #ifdef CONFIG_X86_64 | ||
29 | #define X86_BREAKPOINT_LEN_8 0x48 | ||
30 | #endif | ||
31 | |||
32 | /* Available HW breakpoint type encodings */ | ||
33 | |||
34 | /* trigger on instruction execute */ | ||
35 | #define X86_BREAKPOINT_EXECUTE 0x80 | ||
36 | /* trigger on memory write */ | ||
37 | #define X86_BREAKPOINT_WRITE 0x81 | ||
38 | /* trigger on memory read or write */ | ||
39 | #define X86_BREAKPOINT_RW 0x83 | ||
40 | |||
41 | /* Total number of available HW breakpoint registers */ | ||
42 | #define HBP_NUM 4 | ||
43 | |||
44 | struct perf_event; | ||
45 | struct pmu; | ||
46 | |||
47 | extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len); | ||
48 | extern int arch_validate_hwbkpt_settings(struct perf_event *bp, | ||
49 | struct task_struct *tsk); | ||
50 | extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, | ||
51 | unsigned long val, void *data); | ||
52 | |||
53 | |||
54 | int arch_install_hw_breakpoint(struct perf_event *bp); | ||
55 | void arch_uninstall_hw_breakpoint(struct perf_event *bp); | ||
56 | void hw_breakpoint_pmu_read(struct perf_event *bp); | ||
57 | void hw_breakpoint_pmu_unthrottle(struct perf_event *bp); | ||
58 | |||
59 | extern void | ||
60 | arch_fill_perf_breakpoint(struct perf_event *bp); | ||
61 | |||
62 | unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type); | ||
63 | int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type); | ||
64 | |||
65 | extern int arch_bp_generic_fields(int x86_len, int x86_type, | ||
66 | int *gen_len, int *gen_type); | ||
67 | |||
68 | extern struct pmu perf_ops_bp; | ||
69 | |||
70 | #endif /* __KERNEL__ */ | ||
71 | #endif /* _I386_HW_BREAKPOINT_H */ | ||
72 | |||
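The length values above appear to carry the DR7 length field in bits 2-3 (00=1, 01=2, 10=8, 11=4 bytes) with 0x40 acting as a marker bit; that reading is inferred from the values themselves, not stated in the header. A sketch decoding them under that assumption:

#include <stdio.h>

#define X86_BREAKPOINT_LEN_1 0x40
#define X86_BREAKPOINT_LEN_2 0x44
#define X86_BREAKPOINT_LEN_4 0x4c
#define X86_BREAKPOINT_LEN_8 0x48

/* Decode an X86_BREAKPOINT_LEN_* value to a byte count (assumed layout). */
static int bp_len_bytes(int len_code)
{
        switch ((len_code >> 2) & 3) {
        case 0:  return 1;
        case 1:  return 2;
        case 2:  return 8;      /* 64-bit kernels only */
        default: return 4;
        }
}

int main(void)
{
        printf("%d %d %d %d\n",
               bp_len_bytes(X86_BREAKPOINT_LEN_1),
               bp_len_bytes(X86_BREAKPOINT_LEN_2),
               bp_len_bytes(X86_BREAKPOINT_LEN_4),
               bp_len_bytes(X86_BREAKPOINT_LEN_8));     /* 1 2 4 8 */
        return 0;
}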
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 68900e7dada8..c17411503f28 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | /* Interrupt handlers registered during init_IRQ */ | 28 | /* Interrupt handlers registered during init_IRQ */ |
29 | extern void apic_timer_interrupt(void); | 29 | extern void apic_timer_interrupt(void); |
30 | extern void generic_interrupt(void); | 30 | extern void x86_platform_ipi(void); |
31 | extern void error_interrupt(void); | 31 | extern void error_interrupt(void); |
32 | extern void perf_pending_interrupt(void); | 32 | extern void perf_pending_interrupt(void); |
33 | 33 | ||
@@ -55,13 +55,6 @@ extern void call_function_single_interrupt(void); | |||
55 | 55 | ||
56 | extern void pull_timers_interrupt(void); | 56 | extern void pull_timers_interrupt(void); |
57 | 57 | ||
58 | /* PIC specific functions */ | ||
59 | extern void disable_8259A_irq(unsigned int irq); | ||
60 | extern void enable_8259A_irq(unsigned int irq); | ||
61 | extern int i8259A_irq_pending(unsigned int irq); | ||
62 | extern void make_8259A_irq(unsigned int irq); | ||
63 | extern void init_8259A(int aeoi); | ||
64 | |||
65 | /* IOAPIC */ | 58 | /* IOAPIC */ |
66 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) | 59 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) |
67 | extern unsigned long io_apic_irqs; | 60 | extern unsigned long io_apic_irqs; |
@@ -81,14 +74,33 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |||
81 | int ioapic, int ioapic_pin, | 74 | int ioapic, int ioapic_pin, |
82 | int trigger, int polarity) | 75 | int trigger, int polarity) |
83 | { | 76 | { |
84 | irq_attr->ioapic = ioapic; | 77 | irq_attr->ioapic = ioapic; |
85 | irq_attr->ioapic_pin = ioapic_pin; | 78 | irq_attr->ioapic_pin = ioapic_pin; |
86 | irq_attr->trigger = trigger; | 79 | irq_attr->trigger = trigger; |
87 | irq_attr->polarity = polarity; | 80 | irq_attr->polarity = polarity; |
88 | } | 81 | } |
89 | 82 | ||
90 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, | 83 | /* |
91 | struct io_apic_irq_attr *irq_attr); | 84 | * This is performance-critical, we want to do it O(1) |
85 | * | ||
86 | * Most irqs are mapped 1:1 with pins. | ||
87 | */ | ||
88 | struct irq_cfg { | ||
89 | struct irq_pin_list *irq_2_pin; | ||
90 | cpumask_var_t domain; | ||
91 | cpumask_var_t old_domain; | ||
92 | u8 vector; | ||
93 | u8 move_in_progress : 1; | ||
94 | }; | ||
95 | |||
96 | extern struct irq_cfg *irq_cfg(unsigned int); | ||
97 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); | ||
98 | extern void send_cleanup_vector(struct irq_cfg *); | ||
99 | |||
100 | struct irq_desc; | ||
101 | extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, | ||
102 | unsigned int *dest_id); | ||
103 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); | ||
92 | extern void setup_ioapic_dest(void); | 104 | extern void setup_ioapic_dest(void); |
93 | 105 | ||
94 | extern void enable_IO_APIC(void); | 106 | extern void enable_IO_APIC(void); |
@@ -103,7 +115,7 @@ extern void eisa_set_level_irq(unsigned int irq); | |||
103 | /* SMP */ | 115 | /* SMP */ |
104 | extern void smp_apic_timer_interrupt(struct pt_regs *); | 116 | extern void smp_apic_timer_interrupt(struct pt_regs *); |
105 | extern void smp_spurious_interrupt(struct pt_regs *); | 117 | extern void smp_spurious_interrupt(struct pt_regs *); |
106 | extern void smp_generic_interrupt(struct pt_regs *); | 118 | extern void smp_x86_platform_ipi(struct pt_regs *); |
107 | extern void smp_error_interrupt(struct pt_regs *); | 119 | extern void smp_error_interrupt(struct pt_regs *); |
108 | #ifdef CONFIG_X86_IO_APIC | 120 | #ifdef CONFIG_X86_IO_APIC |
109 | extern asmlinkage void smp_irq_move_cleanup_interrupt(void); | 121 | extern asmlinkage void smp_irq_move_cleanup_interrupt(void); |
@@ -124,6 +136,7 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); | |||
124 | 136 | ||
125 | typedef int vector_irq_t[NR_VECTORS]; | 137 | typedef int vector_irq_t[NR_VECTORS]; |
126 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | 138 | DECLARE_PER_CPU(vector_irq_t, vector_irq); |
139 | extern void setup_vector_irq(int cpu); | ||
127 | 140 | ||
128 | #ifdef CONFIG_X86_IO_APIC | 141 | #ifdef CONFIG_X86_IO_APIC |
129 | extern void lock_vector_lock(void); | 142 | extern void lock_vector_lock(void); |
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
new file mode 100644
index 000000000000..e153a2b3889a
--- /dev/null
+++ b/arch/x86/include/asm/hyperv.h
@@ -0,0 +1,186 @@ | |||
1 | #ifndef _ASM_X86_KVM_HYPERV_H | ||
2 | #define _ASM_X86_KVM_HYPERV_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* | ||
7 | * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent | ||
8 | * is set by CPUID(HvCpuIdFunctionVersionAndFeatures). | ||
9 | */ | ||
10 | #define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 | ||
11 | #define HYPERV_CPUID_INTERFACE 0x40000001 | ||
12 | #define HYPERV_CPUID_VERSION 0x40000002 | ||
13 | #define HYPERV_CPUID_FEATURES 0x40000003 | ||
14 | #define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 | ||
15 | #define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 | ||
16 | |||
17 | /* | ||
18 | * Feature identification. EAX indicates which features are available | ||
19 | * to the partition based upon the current partition privileges. | ||
20 | */ | ||
21 | |||
22 | /* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ | ||
23 | #define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) | ||
24 | /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ | ||
25 | #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) | ||
26 | /* | ||
27 | * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM | ||
28 | * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available | ||
29 | */ | ||
30 | #define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2) | ||
31 | /* | ||
32 | * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through | ||
33 | * HV_X64_MSR_STIMER3_COUNT) available | ||
34 | */ | ||
35 | #define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3) | ||
36 | /* | ||
37 | * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) | ||
38 | * are available | ||
39 | */ | ||
40 | #define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4) | ||
41 | /* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ | ||
42 | #define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5) | ||
43 | /* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ | ||
44 | #define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6) | ||
45 | /* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ | ||
46 | #define HV_X64_MSR_RESET_AVAILABLE (1 << 7) | ||
47 | /* | ||
48 | * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, | ||
49 | * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, | ||
50 | * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available | ||
51 | */ | ||
52 | #define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) | ||
53 | |||
54 | /* | ||
55 | * Feature identification: EBX indicates which flags were specified at | ||
56 | * partition creation. The format is the same as the partition creation | ||
57 | * flag structure defined in section Partition Creation Flags. | ||
58 | */ | ||
59 | #define HV_X64_CREATE_PARTITIONS (1 << 0) | ||
60 | #define HV_X64_ACCESS_PARTITION_ID (1 << 1) | ||
61 | #define HV_X64_ACCESS_MEMORY_POOL (1 << 2) | ||
62 | #define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3) | ||
63 | #define HV_X64_POST_MESSAGES (1 << 4) | ||
64 | #define HV_X64_SIGNAL_EVENTS (1 << 5) | ||
65 | #define HV_X64_CREATE_PORT (1 << 6) | ||
66 | #define HV_X64_CONNECT_PORT (1 << 7) | ||
67 | #define HV_X64_ACCESS_STATS (1 << 8) | ||
68 | #define HV_X64_DEBUGGING (1 << 11) | ||
69 | #define HV_X64_CPU_POWER_MANAGEMENT (1 << 12) | ||
70 | #define HV_X64_CONFIGURE_PROFILER (1 << 13) | ||
71 | |||
72 | /* | ||
73 | * Feature identification. EDX indicates which miscellaneous features | ||
74 | * are available to the partition. | ||
75 | */ | ||
76 | /* The MWAIT instruction is available (per section MONITOR / MWAIT) */ | ||
77 | #define HV_X64_MWAIT_AVAILABLE (1 << 0) | ||
78 | /* Guest debugging support is available */ | ||
79 | #define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1) | ||
80 | /* Performance Monitor support is available*/ | ||
81 | #define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2) | ||
82 | /* Support for physical CPU dynamic partitioning events is available*/ | ||
83 | #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3) | ||
84 | /* | ||
85 | * Support for passing hypercall input parameter block via XMM | ||
86 | * registers is available | ||
87 | */ | ||
88 | #define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) | ||
89 | /* Support for a virtual guest idle state is available */ | ||
90 | #define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) | ||
91 | |||
92 | /* | ||
93 | * Implementation recommendations. Indicates which behaviors the hypervisor | ||
94 | * recommends the OS implement for optimal performance. | ||
95 | */ | ||
96 | /* | ||
97 | * Recommend using hypercall for address space switches rather | ||
98 | * than MOV to CR3 instruction | ||
99 | */ | ||
100 | #define HV_X64_MWAIT_RECOMMENDED (1 << 0) | ||
101 | /* Recommend using hypercall for local TLB flushes rather | ||
102 | * than INVLPG or MOV to CR3 instructions */ | ||
103 | #define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1) | ||
104 | /* | ||
105 | * Recommend using hypercall for remote TLB flushes rather | ||
106 | * than inter-processor interrupts | ||
107 | */ | ||
108 | #define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2) | ||
109 | /* | ||
110 | * Recommend using MSRs for accessing APIC registers | ||
111 | * EOI, ICR and TPR rather than their memory-mapped counterparts | ||
112 | */ | ||
113 | #define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3) | ||
114 | /* Recommend using the hypervisor-provided MSR to initiate a system RESET */ | ||
115 | #define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4) | ||
116 | /* | ||
117 | * Recommend using relaxed timing for this partition. If used, | ||
118 | * the VM should disable any watchdog timeouts that rely on the | ||
119 | * timely delivery of external interrupts | ||
120 | */ | ||
121 | #define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) | ||
122 | |||
123 | /* MSR used to identify the guest OS. */ | ||
124 | #define HV_X64_MSR_GUEST_OS_ID 0x40000000 | ||
125 | |||
126 | /* MSR used to setup pages used to communicate with the hypervisor. */ | ||
127 | #define HV_X64_MSR_HYPERCALL 0x40000001 | ||
128 | |||
129 | /* MSR used to provide vcpu index */ | ||
130 | #define HV_X64_MSR_VP_INDEX 0x40000002 | ||
131 | |||
132 | /* Define the virtual APIC registers */ | ||
133 | #define HV_X64_MSR_EOI 0x40000070 | ||
134 | #define HV_X64_MSR_ICR 0x40000071 | ||
135 | #define HV_X64_MSR_TPR 0x40000072 | ||
136 | #define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073 | ||
137 | |||
138 | /* Define synthetic interrupt controller model specific registers. */ | ||
139 | #define HV_X64_MSR_SCONTROL 0x40000080 | ||
140 | #define HV_X64_MSR_SVERSION 0x40000081 | ||
141 | #define HV_X64_MSR_SIEFP 0x40000082 | ||
142 | #define HV_X64_MSR_SIMP 0x40000083 | ||
143 | #define HV_X64_MSR_EOM 0x40000084 | ||
144 | #define HV_X64_MSR_SINT0 0x40000090 | ||
145 | #define HV_X64_MSR_SINT1 0x40000091 | ||
146 | #define HV_X64_MSR_SINT2 0x40000092 | ||
147 | #define HV_X64_MSR_SINT3 0x40000093 | ||
148 | #define HV_X64_MSR_SINT4 0x40000094 | ||
149 | #define HV_X64_MSR_SINT5 0x40000095 | ||
150 | #define HV_X64_MSR_SINT6 0x40000096 | ||
151 | #define HV_X64_MSR_SINT7 0x40000097 | ||
152 | #define HV_X64_MSR_SINT8 0x40000098 | ||
153 | #define HV_X64_MSR_SINT9 0x40000099 | ||
154 | #define HV_X64_MSR_SINT10 0x4000009A | ||
155 | #define HV_X64_MSR_SINT11 0x4000009B | ||
156 | #define HV_X64_MSR_SINT12 0x4000009C | ||
157 | #define HV_X64_MSR_SINT13 0x4000009D | ||
158 | #define HV_X64_MSR_SINT14 0x4000009E | ||
159 | #define HV_X64_MSR_SINT15 0x4000009F | ||
160 | |||
161 | |||
162 | #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 | ||
163 | #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 | ||
164 | #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ | ||
165 | (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) | ||
166 | |||
167 | /* Declare the various hypercall operations. */ | ||
168 | #define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008 | ||
169 | |||
170 | #define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001 | ||
171 | #define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12 | ||
172 | #define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \ | ||
173 | (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) | ||
174 | |||
175 | #define HV_PROCESSOR_POWER_STATE_C0 0 | ||
176 | #define HV_PROCESSOR_POWER_STATE_C1 1 | ||
177 | #define HV_PROCESSOR_POWER_STATE_C2 2 | ||
178 | #define HV_PROCESSOR_POWER_STATE_C3 3 | ||
179 | |||
180 | /* hypercall status code */ | ||
181 | #define HV_STATUS_SUCCESS 0 | ||
182 | #define HV_STATUS_INVALID_HYPERCALL_CODE 2 | ||
183 | #define HV_STATUS_INVALID_HYPERCALL_INPUT 3 | ||
184 | #define HV_STATUS_INVALID_ALIGNMENT 4 | ||
185 | |||
186 | #endif | ||
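These leaves live in the 0x40000000 hypervisor CPUID range, which is only meaningful once leaf 1's ECX bit 31 ("hypervisor present") is set. A hedged GCC/Clang user-space sketch of probing the vendor leaf on x86 (only the leaf number comes from this header; everything else is illustrative):

#include <stdio.h>
#include <string.h>
#include <cpuid.h>      /* GCC/Clang helper, x86 only */

#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000

int main(void)
{
        unsigned eax, ebx, ecx, edx;
        char vendor[13];

        /* Leaf 1, ECX bit 31: hypervisor present (always 0 on bare metal). */
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) || !(ecx & (1u << 31))) {
                puts("no hypervisor");
                return 1;
        }

        /* 0x40000000 is outside __get_cpuid's range check; query directly. */
        __cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, eax, ebx, ecx, edx);
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &ecx, 4);
        memcpy(vendor + 8, &edx, 4);
        vendor[12] = '\0';

        /* A Hyper-V guest typically reports "Microsoft Hv" here. */
        printf("max leaf %#x, vendor \"%s\"\n", eax, vendor);
        return 0;
}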
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 0b20bbb758f2..da2930924501 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -10,6 +10,8 @@ | |||
10 | #ifndef _ASM_X86_I387_H | 10 | #ifndef _ASM_X86_I387_H |
11 | #define _ASM_X86_I387_H | 11 | #define _ASM_X86_I387_H |
12 | 12 | ||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
13 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
14 | #include <linux/kernel_stat.h> | 16 | #include <linux/kernel_stat.h> |
15 | #include <linux/regset.h> | 17 | #include <linux/regset.h> |
@@ -31,8 +33,16 @@ extern void init_thread_xstate(void); | |||
31 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); | 33 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); |
32 | 34 | ||
33 | extern user_regset_active_fn fpregs_active, xfpregs_active; | 35 | extern user_regset_active_fn fpregs_active, xfpregs_active; |
34 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; | 36 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get, |
35 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; | 37 | xstateregs_get; |
38 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set, | ||
39 | xstateregs_set; | ||
40 | |||
41 | /* | ||
42 | * xstateregs_active == fpregs_active. Please refer to the comment | ||
43 | * at the definition of fpregs_active. | ||
44 | */ | ||
45 | #define xstateregs_active fpregs_active | ||
36 | 46 | ||
37 | extern struct _fpx_sw_bytes fx_sw_reserved; | 47 | extern struct _fpx_sw_bytes fx_sw_reserved; |
38 | #ifdef CONFIG_IA32_EMULATION | 48 | #ifdef CONFIG_IA32_EMULATION |
@@ -411,4 +421,9 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) | |||
411 | } | 421 | } |
412 | } | 422 | } |
413 | 423 | ||
424 | #endif /* __ASSEMBLY__ */ | ||
425 | |||
426 | #define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5 | ||
427 | #define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5 | ||
428 | |||
414 | #endif /* _ASM_X86_I387_H */ | 429 | #endif /* _ASM_X86_I387_H */ |
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091eeb1f..1655147646aa 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,12 +24,7 @@ extern unsigned int cached_irq_mask; | |||
24 | #define SLAVE_ICW4_DEFAULT 0x01 | 24 | #define SLAVE_ICW4_DEFAULT 0x01 |
25 | #define PIC_ICW4_AEOI 2 | 25 | #define PIC_ICW4_AEOI 2 |
26 | 26 | ||
27 | extern spinlock_t i8259A_lock; | 27 | extern raw_spinlock_t i8259A_lock; |
28 | |||
29 | extern void init_8259A(int auto_eoi); | ||
30 | extern void enable_8259A_irq(unsigned int irq); | ||
31 | extern void disable_8259A_irq(unsigned int irq); | ||
32 | extern unsigned int startup_8259A_irq(unsigned int irq); | ||
33 | 28 | ||
34 | /* the PIC may need a careful delay on some platforms, hence specific calls */ | 29 | /* the PIC may need a careful delay on some platforms, hence specific calls */ |
35 | static inline unsigned char inb_pic(unsigned int port) | 30 | static inline unsigned char inb_pic(unsigned int port) |
@@ -57,7 +52,17 @@ static inline void outb_pic(unsigned char value, unsigned int port) | |||
57 | 52 | ||
58 | extern struct irq_chip i8259A_chip; | 53 | extern struct irq_chip i8259A_chip; |
59 | 54 | ||
60 | extern void mask_8259A(void); | 55 | struct legacy_pic { |
61 | extern void unmask_8259A(void); | 56 | int nr_legacy_irqs; |
57 | struct irq_chip *chip; | ||
58 | void (*mask_all)(void); | ||
59 | void (*restore_mask)(void); | ||
60 | void (*init)(int auto_eoi); | ||
61 | int (*irq_pending)(unsigned int irq); | ||
62 | void (*make_irq)(unsigned int irq); | ||
63 | }; | ||
64 | |||
65 | extern struct legacy_pic *legacy_pic; | ||
66 | extern struct legacy_pic null_legacy_pic; | ||
62 | 67 | ||
63 | #endif /* _ASM_X86_I8259_H */ | 68 | #endif /* _ASM_X86_I8259_H */ |
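struct legacy_pic turns the former direct 8259 entry points into an ops table, so a platform with no PIC can install no-op handlers (presumably what null_legacy_pic provides) and callers lose their #ifdef clutter. A standalone sketch of the pattern, with invented demo_* names:

#include <stdio.h>

struct demo_legacy_pic {
        int nr_legacy_irqs;
        void (*mask_all)(void);
        void (*init)(int auto_eoi);
};

static void noop_mask_all(void) { }
static void noop_init(int auto_eoi) { (void)auto_eoi; }

/* Null implementation: a PIC-less platform reports zero legacy IRQs. */
static struct demo_legacy_pic demo_null_pic = {
        .nr_legacy_irqs = 0,
        .mask_all       = noop_mask_all,
        .init           = noop_init,
};

static struct demo_legacy_pic *demo_pic = &demo_null_pic;

int main(void)
{
        demo_pic->init(0);      /* always safe to call, no #ifdef needed */
        printf("legacy irqs: %d\n", demo_pic->nr_legacy_irqs);
        return 0;
}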
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
new file mode 100644
index 000000000000..205b063e3e32
--- /dev/null
+++ b/arch/x86/include/asm/inat.h
@@ -0,0 +1,220 @@ | |||
1 | #ifndef _ASM_X86_INAT_H | ||
2 | #define _ASM_X86_INAT_H | ||
3 | /* | ||
4 | * x86 instruction attributes | ||
5 | * | ||
6 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | */ | ||
23 | #include <asm/inat_types.h> | ||
24 | |||
25 | /* | ||
26 | * Internal bits. Don't use bitmasks directly, because these bits are | ||
27 | * unstable. You should use checking functions. | ||
28 | */ | ||
29 | |||
30 | #define INAT_OPCODE_TABLE_SIZE 256 | ||
31 | #define INAT_GROUP_TABLE_SIZE 8 | ||
32 | |||
33 | /* Legacy last prefixes */ | ||
34 | #define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ | ||
35 | #define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */ | ||
36 | #define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */ | ||
37 | /* Other Legacy prefixes */ | ||
38 | #define INAT_PFX_LOCK 4 /* 0xF0 */ | ||
39 | #define INAT_PFX_CS 5 /* 0x2E */ | ||
40 | #define INAT_PFX_DS 6 /* 0x3E */ | ||
41 | #define INAT_PFX_ES 7 /* 0x26 */ | ||
42 | #define INAT_PFX_FS 8 /* 0x64 */ | ||
43 | #define INAT_PFX_GS 9 /* 0x65 */ | ||
44 | #define INAT_PFX_SS 10 /* 0x36 */ | ||
45 | #define INAT_PFX_ADDRSZ 11 /* 0x67 */ | ||
46 | /* x86-64 REX prefix */ | ||
47 | #define INAT_PFX_REX 12 /* 0x4X */ | ||
48 | /* AVX VEX prefixes */ | ||
49 | #define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */ | ||
50 | #define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */ | ||
51 | |||
52 | #define INAT_LSTPFX_MAX 3 | ||
53 | #define INAT_LGCPFX_MAX 11 | ||
54 | |||
55 | /* Immediate size */ | ||
56 | #define INAT_IMM_BYTE 1 | ||
57 | #define INAT_IMM_WORD 2 | ||
58 | #define INAT_IMM_DWORD 3 | ||
59 | #define INAT_IMM_QWORD 4 | ||
60 | #define INAT_IMM_PTR 5 | ||
61 | #define INAT_IMM_VWORD32 6 | ||
62 | #define INAT_IMM_VWORD 7 | ||
63 | |||
64 | /* Legacy prefix */ | ||
65 | #define INAT_PFX_OFFS 0 | ||
66 | #define INAT_PFX_BITS 4 | ||
67 | #define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1) | ||
68 | #define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS) | ||
69 | /* Escape opcodes */ | ||
70 | #define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS) | ||
71 | #define INAT_ESC_BITS 2 | ||
72 | #define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1) | ||
73 | #define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS) | ||
74 | /* Group opcodes (1-16) */ | ||
75 | #define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS) | ||
76 | #define INAT_GRP_BITS 5 | ||
77 | #define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1) | ||
78 | #define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS) | ||
79 | /* Immediates */ | ||
80 | #define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS) | ||
81 | #define INAT_IMM_BITS 3 | ||
82 | #define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS) | ||
83 | /* Flags */ | ||
84 | #define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS) | ||
85 | #define INAT_MODRM (1 << (INAT_FLAG_OFFS)) | ||
86 | #define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1)) | ||
87 | #define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2)) | ||
88 | #define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3)) | ||
89 | #define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4)) | ||
90 | #define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5)) | ||
91 | #define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6)) | ||
92 | /* Attribute making macros for attribute tables */ | ||
93 | #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) | ||
94 | #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) | ||
95 | #define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) | ||
96 | #define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) | ||
97 | |||
98 | /* Attribute search APIs */ | ||
99 | extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); | ||
100 | extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, | ||
101 | insn_byte_t last_pfx, | ||
102 | insn_attr_t esc_attr); | ||
103 | extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, | ||
104 | insn_byte_t last_pfx, | ||
105 | insn_attr_t esc_attr); | ||
106 | extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, | ||
107 | insn_byte_t vex_m, | ||
108 | insn_byte_t vex_pp); | ||
109 | |||
110 | /* Attribute checking functions */ | ||
111 | static inline int inat_is_legacy_prefix(insn_attr_t attr) | ||
112 | { | ||
113 | attr &= INAT_PFX_MASK; | ||
114 | return attr && attr <= INAT_LGCPFX_MAX; | ||
115 | } | ||
116 | |||
117 | static inline int inat_is_address_size_prefix(insn_attr_t attr) | ||
118 | { | ||
119 | return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ; | ||
120 | } | ||
121 | |||
122 | static inline int inat_is_operand_size_prefix(insn_attr_t attr) | ||
123 | { | ||
124 | return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ; | ||
125 | } | ||
126 | |||
127 | static inline int inat_is_rex_prefix(insn_attr_t attr) | ||
128 | { | ||
129 | return (attr & INAT_PFX_MASK) == INAT_PFX_REX; | ||
130 | } | ||
131 | |||
132 | static inline int inat_last_prefix_id(insn_attr_t attr) | ||
133 | { | ||
134 | if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX) | ||
135 | return 0; | ||
136 | else | ||
137 | return attr & INAT_PFX_MASK; | ||
138 | } | ||
139 | |||
140 | static inline int inat_is_vex_prefix(insn_attr_t attr) | ||
141 | { | ||
142 | attr &= INAT_PFX_MASK; | ||
143 | return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3; | ||
144 | } | ||
145 | |||
146 | static inline int inat_is_vex3_prefix(insn_attr_t attr) | ||
147 | { | ||
148 | return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3; | ||
149 | } | ||
150 | |||
151 | static inline int inat_is_escape(insn_attr_t attr) | ||
152 | { | ||
153 | return attr & INAT_ESC_MASK; | ||
154 | } | ||
155 | |||
156 | static inline int inat_escape_id(insn_attr_t attr) | ||
157 | { | ||
158 | return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS; | ||
159 | } | ||
160 | |||
161 | static inline int inat_is_group(insn_attr_t attr) | ||
162 | { | ||
163 | return attr & INAT_GRP_MASK; | ||
164 | } | ||
165 | |||
166 | static inline int inat_group_id(insn_attr_t attr) | ||
167 | { | ||
168 | return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS; | ||
169 | } | ||
170 | |||
171 | static inline int inat_group_common_attribute(insn_attr_t attr) | ||
172 | { | ||
173 | return attr & ~INAT_GRP_MASK; | ||
174 | } | ||
175 | |||
176 | static inline int inat_has_immediate(insn_attr_t attr) | ||
177 | { | ||
178 | return attr & INAT_IMM_MASK; | ||
179 | } | ||
180 | |||
181 | static inline int inat_immediate_size(insn_attr_t attr) | ||
182 | { | ||
183 | return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS; | ||
184 | } | ||
185 | |||
186 | static inline int inat_has_modrm(insn_attr_t attr) | ||
187 | { | ||
188 | return attr & INAT_MODRM; | ||
189 | } | ||
190 | |||
191 | static inline int inat_is_force64(insn_attr_t attr) | ||
192 | { | ||
193 | return attr & INAT_FORCE64; | ||
194 | } | ||
195 | |||
196 | static inline int inat_has_second_immediate(insn_attr_t attr) | ||
197 | { | ||
198 | return attr & INAT_SCNDIMM; | ||
199 | } | ||
200 | |||
201 | static inline int inat_has_moffset(insn_attr_t attr) | ||
202 | { | ||
203 | return attr & INAT_MOFFSET; | ||
204 | } | ||
205 | |||
206 | static inline int inat_has_variant(insn_attr_t attr) | ||
207 | { | ||
208 | return attr & INAT_VARIANT; | ||
209 | } | ||
210 | |||
211 | static inline int inat_accept_vex(insn_attr_t attr) | ||
212 | { | ||
213 | return attr & INAT_VEXOK; | ||
214 | } | ||
215 | |||
216 | static inline int inat_must_vex(insn_attr_t attr) | ||
217 | { | ||
218 | return attr & INAT_VEXONLY; | ||
219 | } | ||
220 | #endif | ||
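The attribute word above is a packed bit-field: prefix id in bits 0-3, escape id in bits 4-5, group id in bits 6-10, immediate-size code in bits 11-13, and boolean flags from bit 14 upward. As a minimal user-space sketch of how the make/check macro pairs round-trip (constants copied from this header; the program itself is only illustrative, not part of the patch):

#include <stdio.h>

typedef unsigned int insn_attr_t;

#define INAT_PFX_OFFS	0
#define INAT_PFX_BITS	4
#define INAT_ESC_OFFS	(INAT_PFX_OFFS + INAT_PFX_BITS)	/* 4 */
#define INAT_ESC_BITS	2
#define INAT_GRP_OFFS	(INAT_ESC_OFFS + INAT_ESC_BITS)	/* 6 */
#define INAT_GRP_BITS	5
#define INAT_GRP_MASK	(((1 << INAT_GRP_BITS) - 1) << INAT_GRP_OFFS)
#define INAT_IMM_OFFS	(INAT_GRP_OFFS + INAT_GRP_BITS)	/* 11 */
#define INAT_IMM_BITS	3
#define INAT_FLAG_OFFS	(INAT_IMM_OFFS + INAT_IMM_BITS)	/* 14 */
#define INAT_MODRM	(1 << INAT_FLAG_OFFS)
#define INAT_MAKE_GROUP(grp)	((grp << INAT_GRP_OFFS) | INAT_MODRM)

int main(void)
{
	insn_attr_t attr = INAT_MAKE_GROUP(5);	/* a group-5 opcode */

	/* equivalents of inat_group_id() and inat_has_modrm() */
	printf("group=%d modrm=%d\n",
	       (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS,
	       !!(attr & INAT_MODRM));		/* prints: group=5 modrm=1 */
	return 0;
}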
diff --git a/arch/x86/include/asm/inat_types.h b/arch/x86/include/asm/inat_types.h new file mode 100644 index 000000000000..cb3c20ce39cf --- /dev/null +++ b/arch/x86/include/asm/inat_types.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _ASM_X86_INAT_TYPES_H | ||
2 | #define _ASM_X86_INAT_TYPES_H | ||
3 | /* | ||
4 | * x86 instruction attributes | ||
5 | * | ||
6 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /* Instruction attributes */ | ||
25 | typedef unsigned int insn_attr_t; | ||
26 | typedef unsigned char insn_byte_t; | ||
27 | typedef signed int insn_value_t; | ||
28 | |||
29 | #endif | ||
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h new file mode 100644 index 000000000000..96c2e0ad04ca --- /dev/null +++ b/arch/x86/include/asm/insn.h | |||
@@ -0,0 +1,184 @@ | |||
1 | #ifndef _ASM_X86_INSN_H | ||
2 | #define _ASM_X86_INSN_H | ||
3 | /* | ||
4 | * x86 instruction analysis | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | * Copyright (C) IBM Corporation, 2009 | ||
21 | */ | ||
22 | |||
23 | /* insn_attr_t is defined in inat.h */ | ||
24 | #include <asm/inat.h> | ||
25 | |||
26 | struct insn_field { | ||
27 | union { | ||
28 | insn_value_t value; | ||
29 | insn_byte_t bytes[4]; | ||
30 | }; | ||
31 | /* !0 if we've run insn_get_xxx() for this field */ | ||
32 | unsigned char got; | ||
33 | unsigned char nbytes; | ||
34 | }; | ||
35 | |||
36 | struct insn { | ||
37 | struct insn_field prefixes; /* | ||
38 | * Prefixes | ||
39 | * prefixes.bytes[3]: last prefix | ||
40 | */ | ||
41 | struct insn_field rex_prefix; /* REX prefix */ | ||
42 | struct insn_field vex_prefix; /* VEX prefix */ | ||
43 | struct insn_field opcode; /* | ||
44 | * opcode.bytes[0]: opcode1 | ||
45 | * opcode.bytes[1]: opcode2 | ||
46 | * opcode.bytes[2]: opcode3 | ||
47 | */ | ||
48 | struct insn_field modrm; | ||
49 | struct insn_field sib; | ||
50 | struct insn_field displacement; | ||
51 | union { | ||
52 | struct insn_field immediate; | ||
53 | struct insn_field moffset1; /* for 64bit MOV */ | ||
54 | struct insn_field immediate1; /* for 64bit imm or off16/32 */ | ||
55 | }; | ||
56 | union { | ||
57 | struct insn_field moffset2; /* for 64bit MOV */ | ||
58 | struct insn_field immediate2; /* for 64bit imm or seg16 */ | ||
59 | }; | ||
60 | |||
61 | insn_attr_t attr; | ||
62 | unsigned char opnd_bytes; | ||
63 | unsigned char addr_bytes; | ||
64 | unsigned char length; | ||
65 | unsigned char x86_64; | ||
66 | |||
67 | const insn_byte_t *kaddr; /* kernel address of insn to analyze */ | ||
68 | const insn_byte_t *next_byte; | ||
69 | }; | ||
70 | |||
71 | #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) | ||
72 | #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) | ||
73 | #define X86_MODRM_RM(modrm) ((modrm) & 0x07) | ||
74 | |||
75 | #define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6) | ||
76 | #define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3) | ||
77 | #define X86_SIB_BASE(sib) ((sib) & 0x07) | ||
78 | |||
79 | #define X86_REX_W(rex) ((rex) & 8) | ||
80 | #define X86_REX_R(rex) ((rex) & 4) | ||
81 | #define X86_REX_X(rex) ((rex) & 2) | ||
82 | #define X86_REX_B(rex) ((rex) & 1) | ||
83 | |||
84 | /* VEX bit flags */ | ||
85 | #define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */ | ||
86 | #define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */ | ||
87 | #define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */ | ||
88 | #define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */ | ||
89 | #define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */ | ||
90 | /* VEX bit fields */ | ||
91 | #define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */ | ||
92 | #define X86_VEX2_M 1 /* VEX2.M always 1 */ | ||
93 | #define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */ | ||
94 | #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ | ||
95 | #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ | ||
96 | |||
97 | /* The last prefix is needed for two-byte and three-byte opcodes */ | ||
98 | static inline insn_byte_t insn_last_prefix(struct insn *insn) | ||
99 | { | ||
100 | return insn->prefixes.bytes[3]; | ||
101 | } | ||
102 | |||
103 | extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); | ||
104 | extern void insn_get_prefixes(struct insn *insn); | ||
105 | extern void insn_get_opcode(struct insn *insn); | ||
106 | extern void insn_get_modrm(struct insn *insn); | ||
107 | extern void insn_get_sib(struct insn *insn); | ||
108 | extern void insn_get_displacement(struct insn *insn); | ||
109 | extern void insn_get_immediate(struct insn *insn); | ||
110 | extern void insn_get_length(struct insn *insn); | ||
111 | |||
112 | /* Attribute will be determined after getting ModRM (for opcode groups) */ | ||
113 | static inline void insn_get_attribute(struct insn *insn) | ||
114 | { | ||
115 | insn_get_modrm(insn); | ||
116 | } | ||
117 | |||
118 | /* Instruction uses RIP-relative addressing */ | ||
119 | extern int insn_rip_relative(struct insn *insn); | ||
120 | |||
121 | /* Init insn for kernel text */ | ||
122 | static inline void kernel_insn_init(struct insn *insn, const void *kaddr) | ||
123 | { | ||
124 | #ifdef CONFIG_X86_64 | ||
125 | insn_init(insn, kaddr, 1); | ||
126 | #else /* CONFIG_X86_32 */ | ||
127 | insn_init(insn, kaddr, 0); | ||
128 | #endif | ||
129 | } | ||
130 | |||
131 | static inline int insn_is_avx(struct insn *insn) | ||
132 | { | ||
133 | if (!insn->prefixes.got) | ||
134 | insn_get_prefixes(insn); | ||
135 | return (insn->vex_prefix.value != 0); | ||
136 | } | ||
137 | |||
138 | static inline insn_byte_t insn_vex_m_bits(struct insn *insn) | ||
139 | { | ||
140 | if (insn->vex_prefix.nbytes == 2) /* 2-byte VEX */ | ||
141 | return X86_VEX2_M; | ||
142 | else | ||
143 | return X86_VEX3_M(insn->vex_prefix.bytes[1]); | ||
144 | } | ||
145 | |||
146 | static inline insn_byte_t insn_vex_p_bits(struct insn *insn) | ||
147 | { | ||
148 | if (insn->vex_prefix.nbytes == 2) /* 2-byte VEX */ | ||
149 | return X86_VEX_P(insn->vex_prefix.bytes[1]); | ||
150 | else | ||
151 | return X86_VEX_P(insn->vex_prefix.bytes[2]); | ||
152 | } | ||
153 | |||
154 | /* Offset of each field from kaddr */ | ||
155 | static inline int insn_offset_rex_prefix(struct insn *insn) | ||
156 | { | ||
157 | return insn->prefixes.nbytes; | ||
158 | } | ||
159 | static inline int insn_offset_vex_prefix(struct insn *insn) | ||
160 | { | ||
161 | return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes; | ||
162 | } | ||
163 | static inline int insn_offset_opcode(struct insn *insn) | ||
164 | { | ||
165 | return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes; | ||
166 | } | ||
167 | static inline int insn_offset_modrm(struct insn *insn) | ||
168 | { | ||
169 | return insn_offset_opcode(insn) + insn->opcode.nbytes; | ||
170 | } | ||
171 | static inline int insn_offset_sib(struct insn *insn) | ||
172 | { | ||
173 | return insn_offset_modrm(insn) + insn->modrm.nbytes; | ||
174 | } | ||
175 | static inline int insn_offset_displacement(struct insn *insn) | ||
176 | { | ||
177 | return insn_offset_sib(insn) + insn->sib.nbytes; | ||
178 | } | ||
179 | static inline int insn_offset_immediate(struct insn *insn) | ||
180 | { | ||
181 | return insn_offset_displacement(insn) + insn->displacement.nbytes; | ||
182 | } | ||
183 | |||
184 | #endif /* _ASM_X86_INSN_H */ | ||
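The intended calling pattern for this decoder API, sketched: initialize with insn_init() (or kernel_insn_init()), then call whichever insn_get_*() stage you need; insn_get_length() runs the whole pipeline, so afterwards every field and the total length are valid. A hedged in-kernel sketch (assumes the decoder implementation in lib/insn.c is built in; report_insn is an illustrative name):

#include <linux/kernel.h>
#include <asm/insn.h>

/* Decode the instruction at kaddr and report a few decoded fields. */
static int report_insn(const void *kaddr)
{
	struct insn insn;

	kernel_insn_init(&insn, kaddr);	/* x86_64 flag set per CONFIG_X86_64 */
	insn_get_length(&insn);		/* pulls in prefixes..immediate too */

	pr_info("len=%u opcode=%02x rip_rel=%d\n", insn.length,
		insn.opcode.bytes[0], insn_rip_relative(&insn));
	return insn.length;
}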
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h new file mode 100644 index 000000000000..14cf526091f9 --- /dev/null +++ b/arch/x86/include/asm/inst.h | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * Generate .byte code for some instructions not supported by old | ||
3 | * binutils. | ||
4 | */ | ||
5 | #ifndef X86_ASM_INST_H | ||
6 | #define X86_ASM_INST_H | ||
7 | |||
8 | #ifdef __ASSEMBLY__ | ||
9 | |||
10 | .macro XMM_NUM opd xmm | ||
11 | .ifc \xmm,%xmm0 | ||
12 | \opd = 0 | ||
13 | .endif | ||
14 | .ifc \xmm,%xmm1 | ||
15 | \opd = 1 | ||
16 | .endif | ||
17 | .ifc \xmm,%xmm2 | ||
18 | \opd = 2 | ||
19 | .endif | ||
20 | .ifc \xmm,%xmm3 | ||
21 | \opd = 3 | ||
22 | .endif | ||
23 | .ifc \xmm,%xmm4 | ||
24 | \opd = 4 | ||
25 | .endif | ||
26 | .ifc \xmm,%xmm5 | ||
27 | \opd = 5 | ||
28 | .endif | ||
29 | .ifc \xmm,%xmm6 | ||
30 | \opd = 6 | ||
31 | .endif | ||
32 | .ifc \xmm,%xmm7 | ||
33 | \opd = 7 | ||
34 | .endif | ||
35 | .ifc \xmm,%xmm8 | ||
36 | \opd = 8 | ||
37 | .endif | ||
38 | .ifc \xmm,%xmm9 | ||
39 | \opd = 9 | ||
40 | .endif | ||
41 | .ifc \xmm,%xmm10 | ||
42 | \opd = 10 | ||
43 | .endif | ||
44 | .ifc \xmm,%xmm11 | ||
45 | \opd = 11 | ||
46 | .endif | ||
47 | .ifc \xmm,%xmm12 | ||
48 | \opd = 12 | ||
49 | .endif | ||
50 | .ifc \xmm,%xmm13 | ||
51 | \opd = 13 | ||
52 | .endif | ||
53 | .ifc \xmm,%xmm14 | ||
54 | \opd = 14 | ||
55 | .endif | ||
56 | .ifc \xmm,%xmm15 | ||
57 | \opd = 15 | ||
58 | .endif | ||
59 | .endm | ||
60 | |||
61 | .macro PFX_OPD_SIZE | ||
62 | .byte 0x66 | ||
63 | .endm | ||
64 | |||
65 | .macro PFX_REX opd1 opd2 | ||
66 | .if (\opd1 | \opd2) & 8 | ||
67 | .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | ||
68 | .endif | ||
69 | .endm | ||
70 | |||
71 | .macro MODRM mod opd1 opd2 | ||
72 | .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3) | ||
73 | .endm | ||
74 | |||
75 | .macro PSHUFB_XMM xmm1 xmm2 | ||
76 | XMM_NUM pshufb_opd1 \xmm1 | ||
77 | XMM_NUM pshufb_opd2 \xmm2 | ||
78 | PFX_OPD_SIZE | ||
79 | PFX_REX pshufb_opd1 pshufb_opd2 | ||
80 | .byte 0x0f, 0x38, 0x00 | ||
81 | MODRM 0xc0 pshufb_opd1 pshufb_opd2 | ||
82 | .endm | ||
83 | |||
84 | .macro PCLMULQDQ imm8 xmm1 xmm2 | ||
85 | XMM_NUM clmul_opd1 \xmm1 | ||
86 | XMM_NUM clmul_opd2 \xmm2 | ||
87 | PFX_OPD_SIZE | ||
88 | PFX_REX clmul_opd1 clmul_opd2 | ||
89 | .byte 0x0f, 0x3a, 0x44 | ||
90 | MODRM 0xc0 clmul_opd1 clmul_opd2 | ||
91 | .byte \imm8 | ||
92 | .endm | ||
93 | |||
94 | .macro AESKEYGENASSIST rcon xmm1 xmm2 | ||
95 | XMM_NUM aeskeygen_opd1 \xmm1 | ||
96 | XMM_NUM aeskeygen_opd2 \xmm2 | ||
97 | PFX_OPD_SIZE | ||
98 | PFX_REX aeskeygen_opd1 aeskeygen_opd2 | ||
99 | .byte 0x0f, 0x3a, 0xdf | ||
100 | MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2 | ||
101 | .byte \rcon | ||
102 | .endm | ||
103 | |||
104 | .macro AESIMC xmm1 xmm2 | ||
105 | XMM_NUM aesimc_opd1 \xmm1 | ||
106 | XMM_NUM aesimc_opd2 \xmm2 | ||
107 | PFX_OPD_SIZE | ||
108 | PFX_REX aesimc_opd1 aesimc_opd2 | ||
109 | .byte 0x0f, 0x38, 0xdb | ||
110 | MODRM 0xc0 aesimc_opd1 aesimc_opd2 | ||
111 | .endm | ||
112 | |||
113 | .macro AESENC xmm1 xmm2 | ||
114 | XMM_NUM aesenc_opd1 \xmm1 | ||
115 | XMM_NUM aesenc_opd2 \xmm2 | ||
116 | PFX_OPD_SIZE | ||
117 | PFX_REX aesenc_opd1 aesenc_opd2 | ||
118 | .byte 0x0f, 0x38, 0xdc | ||
119 | MODRM 0xc0 aesenc_opd1 aesenc_opd2 | ||
120 | .endm | ||
121 | |||
122 | .macro AESENCLAST xmm1 xmm2 | ||
123 | XMM_NUM aesenclast_opd1 \xmm1 | ||
124 | XMM_NUM aesenclast_opd2 \xmm2 | ||
125 | PFX_OPD_SIZE | ||
126 | PFX_REX aesenclast_opd1 aesenclast_opd2 | ||
127 | .byte 0x0f, 0x38, 0xdd | ||
128 | MODRM 0xc0 aesenclast_opd1 aesenclast_opd2 | ||
129 | .endm | ||
130 | |||
131 | .macro AESDEC xmm1 xmm2 | ||
132 | XMM_NUM aesdec_opd1 \xmm1 | ||
133 | XMM_NUM aesdec_opd2 \xmm2 | ||
134 | PFX_OPD_SIZE | ||
135 | PFX_REX aesdec_opd1 aesdec_opd2 | ||
136 | .byte 0x0f, 0x38, 0xde | ||
137 | MODRM 0xc0 aesdec_opd1 aesdec_opd2 | ||
138 | .endm | ||
139 | |||
140 | .macro AESDECLAST xmm1 xmm2 | ||
141 | XMM_NUM aesdeclast_opd1 \xmm1 | ||
142 | XMM_NUM aesdeclast_opd2 \xmm2 | ||
143 | PFX_OPD_SIZE | ||
144 | PFX_REX aesdeclast_opd1 aesdeclast_opd2 | ||
145 | .byte 0x0f, 0x38, 0xdf | ||
146 | MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2 | ||
147 | .endm | ||
148 | #endif | ||
149 | |||
150 | #endif | ||
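These macros exist because pre-SSSE3/AES binutils cannot assemble the mnemonics, so each one hand-assembles the fixed opcode bytes plus a computed ModRM byte (and a REX prefix only when a register number is 8 or above). As a worked check in user-space C (illustrative, mirroring the MODRM macro): PSHUFB_XMM %xmm1 %xmm2 emits 66 0f 38 00 d1, i.e. pshufb %xmm1, %xmm2.

#include <stdio.h>

/* Mirror of the MODRM assembler macro: mod | rm | (reg << 3). */
static unsigned char modrm(unsigned mod, unsigned opd1, unsigned opd2)
{
	return mod | (opd1 & 7) | ((opd2 & 7) << 3);
}

int main(void)
{
	/* PSHUFB_XMM %xmm1 %xmm2: 0x66 prefix, no REX (both regs < 8),
	 * opcode 0f 38 00, then ModRM with mod=11b, rm=opd1=1, reg=opd2=2 */
	printf("66 0f 38 00 %02x\n", modrm(0xc0, 1, 2));	/* -> d1 */
	return 0;
}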
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 73739322b6d0..30a3e9776123 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -1,8 +1,42 @@ | |||
1 | #ifndef _ASM_X86_IO_H | 1 | #ifndef _ASM_X86_IO_H |
2 | #define _ASM_X86_IO_H | 2 | #define _ASM_X86_IO_H |
3 | 3 | ||
4 | /* | ||
5 | * This file contains the definitions for the x86 IO instructions | ||
6 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same | ||
7 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" | ||
8 | * versions of the single-IO instructions (inb_p/inw_p/..). | ||
9 | * | ||
10 | * This file is not meant to be obfuscating: it's just complicated | ||
11 | * to (a) handle it all in a way that makes gcc able to optimize it | ||
12 | * as well as possible and (b) avoid writing the same thing | ||
13 | * over and over again with slight variations and possibly making a | ||
14 | * mistake somewhere. | ||
15 | */ | ||
16 | |||
17 | /* | ||
18 | * Thanks to James van Artsdalen for a better timing-fix than | ||
19 | * the two short jumps: using outb's to a nonexistent port seems | ||
20 | * to guarantee better timings even on fast machines. | ||
21 | * | ||
22 | * On the other hand, I'd like to be sure of a non-existent port: | ||
23 | * I feel a bit unsafe about using 0x80 (should be safe, though) | ||
24 | * | ||
25 | * Linus | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Bit simplified and optimized by Jan Hubicka | ||
30 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. | ||
31 | * | ||
32 | * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, | ||
33 | * isa_read[wl] and isa_write[wl] fixed | ||
34 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | ||
35 | */ | ||
36 | |||
4 | #define ARCH_HAS_IOREMAP_WC | 37 | #define ARCH_HAS_IOREMAP_WC |
5 | 38 | ||
39 | #include <linux/string.h> | ||
6 | #include <linux/compiler.h> | 40 | #include <linux/compiler.h> |
7 | #include <asm-generic/int-ll64.h> | 41 | #include <asm-generic/int-ll64.h> |
8 | #include <asm/page.h> | 42 | #include <asm/page.h> |
@@ -173,11 +207,126 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) | |||
173 | extern void iounmap(volatile void __iomem *addr); | 207 | extern void iounmap(volatile void __iomem *addr); |
174 | 208 | ||
175 | 209 | ||
176 | #ifdef CONFIG_X86_32 | 210 | #ifdef __KERNEL__ |
177 | # include "io_32.h" | 211 | |
212 | #include <asm-generic/iomap.h> | ||
213 | |||
214 | #include <linux/vmalloc.h> | ||
215 | |||
216 | /* | ||
217 | * Convert a virtual cached pointer to an uncached pointer | ||
218 | */ | ||
219 | #define xlate_dev_kmem_ptr(p) p | ||
220 | |||
221 | static inline void | ||
222 | memset_io(volatile void __iomem *addr, unsigned char val, size_t count) | ||
223 | { | ||
224 | memset((void __force *)addr, val, count); | ||
225 | } | ||
226 | |||
227 | static inline void | ||
228 | memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) | ||
229 | { | ||
230 | memcpy(dst, (const void __force *)src, count); | ||
231 | } | ||
232 | |||
233 | static inline void | ||
234 | memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) | ||
235 | { | ||
236 | memcpy((void __force *)dst, src, count); | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * ISA space is 'always mapped' on a typical x86 system, no need to | ||
241 | * explicitly ioremap() it. The fact that the ISA IO space is mapped | ||
242 | * to PAGE_OFFSET is pure coincidence - it does not mean ISA values | ||
243 | * are physical addresses. The following constant pointer can be | ||
244 | * used as the IO-area pointer (it can be iounmapped as well, so the | ||
245 | * analogy with PCI is quite large): | ||
246 | */ | ||
247 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | ||
248 | |||
249 | /* | ||
250 | * Cache management | ||
251 | * | ||
252 | * This is needed for two cases: | ||
253 | * 1. Out of order aware processors | ||
254 | * 2. Accidentally out of order processors (PPro errata #51) | ||
255 | */ | ||
256 | |||
257 | static inline void flush_write_buffers(void) | ||
258 | { | ||
259 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | ||
260 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); | ||
261 | #endif | ||
262 | } | ||
263 | |||
264 | #endif /* __KERNEL__ */ | ||
265 | |||
266 | extern void native_io_delay(void); | ||
267 | |||
268 | extern int io_delay_type; | ||
269 | extern void io_delay_init(void); | ||
270 | |||
271 | #if defined(CONFIG_PARAVIRT) | ||
272 | #include <asm/paravirt.h> | ||
178 | #else | 273 | #else |
179 | # include "io_64.h" | 274 | |
275 | static inline void slow_down_io(void) | ||
276 | { | ||
277 | native_io_delay(); | ||
278 | #ifdef REALLY_SLOW_IO | ||
279 | native_io_delay(); | ||
280 | native_io_delay(); | ||
281 | native_io_delay(); | ||
180 | #endif | 282 | #endif |
283 | } | ||
284 | |||
285 | #endif | ||
286 | |||
287 | #define BUILDIO(bwl, bw, type) \ | ||
288 | static inline void out##bwl(unsigned type value, int port) \ | ||
289 | { \ | ||
290 | asm volatile("out" #bwl " %" #bw "0, %w1" \ | ||
291 | : : "a"(value), "Nd"(port)); \ | ||
292 | } \ | ||
293 | \ | ||
294 | static inline unsigned type in##bwl(int port) \ | ||
295 | { \ | ||
296 | unsigned type value; \ | ||
297 | asm volatile("in" #bwl " %w1, %" #bw "0" \ | ||
298 | : "=a"(value) : "Nd"(port)); \ | ||
299 | return value; \ | ||
300 | } \ | ||
301 | \ | ||
302 | static inline void out##bwl##_p(unsigned type value, int port) \ | ||
303 | { \ | ||
304 | out##bwl(value, port); \ | ||
305 | slow_down_io(); \ | ||
306 | } \ | ||
307 | \ | ||
308 | static inline unsigned type in##bwl##_p(int port) \ | ||
309 | { \ | ||
310 | unsigned type value = in##bwl(port); \ | ||
311 | slow_down_io(); \ | ||
312 | return value; \ | ||
313 | } \ | ||
314 | \ | ||
315 | static inline void outs##bwl(int port, const void *addr, unsigned long count) \ | ||
316 | { \ | ||
317 | asm volatile("rep; outs" #bwl \ | ||
318 | : "+S"(addr), "+c"(count) : "d"(port)); \ | ||
319 | } \ | ||
320 | \ | ||
321 | static inline void ins##bwl(int port, void *addr, unsigned long count) \ | ||
322 | { \ | ||
323 | asm volatile("rep; ins" #bwl \ | ||
324 | : "+D"(addr), "+c"(count) : "d"(port)); \ | ||
325 | } | ||
326 | |||
327 | BUILDIO(b, b, char) | ||
328 | BUILDIO(w, w, short) | ||
329 | BUILDIO(l, , int) | ||
181 | 330 | ||
182 | extern void *xlate_dev_mem_ptr(unsigned long phys); | 331 | extern void *xlate_dev_mem_ptr(unsigned long phys); |
183 | extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); | 332 | extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); |
@@ -198,6 +347,7 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr, | |||
198 | extern void __iomem *early_memremap(resource_size_t phys_addr, | 347 | extern void __iomem *early_memremap(resource_size_t phys_addr, |
199 | unsigned long size); | 348 | unsigned long size); |
200 | extern void early_iounmap(void __iomem *addr, unsigned long size); | 349 | extern void early_iounmap(void __iomem *addr, unsigned long size); |
350 | extern void fixup_early_ioremap(void); | ||
201 | 351 | ||
202 | #define IO_SPACE_LIMIT 0xffff | 352 | #define IO_SPACE_LIMIT 0xffff |
203 | 353 | ||
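For reference, the first two definitions that BUILDIO(b, b, char) generates look roughly like this after preprocessing (a reformatted sketch; the _p and rep-string variants follow the same pattern):

static inline void outb(unsigned char value, int port)
{
	asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
}

static inline unsigned char inb(int port)
{
	unsigned char value;

	asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
	return value;
}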
diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h deleted file mode 100644 index a299900f5920..000000000000 --- a/arch/x86/include/asm/io_32.h +++ /dev/null | |||
@@ -1,196 +0,0 @@ | |||
1 | #ifndef _ASM_X86_IO_32_H | ||
2 | #define _ASM_X86_IO_32_H | ||
3 | |||
4 | #include <linux/string.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
7 | /* | ||
8 | * This file contains the definitions for the x86 IO instructions | ||
9 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same | ||
10 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" | ||
11 | * versions of the single-IO instructions (inb_p/inw_p/..). | ||
12 | * | ||
13 | * This file is not meant to be obfuscating: it's just complicated | ||
14 | * to (a) handle it all in a way that makes gcc able to optimize it | ||
15 | * as well as possible and (b) trying to avoid writing the same thing | ||
16 | * over and over again with slight variations and possibly making a | ||
17 | * mistake somewhere. | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * Thanks to James van Artsdalen for a better timing-fix than | ||
22 | * the two short jumps: using outb's to a nonexistent port seems | ||
23 | * to guarantee better timings even on fast machines. | ||
24 | * | ||
25 | * On the other hand, I'd like to be sure of a non-existent port: | ||
26 | * I feel a bit unsafe about using 0x80 (should be safe, though) | ||
27 | * | ||
28 | * Linus | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * Bit simplified and optimized by Jan Hubicka | ||
33 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. | ||
34 | * | ||
35 | * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, | ||
36 | * isa_read[wl] and isa_write[wl] fixed | ||
37 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | ||
38 | */ | ||
39 | |||
40 | #define XQUAD_PORTIO_BASE 0xfe400000 | ||
41 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ | ||
42 | |||
43 | #ifdef __KERNEL__ | ||
44 | |||
45 | #include <asm-generic/iomap.h> | ||
46 | |||
47 | #include <linux/vmalloc.h> | ||
48 | |||
49 | /* | ||
50 | * Convert a virtual cached pointer to an uncached pointer | ||
51 | */ | ||
52 | #define xlate_dev_kmem_ptr(p) p | ||
53 | |||
54 | static inline void | ||
55 | memset_io(volatile void __iomem *addr, unsigned char val, int count) | ||
56 | { | ||
57 | memset((void __force *)addr, val, count); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | memcpy_fromio(void *dst, const volatile void __iomem *src, int count) | ||
62 | { | ||
63 | __memcpy(dst, (const void __force *)src, count); | ||
64 | } | ||
65 | |||
66 | static inline void | ||
67 | memcpy_toio(volatile void __iomem *dst, const void *src, int count) | ||
68 | { | ||
69 | __memcpy((void __force *)dst, src, count); | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * ISA space is 'always mapped' on a typical x86 system, no need to | ||
74 | * explicitly ioremap() it. The fact that the ISA IO space is mapped | ||
75 | * to PAGE_OFFSET is pure coincidence - it does not mean ISA values | ||
76 | * are physical addresses. The following constant pointer can be | ||
77 | * used as the IO-area pointer (it can be iounmapped as well, so the | ||
78 | * analogy with PCI is quite large): | ||
79 | */ | ||
80 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | ||
81 | |||
82 | /* | ||
83 | * Cache management | ||
84 | * | ||
85 | * This needed for two cases | ||
86 | * 1. Out of order aware processors | ||
87 | * 2. Accidentally out of order processors (PPro errata #51) | ||
88 | */ | ||
89 | |||
90 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | ||
91 | |||
92 | static inline void flush_write_buffers(void) | ||
93 | { | ||
94 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); | ||
95 | } | ||
96 | |||
97 | #else | ||
98 | |||
99 | #define flush_write_buffers() do { } while (0) | ||
100 | |||
101 | #endif | ||
102 | |||
103 | #endif /* __KERNEL__ */ | ||
104 | |||
105 | extern void native_io_delay(void); | ||
106 | |||
107 | extern int io_delay_type; | ||
108 | extern void io_delay_init(void); | ||
109 | |||
110 | #if defined(CONFIG_PARAVIRT) | ||
111 | #include <asm/paravirt.h> | ||
112 | #else | ||
113 | |||
114 | static inline void slow_down_io(void) | ||
115 | { | ||
116 | native_io_delay(); | ||
117 | #ifdef REALLY_SLOW_IO | ||
118 | native_io_delay(); | ||
119 | native_io_delay(); | ||
120 | native_io_delay(); | ||
121 | #endif | ||
122 | } | ||
123 | |||
124 | #endif | ||
125 | |||
126 | #define __BUILDIO(bwl, bw, type) \ | ||
127 | static inline void out##bwl(unsigned type value, int port) \ | ||
128 | { \ | ||
129 | out##bwl##_local(value, port); \ | ||
130 | } \ | ||
131 | \ | ||
132 | static inline unsigned type in##bwl(int port) \ | ||
133 | { \ | ||
134 | return in##bwl##_local(port); \ | ||
135 | } | ||
136 | |||
137 | #define BUILDIO(bwl, bw, type) \ | ||
138 | static inline void out##bwl##_local(unsigned type value, int port) \ | ||
139 | { \ | ||
140 | asm volatile("out" #bwl " %" #bw "0, %w1" \ | ||
141 | : : "a"(value), "Nd"(port)); \ | ||
142 | } \ | ||
143 | \ | ||
144 | static inline unsigned type in##bwl##_local(int port) \ | ||
145 | { \ | ||
146 | unsigned type value; \ | ||
147 | asm volatile("in" #bwl " %w1, %" #bw "0" \ | ||
148 | : "=a"(value) : "Nd"(port)); \ | ||
149 | return value; \ | ||
150 | } \ | ||
151 | \ | ||
152 | static inline void out##bwl##_local_p(unsigned type value, int port) \ | ||
153 | { \ | ||
154 | out##bwl##_local(value, port); \ | ||
155 | slow_down_io(); \ | ||
156 | } \ | ||
157 | \ | ||
158 | static inline unsigned type in##bwl##_local_p(int port) \ | ||
159 | { \ | ||
160 | unsigned type value = in##bwl##_local(port); \ | ||
161 | slow_down_io(); \ | ||
162 | return value; \ | ||
163 | } \ | ||
164 | \ | ||
165 | __BUILDIO(bwl, bw, type) \ | ||
166 | \ | ||
167 | static inline void out##bwl##_p(unsigned type value, int port) \ | ||
168 | { \ | ||
169 | out##bwl(value, port); \ | ||
170 | slow_down_io(); \ | ||
171 | } \ | ||
172 | \ | ||
173 | static inline unsigned type in##bwl##_p(int port) \ | ||
174 | { \ | ||
175 | unsigned type value = in##bwl(port); \ | ||
176 | slow_down_io(); \ | ||
177 | return value; \ | ||
178 | } \ | ||
179 | \ | ||
180 | static inline void outs##bwl(int port, const void *addr, unsigned long count) \ | ||
181 | { \ | ||
182 | asm volatile("rep; outs" #bwl \ | ||
183 | : "+S"(addr), "+c"(count) : "d"(port)); \ | ||
184 | } \ | ||
185 | \ | ||
186 | static inline void ins##bwl(int port, void *addr, unsigned long count) \ | ||
187 | { \ | ||
188 | asm volatile("rep; ins" #bwl \ | ||
189 | : "+D"(addr), "+c"(count) : "d"(port)); \ | ||
190 | } | ||
191 | |||
192 | BUILDIO(b, b, char) | ||
193 | BUILDIO(w, w, short) | ||
194 | BUILDIO(l, , int) | ||
195 | |||
196 | #endif /* _ASM_X86_IO_32_H */ | ||
diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h deleted file mode 100644 index 244067893af4..000000000000 --- a/arch/x86/include/asm/io_64.h +++ /dev/null | |||
@@ -1,181 +0,0 @@ | |||
1 | #ifndef _ASM_X86_IO_64_H | ||
2 | #define _ASM_X86_IO_64_H | ||
3 | |||
4 | |||
5 | /* | ||
6 | * This file contains the definitions for the x86 IO instructions | ||
7 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same | ||
8 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" | ||
9 | * versions of the single-IO instructions (inb_p/inw_p/..). | ||
10 | * | ||
11 | * This file is not meant to be obfuscating: it's just complicated | ||
12 | * to (a) handle it all in a way that makes gcc able to optimize it | ||
13 | * as well as possible and (b) trying to avoid writing the same thing | ||
14 | * over and over again with slight variations and possibly making a | ||
15 | * mistake somewhere. | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * Thanks to James van Artsdalen for a better timing-fix than | ||
20 | * the two short jumps: using outb's to a nonexistent port seems | ||
21 | * to guarantee better timings even on fast machines. | ||
22 | * | ||
23 | * On the other hand, I'd like to be sure of a non-existent port: | ||
24 | * I feel a bit unsafe about using 0x80 (should be safe, though) | ||
25 | * | ||
26 | * Linus | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | * Bit simplified and optimized by Jan Hubicka | ||
31 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. | ||
32 | * | ||
33 | * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, | ||
34 | * isa_read[wl] and isa_write[wl] fixed | ||
35 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | ||
36 | */ | ||
37 | |||
38 | extern void native_io_delay(void); | ||
39 | |||
40 | extern int io_delay_type; | ||
41 | extern void io_delay_init(void); | ||
42 | |||
43 | #if defined(CONFIG_PARAVIRT) | ||
44 | #include <asm/paravirt.h> | ||
45 | #else | ||
46 | |||
47 | static inline void slow_down_io(void) | ||
48 | { | ||
49 | native_io_delay(); | ||
50 | #ifdef REALLY_SLOW_IO | ||
51 | native_io_delay(); | ||
52 | native_io_delay(); | ||
53 | native_io_delay(); | ||
54 | #endif | ||
55 | } | ||
56 | #endif | ||
57 | |||
58 | /* | ||
59 | * Talk about misusing macros.. | ||
60 | */ | ||
61 | #define __OUT1(s, x) \ | ||
62 | static inline void out##s(unsigned x value, unsigned short port) { | ||
63 | |||
64 | #define __OUT2(s, s1, s2) \ | ||
65 | asm volatile ("out" #s " %" s1 "0,%" s2 "1" | ||
66 | |||
67 | #ifndef REALLY_SLOW_IO | ||
68 | #define REALLY_SLOW_IO | ||
69 | #define UNSET_REALLY_SLOW_IO | ||
70 | #endif | ||
71 | |||
72 | #define __OUT(s, s1, x) \ | ||
73 | __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ | ||
74 | } \ | ||
75 | __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ | ||
76 | slow_down_io(); \ | ||
77 | } | ||
78 | |||
79 | #define __IN1(s) \ | ||
80 | static inline RETURN_TYPE in##s(unsigned short port) \ | ||
81 | { \ | ||
82 | RETURN_TYPE _v; | ||
83 | |||
84 | #define __IN2(s, s1, s2) \ | ||
85 | asm volatile ("in" #s " %" s2 "1,%" s1 "0" | ||
86 | |||
87 | #define __IN(s, s1, i...) \ | ||
88 | __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | ||
89 | return _v; \ | ||
90 | } \ | ||
91 | __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | ||
92 | slow_down_io(); \ | ||
93 | return _v; } | ||
94 | |||
95 | #ifdef UNSET_REALLY_SLOW_IO | ||
96 | #undef REALLY_SLOW_IO | ||
97 | #endif | ||
98 | |||
99 | #define __INS(s) \ | ||
100 | static inline void ins##s(unsigned short port, void *addr, \ | ||
101 | unsigned long count) \ | ||
102 | { \ | ||
103 | asm volatile ("rep ; ins" #s \ | ||
104 | : "=D" (addr), "=c" (count) \ | ||
105 | : "d" (port), "0" (addr), "1" (count)); \ | ||
106 | } | ||
107 | |||
108 | #define __OUTS(s) \ | ||
109 | static inline void outs##s(unsigned short port, const void *addr, \ | ||
110 | unsigned long count) \ | ||
111 | { \ | ||
112 | asm volatile ("rep ; outs" #s \ | ||
113 | : "=S" (addr), "=c" (count) \ | ||
114 | : "d" (port), "0" (addr), "1" (count)); \ | ||
115 | } | ||
116 | |||
117 | #define RETURN_TYPE unsigned char | ||
118 | __IN(b, "") | ||
119 | #undef RETURN_TYPE | ||
120 | #define RETURN_TYPE unsigned short | ||
121 | __IN(w, "") | ||
122 | #undef RETURN_TYPE | ||
123 | #define RETURN_TYPE unsigned int | ||
124 | __IN(l, "") | ||
125 | #undef RETURN_TYPE | ||
126 | |||
127 | __OUT(b, "b", char) | ||
128 | __OUT(w, "w", short) | ||
129 | __OUT(l, , int) | ||
130 | |||
131 | __INS(b) | ||
132 | __INS(w) | ||
133 | __INS(l) | ||
134 | |||
135 | __OUTS(b) | ||
136 | __OUTS(w) | ||
137 | __OUTS(l) | ||
138 | |||
139 | #if defined(__KERNEL__) && defined(__x86_64__) | ||
140 | |||
141 | #include <linux/vmalloc.h> | ||
142 | |||
143 | #include <asm-generic/iomap.h> | ||
144 | |||
145 | void __memcpy_fromio(void *, unsigned long, unsigned); | ||
146 | void __memcpy_toio(unsigned long, const void *, unsigned); | ||
147 | |||
148 | static inline void memcpy_fromio(void *to, const volatile void __iomem *from, | ||
149 | unsigned len) | ||
150 | { | ||
151 | __memcpy_fromio(to, (unsigned long)from, len); | ||
152 | } | ||
153 | |||
154 | static inline void memcpy_toio(volatile void __iomem *to, const void *from, | ||
155 | unsigned len) | ||
156 | { | ||
157 | __memcpy_toio((unsigned long)to, from, len); | ||
158 | } | ||
159 | |||
160 | void memset_io(volatile void __iomem *a, int b, size_t c); | ||
161 | |||
162 | /* | ||
163 | * ISA space is 'always mapped' on a typical x86 system, no need to | ||
164 | * explicitly ioremap() it. The fact that the ISA IO space is mapped | ||
165 | * to PAGE_OFFSET is pure coincidence - it does not mean ISA values | ||
166 | * are physical addresses. The following constant pointer can be | ||
167 | * used as the IO-area pointer (it can be iounmapped as well, so the | ||
168 | * analogy with PCI is quite large): | ||
169 | */ | ||
170 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | ||
171 | |||
172 | #define flush_write_buffers() | ||
173 | |||
174 | /* | ||
175 | * Convert a virtual cached pointer to an uncached pointer | ||
176 | */ | ||
177 | #define xlate_dev_kmem_ptr(p) p | ||
178 | |||
179 | #endif /* __KERNEL__ */ | ||
180 | |||
181 | #endif /* _ASM_X86_IO_64_H */ | ||
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 7c7c16cde1f8..35832a03a515 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -143,8 +143,6 @@ extern int noioapicreroute; | |||
143 | /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ | 143 | /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ |
144 | extern int timer_through_8259; | 144 | extern int timer_through_8259; |
145 | 145 | ||
146 | extern void io_apic_disable_legacy(void); | ||
147 | |||
148 | /* | 146 | /* |
149 | * If we use the IO-APIC for IRQ routing, disable automatic | 147 | * If we use the IO-APIC for IRQ routing, disable automatic |
150 | * assignment of PCI IRQ's. | 148 | * assignment of PCI IRQ's. |
@@ -160,6 +158,7 @@ extern int io_apic_get_redir_entries(int ioapic); | |||
160 | struct io_apic_irq_attr; | 158 | struct io_apic_irq_attr; |
161 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 159 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
162 | struct io_apic_irq_attr *irq_attr); | 160 | struct io_apic_irq_attr *irq_attr); |
161 | void setup_IO_APIC_irq_extra(u32 gsi); | ||
163 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | 162 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); |
164 | extern void ioapic_init_mappings(void); | 163 | extern void ioapic_init_mappings(void); |
165 | extern void ioapic_insert_resources(void); | 164 | extern void ioapic_insert_resources(void); |
@@ -188,6 +187,7 @@ extern struct mp_ioapic_gsi mp_gsi_routing[]; | |||
188 | int mp_find_ioapic(int gsi); | 187 | int mp_find_ioapic(int gsi); |
189 | int mp_find_ioapic_pin(int ioapic, int gsi); | 188 | int mp_find_ioapic_pin(int ioapic, int gsi); |
190 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); | 189 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); |
190 | extern void __init pre_init_apic_IRQ0(void); | ||
191 | 191 | ||
192 | #else /* !CONFIG_X86_IO_APIC */ | 192 | #else /* !CONFIG_X86_IO_APIC */ |
193 | 193 | ||
@@ -197,7 +197,11 @@ static const int timer_through_8259 = 0; | |||
197 | static inline void ioapic_init_mappings(void) { } | 197 | static inline void ioapic_init_mappings(void) { } |
198 | static inline void ioapic_insert_resources(void) { } | 198 | static inline void ioapic_insert_resources(void) { } |
199 | static inline void probe_nr_irqs_gsi(void) { } | 199 | static inline void probe_nr_irqs_gsi(void) { } |
200 | static inline int mp_find_ioapic(int gsi) { return 0; } | ||
200 | 201 | ||
202 | struct io_apic_irq_attr; | ||
203 | static inline int io_apic_set_pci_routing(struct device *dev, int irq, | ||
204 | struct io_apic_irq_attr *irq_attr) { return 0; } | ||
201 | #endif | 205 | #endif |
202 | 206 | ||
203 | #endif /* _ASM_X86_IO_APIC_H */ | 207 | #endif /* _ASM_X86_IO_APIC_H */ |
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index fd6d21bbee6c..345c99cef152 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _ASM_X86_IOMMU_H | 1 | #ifndef _ASM_X86_IOMMU_H |
2 | #define _ASM_X86_IOMMU_H | 2 | #define _ASM_X86_IOMMU_H |
3 | 3 | ||
4 | extern void pci_iommu_shutdown(void); | ||
5 | extern void no_iommu_init(void); | ||
6 | extern struct dma_map_ops nommu_dma_ops; | 4 | extern struct dma_map_ops nommu_dma_ops; |
7 | extern int force_iommu, no_iommu; | 5 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 6 | extern int iommu_detected; |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index ddda6cbed6f4..5458380b6ef8 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -34,9 +34,10 @@ static inline int irq_canonicalize(int irq) | |||
34 | #ifdef CONFIG_HOTPLUG_CPU | 34 | #ifdef CONFIG_HOTPLUG_CPU |
35 | #include <linux/cpumask.h> | 35 | #include <linux/cpumask.h> |
36 | extern void fixup_irqs(void); | 36 | extern void fixup_irqs(void); |
37 | extern void irq_force_complete_move(int); | ||
37 | #endif | 38 | #endif |
38 | 39 | ||
39 | extern void (*generic_interrupt_extension)(void); | 40 | extern void (*x86_platform_ipi_callback)(void); |
40 | extern void native_init_IRQ(void); | 41 | extern void native_init_IRQ(void); |
41 | extern bool handle_irq(unsigned irq, struct pt_regs *regs); | 42 | extern bool handle_irq(unsigned irq, struct pt_regs *regs); |
42 | 43 | ||
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 28c3bf3f4c84..bb5318bbe0e4 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -28,28 +28,33 @@ | |||
28 | #define MCE_VECTOR 0x12 | 28 | #define MCE_VECTOR 0x12 |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * IDT vectors usable for external interrupt sources start | 31 | * IDT vectors usable for external interrupt sources start at 0x20. |
32 | * at 0x20: | 32 | * (0x80 is the syscall vector, 0x30-0x3f are for ISA) |
33 | */ | 33 | */ |
34 | #define FIRST_EXTERNAL_VECTOR 0x20 | 34 | #define FIRST_EXTERNAL_VECTOR 0x20 |
35 | 35 | /* | |
36 | #ifdef CONFIG_X86_32 | 36 | * We start allocating at 0x21 to spread out vectors evenly between |
37 | # define SYSCALL_VECTOR 0x80 | 37 | * priority levels. (0x80 is the syscall vector) |
38 | # define IA32_SYSCALL_VECTOR 0x80 | 38 | */ |
39 | #else | 39 | #define VECTOR_OFFSET_START 1 |
40 | # define IA32_SYSCALL_VECTOR 0x80 | ||
41 | #endif | ||
42 | 40 | ||
43 | /* | 41 | /* |
44 | * Reserve the lowest usable priority level 0x20 - 0x2f for triggering | 42 | * Reserve the lowest usable vector (and hence lowest priority) 0x20 for |
45 | * cleanup after irq migration. | 43 | * triggering cleanup after irq migration. 0x21-0x2f will still be used |
44 | * for device interrupts. | ||
46 | */ | 45 | */ |
47 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR | 46 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR |
48 | 47 | ||
48 | #define IA32_SYSCALL_VECTOR 0x80 | ||
49 | #ifdef CONFIG_X86_32 | ||
50 | # define SYSCALL_VECTOR 0x80 | ||
51 | #endif | ||
52 | |||
49 | /* | 53 | /* |
50 | * Vectors 0x30-0x3f are used for ISA interrupts. | 54 | * Vectors 0x30-0x3f are used for ISA interrupts. |
55 | * round up to the next 16-vector boundary | ||
51 | */ | 56 | */ |
52 | #define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) | 57 | #define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15) |
53 | 58 | ||
54 | #define IRQ1_VECTOR (IRQ0_VECTOR + 1) | 59 | #define IRQ1_VECTOR (IRQ0_VECTOR + 1) |
55 | #define IRQ2_VECTOR (IRQ0_VECTOR + 2) | 60 | #define IRQ2_VECTOR (IRQ0_VECTOR + 2) |
@@ -111,27 +116,20 @@ | |||
111 | /* | 116 | /* |
112 | * Generic system vector for platform specific use | 117 | * Generic system vector for platform specific use |
113 | */ | 118 | */ |
114 | #define GENERIC_INTERRUPT_VECTOR 0xed | 119 | #define X86_PLATFORM_IPI_VECTOR 0xed |
115 | 120 | ||
116 | /* | 121 | /* |
117 | * Performance monitoring pending work vector: | 122 | * Performance monitoring pending work vector: |
118 | */ | 123 | */ |
119 | #define LOCAL_PENDING_VECTOR 0xec | 124 | #define LOCAL_PENDING_VECTOR 0xec |
120 | 125 | ||
121 | #define UV_BAU_MESSAGE 0xec | 126 | #define UV_BAU_MESSAGE 0xea |
122 | 127 | ||
123 | /* | 128 | /* |
124 | * Self IPI vector for machine checks | 129 | * Self IPI vector for machine checks |
125 | */ | 130 | */ |
126 | #define MCE_SELF_VECTOR 0xeb | 131 | #define MCE_SELF_VECTOR 0xeb |
127 | 132 | ||
128 | /* | ||
129 | * First APIC vector available to drivers: (vectors 0x30-0xee) we | ||
130 | * start at 0x31(0x41) to spread out vectors evenly between priority | ||
131 | * levels. (0x80 is the syscall vector) | ||
132 | */ | ||
133 | #define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) | ||
134 | |||
135 | #define NR_VECTORS 256 | 133 | #define NR_VECTORS 256 |
136 | 134 | ||
137 | #define FPU_IRQ 13 | 135 | #define FPU_IRQ 13 |
@@ -159,21 +157,21 @@ static inline int invalid_vm86_irq(int irq) | |||
159 | 157 | ||
160 | #define NR_IRQS_LEGACY 16 | 158 | #define NR_IRQS_LEGACY 16 |
161 | 159 | ||
162 | #define CPU_VECTOR_LIMIT ( 8 * NR_CPUS ) | ||
163 | #define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) | 160 | #define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) |
164 | 161 | ||
165 | #ifdef CONFIG_X86_IO_APIC | 162 | #ifdef CONFIG_X86_IO_APIC |
166 | # ifdef CONFIG_SPARSE_IRQ | 163 | # ifdef CONFIG_SPARSE_IRQ |
164 | # define CPU_VECTOR_LIMIT (64 * NR_CPUS) | ||
167 | # define NR_IRQS \ | 165 | # define NR_IRQS \ |
168 | (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ | 166 | (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ |
169 | (NR_VECTORS + CPU_VECTOR_LIMIT) : \ | 167 | (NR_VECTORS + CPU_VECTOR_LIMIT) : \ |
170 | (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) | 168 | (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) |
171 | # else | 169 | # else |
172 | # if NR_CPUS < MAX_IO_APICS | 170 | # define CPU_VECTOR_LIMIT (32 * NR_CPUS) |
173 | # define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) | 171 | # define NR_IRQS \ |
174 | # else | 172 | (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \ |
175 | # define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) | 173 | (NR_VECTORS + CPU_VECTOR_LIMIT) : \ |
176 | # endif | 174 | (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) |
177 | # endif | 175 | # endif |
178 | #else /* !CONFIG_X86_IO_APIC: */ | 176 | #else /* !CONFIG_X86_IO_APIC: */ |
179 | # define NR_IRQS NR_IRQS_LEGACY | 177 | # define NR_IRQS NR_IRQS_LEGACY |
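One arithmetic detail worth spelling out: IRQ0_VECTOR is now ((FIRST_EXTERNAL_VECTOR + 16) & ~15), which with FIRST_EXTERNAL_VECTOR = 0x20 evaluates to 0x30, the same value as the old "+ 0x10" form, but it stays 16-aligned if FIRST_EXTERNAL_VECTOR ever moves. A one-line sanity check (illustrative):

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR	0x20
#define IRQ0_VECTOR		((FIRST_EXTERNAL_VECTOR + 16) & ~15)

int main(void)
{
	/* (0x20 + 16) = 0x30; already 16-aligned, so & ~15 keeps 0x30 */
	printf("IRQ0_VECTOR = 0x%x\n", IRQ0_VECTOR);	/* 0x30 */
	return 0;
}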
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h index c2d1f3b58e5f..af00bd1d2089 100644 --- a/arch/x86/include/asm/k8.h +++ b/arch/x86/include/asm/k8.h | |||
@@ -4,20 +4,28 @@ | |||
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | 5 | ||
6 | extern struct pci_device_id k8_nb_ids[]; | 6 | extern struct pci_device_id k8_nb_ids[]; |
7 | struct bootnode; | ||
7 | 8 | ||
8 | extern int early_is_k8_nb(u32 value); | 9 | extern int early_is_k8_nb(u32 value); |
9 | extern struct pci_dev **k8_northbridges; | 10 | extern struct pci_dev **k8_northbridges; |
10 | extern int num_k8_northbridges; | 11 | extern int num_k8_northbridges; |
11 | extern int cache_k8_northbridges(void); | 12 | extern int cache_k8_northbridges(void); |
12 | extern void k8_flush_garts(void); | 13 | extern void k8_flush_garts(void); |
13 | extern int k8_scan_nodes(unsigned long start, unsigned long end); | 14 | extern int k8_get_nodes(struct bootnode *nodes); |
15 | extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); | ||
16 | extern int k8_scan_nodes(void); | ||
14 | 17 | ||
15 | #ifdef CONFIG_K8_NB | 18 | #ifdef CONFIG_K8_NB |
19 | extern int num_k8_northbridges; | ||
20 | |||
16 | static inline struct pci_dev *node_to_k8_nb_misc(int node) | 21 | static inline struct pci_dev *node_to_k8_nb_misc(int node) |
17 | { | 22 | { |
18 | return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; | 23 | return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; |
19 | } | 24 | } |
25 | |||
20 | #else | 26 | #else |
27 | #define num_k8_northbridges 0 | ||
28 | |||
21 | static inline struct pci_dev *node_to_k8_nb_misc(int node) | 29 | static inline struct pci_dev *node_to_k8_nb_misc(int node) |
22 | { | 30 | { |
23 | return NULL; | 31 | return NULL; |
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 4fe681de1e76..4ffa345a8ccb 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h | |||
@@ -32,7 +32,10 @@ struct kprobe; | |||
32 | 32 | ||
33 | typedef u8 kprobe_opcode_t; | 33 | typedef u8 kprobe_opcode_t; |
34 | #define BREAKPOINT_INSTRUCTION 0xcc | 34 | #define BREAKPOINT_INSTRUCTION 0xcc |
35 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | 35 | #define RELATIVEJUMP_OPCODE 0xe9 |
36 | #define RELATIVEJUMP_SIZE 5 | ||
37 | #define RELATIVECALL_OPCODE 0xe8 | ||
38 | #define RELATIVE_ADDR_SIZE 4 | ||
36 | #define MAX_INSN_SIZE 16 | 39 | #define MAX_INSN_SIZE 16 |
37 | #define MAX_STACK_SIZE 64 | 40 | #define MAX_STACK_SIZE 64 |
38 | #define MIN_STACK_SIZE(ADDR) \ | 41 | #define MIN_STACK_SIZE(ADDR) \ |
@@ -44,6 +47,17 @@ typedef u8 kprobe_opcode_t; | |||
44 | 47 | ||
45 | #define flush_insn_slot(p) do { } while (0) | 48 | #define flush_insn_slot(p) do { } while (0) |
46 | 49 | ||
50 | /* optinsn template addresses */ | ||
51 | extern kprobe_opcode_t optprobe_template_entry; | ||
52 | extern kprobe_opcode_t optprobe_template_val; | ||
53 | extern kprobe_opcode_t optprobe_template_call; | ||
54 | extern kprobe_opcode_t optprobe_template_end; | ||
55 | #define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE) | ||
56 | #define MAX_OPTINSN_SIZE \ | ||
57 | (((unsigned long)&optprobe_template_end - \ | ||
58 | (unsigned long)&optprobe_template_entry) + \ | ||
59 | MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE) | ||
60 | |||
47 | extern const int kretprobe_blacklist_size; | 61 | extern const int kretprobe_blacklist_size; |
48 | 62 | ||
49 | void arch_remove_kprobe(struct kprobe *p); | 63 | void arch_remove_kprobe(struct kprobe *p); |
@@ -64,6 +78,21 @@ struct arch_specific_insn { | |||
64 | int boostable; | 78 | int boostable; |
65 | }; | 79 | }; |
66 | 80 | ||
81 | struct arch_optimized_insn { | ||
82 | /* copy of the original instructions */ | ||
83 | kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE]; | ||
84 | /* detour code buffer */ | ||
85 | kprobe_opcode_t *insn; | ||
86 | /* the size of instructions copied to detour code buffer */ | ||
87 | size_t size; | ||
88 | }; | ||
89 | |||
90 | /* Return true (!0) if optinsn is prepared for optimization. */ | ||
91 | static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn) | ||
92 | { | ||
93 | return optinsn->size; | ||
94 | } | ||
95 | |||
67 | struct prev_kprobe { | 96 | struct prev_kprobe { |
68 | struct kprobe *kp; | 97 | struct kprobe *kp; |
69 | unsigned long status; | 98 | unsigned long status; |
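The optprobe buffer sizing above, made concrete: MAX_OPTIMIZED_LENGTH is 16 + 4 = 20 bytes of copied instructions, and MAX_OPTINSN_SIZE is the detour template span plus those 20 bytes plus a 5-byte relative jump back. A user-space sketch of the arithmetic (the template span here is a stand-in value; in the kernel it comes from the optprobe_template_* symbols):

#include <stdio.h>

#define MAX_INSN_SIZE		16
#define RELATIVE_ADDR_SIZE	4
#define RELATIVEJUMP_SIZE	5
#define MAX_OPTIMIZED_LENGTH	(MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)

int main(void)
{
	unsigned long template_span = 32;	/* stand-in for end - entry */

	printf("optinsn buffer = %lu bytes\n",	/* 32 + 20 + 5 = 57 here */
	       template_span + MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE);
	return 0;
}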
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 4a5fe914dc59..f46b79f6c16c 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h | |||
@@ -19,6 +19,8 @@ | |||
19 | #define __KVM_HAVE_MSIX | 19 | #define __KVM_HAVE_MSIX |
20 | #define __KVM_HAVE_MCE | 20 | #define __KVM_HAVE_MCE |
21 | #define __KVM_HAVE_PIT_STATE2 | 21 | #define __KVM_HAVE_PIT_STATE2 |
22 | #define __KVM_HAVE_XEN_HVM | ||
23 | #define __KVM_HAVE_VCPU_EVENTS | ||
22 | 24 | ||
23 | /* Architectural interrupt line count. */ | 25 | /* Architectural interrupt line count. */ |
24 | #define KVM_NR_INTERRUPTS 256 | 26 | #define KVM_NR_INTERRUPTS 256 |
@@ -79,6 +81,7 @@ struct kvm_ioapic_state { | |||
79 | #define KVM_IRQCHIP_PIC_MASTER 0 | 81 | #define KVM_IRQCHIP_PIC_MASTER 0 |
80 | #define KVM_IRQCHIP_PIC_SLAVE 1 | 82 | #define KVM_IRQCHIP_PIC_SLAVE 1 |
81 | #define KVM_IRQCHIP_IOAPIC 2 | 83 | #define KVM_IRQCHIP_IOAPIC 2 |
84 | #define KVM_NR_IRQCHIPS 3 | ||
82 | 85 | ||
83 | /* for KVM_GET_REGS and KVM_SET_REGS */ | 86 | /* for KVM_GET_REGS and KVM_SET_REGS */ |
84 | struct kvm_regs { | 87 | struct kvm_regs { |
@@ -250,4 +253,35 @@ struct kvm_reinject_control { | |||
250 | __u8 pit_reinject; | 253 | __u8 pit_reinject; |
251 | __u8 reserved[31]; | 254 | __u8 reserved[31]; |
252 | }; | 255 | }; |
256 | |||
257 | /* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */ | ||
258 | #define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001 | ||
259 | #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 | ||
260 | |||
261 | /* for KVM_GET/SET_VCPU_EVENTS */ | ||
262 | struct kvm_vcpu_events { | ||
263 | struct { | ||
264 | __u8 injected; | ||
265 | __u8 nr; | ||
266 | __u8 has_error_code; | ||
267 | __u8 pad; | ||
268 | __u32 error_code; | ||
269 | } exception; | ||
270 | struct { | ||
271 | __u8 injected; | ||
272 | __u8 nr; | ||
273 | __u8 soft; | ||
274 | __u8 pad; | ||
275 | } interrupt; | ||
276 | struct { | ||
277 | __u8 injected; | ||
278 | __u8 pending; | ||
279 | __u8 masked; | ||
280 | __u8 pad; | ||
281 | } nmi; | ||
282 | __u32 sipi_vector; | ||
283 | __u32 flags; | ||
284 | __u32 reserved[10]; | ||
285 | }; | ||
286 | |||
253 | #endif /* _ASM_X86_KVM_H */ | 287 | #endif /* _ASM_X86_KVM_H */ |
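Userspace drives the new structure through the KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS ioctls; on SET, only the field groups whose valid bit appears in flags are consumed. A hedged sketch (vcpu_fd is assumed to be an already-created KVM vcpu descriptor; error handling elided):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: read back the event state and cancel a pending NMI. */
static void clear_pending_nmi(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events);
	events.nmi.pending = 0;
	events.flags = KVM_VCPUEVENT_VALID_NMI_PENDING;	/* only NMI fields consumed */
	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}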
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index b7ed2c423116..7a6f54fa13ba 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt; | |||
54 | struct x86_emulate_ops { | 54 | struct x86_emulate_ops { |
55 | /* | 55 | /* |
56 | * read_std: Read bytes of standard (non-emulated/special) memory. | 56 | * read_std: Read bytes of standard (non-emulated/special) memory. |
57 | * Used for instruction fetch, stack operations, and others. | 57 | * Used for descriptor reading. |
58 | * @addr: [IN ] Linear address from which to read. | 58 | * @addr: [IN ] Linear address from which to read. |
59 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | 59 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. |
60 | * @bytes: [IN ] Number of bytes to read from memory. | 60 | * @bytes: [IN ] Number of bytes to read from memory. |
61 | */ | 61 | */ |
62 | int (*read_std)(unsigned long addr, void *val, | 62 | int (*read_std)(unsigned long addr, void *val, |
63 | unsigned int bytes, struct kvm_vcpu *vcpu); | 63 | unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); |
64 | |||
65 | /* | ||
66 | * fetch: Read bytes of standard (non-emulated/special) memory. | ||
67 | * Used for instruction fetch. | ||
68 | * @addr: [IN ] Linear address from which to read. | ||
69 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | ||
70 | * @bytes: [IN ] Number of bytes to read from memory. | ||
71 | */ | ||
72 | int (*fetch)(unsigned long addr, void *val, | ||
73 | unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); | ||
64 | 74 | ||
65 | /* | 75 | /* |
66 | * read_emulated: Read bytes from emulated/special memory area. | 76 | * read_emulated: Read bytes from emulated/special memory area. |
@@ -74,7 +84,7 @@ struct x86_emulate_ops { | |||
74 | struct kvm_vcpu *vcpu); | 84 | struct kvm_vcpu *vcpu); |
75 | 85 | ||
76 | /* | 86 | /* |
77 | * write_emulated: Read bytes from emulated/special memory area. | 87 | * write_emulated: Write bytes to emulated/special memory area. |
78 | * @addr: [IN ] Linear address to which to write. | 88 | * @addr: [IN ] Linear address to which to write. |
79 | * @val: [IN ] Value to write to memory (low-order bytes used as | 89 | * @val: [IN ] Value to write to memory (low-order bytes used as |
80 | * required). | 90 | * required). |
@@ -129,7 +139,7 @@ struct decode_cache { | |||
129 | u8 seg_override; | 139 | u8 seg_override; |
130 | unsigned int d; | 140 | unsigned int d; |
131 | unsigned long regs[NR_VCPU_REGS]; | 141 | unsigned long regs[NR_VCPU_REGS]; |
132 | unsigned long eip; | 142 | unsigned long eip, eip_orig; |
133 | /* modrm */ | 143 | /* modrm */ |
134 | u8 modrm; | 144 | u8 modrm; |
135 | u8 modrm_mod; | 145 | u8 modrm_mod; |
@@ -168,6 +178,7 @@ struct x86_emulate_ctxt { | |||
168 | 178 | ||
169 | /* Execution mode, passed to the emulator. */ | 179 | /* Execution mode, passed to the emulator. */ |
170 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ | 180 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ |
181 | #define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */ | ||
171 | #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ | 182 | #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ |
172 | #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ | 183 | #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ |
173 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ | 184 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ |
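Splitting fetch out of read_std means an emulator backend now supplies two callbacks with identical signatures, both reporting faults through the new u32 *error out-parameter; instruction fetch can then be translated as an execute access rather than a data read. A hypothetical wiring sketch: the my_* names are illustrative, and kvm_read_guest_virt()/kvm_fetch_guest_virt() stand in for whatever file-local translation helpers the backend provides (treat those names as assumptions here):

    /* Hypothetical backend: both callbacks translate a linear address
     * and copy bytes out, returning a #PF error code via *error. */
    static int my_read_std(unsigned long addr, void *val,
                           unsigned int bytes, struct kvm_vcpu *vcpu,
                           u32 *error)
    {
            return kvm_read_guest_virt(addr, val, bytes, vcpu, error);
    }

    static int my_fetch(unsigned long addr, void *val,
                        unsigned int bytes, struct kvm_vcpu *vcpu,
                        u32 *error)
    {
            /* May translate with an execute access, unlike the read path. */
            return kvm_fetch_guest_virt(addr, val, bytes, vcpu, error);
    }

    static struct x86_emulate_ops my_emulate_ops = {
            .read_std = my_read_std,
            .fetch    = my_fetch,
            /* ... remaining callbacks ... */
    };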
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d83892226f73..06d9e79ca37d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/mtrr.h> | 25 | #include <asm/mtrr.h> |
26 | #include <asm/msr-index.h> | 26 | #include <asm/msr-index.h> |
27 | 27 | ||
28 | #define KVM_MAX_VCPUS 16 | 28 | #define KVM_MAX_VCPUS 64 |
29 | #define KVM_MEMORY_SLOTS 32 | 29 | #define KVM_MEMORY_SLOTS 32 |
30 | /* memory slots that are not exposed to userspace */ | 30 | /* memory slots that are not exposed to userspace */ |
31 | #define KVM_PRIVATE_MEM_SLOTS 4 | 31 | #define KVM_PRIVATE_MEM_SLOTS 4 |
@@ -38,19 +38,6 @@ | |||
38 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ | 38 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ |
39 | 0xFFFFFF0000000000ULL) | 39 | 0xFFFFFF0000000000ULL) |
40 | 40 | ||
41 | #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ | ||
42 | (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) | ||
43 | #define KVM_GUEST_CR0_MASK \ | ||
44 | (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) | ||
45 | #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ | ||
46 | (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP) | ||
47 | #define KVM_VM_CR0_ALWAYS_ON \ | ||
48 | (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) | ||
49 | #define KVM_GUEST_CR4_MASK \ | ||
50 | (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) | ||
51 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) | ||
52 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) | ||
53 | |||
54 | #define INVALID_PAGE (~(hpa_t)0) | 41 | #define INVALID_PAGE (~(hpa_t)0) |
55 | #define UNMAPPED_GVA (~(gpa_t)0) | 42 | #define UNMAPPED_GVA (~(gpa_t)0) |
56 | 43 | ||
@@ -256,7 +243,8 @@ struct kvm_mmu { | |||
256 | void (*new_cr3)(struct kvm_vcpu *vcpu); | 243 | void (*new_cr3)(struct kvm_vcpu *vcpu); |
257 | int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); | 244 | int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); |
258 | void (*free)(struct kvm_vcpu *vcpu); | 245 | void (*free)(struct kvm_vcpu *vcpu); |
259 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); | 246 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, |
247 | u32 *error); | ||
260 | void (*prefetch_page)(struct kvm_vcpu *vcpu, | 248 | void (*prefetch_page)(struct kvm_vcpu *vcpu, |
261 | struct kvm_mmu_page *page); | 249 | struct kvm_mmu_page *page); |
262 | int (*sync_page)(struct kvm_vcpu *vcpu, | 250 | int (*sync_page)(struct kvm_vcpu *vcpu, |
@@ -282,13 +270,15 @@ struct kvm_vcpu_arch { | |||
282 | u32 regs_dirty; | 270 | u32 regs_dirty; |
283 | 271 | ||
284 | unsigned long cr0; | 272 | unsigned long cr0; |
273 | unsigned long cr0_guest_owned_bits; | ||
285 | unsigned long cr2; | 274 | unsigned long cr2; |
286 | unsigned long cr3; | 275 | unsigned long cr3; |
287 | unsigned long cr4; | 276 | unsigned long cr4; |
277 | unsigned long cr4_guest_owned_bits; | ||
288 | unsigned long cr8; | 278 | unsigned long cr8; |
289 | u32 hflags; | 279 | u32 hflags; |
290 | u64 pdptrs[4]; /* pae */ | 280 | u64 pdptrs[4]; /* pae */ |
291 | u64 shadow_efer; | 281 | u64 efer; |
292 | u64 apic_base; | 282 | u64 apic_base; |
293 | struct kvm_lapic *apic; /* kernel irqchip context */ | 283 | struct kvm_lapic *apic; /* kernel irqchip context */ |
294 | int32_t apic_arb_prio; | 284 | int32_t apic_arb_prio; |
@@ -354,7 +344,6 @@ struct kvm_vcpu_arch { | |||
354 | unsigned int time_offset; | 344 | unsigned int time_offset; |
355 | struct page *time_page; | 345 | struct page *time_page; |
356 | 346 | ||
357 | bool singlestep; /* guest is single stepped by KVM */ | ||
358 | bool nmi_pending; | 347 | bool nmi_pending; |
359 | bool nmi_injected; | 348 | bool nmi_injected; |
360 | 349 | ||
@@ -371,17 +360,31 @@ struct kvm_vcpu_arch { | |||
371 | u64 mcg_status; | 360 | u64 mcg_status; |
372 | u64 mcg_ctl; | 361 | u64 mcg_ctl; |
373 | u64 *mce_banks; | 362 | u64 *mce_banks; |
363 | |||
364 | /* used for guest single stepping over the given code position */ | ||
365 | u16 singlestep_cs; | ||
366 | unsigned long singlestep_rip; | ||
367 | /* fields used by HYPER-V emulation */ | ||
368 | u64 hv_vapic; | ||
374 | }; | 369 | }; |
375 | 370 | ||
376 | struct kvm_mem_alias { | 371 | struct kvm_mem_alias { |
377 | gfn_t base_gfn; | 372 | gfn_t base_gfn; |
378 | unsigned long npages; | 373 | unsigned long npages; |
379 | gfn_t target_gfn; | 374 | gfn_t target_gfn; |
375 | #define KVM_ALIAS_INVALID 1UL | ||
376 | unsigned long flags; | ||
380 | }; | 377 | }; |
381 | 378 | ||
382 | struct kvm_arch{ | 379 | #define KVM_ARCH_HAS_UNALIAS_INSTANTIATION |
383 | int naliases; | 380 | |
381 | struct kvm_mem_aliases { | ||
384 | struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; | 382 | struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; |
383 | int naliases; | ||
384 | }; | ||
385 | |||
386 | struct kvm_arch { | ||
387 | struct kvm_mem_aliases *aliases; | ||
385 | 388 | ||
386 | unsigned int n_free_mmu_pages; | 389 | unsigned int n_free_mmu_pages; |
387 | unsigned int n_requested_mmu_pages; | 390 | unsigned int n_requested_mmu_pages; |
@@ -397,7 +400,6 @@ struct kvm_arch{ | |||
397 | struct kvm_pic *vpic; | 400 | struct kvm_pic *vpic; |
398 | struct kvm_ioapic *vioapic; | 401 | struct kvm_ioapic *vioapic; |
399 | struct kvm_pit *vpit; | 402 | struct kvm_pit *vpit; |
400 | struct hlist_head irq_ack_notifier_list; | ||
401 | int vapics_in_nmi_mode; | 403 | int vapics_in_nmi_mode; |
402 | 404 | ||
403 | unsigned int tss_addr; | 405 | unsigned int tss_addr; |
@@ -410,8 +412,14 @@ struct kvm_arch{ | |||
410 | gpa_t ept_identity_map_addr; | 412 | gpa_t ept_identity_map_addr; |
411 | 413 | ||
412 | unsigned long irq_sources_bitmap; | 414 | unsigned long irq_sources_bitmap; |
413 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | ||
414 | u64 vm_init_tsc; | 415 | u64 vm_init_tsc; |
416 | s64 kvmclock_offset; | ||
417 | |||
418 | struct kvm_xen_hvm_config xen_hvm_config; | ||
419 | |||
420 | /* fields used by HYPER-V emulation */ | ||
421 | u64 hv_guest_os_id; | ||
422 | u64 hv_hypercall; | ||
415 | }; | 423 | }; |
416 | 424 | ||
417 | struct kvm_vm_stat { | 425 | struct kvm_vm_stat { |
@@ -461,12 +469,13 @@ struct descriptor_table { | |||
461 | struct kvm_x86_ops { | 469 | struct kvm_x86_ops { |
462 | int (*cpu_has_kvm_support)(void); /* __init */ | 470 | int (*cpu_has_kvm_support)(void); /* __init */ |
463 | int (*disabled_by_bios)(void); /* __init */ | 471 | int (*disabled_by_bios)(void); /* __init */ |
464 | void (*hardware_enable)(void *dummy); /* __init */ | 472 | int (*hardware_enable)(void *dummy); |
465 | void (*hardware_disable)(void *dummy); | 473 | void (*hardware_disable)(void *dummy); |
466 | void (*check_processor_compatibility)(void *rtn); | 474 | void (*check_processor_compatibility)(void *rtn); |
467 | int (*hardware_setup)(void); /* __init */ | 475 | int (*hardware_setup)(void); /* __init */ |
468 | void (*hardware_unsetup)(void); /* __exit */ | 476 | void (*hardware_unsetup)(void); /* __exit */ |
469 | bool (*cpu_has_accelerated_tpr)(void); | 477 | bool (*cpu_has_accelerated_tpr)(void); |
478 | void (*cpuid_update)(struct kvm_vcpu *vcpu); | ||
470 | 479 | ||
471 | /* Create, but do not attach this VCPU */ | 480 | /* Create, but do not attach this VCPU */ |
472 | struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); | 481 | struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); |
@@ -477,8 +486,8 @@ struct kvm_x86_ops { | |||
477 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); | 486 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
478 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | 487 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
479 | 488 | ||
480 | int (*set_guest_debug)(struct kvm_vcpu *vcpu, | 489 | void (*set_guest_debug)(struct kvm_vcpu *vcpu, |
481 | struct kvm_guest_debug *dbg); | 490 | struct kvm_guest_debug *dbg); |
482 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); | 491 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); |
483 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | 492 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); |
484 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); | 493 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); |
@@ -488,6 +497,7 @@ struct kvm_x86_ops { | |||
488 | void (*set_segment)(struct kvm_vcpu *vcpu, | 497 | void (*set_segment)(struct kvm_vcpu *vcpu, |
489 | struct kvm_segment *var, int seg); | 498 | struct kvm_segment *var, int seg); |
490 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); | 499 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); |
500 | void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu); | ||
491 | void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); | 501 | void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); |
492 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); | 502 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); |
493 | void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); | 503 | void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); |
@@ -497,17 +507,18 @@ struct kvm_x86_ops { | |||
497 | void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | 507 | void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); |
498 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | 508 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); |
499 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | 509 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); |
500 | unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr); | 510 | int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest); |
501 | void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value, | 511 | int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value); |
502 | int *exception); | ||
503 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); | 512 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); |
504 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); | 513 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); |
505 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); | 514 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); |
515 | void (*fpu_activate)(struct kvm_vcpu *vcpu); | ||
516 | void (*fpu_deactivate)(struct kvm_vcpu *vcpu); | ||
506 | 517 | ||
507 | void (*tlb_flush)(struct kvm_vcpu *vcpu); | 518 | void (*tlb_flush)(struct kvm_vcpu *vcpu); |
508 | 519 | ||
509 | void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); | 520 | void (*run)(struct kvm_vcpu *vcpu); |
510 | int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); | 521 | int (*handle_exit)(struct kvm_vcpu *vcpu); |
511 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | 522 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
512 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 523 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
513 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 524 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
@@ -519,13 +530,16 @@ struct kvm_x86_ops { | |||
519 | bool has_error_code, u32 error_code); | 530 | bool has_error_code, u32 error_code); |
520 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); | 531 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); |
521 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); | 532 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); |
533 | bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); | ||
534 | void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked); | ||
522 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); | 535 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); |
523 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); | 536 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); |
524 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); | 537 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); |
525 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); | 538 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); |
526 | int (*get_tdp_level)(void); | 539 | int (*get_tdp_level)(void); |
527 | u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); | 540 | u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); |
528 | bool (*gb_page_enable)(void); | 541 | int (*get_lpage_level)(void); |
542 | bool (*rdtscp_supported)(void); | ||
529 | 543 | ||
530 | const struct trace_print_flags *exit_reasons_str; | 544 | const struct trace_print_flags *exit_reasons_str; |
531 | }; | 545 | }; |
@@ -568,7 +582,7 @@ enum emulation_result { | |||
568 | #define EMULTYPE_NO_DECODE (1 << 0) | 582 | #define EMULTYPE_NO_DECODE (1 << 0) |
569 | #define EMULTYPE_TRAP_UD (1 << 1) | 583 | #define EMULTYPE_TRAP_UD (1 << 1) |
570 | #define EMULTYPE_SKIP (1 << 2) | 584 | #define EMULTYPE_SKIP (1 << 2) |
571 | int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, | 585 | int emulate_instruction(struct kvm_vcpu *vcpu, |
572 | unsigned long cr2, u16 error_code, int emulation_type); | 586 | unsigned long cr2, u16 error_code, int emulation_type); |
573 | void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context); | 587 | void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context); |
574 | void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); | 588 | void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); |
@@ -585,9 +599,9 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | |||
585 | 599 | ||
586 | struct x86_emulate_ctxt; | 600 | struct x86_emulate_ctxt; |
587 | 601 | ||
588 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 602 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, |
589 | int size, unsigned port); | 603 | int size, unsigned port); |
590 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 604 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, |
591 | int size, unsigned long count, int down, | 605 | int size, unsigned long count, int down, |
592 | gva_t address, int rep, unsigned port); | 606 | gva_t address, int rep, unsigned port); |
593 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); | 607 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); |
@@ -600,8 +614,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, | |||
600 | unsigned long value); | 614 | unsigned long value); |
601 | 615 | ||
602 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); | 616 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); |
603 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | 617 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); |
604 | int type_bits, int seg); | ||
605 | 618 | ||
606 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); | 619 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); |
607 | 620 | ||
@@ -616,6 +629,9 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | |||
616 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | 629 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); |
617 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); | 630 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
618 | 631 | ||
632 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); | ||
633 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | ||
634 | |||
619 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); | 635 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
620 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | 636 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
621 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, | 637 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, |
@@ -644,6 +660,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | |||
644 | int kvm_mmu_load(struct kvm_vcpu *vcpu); | 660 | int kvm_mmu_load(struct kvm_vcpu *vcpu); |
645 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); | 661 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); |
646 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); | 662 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); |
663 | gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
664 | gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
665 | gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
666 | gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
647 | 667 | ||
648 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); | 668 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); |
649 | 669 | ||
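The four kvm_mmu_gva_to_gpa_*() helpers declared above wrap the reworked gva_to_gpa() MMU callback with a fixed access type (read, fetch, write, system) and hand the fault back through the u32 *error out-parameter instead of injecting it themselves. A sketch of the expected calling pattern; the helper function and its read step are illustrative, only the translation and injection calls come from this header:

    /* Illustrative: read through a guest-virtual address, injecting
     * the page fault on a failed translation. */
    static int read_guest_va(struct kvm_vcpu *vcpu, gva_t gva,
                             void *data, unsigned int bytes)
    {
            u32 error;
            gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &error);

            if (gpa == UNMAPPED_GVA) {
                    /* error holds a #PF error code for the guest. */
                    kvm_inject_page_fault(vcpu, gva, error);
                    return X86EMUL_PROPAGATE_FAULT;
            }
            return kvm_read_guest(vcpu->kvm, gpa, data, bytes);
    }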
@@ -657,6 +677,7 @@ void kvm_disable_tdp(void); | |||
657 | 677 | ||
658 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); | 678 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); |
659 | int complete_pio(struct kvm_vcpu *vcpu); | 679 | int complete_pio(struct kvm_vcpu *vcpu); |
680 | bool kvm_check_iopl(struct kvm_vcpu *vcpu); | ||
660 | 681 | ||
661 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); | 682 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); |
662 | 683 | ||
@@ -802,4 +823,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | |||
802 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | 823 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
803 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); | 824 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
804 | 825 | ||
826 | void kvm_define_shared_msr(unsigned index, u32 msr); | ||
827 | void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); | ||
828 | |||
805 | #endif /* _ASM_X86_KVM_HOST_H */ | 829 | #endif /* _ASM_X86_KVM_HOST_H */ |
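kvm_define_shared_msr()/kvm_set_shared_msr() let a hardware backend register host MSRs that only need restoring on return to userspace rather than on every vmexit. A sketch of the intended use; slot 0 and the wrapper functions are this sketch's assumptions, MSR_TSC_AUX is the new definition from msr-index.h below:

    /* Hypothetical setup, once per boot: slot 0 tracks MSR_TSC_AUX. */
    static void __init my_shared_msr_setup(void)
    {
            kvm_define_shared_msr(0, MSR_TSC_AUX);
    }

    /* Illustrative guest-entry path: */
    static void my_load_guest_tsc_aux(u64 guest_tsc_aux)
    {
            /* mask = all bits; the host value is saved and written back
             * lazily instead of around every exit. */
            kvm_set_shared_msr(0, guest_tsc_aux, -1ull);
    }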
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index c584076a47f4..ffae1420e7d7 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_X86_KVM_PARA_H | 2 | #define _ASM_X86_KVM_PARA_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/hyperv.h> | ||
5 | 6 | ||
6 | /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It | 7 | /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It |
7 | * should be used to determine that a VM is running under KVM. | 8 | * should be used to determine that a VM is running under KVM. |
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index ba0eed8aa1a6..b60f2924c413 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h | |||
@@ -28,22 +28,39 @@ | |||
28 | 28 | ||
29 | #ifndef __ASSEMBLY__ | 29 | #ifndef __ASSEMBLY__ |
30 | #include <asm/hw_irq.h> | 30 | #include <asm/hw_irq.h> |
31 | #include <asm/kvm_para.h> | ||
32 | 31 | ||
33 | /*G:030 | 32 | /*G:030 |
34 | * But first, how does our Guest contact the Host to ask for privileged | 33 | * But first, how does our Guest contact the Host to ask for privileged |
35 | * operations? There are two ways: the direct way is to make a "hypercall", | 34 | * operations? There are two ways: the direct way is to make a "hypercall", |
36 | * to make requests of the Host Itself. | 35 | * to make requests of the Host Itself. |
37 | * | 36 | * |
38 | * We use the KVM hypercall mechanism, though completely different hypercall | 37 | * Our hypercall mechanism uses the highest unused trap code (traps 32 and |
39 | * numbers. Seventeen hypercalls are available: the hypercall number is put in | 38 | * above are used by real hardware interrupts). Seventeen hypercalls are |
40 | * the %eax register, and the arguments (when required) are placed in %ebx, | 39 | * available: the hypercall number is put in the %eax register, and the |
41 | * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. | 40 | * arguments (when required) are placed in %ebx, %ecx, %edx and %esi. |
41 | * If a return value makes sense, it's returned in %eax. | ||
42 | * | 42 | * |
43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful | 43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful |
44 | * Host, rather than returning failure. This reflects Winston Churchill's | 44 | * Host, rather than returning failure. This reflects Winston Churchill's |
45 | * definition of a gentleman: "someone who is only rude intentionally". | 45 | * definition of a gentleman: "someone who is only rude intentionally". |
46 | :*/ | 46 | */ |
47 | static inline unsigned long | ||
48 | hcall(unsigned long call, | ||
49 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
50 | unsigned long arg4) | ||
51 | { | ||
52 | /* "int" is the Intel instruction to trigger a trap. */ | ||
53 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | ||
54 | /* The call in %eax (aka "a") might be overwritten */ | ||
55 | : "=a"(call) | ||
56 | /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */ | ||
57 | : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4) | ||
58 | /* "memory" means this might write somewhere in memory. | ||
59 | * This isn't true for all calls, but it's safe to tell | ||
60 | * gcc that it might happen so it doesn't get clever. */ | ||
61 | : "memory"); | ||
62 | return call; | ||
63 | } | ||
47 | 64 | ||
48 | /* Can't use our min() macro here: needs to be a constant */ | 65 | /* Can't use our min() macro here: needs to be a constant */ |
49 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | 66 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) |
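With the asm/kvm_para.h dependency gone, Guests trap into the Host directly through the hcall() wrapper above. A call-site sketch, assuming LHCALL_FLUSH_TLB (defined earlier in this header, among the seventeen call numbers) takes a single "flush everything" argument:

    /* Illustrative guest-side helper: */
    static void guest_flush_tlb(void)
    {
            /* Ask the Host to drop all of this Guest's shadow mappings. */
            hcall(LHCALL_FLUSH_TLB, 1, 0, 0, 0);
    }

Unused argument slots are simply passed as zero; the trap itself is the int $LGUEST_TRAP_ENTRY inside hcall().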
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 47b9b6f19057..2e9972468a5d 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h | |||
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l) | |||
195 | #define __local_add(i, l) local_add((i), (l)) | 195 | #define __local_add(i, l) local_add((i), (l)) |
196 | #define __local_sub(i, l) local_sub((i), (l)) | 196 | #define __local_sub(i, l) local_sub((i), (l)) |
197 | 197 | ||
198 | /* Use these for per-cpu local_t variables: on some archs they are | ||
199 | * much more efficient than these naive implementations. Note they take | ||
200 | * a variable, not an address. | ||
201 | * | ||
202 | * X86_64: This could be done better if we moved the per cpu data directly | ||
203 | * after GS. | ||
204 | */ | ||
205 | |||
206 | /* Need to disable preemption for the cpu local counters otherwise we could | ||
207 | still access a variable of a previous CPU in a non atomic way. */ | ||
208 | #define cpu_local_wrap_v(l) \ | ||
209 | ({ \ | ||
210 | local_t res__; \ | ||
211 | preempt_disable(); \ | ||
212 | res__ = (l); \ | ||
213 | preempt_enable(); \ | ||
214 | res__; \ | ||
215 | }) | ||
216 | #define cpu_local_wrap(l) \ | ||
217 | ({ \ | ||
218 | preempt_disable(); \ | ||
219 | (l); \ | ||
220 | preempt_enable(); \ | ||
221 | }) \ | ||
222 | |||
223 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l)))) | ||
224 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i))) | ||
225 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l)))) | ||
226 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l)))) | ||
227 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l)))) | ||
228 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l)))) | ||
229 | |||
230 | #define __cpu_local_inc(l) cpu_local_inc((l)) | ||
231 | #define __cpu_local_dec(l) cpu_local_dec((l)) | ||
232 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | ||
233 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | ||
234 | |||
235 | #endif /* _ASM_X86_LOCAL_H */ | 198 | #endif /* _ASM_X86_LOCAL_H */ |
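The deleted cpu_local_*() wrappers existed only to bracket local_t access with preempt_disable()/preempt_enable(); callers are expected to move to the generic this_cpu_*() accessors instead. A minimal sketch of the replacement pattern, assuming a plain per-CPU counter rather than a local_t (the variable name is illustrative):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_event_count);

    static void note_event(void)
    {
            /* On x86 this compiles to a single %gs-relative increment,
             * so no explicit preemption bracketing is needed. */
            this_cpu_inc(my_event_count);
    }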
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index f1363b72364f..6c3fdd631ed3 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -108,8 +108,11 @@ struct mce_log { | |||
108 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) | 108 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) |
109 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) | 109 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) |
110 | 110 | ||
111 | |||
111 | #ifdef __KERNEL__ | 112 | #ifdef __KERNEL__ |
112 | 113 | ||
114 | extern struct atomic_notifier_head x86_mce_decoder_chain; | ||
115 | |||
113 | #include <linux/percpu.h> | 116 | #include <linux/percpu.h> |
114 | #include <linux/init.h> | 117 | #include <linux/init.h> |
115 | #include <asm/atomic.h> | 118 | #include <asm/atomic.h> |
@@ -118,9 +121,11 @@ extern int mce_disabled; | |||
118 | extern int mce_p5_enabled; | 121 | extern int mce_p5_enabled; |
119 | 122 | ||
120 | #ifdef CONFIG_X86_MCE | 123 | #ifdef CONFIG_X86_MCE |
121 | void mcheck_init(struct cpuinfo_x86 *c); | 124 | int mcheck_init(void); |
125 | void mcheck_cpu_init(struct cpuinfo_x86 *c); | ||
122 | #else | 126 | #else |
123 | static inline void mcheck_init(struct cpuinfo_x86 *c) {} | 127 | static inline int mcheck_init(void) { return 0; } |
128 | static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} | ||
124 | #endif | 129 | #endif |
125 | 130 | ||
126 | #ifdef CONFIG_X86_ANCIENT_MCE | 131 | #ifdef CONFIG_X86_ANCIENT_MCE |
@@ -214,5 +219,11 @@ void intel_init_thermal(struct cpuinfo_x86 *c); | |||
214 | 219 | ||
215 | void mce_log_therm_throt_event(__u64 status); | 220 | void mce_log_therm_throt_event(__u64 status); |
216 | 221 | ||
222 | #ifdef CONFIG_X86_THERMAL_VECTOR | ||
223 | extern void mcheck_intel_therm_init(void); | ||
224 | #else | ||
225 | static inline void mcheck_intel_therm_init(void) { } | ||
226 | #endif | ||
227 | |||
217 | #endif /* __KERNEL__ */ | 228 | #endif /* __KERNEL__ */ |
218 | #endif /* _ASM_X86_MCE_H */ | 229 | #endif /* _ASM_X86_MCE_H */ |
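Machine-check initialization is now split: mcheck_init() runs once during early boot and returns a status, while mcheck_cpu_init() configures each processor as it comes online. A sketch of the intended ordering; the wrapper functions are hypothetical, only the two mcheck calls come from the header above:

    /* Hypothetical early-boot path, called once: */
    void __init my_arch_setup(void)
    {
            mcheck_init();            /* common MCE state, before any CPU */
    }

    /* Hypothetical per-CPU bring-up path: */
    void __cpuinit my_cpu_init(struct cpuinfo_x86 *c)
    {
            mcheck_cpu_init(c);       /* bank setup etc. for this CPU */
    }

On !CONFIG_X86_MCE builds both calls compile away via the stubs shown in the hunk.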
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index ede6998bd92c..91df7c51806c 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h | |||
@@ -47,7 +47,7 @@ static inline void resume_map_numa_kva(pgd_t *pgd) {} | |||
47 | /* | 47 | /* |
48 | * generic node memory support, the following assumptions apply: | 48 | * generic node memory support, the following assumptions apply: |
49 | * | 49 | * |
50 | * 1) memory comes in 64Mb contigious chunks which are either present or not | 50 | * 1) memory comes in 64Mb contiguous chunks which are either present or not |
51 | * 2) we will not have more than 64Gb in total | 51 | * 2) we will not have more than 64Gb in total |
52 | * | 52 | * |
53 | * for now assume that 64Gb is max amount of RAM for whole system | 53 | * for now assume that 64Gb is max amount of RAM for whole system |
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h index a29f48c2a322..288b96f815a6 100644 --- a/arch/x86/include/asm/mmzone_64.h +++ b/arch/x86/include/asm/mmzone_64.h | |||
@@ -39,11 +39,5 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | |||
39 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 39 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
40 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | 40 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ |
41 | NODE_DATA(nid)->node_spanned_pages) | 41 | NODE_DATA(nid)->node_spanned_pages) |
42 | |||
43 | #ifdef CONFIG_NUMA_EMU | ||
44 | #define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) | ||
45 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) | ||
46 | #endif | ||
47 | |||
48 | #endif | 42 | #endif |
49 | #endif /* _ASM_X86_MMZONE_64_H */ | 43 | #endif /* _ASM_X86_MMZONE_64_H */ |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 79c94500c0bb..d8bf23a88d05 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -71,12 +71,7 @@ static inline void early_get_smp_config(void) | |||
71 | 71 | ||
72 | static inline void find_smp_config(void) | 72 | static inline void find_smp_config(void) |
73 | { | 73 | { |
74 | x86_init.mpparse.find_smp_config(1); | 74 | x86_init.mpparse.find_smp_config(); |
75 | } | ||
76 | |||
77 | static inline void early_find_smp_config(void) | ||
78 | { | ||
79 | x86_init.mpparse.find_smp_config(0); | ||
80 | } | 75 | } |
81 | 76 | ||
82 | #ifdef CONFIG_X86_MPPARSE | 77 | #ifdef CONFIG_X86_MPPARSE |
@@ -89,7 +84,7 @@ extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str); | |||
89 | # else | 84 | # else |
90 | # define default_mpc_oem_bus_info NULL | 85 | # define default_mpc_oem_bus_info NULL |
91 | # endif | 86 | # endif |
92 | extern void default_find_smp_config(unsigned int reserve); | 87 | extern void default_find_smp_config(void); |
93 | extern void default_get_smp_config(unsigned int early); | 88 | extern void default_get_smp_config(unsigned int early); |
94 | #else | 89 | #else |
95 | static inline void early_reserve_e820_mpc_new(void) { } | 90 | static inline void early_reserve_e820_mpc_new(void) { } |
@@ -97,7 +92,7 @@ static inline void early_reserve_e820_mpc_new(void) { } | |||
97 | #define default_mpc_apic_id NULL | 92 | #define default_mpc_apic_id NULL |
98 | #define default_smp_read_mpc_oem NULL | 93 | #define default_smp_read_mpc_oem NULL |
99 | #define default_mpc_oem_bus_info NULL | 94 | #define default_mpc_oem_bus_info NULL |
100 | #define default_find_smp_config x86_init_uint_noop | 95 | #define default_find_smp_config x86_init_noop |
101 | #define default_get_smp_config x86_init_uint_noop | 96 | #define default_get_smp_config x86_init_uint_noop |
102 | #endif | 97 | #endif |
103 | 98 | ||
@@ -163,14 +158,16 @@ typedef struct physid_mask physid_mask_t; | |||
163 | #define physids_shift_left(d, s, n) \ | 158 | #define physids_shift_left(d, s, n) \ |
164 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | 159 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) |
165 | 160 | ||
166 | #define physids_coerce(map) ((map).mask[0]) | 161 | static inline unsigned long physids_coerce(physid_mask_t *map) |
162 | { | ||
163 | return map->mask[0]; | ||
164 | } | ||
167 | 165 | ||
168 | #define physids_promote(physids) \ | 166 | static inline void physids_promote(unsigned long physids, physid_mask_t *map) |
169 | ({ \ | 167 | { |
170 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | 168 | physids_clear(*map); |
171 | __physid_mask.mask[0] = physids; \ | 169 | map->mask[0] = physids; |
172 | __physid_mask; \ | 170 | } |
173 | }) | ||
174 | 171 | ||
175 | /* Note: will create very large stack frames if physid_mask_t is big */ | 172 | /* Note: will create very large stack frames if physid_mask_t is big */ |
176 | #define physid_mask_of_physid(physid) \ | 173 | #define physid_mask_of_physid(physid) \ |
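physids_coerce() and physids_promote() are now proper inline functions operating through a physid_mask_t pointer, so the promote direction takes an explicit destination. A usage sketch built directly from the new signatures above:

    static void physid_demo(void)
    {
            physid_mask_t map;
            unsigned long low;

            /* Promote: build a mask whose low machine word is 0xff. */
            physids_promote(0xff, &map);

            /* Coerce: collapse it back down again; low == 0xff. */
            low = physids_coerce(&map);
            (void)low;
    }

Passing by pointer also avoids copying the whole bitmap through the old statement-expression macro.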
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h new file mode 100644 index 000000000000..451d30e7f62d --- /dev/null +++ b/arch/x86/include/asm/mrst.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * mrst.h: Intel Moorestown platform specific setup code | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | #ifndef _ASM_X86_MRST_H | ||
12 | #define _ASM_X86_MRST_H | ||
13 | extern int pci_mrst_init(void); | ||
14 | int __init sfi_parse_mrtc(struct sfi_table_header *table); | ||
15 | |||
16 | #define SFI_MTMR_MAX_NUM 8 | ||
17 | #define SFI_MRTC_MAX 8 | ||
18 | |||
19 | #endif /* _ASM_X86_MRST_H */ | ||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4ffe09b2ad75..4604e6a54d36 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ | 12 | #define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ |
13 | #define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ | 13 | #define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ |
14 | #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */ | 14 | #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */ |
15 | #define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */ | ||
15 | 16 | ||
16 | /* EFER bits: */ | 17 | /* EFER bits: */ |
17 | #define _EFER_SCE 0 /* SYSCALL/SYSRET */ | 18 | #define _EFER_SCE 0 /* SYSCALL/SYSRET */ |
@@ -104,6 +105,8 @@ | |||
104 | #define MSR_AMD64_PATCH_LEVEL 0x0000008b | 105 | #define MSR_AMD64_PATCH_LEVEL 0x0000008b |
105 | #define MSR_AMD64_NB_CFG 0xc001001f | 106 | #define MSR_AMD64_NB_CFG 0xc001001f |
106 | #define MSR_AMD64_PATCH_LOADER 0xc0010020 | 107 | #define MSR_AMD64_PATCH_LOADER 0xc0010020 |
108 | #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 | ||
109 | #define MSR_AMD64_OSVW_STATUS 0xc0010141 | ||
107 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 | 110 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
108 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 | 111 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
109 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 | 112 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
@@ -123,6 +126,7 @@ | |||
123 | #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 | 126 | #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 |
124 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff | 127 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff |
125 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 | 128 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
129 | #define MSR_FAM10H_NODE_ID 0xc001100c | ||
126 | 130 | ||
127 | /* K8 MSRs */ | 131 | /* K8 MSRs */ |
128 | #define MSR_K8_TOP_MEM1 0xc001001a | 132 | #define MSR_K8_TOP_MEM1 0xc001001a |
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 7e2b6ba962ff..c5bc4c2d33f5 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h | |||
@@ -27,6 +27,18 @@ struct msr { | |||
27 | }; | 27 | }; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | struct msr_info { | ||
31 | u32 msr_no; | ||
32 | struct msr reg; | ||
33 | struct msr *msrs; | ||
34 | int err; | ||
35 | }; | ||
36 | |||
37 | struct msr_regs_info { | ||
38 | u32 *regs; | ||
39 | int err; | ||
40 | }; | ||
41 | |||
30 | static inline unsigned long long native_read_tscp(unsigned int *aux) | 42 | static inline unsigned long long native_read_tscp(unsigned int *aux) |
31 | { | 43 | { |
32 | unsigned long low, high; | 44 | unsigned long low, high; |
@@ -240,15 +252,18 @@ do { \ | |||
240 | #define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ | 252 | #define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ |
241 | (u32)((val) >> 32)) | 253 | (u32)((val) >> 32)) |
242 | 254 | ||
243 | #define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2)) | 255 | #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2)) |
256 | |||
257 | #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) | ||
244 | 258 | ||
245 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) | 259 | struct msr *msrs_alloc(void); |
260 | void msrs_free(struct msr *msrs); | ||
246 | 261 | ||
247 | #ifdef CONFIG_SMP | 262 | #ifdef CONFIG_SMP |
248 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 263 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
249 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 264 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
250 | void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | 265 | void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
251 | void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | 266 | void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
252 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 267 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
253 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 268 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
254 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | 269 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); |
@@ -264,12 +279,12 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
264 | wrmsr(msr_no, l, h); | 279 | wrmsr(msr_no, l, h); |
265 | return 0; | 280 | return 0; |
266 | } | 281 | } |
267 | static inline void rdmsr_on_cpus(const cpumask_t *m, u32 msr_no, | 282 | static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
268 | struct msr *msrs) | 283 | struct msr *msrs) |
269 | { | 284 | { |
270 | rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); | 285 | rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); |
271 | } | 286 | } |
272 | static inline void wrmsr_on_cpus(const cpumask_t *m, u32 msr_no, | 287 | static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
273 | struct msr *msrs) | 288 | struct msr *msrs) |
274 | { | 289 | { |
275 | wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); | 290 | wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); |
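The new msrs_alloc()/msrs_free() pair is meant to be used with the cpumask-based rdmsr_on_cpus()/wrmsr_on_cpus(): allocate one struct msr per CPU, fill them all in one shot, then free. A kernel-context sketch, borrowing MSR_AMD64_NB_CFG from the msr-index.h hunk above as an arbitrary register and assuming the per-CPU slots are reached via per_cpu_ptr():

    #include <linux/kernel.h>
    #include <linux/smp.h>
    #include <asm/msr.h>

    static void dump_nb_cfg(void)
    {
            struct msr *msrs = msrs_alloc();   /* one struct msr per CPU */
            int cpu;

            if (!msrs)
                    return;

            rdmsr_on_cpus(cpu_online_mask, MSR_AMD64_NB_CFG, msrs);

            for_each_online_cpu(cpu) {
                    struct msr *reg = per_cpu_ptr(msrs, cpu);

                    pr_info("cpu %d: NB_CFG = %#llx\n", cpu,
                            (unsigned long long)reg->q);
            }
            msrs_free(msrs);
    }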
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 139d4c1a33a7..93da9c3f3341 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -19,7 +19,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | |||
19 | extern int check_nmi_watchdog(void); | 19 | extern int check_nmi_watchdog(void); |
20 | extern int nmi_watchdog_enabled; | 20 | extern int nmi_watchdog_enabled; |
21 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | 21 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); |
22 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
23 | extern int reserve_perfctr_nmi(unsigned int); | 22 | extern int reserve_perfctr_nmi(unsigned int); |
24 | extern void release_perfctr_nmi(unsigned int); | 23 | extern void release_perfctr_nmi(unsigned int); |
25 | extern int reserve_evntsel_nmi(unsigned int); | 24 | extern int reserve_evntsel_nmi(unsigned int); |
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index c4ae822e415f..823e070e7c26 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h | |||
@@ -36,6 +36,11 @@ extern void __cpuinit numa_set_node(int cpu, int node); | |||
36 | extern void __cpuinit numa_clear_node(int cpu); | 36 | extern void __cpuinit numa_clear_node(int cpu); |
37 | extern void __cpuinit numa_add_cpu(int cpu); | 37 | extern void __cpuinit numa_add_cpu(int cpu); |
38 | extern void __cpuinit numa_remove_cpu(int cpu); | 38 | extern void __cpuinit numa_remove_cpu(int cpu); |
39 | |||
40 | #ifdef CONFIG_NUMA_EMU | ||
41 | #define FAKE_NODE_MIN_SIZE ((u64)64 << 20) | ||
42 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) | ||
43 | #endif /* CONFIG_NUMA_EMU */ | ||
39 | #else | 44 | #else |
40 | static inline void init_cpu_to_node(void) { } | 45 | static inline void init_cpu_to_node(void) { } |
41 | static inline void numa_set_node(int cpu, int node) { } | 46 | static inline void numa_set_node(int cpu, int node) { } |
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h index 9f0a5f5d29ec..37c516545ec8 100644 --- a/arch/x86/include/asm/numaq.h +++ b/arch/x86/include/asm/numaq.h | |||
@@ -30,9 +30,14 @@ | |||
30 | 30 | ||
31 | extern int found_numaq; | 31 | extern int found_numaq; |
32 | extern int get_memcfg_numaq(void); | 32 | extern int get_memcfg_numaq(void); |
33 | extern int pci_numaq_init(void); | ||
33 | 34 | ||
34 | extern void *xquad_portio; | 35 | extern void *xquad_portio; |
35 | 36 | ||
37 | #define XQUAD_PORTIO_BASE 0xfe400000 | ||
38 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ | ||
39 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) | ||
40 | |||
36 | /* | 41 | /* |
37 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the | 42 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the |
38 | */ | 43 | */ |
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h index 834a30295fab..101229b0d8ed 100644 --- a/arch/x86/include/asm/olpc.h +++ b/arch/x86/include/asm/olpc.h | |||
@@ -13,7 +13,6 @@ struct olpc_platform_t { | |||
13 | 13 | ||
14 | #define OLPC_F_PRESENT 0x01 | 14 | #define OLPC_F_PRESENT 0x01 |
15 | #define OLPC_F_DCON 0x02 | 15 | #define OLPC_F_DCON 0x02 |
16 | #define OLPC_F_VSA 0x04 | ||
17 | 16 | ||
18 | #ifdef CONFIG_OLPC | 17 | #ifdef CONFIG_OLPC |
19 | 18 | ||
@@ -51,18 +50,6 @@ static inline int olpc_has_dcon(void) | |||
51 | } | 50 | } |
52 | 51 | ||
53 | /* | 52 | /* |
54 | * The VSA is software from AMD that typical Geode bioses will include. | ||
55 | * It is used to emulate the PCI bus, VGA, etc. OLPC's Open Firmware does | ||
56 | * not include the VSA; instead, PCI is emulated by the kernel. | ||
57 | * | ||
58 | * The VSA is described further in arch/x86/pci/olpc.c. | ||
59 | */ | ||
60 | static inline int olpc_has_vsa(void) | ||
61 | { | ||
62 | return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * The "Mass Production" version of OLPC's XO is identified as being model | 53 | * The "Mass Production" version of OLPC's XO is identified as being model |
67 | * C2. During the prototype phase, the following models (in chronological | 54 | * C2. During the prototype phase, the following models (in chronological |
68 | * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models | 55 | * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models |
@@ -87,13 +74,10 @@ static inline int olpc_has_dcon(void) | |||
87 | return 0; | 74 | return 0; |
88 | } | 75 | } |
89 | 76 | ||
90 | static inline int olpc_has_vsa(void) | ||
91 | { | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | #endif | 77 | #endif |
96 | 78 | ||
79 | extern int pci_olpc_init(void); | ||
80 | |||
97 | /* EC related functions */ | 81 | /* EC related functions */ |
98 | 82 | ||
99 | extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, | 83 | extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, |
@@ -120,7 +104,7 @@ extern int olpc_ec_mask_unset(uint8_t bits); | |||
120 | 104 | ||
121 | /* GPIO assignments */ | 105 | /* GPIO assignments */ |
122 | 106 | ||
123 | #define OLPC_GPIO_MIC_AC geode_gpio(1) | 107 | #define OLPC_GPIO_MIC_AC 1 |
124 | #define OLPC_GPIO_DCON_IRQ geode_gpio(7) | 108 | #define OLPC_GPIO_DCON_IRQ geode_gpio(7) |
125 | #define OLPC_GPIO_THRM_ALRM geode_gpio(10) | 109 | #define OLPC_GPIO_THRM_ALRM geode_gpio(10) |
126 | #define OLPC_GPIO_SMB_CLK geode_gpio(14) | 110 | #define OLPC_GPIO_SMB_CLK geode_gpio(14) |
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 6473f5ccff85..a667f24c7254 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -40,7 +40,6 @@ | |||
40 | 40 | ||
41 | #ifndef __ASSEMBLY__ | 41 | #ifndef __ASSEMBLY__ |
42 | 42 | ||
43 | extern int page_is_ram(unsigned long pagenr); | ||
44 | extern int devmem_is_allowed(unsigned long pagenr); | 43 | extern int devmem_is_allowed(unsigned long pagenr); |
45 | 44 | ||
46 | extern unsigned long max_low_pfn_mapped; | 45 | extern unsigned long max_low_pfn_mapped; |
@@ -49,7 +48,8 @@ extern unsigned long max_pfn_mapped; | |||
49 | extern unsigned long init_memory_mapping(unsigned long start, | 48 | extern unsigned long init_memory_mapping(unsigned long start, |
50 | unsigned long end); | 49 | unsigned long end); |
51 | 50 | ||
52 | extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); | 51 | extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn, |
52 | int acpi, int k8); | ||
53 | extern void free_initmem(void); | 53 | extern void free_initmem(void); |
54 | 54 | ||
55 | #endif /* !__ASSEMBLY__ */ | 55 | #endif /* !__ASSEMBLY__ */ |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index efb38994859c..5653f43d90e5 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn) | |||
435 | PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); | 435 | PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); |
436 | } | 436 | } |
437 | 437 | ||
438 | #ifdef CONFIG_HIGHPTE | ||
439 | static inline void *kmap_atomic_pte(struct page *page, enum km_type type) | ||
440 | { | ||
441 | unsigned long ret; | ||
442 | ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type); | ||
443 | return (void *)ret; | ||
444 | } | ||
445 | #endif | ||
446 | |||
447 | static inline void pte_update(struct mm_struct *mm, unsigned long addr, | 438 | static inline void pte_update(struct mm_struct *mm, unsigned long addr, |
448 | pte_t *ptep) | 439 | pte_t *ptep) |
449 | { | 440 | { |
@@ -731,34 +722,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, | |||
731 | 722 | ||
732 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) | 723 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) |
733 | 724 | ||
734 | static inline int __raw_spin_is_locked(struct raw_spinlock *lock) | 725 | static inline int arch_spin_is_locked(struct arch_spinlock *lock) |
735 | { | 726 | { |
736 | return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); | 727 | return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); |
737 | } | 728 | } |
738 | 729 | ||
739 | static inline int __raw_spin_is_contended(struct raw_spinlock *lock) | 730 | static inline int arch_spin_is_contended(struct arch_spinlock *lock) |
740 | { | 731 | { |
741 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); | 732 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); |
742 | } | 733 | } |
743 | #define __raw_spin_is_contended __raw_spin_is_contended | 734 | #define arch_spin_is_contended arch_spin_is_contended |
744 | 735 | ||
745 | static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) | 736 | static __always_inline void arch_spin_lock(struct arch_spinlock *lock) |
746 | { | 737 | { |
747 | PVOP_VCALL1(pv_lock_ops.spin_lock, lock); | 738 | PVOP_VCALL1(pv_lock_ops.spin_lock, lock); |
748 | } | 739 | } |
749 | 740 | ||
750 | static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, | 741 | static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, |
751 | unsigned long flags) | 742 | unsigned long flags) |
752 | { | 743 | { |
753 | PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); | 744 | PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); |
754 | } | 745 | } |
755 | 746 | ||
756 | static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) | 747 | static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) |
757 | { | 748 | { |
758 | return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); | 749 | return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); |
759 | } | 750 | } |
760 | 751 | ||
761 | static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) | 752 | static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) |
762 | { | 753 | { |
763 | PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); | 754 | PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); |
764 | } | 755 | } |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 9357473c8da0..db9ef5532341 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -304,10 +304,6 @@ struct pv_mmu_ops { | |||
304 | #endif /* PAGETABLE_LEVELS == 4 */ | 304 | #endif /* PAGETABLE_LEVELS == 4 */ |
305 | #endif /* PAGETABLE_LEVELS >= 3 */ | 305 | #endif /* PAGETABLE_LEVELS >= 3 */ |
306 | 306 | ||
307 | #ifdef CONFIG_HIGHPTE | ||
308 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | ||
309 | #endif | ||
310 | |||
311 | struct pv_lazy_ops lazy_mode; | 307 | struct pv_lazy_ops lazy_mode; |
312 | 308 | ||
313 | /* dom0 ops */ | 309 | /* dom0 ops */ |
@@ -318,14 +314,14 @@ struct pv_mmu_ops { | |||
318 | phys_addr_t phys, pgprot_t flags); | 314 | phys_addr_t phys, pgprot_t flags); |
319 | }; | 315 | }; |
320 | 316 | ||
321 | struct raw_spinlock; | 317 | struct arch_spinlock; |
322 | struct pv_lock_ops { | 318 | struct pv_lock_ops { |
323 | int (*spin_is_locked)(struct raw_spinlock *lock); | 319 | int (*spin_is_locked)(struct arch_spinlock *lock); |
324 | int (*spin_is_contended)(struct raw_spinlock *lock); | 320 | int (*spin_is_contended)(struct arch_spinlock *lock); |
325 | void (*spin_lock)(struct raw_spinlock *lock); | 321 | void (*spin_lock)(struct arch_spinlock *lock); |
326 | void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); | 322 | void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); |
327 | int (*spin_trylock)(struct raw_spinlock *lock); | 323 | int (*spin_trylock)(struct arch_spinlock *lock); |
328 | void (*spin_unlock)(struct raw_spinlock *lock); | 324 | void (*spin_unlock)(struct arch_spinlock *lock); |
329 | }; | 325 | }; |
330 | 326 | ||
331 | /* This contains all the paravirt structures: we get a convenient | 327 | /* This contains all the paravirt structures: we get a convenient |
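With the rename of raw_spinlock to arch_spinlock, every pv_lock_ops callback now takes a struct arch_spinlock *. A hypothetical wiring sketch that forwards to the native ticket-lock helpers from asm/spinlock.h; the my_* names are illustrative and the remaining ops are elided:

    static int my_spin_is_locked(struct arch_spinlock *lock)
    {
            return __ticket_spin_is_locked(lock);
    }

    static void my_spin_lock(struct arch_spinlock *lock)
    {
            __ticket_spin_lock(lock);
    }

    static void my_spin_unlock(struct arch_spinlock *lock)
    {
            __ticket_spin_unlock(lock);
    }

    struct pv_lock_ops pv_lock_ops = {
            .spin_is_locked = my_spin_is_locked,
            .spin_lock      = my_spin_lock,
            .spin_unlock    = my_spin_unlock,
            /* .spin_is_contended, .spin_lock_flags, .spin_trylock ... */
    };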
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index ada8c201d513..404a880ea325 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -45,8 +45,15 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
45 | 45 | ||
46 | #ifdef CONFIG_PCI | 46 | #ifdef CONFIG_PCI |
47 | extern unsigned int pcibios_assign_all_busses(void); | 47 | extern unsigned int pcibios_assign_all_busses(void); |
48 | extern int pci_legacy_init(void); | ||
49 | # ifdef CONFIG_ACPI | ||
50 | # define x86_default_pci_init pci_acpi_init | ||
51 | # else | ||
52 | # define x86_default_pci_init pci_legacy_init | ||
53 | # endif | ||
48 | #else | 54 | #else |
49 | #define pcibios_assign_all_busses() 0 | 55 | # define pcibios_assign_all_busses() 0 |
56 | # define x86_default_pci_init NULL | ||
50 | #endif | 57 | #endif |
51 | 58 | ||
52 | extern unsigned long pci_mem_start; | 59 | extern unsigned long pci_mem_start; |
@@ -90,40 +97,14 @@ extern void pci_iommu_alloc(void); | |||
90 | 97 | ||
91 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) | 98 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) |
92 | 99 | ||
93 | #if defined(CONFIG_X86_64) || defined(CONFIG_DMAR) || defined(CONFIG_DMA_API_DEBUG) | ||
94 | |||
95 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
96 | dma_addr_t ADDR_NAME; | ||
97 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
98 | __u32 LEN_NAME; | ||
99 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
100 | ((PTR)->ADDR_NAME) | ||
101 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
102 | (((PTR)->ADDR_NAME) = (VAL)) | ||
103 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
104 | ((PTR)->LEN_NAME) | ||
105 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
106 | (((PTR)->LEN_NAME) = (VAL)) | ||
107 | |||
108 | #else | ||
109 | |||
110 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0]; | ||
111 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0]; | ||
112 | #define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME) | ||
113 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
114 | do { break; } while (pci_unmap_addr(PTR, ADDR_NAME)) | ||
115 | #define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME) | ||
116 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
117 | do { break; } while (pci_unmap_len(PTR, LEN_NAME)) | ||
118 | |||
119 | #endif | ||
120 | |||
121 | #endif /* __KERNEL__ */ | 100 | #endif /* __KERNEL__ */ |
122 | 101 | ||
123 | #ifdef CONFIG_X86_64 | 102 | #ifdef CONFIG_X86_64 |
124 | #include "pci_64.h" | 103 | #include "pci_64.h" |
125 | #endif | 104 | #endif |
126 | 105 | ||
106 | void dma32_reserve_bootmem(void); | ||
107 | |||
127 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 108 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
128 | #include <asm-generic/pci-dma-compat.h> | 109 | #include <asm-generic/pci-dma-compat.h> |
129 | 110 | ||
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h index ae5e40f67daf..fe15cfb21b9b 100644 --- a/arch/x86/include/asm/pci_64.h +++ b/arch/x86/include/asm/pci_64.h | |||
@@ -22,8 +22,6 @@ extern int (*pci_config_read)(int seg, int bus, int dev, int fn, | |||
22 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, | 22 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, |
23 | int reg, int len, u32 value); | 23 | int reg, int len, u32 value); |
24 | 24 | ||
25 | extern void dma32_reserve_bootmem(void); | ||
26 | |||
27 | #endif /* __KERNEL__ */ | 25 | #endif /* __KERNEL__ */ |
28 | 26 | ||
29 | #endif /* _ASM_X86_PCI_64_H */ | 27 | #endif /* _ASM_X86_PCI_64_H */ |
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index b399988eee3a..1a0422348d6d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 | 29 | #define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 |
30 | #define PCI_HAS_IO_ECS 0x40000 | 30 | #define PCI_HAS_IO_ECS 0x40000 |
31 | #define PCI_NOASSIGN_ROMS 0x80000 | 31 | #define PCI_NOASSIGN_ROMS 0x80000 |
32 | #define PCI_ROOT_NO_CRS 0x100000 | ||
32 | 33 | ||
33 | extern unsigned int pci_probe; | 34 | extern unsigned int pci_probe; |
34 | extern unsigned long pirq_table_addr; | 35 | extern unsigned long pirq_table_addr; |
@@ -82,7 +83,6 @@ struct irq_routing_table { | |||
82 | 83 | ||
83 | extern unsigned int pcibios_irq_mask; | 84 | extern unsigned int pcibios_irq_mask; |
84 | 85 | ||
85 | extern int pcibios_scanned; | ||
86 | extern spinlock_t pci_config_lock; | 86 | extern spinlock_t pci_config_lock; |
87 | 87 | ||
88 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); | 88 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); |
@@ -105,24 +105,39 @@ extern bool port_cf9_safe; | |||
105 | extern int pci_direct_probe(void); | 105 | extern int pci_direct_probe(void); |
106 | extern void pci_direct_init(int type); | 106 | extern void pci_direct_init(int type); |
107 | extern void pci_pcbios_init(void); | 107 | extern void pci_pcbios_init(void); |
108 | extern int pci_olpc_init(void); | ||
109 | extern void __init dmi_check_pciprobe(void); | 108 | extern void __init dmi_check_pciprobe(void); |
110 | extern void __init dmi_check_skip_isa_align(void); | 109 | extern void __init dmi_check_skip_isa_align(void); |
111 | 110 | ||
112 | /* some commonly used subsys_initcalls */ | 111 | /* some commonly used subsys_initcalls */ |
113 | extern int __init pci_acpi_init(void); | 112 | extern int __init pci_acpi_init(void); |
114 | extern int __init pcibios_irq_init(void); | 113 | extern void __init pcibios_irq_init(void); |
115 | extern int __init pci_visws_init(void); | ||
116 | extern int __init pci_numaq_init(void); | ||
117 | extern int __init pcibios_init(void); | 114 | extern int __init pcibios_init(void); |
115 | extern int pci_legacy_init(void); | ||
116 | extern void pcibios_fixup_irqs(void); | ||
118 | 117 | ||
119 | /* pci-mmconfig.c */ | 118 | /* pci-mmconfig.c */ |
120 | 119 | ||
120 | /* "PCI MMCONFIG %04x [bus %02x-%02x]" */ | ||
121 | #define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2) | ||
122 | |||
123 | struct pci_mmcfg_region { | ||
124 | struct list_head list; | ||
125 | struct resource res; | ||
126 | u64 address; | ||
127 | char __iomem *virt; | ||
128 | u16 segment; | ||
129 | u8 start_bus; | ||
130 | u8 end_bus; | ||
131 | char name[PCI_MMCFG_RESOURCE_NAME_LEN]; | ||
132 | }; | ||
133 | |||
121 | extern int __init pci_mmcfg_arch_init(void); | 134 | extern int __init pci_mmcfg_arch_init(void); |
122 | extern void __init pci_mmcfg_arch_free(void); | 135 | extern void __init pci_mmcfg_arch_free(void); |
136 | extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus); | ||
137 | |||
138 | extern struct list_head pci_mmcfg_list; | ||
123 | 139 | ||
124 | extern struct acpi_mcfg_allocation *pci_mmcfg_config; | 140 | #define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20) |
125 | extern int pci_mmcfg_config_num; | ||
126 | 141 | ||
127 | /* | 142 | /* |
128 | * AMD Fam10h CPUs are buggy, and cannot access MMIO config space | 143 | * AMD Fam10h CPUs are buggy, and cannot access MMIO config space |
@@ -166,3 +181,17 @@ static inline void mmio_config_writel(void __iomem *pos, u32 val) | |||
166 | { | 181 | { |
167 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); | 182 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); |
168 | } | 183 | } |
184 | |||
185 | #ifdef CONFIG_PCI | ||
186 | # ifdef CONFIG_ACPI | ||
187 | # define x86_default_pci_init pci_acpi_init | ||
188 | # else | ||
189 | # define x86_default_pci_init pci_legacy_init | ||
190 | # endif | ||
191 | # define x86_default_pci_init_irq pcibios_irq_init | ||
192 | # define x86_default_pci_fixup_irqs pcibios_fixup_irqs | ||
193 | #else | ||
194 | # define x86_default_pci_init NULL | ||
195 | # define x86_default_pci_init_irq NULL | ||
196 | # define x86_default_pci_fixup_irqs NULL | ||
197 | #endif | ||
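PCI_MMCFG_BUS_OFFSET(bus) gives each bus a 1 MiB slice of the MMCONFIG window: 32 devices x 8 functions x 4 KiB of config space per function. A minimal sketch, assuming cfg came from pci_mmconfig_lookup() and cfg->virt is biased so start_bus maps correctly (mmcfg_addr is a hypothetical helper):

static char __iomem *mmcfg_addr(struct pci_mmcfg_region *cfg,
                                unsigned int bus, unsigned int devfn)
{
        /* 1 MiB per bus, 4 KiB per (device, function) pair */
        return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
}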
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index b65a36defeb7..66a272dfd8b8 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -25,19 +25,18 @@ | |||
25 | */ | 25 | */ |
26 | #ifdef CONFIG_SMP | 26 | #ifdef CONFIG_SMP |
27 | #define PER_CPU(var, reg) \ | 27 | #define PER_CPU(var, reg) \ |
28 | __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \ | 28 | __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \ |
29 | lea per_cpu__##var(reg), reg | 29 | lea var(reg), reg |
30 | #define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var | 30 | #define PER_CPU_VAR(var) %__percpu_seg:var |
31 | #else /* ! SMP */ | 31 | #else /* ! SMP */ |
32 | #define PER_CPU(var, reg) \ | 32 | #define PER_CPU(var, reg) __percpu_mov_op $var, reg |
33 | __percpu_mov_op $per_cpu__##var, reg | 33 | #define PER_CPU_VAR(var) var |
34 | #define PER_CPU_VAR(var) per_cpu__##var | ||
35 | #endif /* SMP */ | 34 | #endif /* SMP */ |
36 | 35 | ||
37 | #ifdef CONFIG_X86_64_SMP | 36 | #ifdef CONFIG_X86_64_SMP |
38 | #define INIT_PER_CPU_VAR(var) init_per_cpu__##var | 37 | #define INIT_PER_CPU_VAR(var) init_per_cpu__##var |
39 | #else | 38 | #else |
40 | #define INIT_PER_CPU_VAR(var) per_cpu__##var | 39 | #define INIT_PER_CPU_VAR(var) var |
41 | #endif | 40 | #endif |
42 | 41 | ||
43 | #else /* ...!ASSEMBLY */ | 42 | #else /* ...!ASSEMBLY */ |
@@ -60,12 +59,12 @@ | |||
60 | * There also must be an entry in vmlinux_64.lds.S | 59 | * There also must be an entry in vmlinux_64.lds.S |
61 | */ | 60 | */ |
62 | #define DECLARE_INIT_PER_CPU(var) \ | 61 | #define DECLARE_INIT_PER_CPU(var) \ |
63 | extern typeof(per_cpu_var(var)) init_per_cpu_var(var) | 62 | extern typeof(var) init_per_cpu_var(var) |
64 | 63 | ||
65 | #ifdef CONFIG_X86_64_SMP | 64 | #ifdef CONFIG_X86_64_SMP |
66 | #define init_per_cpu_var(var) init_per_cpu__##var | 65 | #define init_per_cpu_var(var) init_per_cpu__##var |
67 | #else | 66 | #else |
68 | #define init_per_cpu_var(var) per_cpu_var(var) | 67 | #define init_per_cpu_var(var) var |
69 | #endif | 68 | #endif |
70 | 69 | ||
71 | /* For arch-specific code, we can use direct single-insn ops (they | 70 | /* For arch-specific code, we can use direct single-insn ops (they |
@@ -74,63 +73,121 @@ extern void __bad_percpu_size(void); | |||
74 | 73 | ||
75 | #define percpu_to_op(op, var, val) \ | 74 | #define percpu_to_op(op, var, val) \ |
76 | do { \ | 75 | do { \ |
77 | typedef typeof(var) T__; \ | 76 | typedef typeof(var) pto_T__; \ |
78 | if (0) { \ | 77 | if (0) { \ |
79 | T__ tmp__; \ | 78 | pto_T__ pto_tmp__; \ |
80 | tmp__ = (val); \ | 79 | pto_tmp__ = (val); \ |
81 | } \ | 80 | } \ |
82 | switch (sizeof(var)) { \ | 81 | switch (sizeof(var)) { \ |
83 | case 1: \ | 82 | case 1: \ |
84 | asm(op "b %1,"__percpu_arg(0) \ | 83 | asm(op "b %1,"__percpu_arg(0) \ |
85 | : "+m" (var) \ | 84 | : "+m" (var) \ |
86 | : "qi" ((T__)(val))); \ | 85 | : "qi" ((pto_T__)(val))); \ |
87 | break; \ | 86 | break; \ |
88 | case 2: \ | 87 | case 2: \ |
89 | asm(op "w %1,"__percpu_arg(0) \ | 88 | asm(op "w %1,"__percpu_arg(0) \ |
90 | : "+m" (var) \ | 89 | : "+m" (var) \ |
91 | : "ri" ((T__)(val))); \ | 90 | : "ri" ((pto_T__)(val))); \ |
92 | break; \ | 91 | break; \ |
93 | case 4: \ | 92 | case 4: \ |
94 | asm(op "l %1,"__percpu_arg(0) \ | 93 | asm(op "l %1,"__percpu_arg(0) \ |
95 | : "+m" (var) \ | 94 | : "+m" (var) \ |
96 | : "ri" ((T__)(val))); \ | 95 | : "ri" ((pto_T__)(val))); \ |
97 | break; \ | 96 | break; \ |
98 | case 8: \ | 97 | case 8: \ |
99 | asm(op "q %1,"__percpu_arg(0) \ | 98 | asm(op "q %1,"__percpu_arg(0) \ |
100 | : "+m" (var) \ | 99 | : "+m" (var) \ |
101 | : "re" ((T__)(val))); \ | 100 | : "re" ((pto_T__)(val))); \ |
102 | break; \ | 101 | break; \ |
103 | default: __bad_percpu_size(); \ | 102 | default: __bad_percpu_size(); \ |
104 | } \ | 103 | } \ |
105 | } while (0) | 104 | } while (0) |
106 | 105 | ||
106 | /* | ||
107 | * Generate a percpu add-to-memory instruction and optimize the code | ||
108 | * when 1 or -1 is added or subtracted. | ||
109 | */ | ||
110 | #define percpu_add_op(var, val) \ | ||
111 | do { \ | ||
112 | typedef typeof(var) pao_T__; \ | ||
113 | const int pao_ID__ = (__builtin_constant_p(val) && \ | ||
114 | ((val) == 1 || (val) == -1)) ? (val) : 0; \ | ||
115 | if (0) { \ | ||
116 | pao_T__ pao_tmp__; \ | ||
117 | pao_tmp__ = (val); \ | ||
118 | } \ | ||
119 | switch (sizeof(var)) { \ | ||
120 | case 1: \ | ||
121 | if (pao_ID__ == 1) \ | ||
122 | asm("incb "__percpu_arg(0) : "+m" (var)); \ | ||
123 | else if (pao_ID__ == -1) \ | ||
124 | asm("decb "__percpu_arg(0) : "+m" (var)); \ | ||
125 | else \ | ||
126 | asm("addb %1, "__percpu_arg(0) \ | ||
127 | : "+m" (var) \ | ||
128 | : "qi" ((pao_T__)(val))); \ | ||
129 | break; \ | ||
130 | case 2: \ | ||
131 | if (pao_ID__ == 1) \ | ||
132 | asm("incw "__percpu_arg(0) : "+m" (var)); \ | ||
133 | else if (pao_ID__ == -1) \ | ||
134 | asm("decw "__percpu_arg(0) : "+m" (var)); \ | ||
135 | else \ | ||
136 | asm("addw %1, "__percpu_arg(0) \ | ||
137 | : "+m" (var) \ | ||
138 | : "ri" ((pao_T__)(val))); \ | ||
139 | break; \ | ||
140 | case 4: \ | ||
141 | if (pao_ID__ == 1) \ | ||
142 | asm("incl "__percpu_arg(0) : "+m" (var)); \ | ||
143 | else if (pao_ID__ == -1) \ | ||
144 | asm("decl "__percpu_arg(0) : "+m" (var)); \ | ||
145 | else \ | ||
146 | asm("addl %1, "__percpu_arg(0) \ | ||
147 | : "+m" (var) \ | ||
148 | : "ri" ((pao_T__)(val))); \ | ||
149 | break; \ | ||
150 | case 8: \ | ||
151 | if (pao_ID__ == 1) \ | ||
152 | asm("incq "__percpu_arg(0) : "+m" (var)); \ | ||
153 | else if (pao_ID__ == -1) \ | ||
154 | asm("decq "__percpu_arg(0) : "+m" (var)); \ | ||
155 | else \ | ||
156 | asm("addq %1, "__percpu_arg(0) \ | ||
157 | : "+m" (var) \ | ||
158 | : "re" ((pao_T__)(val))); \ | ||
159 | break; \ | ||
160 | default: __bad_percpu_size(); \ | ||
161 | } \ | ||
162 | } while (0) | ||
163 | |||
107 | #define percpu_from_op(op, var, constraint) \ | 164 | #define percpu_from_op(op, var, constraint) \ |
108 | ({ \ | 165 | ({ \ |
109 | typeof(var) ret__; \ | 166 | typeof(var) pfo_ret__; \ |
110 | switch (sizeof(var)) { \ | 167 | switch (sizeof(var)) { \ |
111 | case 1: \ | 168 | case 1: \ |
112 | asm(op "b "__percpu_arg(1)",%0" \ | 169 | asm(op "b "__percpu_arg(1)",%0" \ |
113 | : "=q" (ret__) \ | 170 | : "=q" (pfo_ret__) \ |
114 | : constraint); \ | 171 | : constraint); \ |
115 | break; \ | 172 | break; \ |
116 | case 2: \ | 173 | case 2: \ |
117 | asm(op "w "__percpu_arg(1)",%0" \ | 174 | asm(op "w "__percpu_arg(1)",%0" \ |
118 | : "=r" (ret__) \ | 175 | : "=r" (pfo_ret__) \ |
119 | : constraint); \ | 176 | : constraint); \ |
120 | break; \ | 177 | break; \ |
121 | case 4: \ | 178 | case 4: \ |
122 | asm(op "l "__percpu_arg(1)",%0" \ | 179 | asm(op "l "__percpu_arg(1)",%0" \ |
123 | : "=r" (ret__) \ | 180 | : "=r" (pfo_ret__) \ |
124 | : constraint); \ | 181 | : constraint); \ |
125 | break; \ | 182 | break; \ |
126 | case 8: \ | 183 | case 8: \ |
127 | asm(op "q "__percpu_arg(1)",%0" \ | 184 | asm(op "q "__percpu_arg(1)",%0" \ |
128 | : "=r" (ret__) \ | 185 | : "=r" (pfo_ret__) \ |
129 | : constraint); \ | 186 | : constraint); \ |
130 | break; \ | 187 | break; \ |
131 | default: __bad_percpu_size(); \ | 188 | default: __bad_percpu_size(); \ |
132 | } \ | 189 | } \ |
133 | ret__; \ | 190 | pfo_ret__; \ |
134 | }) | 191 | }) |
135 | 192 | ||
136 | /* | 193 | /* |
@@ -142,23 +199,99 @@ do { \ | |||
142 | * per-thread variables implemented as per-cpu variables and thus | 199 | * per-thread variables implemented as per-cpu variables and thus |
143 | * stable for the duration of the respective task. | 200 | * stable for the duration of the respective task. |
144 | */ | 201 | */ |
145 | #define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \ | 202 | #define percpu_read(var) percpu_from_op("mov", var, "m" (var)) |
146 | "m" (per_cpu__##var)) | 203 | #define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var))) |
147 | #define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \ | 204 | #define percpu_write(var, val) percpu_to_op("mov", var, val) |
148 | "p" (&per_cpu__##var)) | 205 | #define percpu_add(var, val) percpu_add_op(var, val) |
149 | #define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) | 206 | #define percpu_sub(var, val) percpu_add_op(var, -(val)) |
150 | #define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) | 207 | #define percpu_and(var, val) percpu_to_op("and", var, val) |
151 | #define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) | 208 | #define percpu_or(var, val) percpu_to_op("or", var, val) |
152 | #define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val) | 209 | #define percpu_xor(var, val) percpu_to_op("xor", var, val) |
153 | #define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val) | 210 | |
154 | #define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val) | 211 | #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
212 | #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
213 | #define __this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
214 | |||
215 | #define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) | ||
216 | #define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) | ||
217 | #define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) | ||
218 | #define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val) | ||
219 | #define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val) | ||
220 | #define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val) | ||
221 | #define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | ||
222 | #define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | ||
223 | #define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | ||
224 | #define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) | ||
225 | #define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) | ||
226 | #define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) | ||
227 | #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | ||
228 | #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | ||
229 | #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | ||
230 | |||
231 | #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
232 | #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
233 | #define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
234 | #define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) | ||
235 | #define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) | ||
236 | #define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) | ||
237 | #define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val) | ||
238 | #define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val) | ||
239 | #define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val) | ||
240 | #define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | ||
241 | #define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | ||
242 | #define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | ||
243 | #define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) | ||
244 | #define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) | ||
245 | #define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) | ||
246 | #define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | ||
247 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | ||
248 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | ||
249 | |||
250 | #define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) | ||
251 | #define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) | ||
252 | #define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val) | ||
253 | #define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | ||
254 | #define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | ||
255 | #define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | ||
256 | #define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) | ||
257 | #define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) | ||
258 | #define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) | ||
259 | #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | ||
260 | #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | ||
261 | #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | ||
262 | |||
263 | /* | ||
264 | * Per-cpu atomic 64-bit operations are only available on 64-bit kernels. | ||
265 | * 32-bit must fall back to the generic operations. | ||
266 | */ | ||
267 | #ifdef CONFIG_X86_64 | ||
268 | #define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
269 | #define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | ||
270 | #define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) | ||
271 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | ||
272 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | ||
273 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | ||
274 | |||
275 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
276 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | ||
277 | #define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) | ||
278 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | ||
279 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | ||
280 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | ||
281 | |||
282 | #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) | ||
283 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | ||
284 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | ||
285 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | ||
286 | |||
287 | #endif | ||
155 | 288 | ||
156 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 289 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
157 | #define x86_test_and_clear_bit_percpu(bit, var) \ | 290 | #define x86_test_and_clear_bit_percpu(bit, var) \ |
158 | ({ \ | 291 | ({ \ |
159 | int old__; \ | 292 | int old__; \ |
160 | asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ | 293 | asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ |
161 | : "=r" (old__), "+m" (per_cpu__##var) \ | 294 | : "=r" (old__), "+m" (var) \ |
162 | : "dIr" (bit)); \ | 295 | : "dIr" (bit)); \ |
163 | old__; \ | 296 | old__; \ |
164 | }) | 297 | }) |
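The pao_ID__ trick above folds a compile-time constant +1/-1 into a single inc/dec on the per-cpu segment; anything else falls through to add. A hedged usage sketch (my_counter is a hypothetical per-cpu variable; the emitted instructions are what the macro selects on x86-64):

DEFINE_PER_CPU(int, my_counter);        /* hypothetical */

static void bump(void)
{
        percpu_add(my_counter, 1);      /* emits: incl %gs:my_counter    */
        percpu_add(my_counter, -1);     /* emits: decl %gs:my_counter    */
        percpu_add(my_counter, 5);      /* emits: addl $5,%gs:my_counter */
}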
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ad7ce3fd5065..db6109a885a7 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -18,7 +18,8 @@ | |||
18 | #define MSR_ARCH_PERFMON_EVENTSEL0 0x186 | 18 | #define MSR_ARCH_PERFMON_EVENTSEL0 0x186 |
19 | #define MSR_ARCH_PERFMON_EVENTSEL1 0x187 | 19 | #define MSR_ARCH_PERFMON_EVENTSEL1 0x187 |
20 | 20 | ||
21 | #define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) | 21 | #define ARCH_PERFMON_EVENTSEL_ENABLE (1 << 22) |
22 | #define ARCH_PERFMON_EVENTSEL_ANY (1 << 21) | ||
22 | #define ARCH_PERFMON_EVENTSEL_INT (1 << 20) | 23 | #define ARCH_PERFMON_EVENTSEL_INT (1 << 20) |
23 | #define ARCH_PERFMON_EVENTSEL_OS (1 << 17) | 24 | #define ARCH_PERFMON_EVENTSEL_OS (1 << 17) |
24 | #define ARCH_PERFMON_EVENTSEL_USR (1 << 16) | 25 | #define ARCH_PERFMON_EVENTSEL_USR (1 << 16) |
@@ -26,11 +27,34 @@ | |||
26 | /* | 27 | /* |
27 | * Includes eventsel and unit mask as well: | 28 | * Includes eventsel and unit mask as well: |
28 | */ | 29 | */ |
29 | #define ARCH_PERFMON_EVENT_MASK 0xffff | 30 | |
31 | |||
32 | #define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL | ||
33 | #define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL | ||
34 | #define INTEL_ARCH_EDGE_MASK 0x00040000ULL | ||
35 | #define INTEL_ARCH_INV_MASK 0x00800000ULL | ||
36 | #define INTEL_ARCH_CNT_MASK 0xFF000000ULL | ||
37 | #define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK) | ||
38 | |||
39 | /* | ||
40 | * Filter mask to validate fixed counter events. | ||
41 | * The following filters disqualify an event from using fixed counters: | ||
42 | * - inv | ||
43 | * - edge | ||
44 | * - cnt-mask | ||
45 | * The other filters are supported by fixed counters. | ||
46 | * The any-thread option is supported starting with v3. | ||
47 | */ | ||
48 | #define INTEL_ARCH_FIXED_MASK \ | ||
49 | (INTEL_ARCH_CNT_MASK| \ | ||
50 | INTEL_ARCH_INV_MASK| \ | ||
51 | INTEL_ARCH_EDGE_MASK|\ | ||
52 | INTEL_ARCH_UNIT_MASK|\ | ||
53 | INTEL_ARCH_EVENT_MASK) | ||
30 | 54 | ||
31 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 55 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
32 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 56 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
33 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 | 57 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 |
34 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ | 58 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ |
35 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) | 59 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) |
36 | 60 | ||
@@ -93,6 +117,18 @@ union cpuid10_edx { | |||
93 | */ | 117 | */ |
94 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) | 118 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) |
95 | 119 | ||
120 | /* IbsFetchCtl bits/masks */ | ||
121 | #define IBS_FETCH_RAND_EN (1ULL<<57) | ||
122 | #define IBS_FETCH_VAL (1ULL<<49) | ||
123 | #define IBS_FETCH_ENABLE (1ULL<<48) | ||
124 | #define IBS_FETCH_CNT 0xFFFF0000ULL | ||
125 | #define IBS_FETCH_MAX_CNT 0x0000FFFFULL | ||
126 | |||
127 | /* IbsOpCtl bits */ | ||
128 | #define IBS_OP_CNT_CTL (1ULL<<19) | ||
129 | #define IBS_OP_VAL (1ULL<<18) | ||
130 | #define IBS_OP_ENABLE (1ULL<<17) | ||
131 | #define IBS_OP_MAX_CNT 0x0000FFFFULL | ||
96 | 132 | ||
97 | #ifdef CONFIG_PERF_EVENTS | 133 | #ifdef CONFIG_PERF_EVENTS |
98 | extern void init_hw_perf_events(void); | 134 | extern void init_hw_perf_events(void); |
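INTEL_ARCH_FIXED_MASK keeps exactly the filter bits (inv, edge, cnt-mask) plus event and umask, so a config qualifies for a fixed counter only when masking leaves nothing but a matching event/umask pair. A minimal sketch for the UNHALTED_CORE_CYCLES fixed counter (fits_fixed_cycles_counter is a hypothetical helper):

static int fits_fixed_cycles_counter(u64 config)
{
        u64 event = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |
                    ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;

        /* any inv/edge/cnt-mask bit set makes the compare fail */
        return (config & INTEL_ARCH_FIXED_MASK) == event;
}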
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 0e8c2a0fd922..271de94c3810 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h | |||
@@ -23,6 +23,11 @@ static inline void paravirt_release_pud(unsigned long pfn) {} | |||
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * Flags to use when allocating a user page table page. | ||
27 | */ | ||
28 | extern gfp_t __userpte_alloc_gfp; | ||
29 | |||
30 | /* | ||
26 | * Allocate and free page tables. | 31 | * Allocate and free page tables. |
27 | */ | 32 | */ |
28 | extern pgd_t *pgd_alloc(struct mm_struct *); | 33 | extern pgd_t *pgd_alloc(struct mm_struct *); |
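__userpte_alloc_gfp makes the GFP flags for user page-table pages a tunable rather than a constant. A hedged sketch of the consumer side (modeled on arch/x86/mm/pgtable.c of this era):

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}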
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index af6fd360ab35..a34c785c5a63 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -16,6 +16,8 @@ | |||
16 | 16 | ||
17 | #ifndef __ASSEMBLY__ | 17 | #ifndef __ASSEMBLY__ |
18 | 18 | ||
19 | #include <asm/x86_init.h> | ||
20 | |||
19 | /* | 21 | /* |
20 | * ZERO_PAGE is a global shared page that is always zero: used | 22 | * ZERO_PAGE is a global shared page that is always zero: used |
21 | * for zero-mapped memory areas etc.. | 23 | * for zero-mapped memory areas etc.. |
@@ -270,9 +272,9 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, | |||
270 | unsigned long new_flags) | 272 | unsigned long new_flags) |
271 | { | 273 | { |
272 | /* | 274 | /* |
273 | * PAT type is always WB for ISA. So no need to check. | 275 | * PAT type is always WB for untracked ranges, so no need to check. |
274 | */ | 276 | */ |
275 | if (is_ISA_range(paddr, paddr + size - 1)) | 277 | if (x86_platform.is_untracked_pat_range(paddr, paddr + size)) |
276 | return 1; | 278 | return 1; |
277 | 279 | ||
278 | /* | 280 | /* |
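The hard-coded ISA check becomes a platform hook, so subarchitectures can declare wider untracked ranges. A sketch of what the stock hook presumably reduces to (note the range is now passed end-exclusive; default_is_untracked_pat_range is a hypothetical name):

static int default_is_untracked_pat_range(u64 start, u64 end)
{
        /* the classic ISA hole: 0xA0000 - 0x100000 */
        return start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS;
}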
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 01fd9461d323..2984a25ff383 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <asm/paravirt.h> | 19 | #include <asm/paravirt.h> |
20 | 20 | ||
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
22 | #include <linux/slab.h> | ||
23 | #include <linux/list.h> | 22 | #include <linux/list.h> |
24 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
25 | 24 | ||
@@ -54,10 +53,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); | |||
54 | in_irq() ? KM_IRQ_PTE : \ | 53 | in_irq() ? KM_IRQ_PTE : \ |
55 | KM_PTE0) | 54 | KM_PTE0) |
56 | #define pte_offset_map(dir, address) \ | 55 | #define pte_offset_map(dir, address) \ |
57 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \ | 56 | ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \ |
58 | pte_index((address))) | 57 | pte_index((address))) |
59 | #define pte_offset_map_nested(dir, address) \ | 58 | #define pte_offset_map_nested(dir, address) \ |
60 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ | 59 | ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \ |
61 | pte_index((address))) | 60 | pte_index((address))) |
62 | #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) | 61 | #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) |
63 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) | 62 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) |
@@ -80,7 +79,7 @@ do { \ | |||
80 | * The i386 doesn't have any external MMU info: the kernel page | 79 | * The i386 doesn't have any external MMU info: the kernel page |
81 | * tables contain all the necessary information. | 80 | * tables contain all the necessary information. |
82 | */ | 81 | */ |
83 | #define update_mmu_cache(vma, address, pte) do { } while (0) | 82 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
84 | 83 | ||
85 | #endif /* !__ASSEMBLY__ */ | 84 | #endif /* !__ASSEMBLY__ */ |
86 | 85 | ||
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index c57a30117149..181be528c612 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -129,7 +129,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } | |||
129 | #define pte_unmap(pte) /* NOP */ | 129 | #define pte_unmap(pte) /* NOP */ |
130 | #define pte_unmap_nested(pte) /* NOP */ | 130 | #define pte_unmap_nested(pte) /* NOP */ |
131 | 131 | ||
132 | #define update_mmu_cache(vma, address, pte) do { } while (0) | 132 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
133 | 133 | ||
134 | /* Encode and de-code a swap entry */ | 134 | /* Encode and de-code a swap entry */ |
135 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 135 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index e75daac64962..91d323f47364 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -30,6 +30,7 @@ struct mm_struct; | |||
30 | #include <linux/math64.h> | 30 | #include <linux/math64.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | 32 | ||
33 | #define HBP_NUM 4 | ||
33 | /* | 34 | /* |
34 | * Default implementation of macro that returns current | 35 | * Default implementation of macro that returns current |
35 | * instruction pointer ("program counter"). | 36 | * instruction pointer ("program counter"). |
@@ -182,7 +183,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | |||
182 | unsigned int *ecx, unsigned int *edx) | 183 | unsigned int *ecx, unsigned int *edx) |
183 | { | 184 | { |
184 | /* ecx is often an input as well as an output. */ | 185 | /* ecx is often an input as well as an output. */ |
185 | asm("cpuid" | 186 | asm volatile("cpuid" |
186 | : "=a" (*eax), | 187 | : "=a" (*eax), |
187 | "=b" (*ebx), | 188 | "=b" (*ebx), |
188 | "=c" (*ecx), | 189 | "=c" (*ecx), |
@@ -424,6 +425,8 @@ extern unsigned int xstate_size; | |||
424 | extern void free_thread_xstate(struct task_struct *); | 425 | extern void free_thread_xstate(struct task_struct *); |
425 | extern struct kmem_cache *task_xstate_cachep; | 426 | extern struct kmem_cache *task_xstate_cachep; |
426 | 427 | ||
428 | struct perf_event; | ||
429 | |||
427 | struct thread_struct { | 430 | struct thread_struct { |
428 | /* Cached TLS descriptors: */ | 431 | /* Cached TLS descriptors: */ |
429 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 432 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; |
@@ -445,13 +448,12 @@ struct thread_struct { | |||
445 | unsigned long fs; | 448 | unsigned long fs; |
446 | #endif | 449 | #endif |
447 | unsigned long gs; | 450 | unsigned long gs; |
448 | /* Hardware debugging registers: */ | 451 | /* Save middle states of ptrace breakpoints */ |
449 | unsigned long debugreg0; | 452 | struct perf_event *ptrace_bps[HBP_NUM]; |
450 | unsigned long debugreg1; | 453 | /* Debug status used for traps, single steps, etc... */ |
451 | unsigned long debugreg2; | 454 | unsigned long debugreg6; |
452 | unsigned long debugreg3; | 455 | /* Keep track of the exact dr7 value set by the user */ |
453 | unsigned long debugreg6; | 456 | unsigned long ptrace_dr7; |
454 | unsigned long debugreg7; | ||
455 | /* Fault info: */ | 457 | /* Fault info: */ |
456 | unsigned long cr2; | 458 | unsigned long cr2; |
457 | unsigned long trap_no; | 459 | unsigned long trap_no; |
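Each of the HBP_NUM debug-register slots is now owned by a perf_event instead of a raw dr0-dr3 shadow; debugreg6 keeps the trap status and ptrace_dr7 the user-visible dr7. A small illustrative sketch (count_active_ptrace_bps is hypothetical):

static int count_active_ptrace_bps(struct task_struct *tsk)
{
        int i, n = 0;

        for (i = 0; i < HBP_NUM; i++)
                if (tsk->thread.ptrace_bps[i])
                        n++;            /* slot i backs a breakpoint */
        return n;
}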
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 621f56d73121..6f414ed88620 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h | |||
@@ -5,31 +5,22 @@ | |||
5 | 5 | ||
6 | /* misc architecture specific prototypes */ | 6 | /* misc architecture specific prototypes */ |
7 | 7 | ||
8 | extern void early_idt_handler(void); | 8 | void early_idt_handler(void); |
9 | 9 | ||
10 | extern void system_call(void); | 10 | void system_call(void); |
11 | extern void syscall_init(void); | 11 | void syscall_init(void); |
12 | 12 | ||
13 | extern void ia32_syscall(void); | 13 | void ia32_syscall(void); |
14 | extern void ia32_cstar_target(void); | 14 | void ia32_cstar_target(void); |
15 | extern void ia32_sysenter_target(void); | 15 | void ia32_sysenter_target(void); |
16 | 16 | ||
17 | extern void syscall32_cpu_init(void); | 17 | void syscall32_cpu_init(void); |
18 | 18 | ||
19 | extern void check_efer(void); | 19 | void x86_configure_nx(void); |
20 | void x86_report_nx(void); | ||
20 | 21 | ||
21 | extern int reboot_force; | 22 | extern int reboot_force; |
22 | 23 | ||
23 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | 24 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); |
24 | 25 | ||
25 | /* | ||
26 | * This looks more complex than it should be. But we need to | ||
27 | * get the type for the ~ right in round_down (it needs to be | ||
28 | * as wide as the result!), and we want to evaluate the macro | ||
29 | * arguments just once each. | ||
30 | */ | ||
31 | #define __round_mask(x,y) ((__typeof__(x))((y)-1)) | ||
32 | #define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1) | ||
33 | #define round_down(x,y) ((x) & ~__round_mask(x,y)) | ||
34 | |||
35 | #endif /* _ASM_X86_PROTO_H */ | 26 | #endif /* _ASM_X86_PROTO_H */ |
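The round_up/round_down trio leaves this header (relocated to a generic one); the macros only work for power-of-two y, and the cast in __round_mask keeps ~mask as wide as x, so a u64 is not truncated by an int-sized mask. A self-contained worked example:

#include <stdio.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y)     ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y)   ((x) & ~__round_mask(x, y))

int main(void)
{
        /* round_up(5,4)   = ((5-1)|3)+1 = 7+1 = 8
         * round_down(5,4) = 5 & ~3           = 4 */
        printf("%lu %lu\n", round_up(5UL, 4), round_down(5UL, 4));
        return 0;
}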
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 0f0d908349aa..69a686a7dff0 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
9 | #include <asm/segment.h> | 9 | #include <asm/segment.h> |
10 | #include <asm/page_types.h> | ||
10 | #endif | 11 | #endif |
11 | 12 | ||
12 | #ifndef __ASSEMBLY__ | 13 | #ifndef __ASSEMBLY__ |
@@ -216,20 +217,72 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) | |||
216 | return regs->sp; | 217 | return regs->sp; |
217 | } | 218 | } |
218 | 219 | ||
219 | /* | 220 | /* Query offset/name of register from its name/offset */ |
220 | * These are defined as per linux/ptrace.h, which see. | 221 | extern int regs_query_register_offset(const char *name); |
222 | extern const char *regs_query_register_name(unsigned int offset); | ||
223 | #define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) | ||
224 | |||
225 | /** | ||
226 | * regs_get_register() - get register value from its offset | ||
227 | * @regs: pt_regs from which register value is gotten. | ||
228 | * @offset: offset number of the register. | ||
229 | * | ||
230 | * regs_get_register() returns the value of a register. @offset is the | ||
231 | * offset of the register within the struct pt_regs pointed to by @regs. | ||
232 | * If @offset is larger than MAX_REG_OFFSET, this returns 0. | ||
221 | */ | 233 | */ |
222 | #define arch_has_single_step() (1) | 234 | static inline unsigned long regs_get_register(struct pt_regs *regs, |
223 | extern void user_enable_single_step(struct task_struct *); | 235 | unsigned int offset) |
224 | extern void user_disable_single_step(struct task_struct *); | 236 | { |
237 | if (unlikely(offset > MAX_REG_OFFSET)) | ||
238 | return 0; | ||
239 | return *(unsigned long *)((unsigned long)regs + offset); | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * regs_within_kernel_stack() - check the address in the stack | ||
244 | * @regs: pt_regs which contains kernel stack pointer. | ||
245 | * @addr: address which is checked. | ||
246 | * | ||
247 | * regs_within_kernel_stack() checks whether @addr lies within the kernel | ||
248 | * stack page(s). It returns true if so, false otherwise. | ||
249 | */ | ||
250 | static inline int regs_within_kernel_stack(struct pt_regs *regs, | ||
251 | unsigned long addr) | ||
252 | { | ||
253 | return ((addr & ~(THREAD_SIZE - 1)) == | ||
254 | (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); | ||
255 | } | ||
225 | 256 | ||
226 | extern void user_enable_block_step(struct task_struct *); | 257 | /** |
258 | * regs_get_kernel_stack_nth() - get Nth entry of the stack | ||
259 | * @regs: pt_regs which contains kernel stack pointer. | ||
260 | * @n: stack entry number. | ||
261 | * | ||
262 | * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack | ||
263 | * specified by @regs. If the @n-th entry is NOT in the kernel stack, | ||
264 | * this returns 0. | ||
265 | */ | ||
266 | static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, | ||
267 | unsigned int n) | ||
268 | { | ||
269 | unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); | ||
270 | addr += n; | ||
271 | if (regs_within_kernel_stack(regs, (unsigned long)addr)) | ||
272 | return *addr; | ||
273 | else | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | #define arch_has_single_step() (1) | ||
227 | #ifdef CONFIG_X86_DEBUGCTLMSR | 278 | #ifdef CONFIG_X86_DEBUGCTLMSR |
228 | #define arch_has_block_step() (1) | 279 | #define arch_has_block_step() (1) |
229 | #else | 280 | #else |
230 | #define arch_has_block_step() (boot_cpu_data.x86 >= 6) | 281 | #define arch_has_block_step() (boot_cpu_data.x86 >= 6) |
231 | #endif | 282 | #endif |
232 | 283 | ||
284 | #define ARCH_HAS_USER_SINGLE_STEP_INFO | ||
285 | |||
233 | struct user_desc; | 286 | struct user_desc; |
234 | extern int do_get_thread_area(struct task_struct *p, int idx, | 287 | extern int do_get_thread_area(struct task_struct *p, int idx, |
235 | struct user_desc __user *info); | 288 | struct user_desc __user *info); |
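Typical consumers of these accessors are kprobes/tracing handlers that need registers or stack slots by offset rather than by name. A hedged usage sketch (inspect is a hypothetical handler):

static void inspect(struct pt_regs *regs)
{
        unsigned long ax = regs_get_register(regs,
                                offsetof(struct pt_regs, ax));
        unsigned long slot2 = regs_get_kernel_stack_nth(regs, 2);
        int off = regs_query_register_offset("ax");  /* same offset by name */

        pr_info("ax=%#lx (off %d) stack[2]=%#lx\n", ax, off, slot2);
}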
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index ca7517d33776..606ede126972 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/list.h> | 41 | #include <linux/list.h> |
42 | #include <linux/spinlock.h> | 42 | #include <linux/spinlock.h> |
43 | #include <linux/lockdep.h> | 43 | #include <linux/lockdep.h> |
44 | #include <asm/asm.h> | ||
44 | 45 | ||
45 | struct rwsem_waiter; | 46 | struct rwsem_waiter; |
46 | 47 | ||
@@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore * | |||
55 | 56 | ||
56 | /* | 57 | /* |
57 | * the semaphore definition | 58 | * the semaphore definition |
59 | * | ||
60 | * The bias values and the counter type limit the number of | ||
61 | * potential readers/writers to 32767 for 32 bits and 2147483647 | ||
62 | * for 64 bits. | ||
58 | */ | 63 | */ |
59 | 64 | ||
60 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 65 | #ifdef CONFIG_X86_64 |
61 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 66 | # define RWSEM_ACTIVE_MASK 0xffffffffL |
62 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 67 | #else |
63 | #define RWSEM_WAITING_BIAS (-0x00010000) | 68 | # define RWSEM_ACTIVE_MASK 0x0000ffffL |
69 | #endif | ||
70 | |||
71 | #define RWSEM_UNLOCKED_VALUE 0x00000000L | ||
72 | #define RWSEM_ACTIVE_BIAS 0x00000001L | ||
73 | #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) | ||
64 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 74 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
65 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 75 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
66 | 76 | ||
77 | typedef signed long rwsem_count_t; | ||
78 | |||
67 | struct rw_semaphore { | 79 | struct rw_semaphore { |
68 | signed long count; | 80 | rwsem_count_t count; |
69 | spinlock_t wait_lock; | 81 | spinlock_t wait_lock; |
70 | struct list_head wait_list; | 82 | struct list_head wait_list; |
71 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 83 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
@@ -105,7 +117,7 @@ do { \ | |||
105 | static inline void __down_read(struct rw_semaphore *sem) | 117 | static inline void __down_read(struct rw_semaphore *sem) |
106 | { | 118 | { |
107 | asm volatile("# beginning down_read\n\t" | 119 | asm volatile("# beginning down_read\n\t" |
108 | LOCK_PREFIX " incl (%%eax)\n\t" | 120 | LOCK_PREFIX _ASM_INC "(%1)\n\t" |
109 | /* adds 0x00000001, returns the old value */ | 121 | /* adds 0x00000001, returns the old value */ |
110 | " jns 1f\n" | 122 | " jns 1f\n" |
111 | " call call_rwsem_down_read_failed\n" | 123 | " call call_rwsem_down_read_failed\n" |
@@ -121,14 +133,14 @@ static inline void __down_read(struct rw_semaphore *sem) | |||
121 | */ | 133 | */ |
122 | static inline int __down_read_trylock(struct rw_semaphore *sem) | 134 | static inline int __down_read_trylock(struct rw_semaphore *sem) |
123 | { | 135 | { |
124 | __s32 result, tmp; | 136 | rwsem_count_t result, tmp; |
125 | asm volatile("# beginning __down_read_trylock\n\t" | 137 | asm volatile("# beginning __down_read_trylock\n\t" |
126 | " movl %0,%1\n\t" | 138 | " mov %0,%1\n\t" |
127 | "1:\n\t" | 139 | "1:\n\t" |
128 | " movl %1,%2\n\t" | 140 | " mov %1,%2\n\t" |
129 | " addl %3,%2\n\t" | 141 | " add %3,%2\n\t" |
130 | " jle 2f\n\t" | 142 | " jle 2f\n\t" |
131 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" | 143 | LOCK_PREFIX " cmpxchg %2,%0\n\t" |
132 | " jnz 1b\n\t" | 144 | " jnz 1b\n\t" |
133 | "2:\n\t" | 145 | "2:\n\t" |
134 | "# ending __down_read_trylock\n\t" | 146 | "# ending __down_read_trylock\n\t" |
@@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
143 | */ | 155 | */ |
144 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | 156 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) |
145 | { | 157 | { |
146 | int tmp; | 158 | rwsem_count_t tmp; |
147 | 159 | ||
148 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 160 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
149 | asm volatile("# beginning down_write\n\t" | 161 | asm volatile("# beginning down_write\n\t" |
150 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" | 162 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
151 | /* subtract 0x0000ffff, returns the old value */ | 163 | /* subtract 0x0000ffff, returns the old value */ |
152 | " testl %%edx,%%edx\n\t" | 164 | " test %1,%1\n\t" |
153 | /* was the count 0 before? */ | 165 | /* was the count 0 before? */ |
154 | " jz 1f\n" | 166 | " jz 1f\n" |
155 | " call call_rwsem_down_write_failed\n" | 167 | " call call_rwsem_down_write_failed\n" |
@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem) | |||
170 | */ | 182 | */ |
171 | static inline int __down_write_trylock(struct rw_semaphore *sem) | 183 | static inline int __down_write_trylock(struct rw_semaphore *sem) |
172 | { | 184 | { |
173 | signed long ret = cmpxchg(&sem->count, | 185 | rwsem_count_t ret = cmpxchg(&sem->count, |
174 | RWSEM_UNLOCKED_VALUE, | 186 | RWSEM_UNLOCKED_VALUE, |
175 | RWSEM_ACTIVE_WRITE_BIAS); | 187 | RWSEM_ACTIVE_WRITE_BIAS); |
176 | if (ret == RWSEM_UNLOCKED_VALUE) | 188 | if (ret == RWSEM_UNLOCKED_VALUE) |
177 | return 1; | 189 | return 1; |
178 | return 0; | 190 | return 0; |
@@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
183 | */ | 195 | */ |
184 | static inline void __up_read(struct rw_semaphore *sem) | 196 | static inline void __up_read(struct rw_semaphore *sem) |
185 | { | 197 | { |
186 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; | 198 | rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS; |
187 | asm volatile("# beginning __up_read\n\t" | 199 | asm volatile("# beginning __up_read\n\t" |
188 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" | 200 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
189 | /* subtracts 1, returns the old value */ | 201 | /* subtracts 1, returns the old value */ |
190 | " jns 1f\n\t" | 202 | " jns 1f\n\t" |
191 | " call call_rwsem_wake\n" | 203 | " call call_rwsem_wake\n" |
@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
201 | */ | 213 | */ |
202 | static inline void __up_write(struct rw_semaphore *sem) | 214 | static inline void __up_write(struct rw_semaphore *sem) |
203 | { | 215 | { |
216 | rwsem_count_t tmp; | ||
204 | asm volatile("# beginning __up_write\n\t" | 217 | asm volatile("# beginning __up_write\n\t" |
205 | " movl %2,%%edx\n\t" | 218 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
206 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" | ||
207 | /* tries to transition | 219 | /* tries to transition |
208 | 0xffff0001 -> 0x00000000 */ | 220 | 0xffff0001 -> 0x00000000 */ |
209 | " jz 1f\n" | 221 | " jz 1f\n" |
210 | " call call_rwsem_wake\n" | 222 | " call call_rwsem_wake\n" |
211 | "1:\n\t" | 223 | "1:\n\t" |
212 | "# ending __up_write\n" | 224 | "# ending __up_write\n" |
213 | : "+m" (sem->count) | 225 | : "+m" (sem->count), "=d" (tmp) |
214 | : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) | 226 | : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) |
215 | : "memory", "cc", "edx"); | 227 | : "memory", "cc"); |
216 | } | 228 | } |
217 | 229 | ||
218 | /* | 230 | /* |
@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem) | |||
221 | static inline void __downgrade_write(struct rw_semaphore *sem) | 233 | static inline void __downgrade_write(struct rw_semaphore *sem) |
222 | { | 234 | { |
223 | asm volatile("# beginning __downgrade_write\n\t" | 235 | asm volatile("# beginning __downgrade_write\n\t" |
224 | LOCK_PREFIX " addl %2,(%%eax)\n\t" | 236 | LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" |
225 | /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ | 237 | /* |
238 | * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) | ||
239 | * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) | ||
240 | */ | ||
226 | " jns 1f\n\t" | 241 | " jns 1f\n\t" |
227 | " call call_rwsem_downgrade_wake\n" | 242 | " call call_rwsem_downgrade_wake\n" |
228 | "1:\n\t" | 243 | "1:\n\t" |
229 | "# ending __downgrade_write\n" | 244 | "# ending __downgrade_write\n" |
230 | : "+m" (sem->count) | 245 | : "+m" (sem->count) |
231 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) | 246 | : "a" (sem), "er" (-RWSEM_WAITING_BIAS) |
232 | : "memory", "cc"); | 247 | : "memory", "cc"); |
233 | } | 248 | } |
234 | 249 | ||
235 | /* | 250 | /* |
236 | * implement atomic add functionality | 251 | * implement atomic add functionality |
237 | */ | 252 | */ |
238 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | 253 | static inline void rwsem_atomic_add(rwsem_count_t delta, |
254 | struct rw_semaphore *sem) | ||
239 | { | 255 | { |
240 | asm volatile(LOCK_PREFIX "addl %1,%0" | 256 | asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" |
241 | : "+m" (sem->count) | 257 | : "+m" (sem->count) |
242 | : "ir" (delta)); | 258 | : "er" (delta)); |
243 | } | 259 | } |
244 | 260 | ||
245 | /* | 261 | /* |
246 | * implement exchange and add functionality | 262 | * implement exchange and add functionality |
247 | */ | 263 | */ |
248 | static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | 264 | static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, |
265 | struct rw_semaphore *sem) | ||
249 | { | 266 | { |
250 | int tmp = delta; | 267 | rwsem_count_t tmp = delta; |
251 | 268 | ||
252 | asm volatile(LOCK_PREFIX "xadd %0,%1" | 269 | asm volatile(LOCK_PREFIX "xadd %0,%1" |
253 | : "+r" (tmp), "+m" (sem->count) | 270 | : "+r" (tmp), "+m" (sem->count) |
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h index 1b7ee5d673c2..0a5242428659 100644 --- a/arch/x86/include/asm/sections.h +++ b/arch/x86/include/asm/sections.h | |||
@@ -2,7 +2,13 @@ | |||
2 | #define _ASM_X86_SECTIONS_H | 2 | #define _ASM_X86_SECTIONS_H |
3 | 3 | ||
4 | #include <asm-generic/sections.h> | 4 | #include <asm-generic/sections.h> |
5 | #include <asm/uaccess.h> | ||
5 | 6 | ||
6 | extern char __brk_base[], __brk_limit[]; | 7 | extern char __brk_base[], __brk_limit[]; |
8 | extern struct exception_table_entry __stop___ex_table[]; | ||
9 | |||
10 | #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) | ||
11 | extern char __end_rodata_hpage_align[]; | ||
12 | #endif | ||
7 | 13 | ||
8 | #endif /* _ASM_X86_SECTIONS_H */ | 14 | #endif /* _ASM_X86_SECTIONS_H */ |
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 18e496c98ff0..86b1506f4179 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -37,10 +37,8 @@ void setup_bios_corruption_check(void); | |||
37 | 37 | ||
38 | #ifdef CONFIG_X86_VISWS | 38 | #ifdef CONFIG_X86_VISWS |
39 | extern void visws_early_detect(void); | 39 | extern void visws_early_detect(void); |
40 | extern int is_visws_box(void); | ||
41 | #else | 40 | #else |
42 | static inline void visws_early_detect(void) { } | 41 | static inline void visws_early_detect(void) { } |
43 | static inline int is_visws_box(void) { return 0; } | ||
44 | #endif | 42 | #endif |
45 | 43 | ||
46 | extern unsigned long saved_video_mode; | 44 | extern unsigned long saved_video_mode; |
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 72e5a4491661..04459d25e66e 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h | |||
@@ -124,7 +124,7 @@ struct sigcontext { | |||
124 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | 124 | * fpstate is really (struct _fpstate *) or (struct _xstate *) |
125 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | 125 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved |
126 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | 126 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end |
127 | * of extended memory layout. See comments at the defintion of | 127 | * of extended memory layout. See comments at the definition of |
128 | * (struct _fpx_sw_bytes) | 128 | * (struct _fpx_sw_bytes) |
129 | */ | 129 | */ |
130 | void __user *fpstate; /* zero when no FPU/extended context */ | 130 | void __user *fpstate; /* zero when no FPU/extended context */ |
@@ -219,7 +219,7 @@ struct sigcontext { | |||
219 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | 219 | * fpstate is really (struct _fpstate *) or (struct _xstate *) |
220 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | 220 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved |
221 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | 221 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end |
222 | * of extended memory layout. See comments at the defintion of | 222 | * of extended memory layout. See comments at the definition of |
223 | * (struct _fpx_sw_bytes) | 223 | * (struct _fpx_sw_bytes) |
224 | */ | 224 | */ |
225 | void __user *fpstate; /* zero when no FPU/extended context */ | 225 | void __user *fpstate; /* zero when no FPU/extended context */ |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 1e796782cd7b..4cfc90824068 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -135,6 +135,8 @@ int native_cpu_disable(void); | |||
135 | void native_cpu_die(unsigned int cpu); | 135 | void native_cpu_die(unsigned int cpu); |
136 | void native_play_dead(void); | 136 | void native_play_dead(void); |
137 | void play_dead_common(void); | 137 | void play_dead_common(void); |
138 | void wbinvd_on_cpu(int cpu); | ||
139 | int wbinvd_on_all_cpus(void); | ||
138 | 140 | ||
139 | void native_send_call_func_ipi(const struct cpumask *mask); | 141 | void native_send_call_func_ipi(const struct cpumask *mask); |
140 | void native_send_call_func_single_ipi(int cpu); | 142 | void native_send_call_func_single_ipi(int cpu); |
@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void) | |||
147 | { | 149 | { |
148 | return cpumask_weight(cpu_callout_mask); | 150 | return cpumask_weight(cpu_callout_mask); |
149 | } | 151 | } |
152 | #else /* !CONFIG_SMP */ | ||
153 | #define wbinvd_on_cpu(cpu) wbinvd() | ||
154 | static inline int wbinvd_on_all_cpus(void) | ||
155 | { | ||
156 | wbinvd(); | ||
157 | return 0; | ||
158 | } | ||
150 | #endif /* CONFIG_SMP */ | 159 | #endif /* CONFIG_SMP */ |
151 | 160 | ||
152 | extern unsigned disabled_cpus __cpuinitdata; | 161 | extern unsigned disabled_cpus __cpuinitdata; |
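The UP fallback is inline above; the SMP variants are implemented out of line. A hedged sketch of what they plausibly look like (modeled on the accompanying cache-flush helper in arch/x86/lib/cache-smp.c):

static void __wbinvd(void *dummy)
{
        wbinvd();
}

void wbinvd_on_cpu(int cpu)
{
        smp_call_function_single(cpu, __wbinvd, NULL, 1);
}

int wbinvd_on_all_cpus(void)
{
        return on_each_cpu(__wbinvd, NULL, 1);
}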
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 4e77853321db..3089f70c0c52 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -58,7 +58,7 @@ | |||
58 | #if (NR_CPUS < 256) | 58 | #if (NR_CPUS < 256) |
59 | #define TICKET_SHIFT 8 | 59 | #define TICKET_SHIFT 8 |
60 | 60 | ||
61 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 61 | static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) |
62 | { | 62 | { |
63 | short inc = 0x0100; | 63 | short inc = 0x0100; |
64 | 64 | ||
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | |||
77 | : "memory", "cc"); | 77 | : "memory", "cc"); |
78 | } | 78 | } |
79 | 79 | ||
80 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 80 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
81 | { | 81 | { |
82 | int tmp, new; | 82 | int tmp, new; |
83 | 83 | ||
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | |||
96 | return tmp; | 96 | return tmp; |
97 | } | 97 | } |
98 | 98 | ||
99 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 99 | static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) |
100 | { | 100 | { |
101 | asm volatile(UNLOCK_LOCK_PREFIX "incb %0" | 101 | asm volatile(UNLOCK_LOCK_PREFIX "incb %0" |
102 | : "+m" (lock->slock) | 102 | : "+m" (lock->slock) |
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | |||
106 | #else | 106 | #else |
107 | #define TICKET_SHIFT 16 | 107 | #define TICKET_SHIFT 16 |
108 | 108 | ||
109 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 109 | static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) |
110 | { | 110 | { |
111 | int inc = 0x00010000; | 111 | int inc = 0x00010000; |
112 | int tmp; | 112 | int tmp; |
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | |||
127 | : "memory", "cc"); | 127 | : "memory", "cc"); |
128 | } | 128 | } |
129 | 129 | ||
130 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 130 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
131 | { | 131 | { |
132 | int tmp; | 132 | int tmp; |
133 | int new; | 133 | int new; |
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | |||
149 | return tmp; | 149 | return tmp; |
150 | } | 150 | } |
151 | 151 | ||
152 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 152 | static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) |
153 | { | 153 | { |
154 | asm volatile(UNLOCK_LOCK_PREFIX "incw %0" | 154 | asm volatile(UNLOCK_LOCK_PREFIX "incw %0" |
155 | : "+m" (lock->slock) | 155 | : "+m" (lock->slock) |
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | |||
158 | } | 158 | } |
159 | #endif | 159 | #endif |
160 | 160 | ||
161 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) | 161 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) |
162 | { | 162 | { |
163 | int tmp = ACCESS_ONCE(lock->slock); | 163 | int tmp = ACCESS_ONCE(lock->slock); |
164 | 164 | ||
165 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); | 165 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); |
166 | } | 166 | } |
167 | 167 | ||
168 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | 168 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) |
169 | { | 169 | { |
170 | int tmp = ACCESS_ONCE(lock->slock); | 170 | int tmp = ACCESS_ONCE(lock->slock); |
171 | 171 | ||
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | |||
174 | 174 | ||
175 | #ifndef CONFIG_PARAVIRT_SPINLOCKS | 175 | #ifndef CONFIG_PARAVIRT_SPINLOCKS |
176 | 176 | ||
177 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 177 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
178 | { | 178 | { |
179 | return __ticket_spin_is_locked(lock); | 179 | return __ticket_spin_is_locked(lock); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | 182 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
183 | { | 183 | { |
184 | return __ticket_spin_is_contended(lock); | 184 | return __ticket_spin_is_contended(lock); |
185 | } | 185 | } |
186 | #define __raw_spin_is_contended __raw_spin_is_contended | 186 | #define arch_spin_is_contended arch_spin_is_contended |
187 | 187 | ||
188 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) | 188 | static __always_inline void arch_spin_lock(arch_spinlock_t *lock) |
189 | { | 189 | { |
190 | __ticket_spin_lock(lock); | 190 | __ticket_spin_lock(lock); |
191 | } | 191 | } |
192 | 192 | ||
193 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) | 193 | static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) |
194 | { | 194 | { |
195 | return __ticket_spin_trylock(lock); | 195 | return __ticket_spin_trylock(lock); |
196 | } | 196 | } |
197 | 197 | ||
198 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) | 198 | static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) |
199 | { | 199 | { |
200 | __ticket_spin_unlock(lock); | 200 | __ticket_spin_unlock(lock); |
201 | } | 201 | } |
202 | 202 | ||
203 | static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, | 203 | static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, |
204 | unsigned long flags) | 204 | unsigned long flags) |
205 | { | 205 | { |
206 | __raw_spin_lock(lock); | 206 | arch_spin_lock(lock); |
207 | } | 207 | } |
208 | 208 | ||
209 | #endif /* CONFIG_PARAVIRT_SPINLOCKS */ | 209 | #endif /* CONFIG_PARAVIRT_SPINLOCKS */ |
210 | 210 | ||
211 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | 211 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
212 | { | 212 | { |
213 | while (__raw_spin_is_locked(lock)) | 213 | while (arch_spin_is_locked(lock)) |
214 | cpu_relax(); | 214 | cpu_relax(); |
215 | } | 215 | } |
216 | 216 | ||
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | |||
232 | * read_can_lock - would read_trylock() succeed? | 232 | * read_can_lock - would read_trylock() succeed? |
233 | * @lock: the rwlock in question. | 233 | * @lock: the rwlock in question. |
234 | */ | 234 | */ |
235 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) | 235 | static inline int arch_read_can_lock(arch_rwlock_t *lock) |
236 | { | 236 | { |
237 | return (int)(lock)->lock > 0; | 237 | return (int)(lock)->lock > 0; |
238 | } | 238 | } |
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock) | |||
241 | * write_can_lock - would write_trylock() succeed? | 241 | * write_can_lock - would write_trylock() succeed? |
242 | * @lock: the rwlock in question. | 242 | * @lock: the rwlock in question. |
243 | */ | 243 | */ |
244 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) | 244 | static inline int arch_write_can_lock(arch_rwlock_t *lock) |
245 | { | 245 | { |
246 | return (lock)->lock == RW_LOCK_BIAS; | 246 | return (lock)->lock == RW_LOCK_BIAS; |
247 | } | 247 | } |
248 | 248 | ||
249 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 249 | static inline void arch_read_lock(arch_rwlock_t *rw) |
250 | { | 250 | { |
251 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" | 251 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" |
252 | "jns 1f\n" | 252 | "jns 1f\n" |
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
255 | ::LOCK_PTR_REG (rw) : "memory"); | 255 | ::LOCK_PTR_REG (rw) : "memory"); |
256 | } | 256 | } |
257 | 257 | ||
258 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 258 | static inline void arch_write_lock(arch_rwlock_t *rw) |
259 | { | 259 | { |
260 | asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" | 260 | asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" |
261 | "jz 1f\n" | 261 | "jz 1f\n" |
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
264 | ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); | 264 | ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); |
265 | } | 265 | } |
266 | 266 | ||
267 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | 267 | static inline int arch_read_trylock(arch_rwlock_t *lock) |
268 | { | 268 | { |
269 | atomic_t *count = (atomic_t *)lock; | 269 | atomic_t *count = (atomic_t *)lock; |
270 | 270 | ||
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) | |||
274 | return 0; | 274 | return 0; |
275 | } | 275 | } |
276 | 276 | ||
277 | static inline int __raw_write_trylock(raw_rwlock_t *lock) | 277 | static inline int arch_write_trylock(arch_rwlock_t *lock) |
278 | { | 278 | { |
279 | atomic_t *count = (atomic_t *)lock; | 279 | atomic_t *count = (atomic_t *)lock; |
280 | 280 | ||
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) | |||
284 | return 0; | 284 | return 0; |
285 | } | 285 | } |
286 | 286 | ||
287 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 287 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
288 | { | 288 | { |
289 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); | 289 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); |
290 | } | 290 | } |
291 | 291 | ||
292 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 292 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
293 | { | 293 | { |
294 | asm volatile(LOCK_PREFIX "addl %1, %0" | 294 | asm volatile(LOCK_PREFIX "addl %1, %0" |
295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); | 295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); |
296 | } | 296 | } |
297 | 297 | ||
298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 298 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 299 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
300 | 300 | ||
301 | #define _raw_spin_relax(lock) cpu_relax() | 301 | #define arch_spin_relax(lock) cpu_relax() |
302 | #define _raw_read_relax(lock) cpu_relax() | 302 | #define arch_read_relax(lock) cpu_relax() |
303 | #define _raw_write_relax(lock) cpu_relax() | 303 | #define arch_write_relax(lock) cpu_relax() |
304 | 304 | ||
305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | 305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ |
306 | static inline void smp_mb__after_lock(void) { } | 306 | static inline void smp_mb__after_lock(void) { } |
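Note on the rwlock fast paths above: the lock word starts at RW_LOCK_BIAS (0x01000000). A reader subtracts 1 and proceeds while the result stays non-negative ("subl $1; jns"); a writer subtracts the whole bias and proceeds only if the result is exactly zero ("subl RW_LOCK_BIAS; jz"), i.e. no readers and no writer were present. A minimal userspace sketch of the same counting scheme using C11 atomics instead of the LOCK-prefixed asm (illustrative, not the kernel API):

#include <stdatomic.h>
#include <sched.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct { atomic_int lock; } sketch_rwlock_t;
#define SKETCH_RW_UNLOCKED { ATOMIC_VAR_INIT(RW_LOCK_BIAS) }  /* cf. __ARCH_RW_LOCK_UNLOCKED */

static void sketch_read_lock(sketch_rwlock_t *rw)
{
        /* success iff the result after -1 is still >= 0 */
        while (atomic_fetch_sub(&rw->lock, 1) <= 0) {
                atomic_fetch_add(&rw->lock, 1);        /* back out, like __read_lock_failed */
                while (atomic_load(&rw->lock) <= 0)
                        sched_yield();                 /* cpu_relax() stand-in */
        }
}

static void sketch_write_lock(sketch_rwlock_t *rw)
{
        /* success iff we took the whole bias from an idle lock */
        while (atomic_fetch_sub(&rw->lock, RW_LOCK_BIAS) != RW_LOCK_BIAS) {
                atomic_fetch_add(&rw->lock, RW_LOCK_BIAS);
                while (atomic_load(&rw->lock) != RW_LOCK_BIAS)
                        sched_yield();
        }
}

static void sketch_read_unlock(sketch_rwlock_t *rw)  { atomic_fetch_add(&rw->lock, 1); }
static void sketch_write_unlock(sketch_rwlock_t *rw) { atomic_fetch_add(&rw->lock, RW_LOCK_BIAS); }

The unlock paths mirror the "incl"/"addl RW_LOCK_BIAS" instructions in the hunk above.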
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 845f81c87091..dcb48b2edc11 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h | |||
@@ -5,16 +5,16 @@ | |||
5 | # error "please don't include this file directly" | 5 | # error "please don't include this file directly" |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct raw_spinlock { | 8 | typedef struct arch_spinlock { |
9 | unsigned int slock; | 9 | unsigned int slock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | unsigned int lock; | 15 | unsigned int lock; |
16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 18 | #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
19 | 19 | ||
20 | #endif /* _ASM_X86_SPINLOCK_TYPES_H */ | 20 | #endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
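The type rename here is mechanical: raw_spinlock_t/raw_rwlock_t become arch_spinlock_t/arch_rwlock_t, with matching initializer macros, so the arch-independent locking layer can reclaim the raw_* names. Declarations just switch spellings, e.g. (illustrative):

static arch_spinlock_t demo_lock   = __ARCH_SPIN_LOCK_UNLOCKED;  /* was __RAW_SPIN_LOCK_UNLOCKED */
static arch_rwlock_t   demo_rwlock = __ARCH_RW_LOCK_UNLOCKED;    /* was __RAW_RW_LOCK_UNLOCKED */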
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index cf86a5e73815..4dab78edbad9 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h | |||
@@ -3,7 +3,28 @@ | |||
3 | 3 | ||
4 | extern int kstack_depth_to_print; | 4 | extern int kstack_depth_to_print; |
5 | 5 | ||
6 | int x86_is_stack_id(int id, char *name); | 6 | struct thread_info; |
7 | struct stacktrace_ops; | ||
8 | |||
9 | typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, | ||
10 | unsigned long *stack, | ||
11 | unsigned long bp, | ||
12 | const struct stacktrace_ops *ops, | ||
13 | void *data, | ||
14 | unsigned long *end, | ||
15 | int *graph); | ||
16 | |||
17 | extern unsigned long | ||
18 | print_context_stack(struct thread_info *tinfo, | ||
19 | unsigned long *stack, unsigned long bp, | ||
20 | const struct stacktrace_ops *ops, void *data, | ||
21 | unsigned long *end, int *graph); | ||
22 | |||
23 | extern unsigned long | ||
24 | print_context_stack_bp(struct thread_info *tinfo, | ||
25 | unsigned long *stack, unsigned long bp, | ||
26 | const struct stacktrace_ops *ops, void *data, | ||
27 | unsigned long *end, int *graph); | ||
7 | 28 | ||
8 | /* Generic stack tracer with callbacks */ | 29 | /* Generic stack tracer with callbacks */ |
9 | 30 | ||
@@ -14,6 +35,7 @@ struct stacktrace_ops { | |||
14 | void (*address)(void *data, unsigned long address, int reliable); | 35 | void (*address)(void *data, unsigned long address, int reliable); |
15 | /* On negative return stop dumping */ | 36 | /* On negative return stop dumping */ |
16 | int (*stack)(void *data, char *name); | 37 | int (*stack)(void *data, char *name); |
38 | walk_stack_t walk_stack; | ||
17 | }; | 39 | }; |
18 | 40 | ||
19 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | 41 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, |
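The new walk_stack callback lets each dump_trace() user choose a frame-walking strategy: print_context_stack scans every word on the stack, while print_context_stack_bp follows only the frame-pointer chain. A hypothetical ops instance wiring this up (the address callback below is a placeholder, not kernel code):

static void demo_address(void *data, unsigned long addr, int reliable)
{
        printk("%s[<%p>]\n", reliable ? "" : "? ", (void *)addr);
}

static const struct stacktrace_ops demo_ops = {
        .address    = demo_address,
        .walk_stack = print_context_stack_bp,  /* trust the frame-pointer chain */
        /* .warning, .warning_symbol, .stack left NULL for brevity */
};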
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index ae907e617181..3d3e8353ee5c 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h | |||
@@ -177,10 +177,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) | |||
177 | */ | 177 | */ |
178 | 178 | ||
179 | #ifndef CONFIG_KMEMCHECK | 179 | #ifndef CONFIG_KMEMCHECK |
180 | |||
181 | #if (__GNUC__ >= 4) | ||
182 | #define memcpy(t, f, n) __builtin_memcpy(t, f, n) | ||
183 | #else | ||
180 | #define memcpy(t, f, n) \ | 184 | #define memcpy(t, f, n) \ |
181 | (__builtin_constant_p((n)) \ | 185 | (__builtin_constant_p((n)) \ |
182 | ? __constant_memcpy((t), (f), (n)) \ | 186 | ? __constant_memcpy((t), (f), (n)) \ |
183 | : __memcpy((t), (f), (n))) | 187 | : __memcpy((t), (f), (n))) |
188 | #endif | ||
184 | #else | 189 | #else |
185 | /* | 190 | /* |
186 | * kmemcheck becomes very happy if we use the REP instructions unconditionally, | 191 | * kmemcheck becomes very happy if we use the REP instructions unconditionally, |
@@ -316,11 +321,15 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, | |||
316 | : __memset_generic((s), (c), (count))) | 321 | : __memset_generic((s), (c), (count))) |
317 | 322 | ||
318 | #define __HAVE_ARCH_MEMSET | 323 | #define __HAVE_ARCH_MEMSET |
324 | #if (__GNUC__ >= 4) | ||
325 | #define memset(s, c, count) __builtin_memset(s, c, count) | ||
326 | #else | ||
319 | #define memset(s, c, count) \ | 327 | #define memset(s, c, count) \ |
320 | (__builtin_constant_p(c) \ | 328 | (__builtin_constant_p(c) \ |
321 | ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ | 329 | ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
322 | (count)) \ | 330 | (count)) \ |
323 | : __memset((s), (c), (count))) | 331 | : __memset((s), (c), (count))) |
332 | #endif | ||
324 | 333 | ||
325 | /* | 334 | /* |
326 | * find the first occurrence of byte 'c', or 1 past the area if none | 335 | * find the first occurrence of byte 'c', or 1 past the area if none |
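For GCC >= 4 the hand-rolled constant-size dispatch is bypassed in favor of __builtin_memcpy()/__builtin_memset(): the compiler already expands small constant-size operations inline and emits an ordinary call otherwise, which is exactly what the old __builtin_constant_p() ladder approximated by hand. A quick userspace illustration of the two cases:

#include <string.h>

struct hdr { unsigned int a, b; };

void copy_hdr(struct hdr *dst, const struct hdr *src)
{
        /* constant size: expands to a couple of register moves, no call */
        __builtin_memcpy(dst, src, sizeof(*dst));
}

void copy_buf(void *dst, const void *src, size_t n)
{
        /* variable size: degrades to a normal memcpy() call */
        __builtin_memcpy(dst, src, n);
}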
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 85574b7c1bc1..38638cd2fa4c 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h | |||
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { | |||
57 | u16 intercept_dr_write; | 57 | u16 intercept_dr_write; |
58 | u32 intercept_exceptions; | 58 | u32 intercept_exceptions; |
59 | u64 intercept; | 59 | u64 intercept; |
60 | u8 reserved_1[44]; | 60 | u8 reserved_1[42]; |
61 | u16 pause_filter_count; | ||
61 | u64 iopm_base_pa; | 62 | u64 iopm_base_pa; |
62 | u64 msrpm_base_pa; | 63 | u64 msrpm_base_pa; |
63 | u64 tsc_offset; | 64 | u64 tsc_offset; |
@@ -312,7 +313,7 @@ struct __attribute__ ((__packed__)) vmcb { | |||
312 | 313 | ||
313 | #define SVM_EXIT_ERR -1 | 314 | #define SVM_EXIT_ERR -1 |
314 | 315 | ||
315 | #define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ | 316 | #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) |
316 | 317 | ||
317 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" | 318 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" |
318 | #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" | 319 | #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" |
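Two cleanups above: SVM_CR0_SELECTIVE_MASK is now spelled with the named CR0 bits (X86_CR0_TS | X86_CR0_MP has the same value as the old 1 << 3 | 1), and pause_filter_count is carved out of reserved_1, which shrinks from 44 to 42 bytes so every later field keeps its architectural offset. A build-time check one could add to pin that down (illustrative, assuming the struct above is in scope; per the AMD spec IOPM_BASE_PA sits at offset 0x40):

#include <stddef.h>

_Static_assert(offsetof(struct vmcb_control_area, iopm_base_pa) == 0x40,
               "pause_filter_count must come out of reserved_1, not move iopm_base_pa");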
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index b9e4e20174fb..8085277e1b8b 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h | |||
@@ -3,15 +3,16 @@ | |||
3 | 3 | ||
4 | #include <linux/swiotlb.h> | 4 | #include <linux/swiotlb.h> |
5 | 5 | ||
6 | /* SWIOTLB interface */ | ||
7 | |||
8 | extern int swiotlb_force; | ||
9 | |||
10 | #ifdef CONFIG_SWIOTLB | 6 | #ifdef CONFIG_SWIOTLB |
11 | extern int swiotlb; | 7 | extern int swiotlb; |
12 | extern void pci_swiotlb_init(void); | 8 | extern int __init pci_swiotlb_detect(void); |
9 | extern void __init pci_swiotlb_init(void); | ||
13 | #else | 10 | #else |
14 | #define swiotlb 0 | 11 | #define swiotlb 0 |
12 | static inline int pci_swiotlb_detect(void) | ||
13 | { | ||
14 | return 0; | ||
15 | } | ||
15 | static inline void pci_swiotlb_init(void) | 16 | static inline void pci_swiotlb_init(void) |
16 | { | 17 | { |
17 | } | 18 | } |
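Splitting pci_swiotlb_detect() out of pci_swiotlb_init() lets the DMA setup code probe first and commit later, and the !CONFIG_SWIOTLB inline stubs keep the call sites unconditional. A sketch of the intended calling pattern (the surrounding function is illustrative):

void sketch_iommu_setup(void)
{
        /* detect: decide whether swiotlb will be used (non-zero = yes)... */
        if (pci_swiotlb_detect())
                pci_swiotlb_init();     /* ...init: actually set up the bounce buffers */
        /* with CONFIG_SWIOTLB=n both calls compile away via the stubs above */
}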
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 72a6dcd1299b..3ad421784ae7 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -26,11 +26,10 @@ asmlinkage long sys32_lstat64(char __user *, struct stat64 __user *); | |||
26 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); | 26 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); |
27 | asmlinkage long sys32_fstatat(unsigned int, char __user *, | 27 | asmlinkage long sys32_fstatat(unsigned int, char __user *, |
28 | struct stat64 __user *, int); | 28 | struct stat64 __user *, int); |
29 | struct mmap_arg_struct; | 29 | struct mmap_arg_struct32; |
30 | asmlinkage long sys32_mmap(struct mmap_arg_struct __user *); | 30 | asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *); |
31 | asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long); | 31 | asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long); |
32 | 32 | ||
33 | asmlinkage long sys32_pipe(int __user *); | ||
34 | struct sigaction32; | 33 | struct sigaction32; |
35 | struct old_sigaction32; | 34 | struct old_sigaction32; |
36 | asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, | 35 | asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, |
@@ -41,8 +40,6 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *, | |||
41 | compat_sigset_t __user *, unsigned int); | 40 | compat_sigset_t __user *, unsigned int); |
42 | asmlinkage long sys32_alarm(unsigned int); | 41 | asmlinkage long sys32_alarm(unsigned int); |
43 | 42 | ||
44 | struct sel_arg_struct; | ||
45 | asmlinkage long sys32_old_select(struct sel_arg_struct __user *); | ||
46 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); | 43 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); |
47 | asmlinkage long sys32_sysfs(int, u32, u32); | 44 | asmlinkage long sys32_sysfs(int, u32, u32); |
48 | 45 | ||
@@ -51,25 +48,12 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, | |||
51 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); | 48 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); |
52 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); | 49 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); |
53 | 50 | ||
54 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
55 | struct sysctl_ia32; | ||
56 | asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *); | ||
57 | #endif | ||
58 | |||
59 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); | 51 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); |
60 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); | 52 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); |
61 | 53 | ||
62 | asmlinkage long sys32_personality(unsigned long); | 54 | asmlinkage long sys32_personality(unsigned long); |
63 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); | 55 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); |
64 | 56 | ||
65 | asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long, | ||
66 | unsigned long, unsigned long, unsigned long); | ||
67 | |||
68 | struct oldold_utsname; | ||
69 | struct old_utsname; | ||
70 | asmlinkage long sys32_olduname(struct oldold_utsname __user *); | ||
71 | long sys32_uname(struct old_utsname __user *); | ||
72 | |||
73 | asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, | 57 | asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, |
74 | compat_uptr_t __user *, struct pt_regs *); | 58 | compat_uptr_t __user *, struct pt_regs *); |
75 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); | 59 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); |
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 8d33bc5462d1..c4a348f7bd43 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | 18 | ||
19 | extern const unsigned long sys_call_table[]; | ||
20 | |||
19 | /* | 21 | /* |
20 | * Only the low 32 bits of orig_ax are meaningful, so we return int. | 22 | * Only the low 32 bits of orig_ax are meaningful, so we return int. |
21 | * This importantly ignores the high bits on 64-bit, so comparisons | 23 | * This importantly ignores the high bits on 64-bit, so comparisons |
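Exporting sys_call_table lets generic code (e.g. the syscall tracing machinery) translate a syscall number into its handler. An illustrative lookup, assuming nr has already been bounds-checked:

typedef long (*sketch_syscall_fn_t)(unsigned long, unsigned long, unsigned long,
                                    unsigned long, unsigned long, unsigned long);

static sketch_syscall_fn_t sketch_syscall_fn(unsigned int nr)
{
        /* entries are stored as unsigned long; cast back to a function pointer */
        return (sketch_syscall_fn_t)sys_call_table[nr];
}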
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 372b76edd63f..5c044b43e9a7 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h | |||
@@ -18,16 +18,24 @@ | |||
18 | /* Common in X86_32 and X86_64 */ | 18 | /* Common in X86_32 and X86_64 */ |
19 | /* kernel/ioport.c */ | 19 | /* kernel/ioport.c */ |
20 | asmlinkage long sys_ioperm(unsigned long, unsigned long, int); | 20 | asmlinkage long sys_ioperm(unsigned long, unsigned long, int); |
21 | long sys_iopl(unsigned int, struct pt_regs *); | ||
21 | 22 | ||
22 | /* kernel/process.c */ | 23 | /* kernel/process.c */ |
23 | int sys_fork(struct pt_regs *); | 24 | int sys_fork(struct pt_regs *); |
24 | int sys_vfork(struct pt_regs *); | 25 | int sys_vfork(struct pt_regs *); |
26 | long sys_execve(char __user *, char __user * __user *, | ||
27 | char __user * __user *, struct pt_regs *); | ||
28 | long sys_clone(unsigned long, unsigned long, void __user *, | ||
29 | void __user *, struct pt_regs *); | ||
25 | 30 | ||
26 | /* kernel/ldt.c */ | 31 | /* kernel/ldt.c */ |
27 | asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); | 32 | asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); |
28 | 33 | ||
29 | /* kernel/signal.c */ | 34 | /* kernel/signal.c */ |
30 | long sys_rt_sigreturn(struct pt_regs *); | 35 | long sys_rt_sigreturn(struct pt_regs *); |
36 | long sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
37 | struct pt_regs *); | ||
38 | |||
31 | 39 | ||
32 | /* kernel/tls.c */ | 40 | /* kernel/tls.c */ |
33 | asmlinkage int sys_set_thread_area(struct user_desc __user *); | 41 | asmlinkage int sys_set_thread_area(struct user_desc __user *); |
@@ -35,63 +43,26 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *); | |||
35 | 43 | ||
36 | /* X86_32 only */ | 44 | /* X86_32 only */ |
37 | #ifdef CONFIG_X86_32 | 45 | #ifdef CONFIG_X86_32 |
38 | /* kernel/ioport.c */ | ||
39 | long sys_iopl(struct pt_regs *); | ||
40 | |||
41 | /* kernel/process_32.c */ | ||
42 | int sys_clone(struct pt_regs *); | ||
43 | int sys_execve(struct pt_regs *); | ||
44 | 46 | ||
45 | /* kernel/signal.c */ | 47 | /* kernel/signal.c */ |
46 | asmlinkage int sys_sigsuspend(int, int, old_sigset_t); | 48 | asmlinkage int sys_sigsuspend(int, int, old_sigset_t); |
47 | asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, | 49 | asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, |
48 | struct old_sigaction __user *); | 50 | struct old_sigaction __user *); |
49 | int sys_sigaltstack(struct pt_regs *); | ||
50 | unsigned long sys_sigreturn(struct pt_regs *); | 51 | unsigned long sys_sigreturn(struct pt_regs *); |
51 | 52 | ||
52 | /* kernel/sys_i386_32.c */ | ||
53 | struct mmap_arg_struct; | ||
54 | struct sel_arg_struct; | ||
55 | struct oldold_utsname; | ||
56 | struct old_utsname; | ||
57 | |||
58 | asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, | ||
59 | unsigned long, unsigned long, unsigned long); | ||
60 | asmlinkage int old_mmap(struct mmap_arg_struct __user *); | ||
61 | asmlinkage int old_select(struct sel_arg_struct __user *); | ||
62 | asmlinkage int sys_ipc(uint, int, int, int, void __user *, long); | ||
63 | asmlinkage int sys_uname(struct old_utsname __user *); | ||
64 | asmlinkage int sys_olduname(struct oldold_utsname __user *); | ||
65 | |||
66 | /* kernel/vm86_32.c */ | 53 | /* kernel/vm86_32.c */ |
67 | int sys_vm86old(struct pt_regs *); | 54 | int sys_vm86old(struct vm86_struct __user *, struct pt_regs *); |
68 | int sys_vm86(struct pt_regs *); | 55 | int sys_vm86(unsigned long, unsigned long, struct pt_regs *); |
69 | 56 | ||
70 | #else /* CONFIG_X86_32 */ | 57 | #else /* CONFIG_X86_32 */ |
71 | 58 | ||
72 | /* X86_64 only */ | 59 | /* X86_64 only */ |
73 | /* kernel/ioport.c */ | ||
74 | asmlinkage long sys_iopl(unsigned int, struct pt_regs *); | ||
75 | |||
76 | /* kernel/process_64.c */ | 60 | /* kernel/process_64.c */ |
77 | asmlinkage long sys_clone(unsigned long, unsigned long, | ||
78 | void __user *, void __user *, | ||
79 | struct pt_regs *); | ||
80 | asmlinkage long sys_execve(char __user *, char __user * __user *, | ||
81 | char __user * __user *, | ||
82 | struct pt_regs *); | ||
83 | long sys_arch_prctl(int, unsigned long); | 61 | long sys_arch_prctl(int, unsigned long); |
84 | 62 | ||
85 | /* kernel/signal.c */ | ||
86 | asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
87 | struct pt_regs *); | ||
88 | |||
89 | /* kernel/sys_x86_64.c */ | 63 | /* kernel/sys_x86_64.c */ |
90 | struct new_utsname; | ||
91 | |||
92 | asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, | 64 | asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, |
93 | unsigned long, unsigned long, unsigned long); | 65 | unsigned long, unsigned long, unsigned long); |
94 | asmlinkage long sys_uname(struct new_utsname __user *); | ||
95 | 66 | ||
96 | #endif /* CONFIG_X86_32 */ | 67 | #endif /* CONFIG_X86_32 */ |
97 | #endif /* _ASM_X86_SYSCALLS_H */ | 68 | #endif /* _ASM_X86_SYSCALLS_H */ |
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index f08f97374892..b8fe48ee2ed9 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -11,9 +11,9 @@ | |||
11 | #include <linux/irqflags.h> | 11 | #include <linux/irqflags.h> |
12 | 12 | ||
13 | /* entries in ARCH_DLINFO: */ | 13 | /* entries in ARCH_DLINFO: */ |
14 | #ifdef CONFIG_IA32_EMULATION | 14 | #if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) |
15 | # define AT_VECTOR_SIZE_ARCH 2 | 15 | # define AT_VECTOR_SIZE_ARCH 2 |
16 | #else | 16 | #else /* else it's non-compat x86-64 */ |
17 | # define AT_VECTOR_SIZE_ARCH 1 | 17 | # define AT_VECTOR_SIZE_ARCH 1 |
18 | #endif | 18 | #endif |
19 | 19 | ||
@@ -23,6 +23,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
23 | struct tss_struct; | 23 | struct tss_struct; |
24 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | 24 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
25 | struct tss_struct *tss); | 25 | struct tss_struct *tss); |
26 | extern void show_regs_common(void); | ||
26 | 27 | ||
27 | #ifdef CONFIG_X86_32 | 28 | #ifdef CONFIG_X86_32 |
28 | 29 | ||
@@ -31,7 +32,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
31 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ | 32 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ |
32 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" | 33 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" |
33 | #define __switch_canary_oparam \ | 34 | #define __switch_canary_oparam \ |
34 | , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) | 35 | , [stack_canary] "=m" (stack_canary.canary) |
35 | #define __switch_canary_iparam \ | 36 | #define __switch_canary_iparam \ |
36 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | 37 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) |
37 | #else /* CC_STACKPROTECTOR */ | 38 | #else /* CC_STACKPROTECTOR */ |
@@ -113,7 +114,7 @@ do { \ | |||
113 | "movq %P[task_canary](%%rsi),%%r8\n\t" \ | 114 | "movq %P[task_canary](%%rsi),%%r8\n\t" \ |
114 | "movq %%r8,"__percpu_arg([gs_canary])"\n\t" | 115 | "movq %%r8,"__percpu_arg([gs_canary])"\n\t" |
115 | #define __switch_canary_oparam \ | 116 | #define __switch_canary_oparam \ |
116 | , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary)) | 117 | , [gs_canary] "=m" (irq_stack_union.stack_canary) |
117 | #define __switch_canary_iparam \ | 118 | #define __switch_canary_iparam \ |
118 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | 119 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) |
119 | #else /* CC_STACKPROTECTOR */ | 120 | #else /* CC_STACKPROTECTOR */ |
@@ -128,13 +129,11 @@ do { \ | |||
128 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ | 129 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ |
129 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ | 130 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ |
130 | "call __switch_to\n\t" \ | 131 | "call __switch_to\n\t" \ |
131 | ".globl thread_return\n" \ | ||
132 | "thread_return:\n\t" \ | ||
133 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ | 132 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ |
134 | __switch_canary \ | 133 | __switch_canary \ |
135 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | 134 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ |
136 | "movq %%rax,%%rdi\n\t" \ | 135 | "movq %%rax,%%rdi\n\t" \ |
137 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ | 136 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ |
138 | "jnz ret_from_fork\n\t" \ | 137 | "jnz ret_from_fork\n\t" \ |
139 | RESTORE_CONTEXT \ | 138 | RESTORE_CONTEXT \ |
140 | : "=a" (last) \ | 139 | : "=a" (last) \ |
@@ -144,7 +143,7 @@ do { \ | |||
144 | [ti_flags] "i" (offsetof(struct thread_info, flags)), \ | 143 | [ti_flags] "i" (offsetof(struct thread_info, flags)), \ |
145 | [_tif_fork] "i" (_TIF_FORK), \ | 144 | [_tif_fork] "i" (_TIF_FORK), \ |
146 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ | 145 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ |
147 | [current_task] "m" (per_cpu_var(current_task)) \ | 146 | [current_task] "m" (current_task) \ |
148 | __switch_canary_iparam \ | 147 | __switch_canary_iparam \ |
149 | : "memory", "cc" __EXTRA_CLOBBER) | 148 | : "memory", "cc" __EXTRA_CLOBBER) |
150 | #endif | 149 | #endif |
@@ -157,19 +156,22 @@ extern void native_load_gs_index(unsigned); | |||
157 | * Load a segment. Fall back on loading the zero | 156 | * Load a segment. Fall back on loading the zero |
158 | * segment if something goes wrong.. | 157 | * segment if something goes wrong.. |
159 | */ | 158 | */ |
160 | #define loadsegment(seg, value) \ | 159 | #define loadsegment(seg, value) \ |
161 | asm volatile("\n" \ | 160 | do { \ |
162 | "1:\t" \ | 161 | unsigned short __val = (value); \ |
163 | "movl %k0,%%" #seg "\n" \ | 162 | \ |
164 | "2:\n" \ | 163 | asm volatile(" \n" \ |
165 | ".section .fixup,\"ax\"\n" \ | 164 | "1: movl %k0,%%" #seg " \n" \ |
166 | "3:\t" \ | 165 | \ |
167 | "movl %k1, %%" #seg "\n\t" \ | 166 | ".section .fixup,\"ax\" \n" \ |
168 | "jmp 2b\n" \ | 167 | "2: xorl %k0,%k0 \n" \ |
169 | ".previous\n" \ | 168 | " jmp 1b \n" \ |
170 | _ASM_EXTABLE(1b,3b) \ | 169 | ".previous \n" \ |
171 | : :"r" (value), "r" (0) : "memory") | 170 | \ |
172 | 171 | _ASM_EXTABLE(1b, 2b) \ | |
172 | \ | ||
173 | : "+r" (__val) : : "memory"); \ | ||
174 | } while (0) | ||
173 | 175 | ||
174 | /* | 176 | /* |
175 | * Save a segment register away | 177 | * Save a segment register away |
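The loadsegment() rewrite changes the fault recovery: instead of keeping a spare register preloaded with zero ("r" (0)) and loading that in the fixup, the fixup now zeroes the single in/out operand and jumps back to the original mov, which reloads the null selector and cannot fault again. That drops one register constraint from the macro. For reference, _ASM_EXTABLE(1b, 2b) emits roughly the following on 64-bit kernels of this era (a sketch, not the exact header): a (faulting insn, fixup) address pair in __ex_table that the page-fault handler searches.

/* sketch of the _ASM_EXTABLE(from, to) expansion, x86-64, absolute-address era */
#define SKETCH_ASM_EXTABLE(from, to)            \
        " .section __ex_table,\"a\"\n"          \
        " .balign 8\n"                          \
        " .quad " #from "," #to "\n"            \
        " .previous\n"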
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index d27d0a2fec4c..e0d28901e969 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -83,10 +83,10 @@ struct thread_info { | |||
83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
84 | #define TIF_SECCOMP 8 /* secure computing */ | 84 | #define TIF_SECCOMP 8 /* secure computing */ |
85 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 85 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
86 | #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ | ||
86 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | 87 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
87 | #define TIF_IA32 17 /* 32bit process */ | 88 | #define TIF_IA32 17 /* 32bit process */ |
88 | #define TIF_FORK 18 /* ret_from_fork */ | 89 | #define TIF_FORK 18 /* ret_from_fork */ |
89 | #define TIF_ABI_PENDING 19 | ||
90 | #define TIF_MEMDIE 20 | 90 | #define TIF_MEMDIE 20 |
91 | #define TIF_DEBUG 21 /* uses debug registers */ | 91 | #define TIF_DEBUG 21 /* uses debug registers */ |
92 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | 92 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ |
@@ -107,10 +107,10 @@ struct thread_info { | |||
107 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 107 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
108 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 108 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
109 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) | 109 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) |
110 | #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) | ||
110 | #define _TIF_NOTSC (1 << TIF_NOTSC) | 111 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
111 | #define _TIF_IA32 (1 << TIF_IA32) | 112 | #define _TIF_IA32 (1 << TIF_IA32) |
112 | #define _TIF_FORK (1 << TIF_FORK) | 113 | #define _TIF_FORK (1 << TIF_FORK) |
113 | #define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) | ||
114 | #define _TIF_DEBUG (1 << TIF_DEBUG) | 114 | #define _TIF_DEBUG (1 << TIF_DEBUG) |
115 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | 115 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
116 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 116 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
@@ -142,13 +142,14 @@ struct thread_info { | |||
142 | 142 | ||
143 | /* Only used for 64 bit */ | 143 | /* Only used for 64 bit */ |
144 | #define _TIF_DO_NOTIFY_MASK \ | 144 | #define _TIF_DO_NOTIFY_MASK \ |
145 | (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME) | 145 | (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ |
146 | _TIF_USER_RETURN_NOTIFY) | ||
146 | 147 | ||
147 | /* flags to check in __switch_to() */ | 148 | /* flags to check in __switch_to() */ |
148 | #define _TIF_WORK_CTXSW \ | 149 | #define _TIF_WORK_CTXSW \ |
149 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) | 150 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) |
150 | 151 | ||
151 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | 152 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
152 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | 153 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) |
153 | 154 | ||
154 | #define PREEMPT_ACTIVE 0x10000000 | 155 | #define PREEMPT_ACTIVE 0x10000000 |
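TIF_USER_RETURN_NOTIFY (taking bit 11) marks tasks that registered a user-return notifier, with KVM as the user: the callback must run both just before returning to userspace (hence its addition to _TIF_DO_NOTIFY_MASK) and when the CPU switches away from the task (hence _TIF_WORK_CTXSW_PREV); TIF_ABI_PENDING is retired in the same window. A sketch of the exit-path consumer, mirroring the do_notify_resume() handling (illustrative):

static void sketch_do_notify_resume(__u32 thread_info_flags)
{
        if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
                fire_user_return_notifiers();   /* run the registered hooks */
}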
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 40e37b10c6c0..c5087d796587 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -35,11 +35,16 @@ | |||
35 | # endif | 35 | # endif |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | /* Node not present */ | 38 | /* |
39 | #define NUMA_NO_NODE (-1) | 39 | * to preserve the visibility of NUMA_NO_NODE definition, |
40 | * moved to there from here. May be used independent of | ||
41 | * CONFIG_NUMA. | ||
42 | */ | ||
43 | #include <linux/numa.h> | ||
40 | 44 | ||
41 | #ifdef CONFIG_NUMA | 45 | #ifdef CONFIG_NUMA |
42 | #include <linux/cpumask.h> | 46 | #include <linux/cpumask.h> |
47 | |||
43 | #include <asm/mpspec.h> | 48 | #include <asm/mpspec.h> |
44 | 49 | ||
45 | #ifdef CONFIG_X86_32 | 50 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index 90f06c25221d..cb507bb05d79 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h | |||
@@ -16,7 +16,6 @@ extern unsigned long initial_code; | |||
16 | extern unsigned long initial_gs; | 16 | extern unsigned long initial_gs; |
17 | 17 | ||
18 | #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) | 18 | #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) |
19 | #define TRAMPOLINE_BASE 0x6000 | ||
20 | 19 | ||
21 | extern unsigned long setup_trampoline(void); | 20 | extern unsigned long setup_trampoline(void); |
22 | extern void __init reserve_trampoline_memory(void); | 21 | extern void __init reserve_trampoline_memory(void); |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index d2c6c930b491..abd3e0ea762a 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -570,7 +570,6 @@ extern struct movsl_mask { | |||
570 | #ifdef CONFIG_X86_32 | 570 | #ifdef CONFIG_X86_32 |
571 | # include "uaccess_32.h" | 571 | # include "uaccess_32.h" |
572 | #else | 572 | #else |
573 | # define ARCH_HAS_SEARCH_EXTABLE | ||
574 | # include "uaccess_64.h" | 573 | # include "uaccess_64.h" |
575 | #endif | 574 | #endif |
576 | 575 | ||
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 632fb44b4cb5..088d09fb1615 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h | |||
@@ -187,9 +187,33 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from, | |||
187 | 187 | ||
188 | unsigned long __must_check copy_to_user(void __user *to, | 188 | unsigned long __must_check copy_to_user(void __user *to, |
189 | const void *from, unsigned long n); | 189 | const void *from, unsigned long n); |
190 | unsigned long __must_check copy_from_user(void *to, | 190 | unsigned long __must_check _copy_from_user(void *to, |
191 | const void __user *from, | 191 | const void __user *from, |
192 | unsigned long n); | 192 | unsigned long n); |
193 | |||
194 | |||
195 | extern void copy_from_user_overflow(void) | ||
196 | #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS | ||
197 | __compiletime_error("copy_from_user() buffer size is not provably correct") | ||
198 | #else | ||
199 | __compiletime_warning("copy_from_user() buffer size is not provably correct") | ||
200 | #endif | ||
201 | ; | ||
202 | |||
203 | static inline unsigned long __must_check copy_from_user(void *to, | ||
204 | const void __user *from, | ||
205 | unsigned long n) | ||
206 | { | ||
207 | int sz = __compiletime_object_size(to); | ||
208 | |||
209 | if (likely(sz == -1 || sz >= n)) | ||
210 | n = _copy_from_user(to, from, n); | ||
211 | else | ||
212 | copy_from_user_overflow(); | ||
213 | |||
214 | return n; | ||
215 | } | ||
216 | |||
193 | long __must_check strncpy_from_user(char *dst, const char __user *src, | 217 | long __must_check strncpy_from_user(char *dst, const char __user *src, |
194 | long count); | 218 | long count); |
195 | long __must_check __strncpy_from_user(char *dst, | 219 | long __must_check __strncpy_from_user(char *dst, |
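copy_from_user() is now a wrapper around _copy_from_user(): __compiletime_object_size() asks GCC for the destination's provable size (-1 when unknown), and if a larger length is requested the code calls copy_from_user_overflow(), whose __compiletime_warning/__compiletime_error attribute fires when the call survives optimization. The same trick in plain userspace GCC (attribute spelling differs outside the kernel; needs GCC >= 4.3 and -O for the folding):

extern void overflow_detected(void)
        __attribute__((warning("copy destination provably too small")));

static inline void checked_copy(void *to, const void *from, unsigned long n)
{
        unsigned long sz = __builtin_object_size(to, 0); /* (unsigned long)-1 if unknown */

        if (sz == (unsigned long)-1 || sz >= n)
                __builtin_memcpy(to, from, n);
        else
                overflow_detected();    /* call folds away unless provably wrong */
}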
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index db24b215fc50..316708d5af92 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -8,6 +8,8 @@ | |||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/prefetch.h> | 9 | #include <linux/prefetch.h> |
10 | #include <linux/lockdep.h> | 10 | #include <linux/lockdep.h> |
11 | #include <asm/alternative.h> | ||
12 | #include <asm/cpufeature.h> | ||
11 | #include <asm/page.h> | 13 | #include <asm/page.h> |
12 | 14 | ||
13 | /* | 15 | /* |
@@ -16,15 +18,56 @@ | |||
16 | 18 | ||
17 | /* Handles exceptions in both to and from, but doesn't do access_ok */ | 19 | /* Handles exceptions in both to and from, but doesn't do access_ok */ |
18 | __must_check unsigned long | 20 | __must_check unsigned long |
19 | copy_user_generic(void *to, const void *from, unsigned len); | 21 | copy_user_generic_string(void *to, const void *from, unsigned len); |
22 | __must_check unsigned long | ||
23 | copy_user_generic_unrolled(void *to, const void *from, unsigned len); | ||
24 | |||
25 | static __always_inline __must_check unsigned long | ||
26 | copy_user_generic(void *to, const void *from, unsigned len) | ||
27 | { | ||
28 | unsigned ret; | ||
29 | |||
30 | alternative_call(copy_user_generic_unrolled, | ||
31 | copy_user_generic_string, | ||
32 | X86_FEATURE_REP_GOOD, | ||
33 | ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), | ||
34 | "=d" (len)), | ||
35 | "1" (to), "2" (from), "3" (len) | ||
36 | : "memory", "rcx", "r8", "r9", "r10", "r11"); | ||
37 | return ret; | ||
38 | } | ||
20 | 39 | ||
21 | __must_check unsigned long | 40 | __must_check unsigned long |
22 | copy_to_user(void __user *to, const void *from, unsigned len); | 41 | _copy_to_user(void __user *to, const void *from, unsigned len); |
23 | __must_check unsigned long | 42 | __must_check unsigned long |
24 | copy_from_user(void *to, const void __user *from, unsigned len); | 43 | _copy_from_user(void *to, const void __user *from, unsigned len); |
25 | __must_check unsigned long | 44 | __must_check unsigned long |
26 | copy_in_user(void __user *to, const void __user *from, unsigned len); | 45 | copy_in_user(void __user *to, const void __user *from, unsigned len); |
27 | 46 | ||
47 | static inline unsigned long __must_check copy_from_user(void *to, | ||
48 | const void __user *from, | ||
49 | unsigned long n) | ||
50 | { | ||
51 | int sz = __compiletime_object_size(to); | ||
52 | |||
53 | might_fault(); | ||
54 | if (likely(sz == -1 || sz >= n)) | ||
55 | n = _copy_from_user(to, from, n); | ||
56 | #ifdef CONFIG_DEBUG_VM | ||
57 | else | ||
58 | WARN(1, "Buffer overflow detected!\n"); | ||
59 | #endif | ||
60 | return n; | ||
61 | } | ||
62 | |||
63 | static __always_inline __must_check | ||
64 | int copy_to_user(void __user *dst, const void *src, unsigned size) | ||
65 | { | ||
66 | might_fault(); | ||
67 | |||
68 | return _copy_to_user(dst, src, size); | ||
69 | } | ||
70 | |||
28 | static __always_inline __must_check | 71 | static __always_inline __must_check |
29 | int __copy_from_user(void *dst, const void __user *src, unsigned size) | 72 | int __copy_from_user(void *dst, const void __user *src, unsigned size) |
30 | { | 73 | { |
@@ -176,8 +219,11 @@ __must_check long strlen_user(const char __user *str); | |||
176 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); | 219 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); |
177 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); | 220 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); |
178 | 221 | ||
179 | __must_check long __copy_from_user_inatomic(void *dst, const void __user *src, | 222 | static __must_check __always_inline int |
180 | unsigned size); | 223 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) |
224 | { | ||
225 | return copy_user_generic(dst, (__force const void *)src, size); | ||
226 | } | ||
181 | 227 | ||
182 | static __must_check __always_inline int | 228 | static __must_check __always_inline int |
183 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | 229 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) |
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index f9b507f30d65..4f61e8b0715a 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -342,14 +342,15 @@ | |||
342 | #define __NR_pwritev 334 | 342 | #define __NR_pwritev 334 |
343 | #define __NR_rt_tgsigqueueinfo 335 | 343 | #define __NR_rt_tgsigqueueinfo 335 |
344 | #define __NR_perf_event_open 336 | 344 | #define __NR_perf_event_open 336 |
345 | #define __NR_recvmmsg 337 | ||
345 | 346 | ||
346 | #define __NR_LITMUS 337 | 347 | #define __NR_LITMUS 338 |
347 | 348 | ||
348 | #include "litmus/unistd_32.h" | 349 | #include "litmus/unistd_32.h" |
349 | 350 | ||
350 | #ifdef __KERNEL__ | 351 | #ifdef __KERNEL__ |
351 | 352 | ||
352 | #define NR_syscalls 336 + NR_litmus_syscalls | 353 | #define NR_syscalls 339 + NR_litmus_syscalls |
353 | 354 | ||
354 | #define __ARCH_WANT_IPC_PARSE_VERSION | 355 | #define __ARCH_WANT_IPC_PARSE_VERSION |
355 | #define __ARCH_WANT_OLD_READDIR | 356 | #define __ARCH_WANT_OLD_READDIR |
@@ -357,6 +358,7 @@ | |||
357 | #define __ARCH_WANT_STAT64 | 358 | #define __ARCH_WANT_STAT64 |
358 | #define __ARCH_WANT_SYS_ALARM | 359 | #define __ARCH_WANT_SYS_ALARM |
359 | #define __ARCH_WANT_SYS_GETHOSTNAME | 360 | #define __ARCH_WANT_SYS_GETHOSTNAME |
361 | #define __ARCH_WANT_SYS_IPC | ||
360 | #define __ARCH_WANT_SYS_PAUSE | 362 | #define __ARCH_WANT_SYS_PAUSE |
361 | #define __ARCH_WANT_SYS_SGETMASK | 363 | #define __ARCH_WANT_SYS_SGETMASK |
362 | #define __ARCH_WANT_SYS_SIGNAL | 364 | #define __ARCH_WANT_SYS_SIGNAL |
@@ -369,6 +371,9 @@ | |||
369 | #define __ARCH_WANT_SYS_LLSEEK | 371 | #define __ARCH_WANT_SYS_LLSEEK |
370 | #define __ARCH_WANT_SYS_NICE | 372 | #define __ARCH_WANT_SYS_NICE |
371 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | 373 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT |
374 | #define __ARCH_WANT_SYS_OLD_UNAME | ||
375 | #define __ARCH_WANT_SYS_OLD_MMAP | ||
376 | #define __ARCH_WANT_SYS_OLD_SELECT | ||
372 | #define __ARCH_WANT_SYS_OLDUMOUNT | 377 | #define __ARCH_WANT_SYS_OLDUMOUNT |
373 | #define __ARCH_WANT_SYS_SIGPENDING | 378 | #define __ARCH_WANT_SYS_SIGPENDING |
374 | #define __ARCH_WANT_SYS_SIGPROCMASK | 379 | #define __ARCH_WANT_SYS_SIGPROCMASK |
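Slotting recvmmsg in at 337 pushes the LITMUS-RT block down: __NR_LITMUS moves to 338 and NR_syscalls to 339 + NR_litmus_syscalls. The block re-bases cleanly because litmus/unistd_32.h (not shown here) defines its entries relative to the base, along the lines of:

/* illustrative shape of the LITMUS-RT numbering, relative to the base */
#define __LSC(v)                (__NR_LITMUS + v)
#define __NR_set_rt_task_param  __LSC(0)
#define __NR_get_rt_task_param  __LSC(1)
/* ... bumping __NR_LITMUS renumbers the whole block consistently */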
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 33b2003c0450..b21c3b269aac 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -146,7 +146,7 @@ __SYSCALL(__NR_wait4, sys_wait4) | |||
146 | #define __NR_kill 62 | 146 | #define __NR_kill 62 |
147 | __SYSCALL(__NR_kill, sys_kill) | 147 | __SYSCALL(__NR_kill, sys_kill) |
148 | #define __NR_uname 63 | 148 | #define __NR_uname 63 |
149 | __SYSCALL(__NR_uname, sys_uname) | 149 | __SYSCALL(__NR_uname, sys_newuname) |
150 | 150 | ||
151 | #define __NR_semget 64 | 151 | #define __NR_semget 64 |
152 | __SYSCALL(__NR_semget, sys_semget) | 152 | __SYSCALL(__NR_semget, sys_semget) |
@@ -661,6 +661,8 @@ __SYSCALL(__NR_pwritev, sys_pwritev) | |||
661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) | 661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) |
662 | #define __NR_perf_event_open 298 | 662 | #define __NR_perf_event_open 298 |
663 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) | 663 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) |
664 | #define __NR_recvmmsg 299 | ||
665 | __SYSCALL(__NR_recvmmsg, sys_recvmmsg) | ||
664 | 666 | ||
665 | #define __NR_LITMUS 299 | 667 | #define __NR_LITMUS 299 |
666 | 668 | ||
@@ -682,6 +684,7 @@ __SYSCALL(__NR_perf_event_open, sys_perf_event_open) | |||
682 | #define __ARCH_WANT_SYS_LLSEEK | 684 | #define __ARCH_WANT_SYS_LLSEEK |
683 | #define __ARCH_WANT_SYS_NICE | 685 | #define __ARCH_WANT_SYS_NICE |
684 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | 686 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT |
687 | #define __ARCH_WANT_SYS_OLD_UNAME | ||
685 | #define __ARCH_WANT_SYS_OLDUMOUNT | 688 | #define __ARCH_WANT_SYS_OLDUMOUNT |
686 | #define __ARCH_WANT_SYS_SIGPENDING | 689 | #define __ARCH_WANT_SYS_SIGPENDING |
687 | #define __ARCH_WANT_SYS_SIGPROCMASK | 690 | #define __ARCH_WANT_SYS_SIGPROCMASK |
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h index 999873b22e7f..24532c7da3d6 100644 --- a/arch/x86/include/asm/user.h +++ b/arch/x86/include/asm/user.h | |||
@@ -1,5 +1,63 @@ | |||
1 | #ifndef _ASM_X86_USER_H | ||
2 | #define _ASM_X86_USER_H | ||
3 | |||
1 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
2 | # include "user_32.h" | 5 | # include "user_32.h" |
3 | #else | 6 | #else |
4 | # include "user_64.h" | 7 | # include "user_64.h" |
5 | #endif | 8 | #endif |
9 | |||
10 | #include <asm/types.h> | ||
11 | |||
12 | struct user_ymmh_regs { | ||
13 | /* 16 * 16 bytes for each YMMH-reg */ | ||
14 | __u32 ymmh_space[64]; | ||
15 | }; | ||
16 | |||
17 | struct user_xsave_hdr { | ||
18 | __u64 xstate_bv; | ||
19 | __u64 reserved1[2]; | ||
20 | __u64 reserved2[5]; | ||
21 | }; | ||
22 | |||
23 | /* | ||
24 | * The structure layout of user_xstateregs, used for exporting the | ||
25 | * extended register state through ptrace and core-dump (NT_X86_XSTATE note) | ||
26 | * interfaces will be same as the memory layout of xsave used by the processor | ||
27 | * (except for the bytes 464..511, which can be used by the software) and hence | ||
28 | * the size of this structure varies depending on the features supported by the | ||
29 | * processor and OS. The size of the structure that users need to use can be | ||
30 | * obtained by doing: | ||
31 | * cpuid_count(0xd, 0, &eax, &ptrace_xstateregs_struct_size, &ecx, &edx); | ||
32 | * i.e., cpuid.(eax=0xd,ecx=0).ebx will be the size that user (debuggers, etc.) | ||
33 | * need to use. | ||
34 | * | ||
35 | * For now, only the first 8 bytes of the software usable bytes[464..471] will | ||
36 | * be used and will be set to OS enabled xstate mask (which is same as the | ||
37 | * 64bit mask returned by the xgetbv's xCR0). Users (analyzing core dump | ||
38 | * remotely, etc.) can use this mask as well as the mask saved in the | ||
39 | * xstate_hdr bytes and interpret what states the processor/OS supports | ||
40 | * and what states are in modified/initialized conditions for the | ||
41 | * particular process/thread. | ||
42 | * | ||
43 | * Also when the user modifies certain state FP/SSE/etc through the | ||
44 | * ptrace interface, they must ensure that the xsave_hdr.xstate_bv | ||
45 | * bytes[512..519] of the memory layout are updated correspondingly. | ||
46 | * i.e., for example when FP state is modified to a non-init state, | ||
47 | * xsave_hdr.xstate_bv's bit 0 must be set to '1', when SSE is modified to | ||
48 | * non-init state, xsave_hdr.xstate_bv's bit 1 must to be set to '1', etc. | ||
49 | */ | ||
50 | #define USER_XSTATE_FX_SW_WORDS 6 | ||
51 | #define USER_XSTATE_XCR0_WORD 0 | ||
52 | |||
53 | struct user_xstateregs { | ||
54 | struct { | ||
55 | __u64 fpx_space[58]; | ||
56 | __u64 xstate_fx_sw[USER_XSTATE_FX_SW_WORDS]; | ||
57 | } i387; | ||
58 | struct user_xsave_hdr xsave_hdr; | ||
59 | struct user_ymmh_regs ymmh; | ||
60 | /* further processor state extensions go here */ | ||
61 | }; | ||
62 | |||
63 | #endif /* _ASM_X86_USER_H */ | ||
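A userspace sketch of the sizing rule in the comment above: CPUID leaf 0xd, sub-leaf 0 returns in EBX the size in bytes of the XSAVE area for the currently enabled (XCR0) features, i.e. the buffer size a debugger should allocate for the NT_X86_XSTATE note (assumes GCC/clang's <cpuid.h> and an XSAVE-capable CPU):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        __cpuid_count(0xd, 0, eax, ebx, ecx, edx);
        printf("xstate buffer size for enabled features: %u bytes\n", ebx);
        return 0;
}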
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index 7ed17ff502b9..71605c7d5c5c 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h | |||
@@ -18,8 +18,8 @@ | |||
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | * | 20 | * |
21 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | 21 | * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. |
22 | * Copyright (c) Russ Anderson | 22 | * Copyright (c) Russ Anderson <rja@sgi.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/rtc.h> | 25 | #include <linux/rtc.h> |
@@ -36,7 +36,8 @@ enum uv_bios_cmd { | |||
36 | UV_BIOS_WATCHLIST_ALLOC, | 36 | UV_BIOS_WATCHLIST_ALLOC, |
37 | UV_BIOS_WATCHLIST_FREE, | 37 | UV_BIOS_WATCHLIST_FREE, |
38 | UV_BIOS_MEMPROTECT, | 38 | UV_BIOS_MEMPROTECT, |
39 | UV_BIOS_GET_PARTITION_ADDR | 39 | UV_BIOS_GET_PARTITION_ADDR, |
40 | UV_BIOS_SET_LEGACY_VGA_TARGET | ||
40 | }; | 41 | }; |
41 | 42 | ||
42 | /* | 43 | /* |
@@ -76,15 +77,6 @@ union partition_info_u { | |||
76 | }; | 77 | }; |
77 | }; | 78 | }; |
78 | 79 | ||
79 | union uv_watchlist_u { | ||
80 | u64 val; | ||
81 | struct { | ||
82 | u64 blade : 16, | ||
83 | size : 32, | ||
84 | filler : 16; | ||
85 | }; | ||
86 | }; | ||
87 | |||
88 | enum uv_memprotect { | 80 | enum uv_memprotect { |
89 | UV_MEMPROT_RESTRICT_ACCESS, | 81 | UV_MEMPROT_RESTRICT_ACCESS, |
90 | UV_MEMPROT_ALLOW_AMO, | 82 | UV_MEMPROT_ALLOW_AMO, |
@@ -98,13 +90,14 @@ extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64); | |||
98 | extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64); | 90 | extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64); |
99 | extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64); | 91 | extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64); |
100 | 92 | ||
101 | extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *); | 93 | extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *, long *); |
102 | extern s64 uv_bios_freq_base(u64, u64 *); | 94 | extern s64 uv_bios_freq_base(u64, u64 *); |
103 | extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int, | 95 | extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int, |
104 | unsigned long *); | 96 | unsigned long *); |
105 | extern int uv_bios_mq_watchlist_free(int, int); | 97 | extern int uv_bios_mq_watchlist_free(int, int); |
106 | extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect); | 98 | extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect); |
107 | extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *); | 99 | extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *); |
100 | extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus); | ||
108 | 101 | ||
109 | extern void uv_bios_init(void); | 102 | extern void uv_bios_init(void); |
110 | 103 | ||
@@ -113,6 +106,7 @@ extern int uv_type; | |||
113 | extern long sn_partition_id; | 106 | extern long sn_partition_id; |
114 | extern long sn_coherency_id; | 107 | extern long sn_coherency_id; |
115 | extern long sn_region_size; | 108 | extern long sn_region_size; |
109 | extern long system_serial_number; | ||
116 | #define partition_coherence_id() (sn_coherency_id) | 110 | #define partition_coherence_id() (sn_coherency_id) |
117 | 111 | ||
118 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ | 112 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ |
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index c0a01b5d985b..3bb9491b7659 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h | |||
@@ -11,6 +11,7 @@ struct mm_struct; | |||
11 | extern enum uv_system_type get_uv_system_type(void); | 11 | extern enum uv_system_type get_uv_system_type(void); |
12 | extern int is_uv_system(void); | 12 | extern int is_uv_system(void); |
13 | extern void uv_cpu_init(void); | 13 | extern void uv_cpu_init(void); |
14 | extern void uv_nmi_init(void); | ||
14 | extern void uv_system_init(void); | 15 | extern void uv_system_init(void); |
15 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | 16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, |
16 | struct mm_struct *mm, | 17 | struct mm_struct *mm, |
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 80e2984f521c..b414d2b401f6 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -55,7 +55,7 @@ | |||
55 | #define DESC_STATUS_SOURCE_TIMEOUT 3 | 55 | #define DESC_STATUS_SOURCE_TIMEOUT 3 |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * source side threshholds at which message retries print a warning | 58 | * source side thresholds at which message retries print a warning |
59 | */ | 59 | */ |
60 | #define SOURCE_TIMEOUT_LIMIT 20 | 60 | #define SOURCE_TIMEOUT_LIMIT 20 |
61 | #define DESTINATION_TIMEOUT_LIMIT 20 | 61 | #define DESTINATION_TIMEOUT_LIMIT 20 |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index d1414af98559..14cc74ba5d23 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -31,20 +31,20 @@ | |||
31 | * contiguous (although various IO spaces may punch holes in | 31 | * contiguous (although various IO spaces may punch holes in |
32 | * it).. | 32 | * it).. |
33 | * | 33 | * |
34 | * N - Number of bits in the node portion of a socket physical | 34 | * N - Number of bits in the node portion of a socket physical |
35 | * address. | 35 | * address. |
36 | * | 36 | * |
37 | * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of | 37 | * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of |
38 | * routers always have low bit of 1, C/MBricks have low bit | 38 | * routers always have low bit of 1, C/MBricks have low bit |
39 | * equal to 0. Most addressing macros that target UV hub chips | 39 | * equal to 0. Most addressing macros that target UV hub chips |
40 | * right shift the NASID by 1 to exclude the always-zero bit. | 40 | * right shift the NASID by 1 to exclude the always-zero bit. |
41 | * NASIDs contain up to 15 bits. | 41 | * NASIDs contain up to 15 bits. |
42 | * | 42 | * |
43 | * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead | 43 | * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead |
44 | * of nasids. | 44 | * of nasids. |
45 | * | 45 | * |
46 | * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant | 46 | * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant |
47 | * of the nasid for socket usage. | 47 | * of the nasid for socket usage. |
48 | * | 48 | * |
49 | * | 49 | * |
50 | * NumaLink Global Physical Address Format: | 50 | * NumaLink Global Physical Address Format: |
@@ -71,12 +71,12 @@ | |||
71 | * | 71 | * |
72 | * | 72 | * |
73 | * APICID format | 73 | * APICID format |
74 | * NOTE!!!!!! This is the current format of the APICID. However, code | 74 | * NOTE!!!!!! This is the current format of the APICID. However, code |
75 | * should assume that this will change in the future. Use functions | 75 | * should assume that this will change in the future. Use functions |
76 | * in this file for all APICID bit manipulations and conversion. | 76 | * in this file for all APICID bit manipulations and conversion. |
77 | * | 77 | * |
78 | * 1111110000000000 | 78 | * 1111110000000000 |
79 | * 5432109876543210 | 79 | * 5432109876543210 |
80 | * pppppppppplc0cch | 80 | * pppppppppplc0cch |
81 | * sssssssssss | 81 | * sssssssssss |
82 | * | 82 | * |
@@ -89,9 +89,9 @@ | |||
89 | * Note: Processor only supports 12 bits in the APICID register. The ACPI | 89 | * Note: Processor only supports 12 bits in the APICID register. The ACPI |
90 | * tables hold all 16 bits. Software needs to be aware of this. | 90 | * tables hold all 16 bits. Software needs to be aware of this. |
91 | * | 91 | * |
92 | * Unless otherwise specified, all references to APICID refer to | 92 | * Unless otherwise specified, all references to APICID refer to |
93 | * the FULL value contained in ACPI tables, not the subset in the | 93 | * the FULL value contained in ACPI tables, not the subset in the |
94 | * processor APICID register. | 94 | * processor APICID register. |
95 | */ | 95 | */ |
96 | 96 | ||
97 | 97 | ||
@@ -151,16 +151,16 @@ struct uv_hub_info_s { | |||
151 | }; | 151 | }; |
152 | 152 | ||
153 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | 153 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); |
154 | #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | 154 | #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) |
155 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) | 155 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) |
156 | 156 | ||
157 | /* | 157 | /* |
158 | * Local & Global MMR space macros. | 158 | * Local & Global MMR space macros. |
159 | * Note: macros are intended to be used ONLY by inline functions | 159 | * Note: macros are intended to be used ONLY by inline functions |
160 | * in this file - not by other kernel code. | 160 | * in this file - not by other kernel code. |
161 | * n - NASID (full 15-bit global nasid) | 161 | * n - NASID (full 15-bit global nasid) |
162 | * g - GNODE (full 15-bit global nasid, right shifted 1) | 162 | * g - GNODE (full 15-bit global nasid, right shifted 1) |
163 | * p - PNODE (local part of nsids, right shifted 1) | 163 | * p - PNODE (local part of nsids, right shifted 1) |
164 | */ | 164 | */ |
165 | #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) | 165 | #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) |
166 | #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra) | 166 | #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra) |
@@ -172,6 +172,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | |||
172 | #define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024) | 172 | #define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024) |
173 | #define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024) | 173 | #define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024) |
174 | 174 | ||
175 | #define UV_GLOBAL_GRU_MMR_BASE 0x4000000 | ||
176 | |||
175 | #define UV_GLOBAL_MMR32_PNODE_SHIFT 15 | 177 | #define UV_GLOBAL_MMR32_PNODE_SHIFT 15 |
176 | #define UV_GLOBAL_MMR64_PNODE_SHIFT 26 | 178 | #define UV_GLOBAL_MMR64_PNODE_SHIFT 26 |
177 | 179 | ||
@@ -213,8 +215,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | |||
213 | /* | 215 | /* |
214 | * Macros for converting between kernel virtual addresses, socket local physical | 216 | * Macros for converting between kernel virtual addresses, socket local physical |
215 | * addresses, and UV global physical addresses. | 217 | * addresses, and UV global physical addresses. |
216 | * Note: use the standard __pa() & __va() macros for converting | 218 | * Note: use the standard __pa() & __va() macros for converting |
217 | * between socket virtual and socket physical addresses. | 219 | * between socket virtual and socket physical addresses. |
218 | */ | 220 | */ |
219 | 221 | ||
220 | /* socket phys RAM --> UV global physical address */ | 222 | /* socket phys RAM --> UV global physical address */ |
@@ -232,6 +234,26 @@ static inline unsigned long uv_gpa(void *v) | |||
232 | return uv_soc_phys_ram_to_gpa(__pa(v)); | 234 | return uv_soc_phys_ram_to_gpa(__pa(v)); |
233 | } | 235 | } |
234 | 236 | ||
237 | /* Top two bits indicate the requested address is in MMR space. */ | ||
238 | static inline int | ||
239 | uv_gpa_in_mmr_space(unsigned long gpa) | ||
240 | { | ||
241 | return (gpa >> 62) == 0x3UL; | ||
242 | } | ||
243 | |||
244 | /* UV global physical address --> socket phys RAM */ | ||
245 | static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa) | ||
246 | { | ||
247 | unsigned long paddr = gpa & uv_hub_info->gpa_mask; | ||
248 | unsigned long remap_base = uv_hub_info->lowmem_remap_base; | ||
249 | unsigned long remap_top = uv_hub_info->lowmem_remap_top; | ||
250 | |||
251 | if (paddr >= remap_base && paddr < remap_base + remap_top) | ||
252 | paddr -= remap_base; | ||
253 | return paddr; | ||
254 | } | ||
255 | |||
256 | |||
235 | /* gnode -> pnode */ | 257 | /* gnode -> pnode */ |
236 | static inline unsigned long uv_gpa_to_gnode(unsigned long gpa) | 258 | static inline unsigned long uv_gpa_to_gnode(unsigned long gpa) |
237 | { | 259 | { |
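The two helpers added above pair with uv_soc_phys_ram_to_gpa(): the top two address bits flag MMR space, and low memory that was remapped on the socket has the remap base subtracted back out. A minimal user-space sketch of the same logic, with invented values for the per-hub fields gpa_mask, lowmem_remap_base, and lowmem_remap_top (assumes a 64-bit unsigned long):

#include <stdio.h>

#define GPA_MASK		0x3ffffffffffUL	/* stand-in for uv_hub_info->gpa_mask */
#define LOWMEM_REMAP_BASE	0x800000000UL	/* stand-in for lowmem_remap_base */
#define LOWMEM_REMAP_TOP	0x100000000UL	/* stand-in for lowmem_remap_top */

/* Same shape as uv_gpa_in_mmr_space(): MMR space has the top two bits set. */
static int gpa_in_mmr_space(unsigned long gpa)
{
	return (gpa >> 62) == 0x3UL;
}

/* Same shape as uv_gpa_to_soc_phys_ram(): mask off the node bits, then
 * undo the low-memory remap if the address sits in the remap window. */
static unsigned long gpa_to_soc_phys_ram(unsigned long gpa)
{
	unsigned long paddr = gpa & GPA_MASK;

	if (paddr >= LOWMEM_REMAP_BASE &&
	    paddr < LOWMEM_REMAP_BASE + LOWMEM_REMAP_TOP)
		paddr -= LOWMEM_REMAP_BASE;
	return paddr;
}

int main(void)
{
	unsigned long gpa = LOWMEM_REMAP_BASE + 0x1000;	/* a remapped low page */

	printf("gpa 0x%lx -> soc phys 0x%lx, in MMR space: %d\n",
	       gpa, gpa_to_soc_phys_ram(gpa), gpa_in_mmr_space(gpa));
	return 0;
}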
@@ -265,21 +287,18 @@ static inline int uv_apicid_to_pnode(int apicid) | |||
265 | * Access global MMRs using the low memory MMR32 space. This region supports | 287 | * Access global MMRs using the low memory MMR32 space. This region supports |
266 | * faster MMR access but not all MMRs are accessible in this space. | 288 | * faster MMR access but not all MMRs are accessible in this space. |
267 | */ | 289 | */ |
268 | static inline unsigned long *uv_global_mmr32_address(int pnode, | 290 | static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset) |
269 | unsigned long offset) | ||
270 | { | 291 | { |
271 | return __va(UV_GLOBAL_MMR32_BASE | | 292 | return __va(UV_GLOBAL_MMR32_BASE | |
272 | UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset); | 293 | UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset); |
273 | } | 294 | } |
274 | 295 | ||
275 | static inline void uv_write_global_mmr32(int pnode, unsigned long offset, | 296 | static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val) |
276 | unsigned long val) | ||
277 | { | 297 | { |
278 | writeq(val, uv_global_mmr32_address(pnode, offset)); | 298 | writeq(val, uv_global_mmr32_address(pnode, offset)); |
279 | } | 299 | } |
280 | 300 | ||
281 | static inline unsigned long uv_read_global_mmr32(int pnode, | 301 | static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset) |
282 | unsigned long offset) | ||
283 | { | 302 | { |
284 | return readq(uv_global_mmr32_address(pnode, offset)); | 303 | return readq(uv_global_mmr32_address(pnode, offset)); |
285 | } | 304 | } |
@@ -288,26 +307,43 @@ static inline unsigned long uv_read_global_mmr32(int pnode, | |||
288 | * Access Global MMR space using the MMR space located at the top of physical | 307 | * Access Global MMR space using the MMR space located at the top of physical |
289 | * memory. | 308 | * memory. |
290 | */ | 309 | */ |
291 | static inline unsigned long *uv_global_mmr64_address(int pnode, | 310 | static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset) |
292 | unsigned long offset) | ||
293 | { | 311 | { |
294 | return __va(UV_GLOBAL_MMR64_BASE | | 312 | return __va(UV_GLOBAL_MMR64_BASE | |
295 | UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); | 313 | UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); |
296 | } | 314 | } |
297 | 315 | ||
298 | static inline void uv_write_global_mmr64(int pnode, unsigned long offset, | 316 | static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val) |
299 | unsigned long val) | ||
300 | { | 317 | { |
301 | writeq(val, uv_global_mmr64_address(pnode, offset)); | 318 | writeq(val, uv_global_mmr64_address(pnode, offset)); |
302 | } | 319 | } |
303 | 320 | ||
304 | static inline unsigned long uv_read_global_mmr64(int pnode, | 321 | static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset) |
305 | unsigned long offset) | ||
306 | { | 322 | { |
307 | return readq(uv_global_mmr64_address(pnode, offset)); | 323 | return readq(uv_global_mmr64_address(pnode, offset)); |
308 | } | 324 | } |
309 | 325 | ||
310 | /* | 326 | /* |
327 | * Global MMR space addresses when referenced by the GRU. (GRU does | ||
328 | * NOT use socket addressing). | ||
329 | */ | ||
330 | static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset) | ||
331 | { | ||
332 | return UV_GLOBAL_GRU_MMR_BASE | offset | | ||
333 | ((unsigned long)pnode << uv_hub_info->m_val); | ||
334 | } | ||
335 | |||
336 | static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val) | ||
337 | { | ||
338 | writeb(val, uv_global_mmr64_address(pnode, offset)); | ||
339 | } | ||
340 | |||
341 | static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset) | ||
342 | { | ||
343 | return readb(uv_global_mmr64_address(pnode, offset)); | ||
344 | } | ||
345 | |||
346 | /* | ||
311 | * Access hub local MMRs. Faster than using global space but only local MMRs | 347 | * Access hub local MMRs. Faster than using global space but only local MMRs |
312 | * are accessible. | 348 | * are accessible. |
313 | */ | 349 | */ |
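Unlike the MMR32/MMR64 accessors just above, the GRU helper returns a raw global address rather than a kernel virtual one, placing the pnode in the bits above the per-node address width m_val. A stand-alone sketch with an invented M_VAL (the real value is the runtime field uv_hub_info->m_val):

#include <stdio.h>

#define GRU_MMR_BASE	0x4000000UL	/* UV_GLOBAL_GRU_MMR_BASE from above */
#define M_VAL		40		/* stand-in for uv_hub_info->m_val */

/* Same shape as uv_global_gru_mmr_address(): the pnode lands in the
 * address bits above the per-node memory size. */
static unsigned long gru_mmr_address(int pnode, unsigned long offset)
{
	return GRU_MMR_BASE | offset | ((unsigned long)pnode << M_VAL);
}

int main(void)
{
	printf("pnode 3, offset 0x80 -> 0x%lx\n", gru_mmr_address(3, 0x80));
	return 0;
}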
@@ -426,14 +462,28 @@ static inline void uv_set_scir_bits(unsigned char value) | |||
426 | } | 462 | } |
427 | } | 463 | } |
428 | 464 | ||
465 | static inline unsigned long uv_scir_offset(int apicid) | ||
466 | { | ||
467 | return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f); | ||
468 | } | ||
469 | |||
429 | static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) | 470 | static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) |
430 | { | 471 | { |
431 | if (uv_cpu_hub_info(cpu)->scir.state != value) { | 472 | if (uv_cpu_hub_info(cpu)->scir.state != value) { |
473 | uv_write_global_mmr8(uv_cpu_to_pnode(cpu), | ||
474 | uv_cpu_hub_info(cpu)->scir.offset, value); | ||
432 | uv_cpu_hub_info(cpu)->scir.state = value; | 475 | uv_cpu_hub_info(cpu)->scir.state = value; |
433 | uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value); | ||
434 | } | 476 | } |
435 | } | 477 | } |
436 | 478 | ||
479 | static inline unsigned long uv_hub_ipi_value(int apicid, int vector, int mode) | ||
480 | { | ||
481 | return (1UL << UVH_IPI_INT_SEND_SHFT) | | ||
482 | ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | | ||
483 | (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) | | ||
484 | (vector << UVH_IPI_INT_VECTOR_SHFT); | ||
485 | } | ||
486 | |||
437 | static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) | 487 | static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) |
438 | { | 488 | { |
439 | unsigned long val; | 489 | unsigned long val; |
@@ -442,12 +492,21 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) | |||
442 | if (vector == NMI_VECTOR) | 492 | if (vector == NMI_VECTOR) |
443 | dmode = dest_NMI; | 493 | dmode = dest_NMI; |
444 | 494 | ||
445 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 495 | val = uv_hub_ipi_value(apicid, vector, dmode); |
446 | ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | | ||
447 | (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) | | ||
448 | (vector << UVH_IPI_INT_VECTOR_SHFT); | ||
449 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 496 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
450 | } | 497 | } |
451 | 498 | ||
499 | /* | ||
500 | * Get the minimum revision number of the hub chips within the partition. | ||
501 | * 1 - initial rev 1.0 silicon | ||
502 | * 2 - rev 2.0 production silicon | ||
503 | */ | ||
504 | static inline int uv_get_min_hub_revision_id(void) | ||
505 | { | ||
506 | extern int uv_min_hub_revision_id; | ||
507 | |||
508 | return uv_min_hub_revision_id; | ||
509 | } | ||
510 | |||
452 | #endif /* CONFIG_X86_64 */ | 511 | #endif /* CONFIG_X86_64 */ |
453 | #endif /* _ASM_X86_UV_UV_HUB_H */ | 512 | #endif /* _ASM_X86_UV_UV_HUB_H */ |
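The refactored IPI path above is just bit-packing: uv_hub_ipi_value() builds the UVH_IPI_INT word once so callers other than uv_hub_send_ipi() can reuse it. A stand-alone sketch of the packing; the shift values here are illustrative stand-ins for the UVH_IPI_INT_*_SHFT constants defined in uv_mmrs.h:

#include <stdio.h>

/* Illustrative stand-ins for the UVH_IPI_INT_* shifts in uv_mmrs.h. */
#define SEND_SHFT		63
#define APIC_ID_SHFT		16
#define DELIVERY_MODE_SHFT	 8
#define VECTOR_SHFT		 0

/* Same shape as uv_hub_ipi_value(): one send bit plus three fields. */
static unsigned long hub_ipi_value(int apicid, int vector, int mode)
{
	return (1UL << SEND_SHFT) |
	       ((unsigned long)apicid << APIC_ID_SHFT) |
	       ((unsigned long)mode << DELIVERY_MODE_SHFT) |
	       ((unsigned long)vector << VECTOR_SHFT);
}

int main(void)
{
	/* apicid 5, vector 0xf0, fixed delivery (mode 0) */
	printf("UVH_IPI_INT value: 0x%lx\n", hub_ipi_value(5, 0xf0, 0));
	return 0;
}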
diff --git a/arch/x86/include/asm/uv/uv_irq.h b/arch/x86/include/asm/uv/uv_irq.h index 9613c8c0b647..d6b17c760622 100644 --- a/arch/x86/include/asm/uv/uv_irq.h +++ b/arch/x86/include/asm/uv/uv_irq.h | |||
@@ -25,12 +25,14 @@ struct uv_IO_APIC_route_entry { | |||
25 | dest : 32; | 25 | dest : 32; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | extern struct irq_chip uv_irq_chip; | 28 | enum { |
29 | 29 | UV_AFFINITY_ALL, | |
30 | extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long); | 30 | UV_AFFINITY_NODE, |
31 | extern void arch_disable_uv_irq(int, unsigned long); | 31 | UV_AFFINITY_CPU |
32 | }; | ||
32 | 33 | ||
33 | extern int uv_setup_irq(char *, int, int, unsigned long); | 34 | extern int uv_irq_2_mmr_info(int, unsigned long *, int *); |
34 | extern void uv_teardown_irq(unsigned int, int, unsigned long); | 35 | extern int uv_setup_irq(char *, int, int, unsigned long, int); |
36 | extern void uv_teardown_irq(unsigned int); | ||
35 | 37 | ||
36 | #endif /* _ASM_X86_UV_UV_IRQ_H */ | 38 | #endif /* _ASM_X86_UV_UV_IRQ_H */ |
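Against the new prototypes, a caller now passes one of the UV_AFFINITY_* values and tears down by irq number alone (uv_irq_2_mmr_info() recovers the MMR mapping internally). A hedged sketch of a caller: the parameter names cpu, mmr_blade, and mmr_offset are guesses from the types in the declarations, and the usual request_irq() wiring is elided.

#include <asm/uv/uv_irq.h>

static int example_request_uv_irq(int cpu, int mmr_blade,
				  unsigned long mmr_offset)
{
	int irq;

	/* Restrict delivery to one cpu; UV_AFFINITY_NODE/ALL widen it. */
	irq = uv_setup_irq("example-uv-dev", cpu, mmr_blade, mmr_offset,
			   UV_AFFINITY_CPU);
	if (irq < 0)
		return irq;

	/* ... request_irq(irq, ...) and normal driver setup here ... */

	uv_teardown_irq(irq);	/* the irq number alone identifies the mapping */
	return 0;
}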
diff --git a/arch/x86/include/asm/visws/cobalt.h b/arch/x86/include/asm/visws/cobalt.h index 166adf61e770..2edb37637ead 100644 --- a/arch/x86/include/asm/visws/cobalt.h +++ b/arch/x86/include/asm/visws/cobalt.h | |||
@@ -122,4 +122,6 @@ extern char visws_board_type; | |||
122 | 122 | ||
123 | extern char visws_board_rev; | 123 | extern char visws_board_rev; |
124 | 124 | ||
125 | extern int pci_visws_init(void); | ||
126 | |||
125 | #endif /* _ASM_X86_VISWS_COBALT_H */ | 127 | #endif /* _ASM_X86_VISWS_COBALT_H */ |
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 272514c2d456..fb9a080740ec 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -53,9 +53,11 @@ | |||
53 | */ | 53 | */ |
54 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 | 54 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 |
55 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 | 55 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 |
56 | #define SECONDARY_EXEC_RDTSCP 0x00000008 | ||
56 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | 57 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 |
57 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | 58 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 |
58 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | 59 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 |
60 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 | ||
59 | 61 | ||
60 | 62 | ||
61 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | 63 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 |
@@ -144,6 +146,8 @@ enum vmcs_field { | |||
144 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | 146 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, |
145 | TPR_THRESHOLD = 0x0000401c, | 147 | TPR_THRESHOLD = 0x0000401c, |
146 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | 148 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, |
149 | PLE_GAP = 0x00004020, | ||
150 | PLE_WINDOW = 0x00004022, | ||
147 | VM_INSTRUCTION_ERROR = 0x00004400, | 151 | VM_INSTRUCTION_ERROR = 0x00004400, |
148 | VM_EXIT_REASON = 0x00004402, | 152 | VM_EXIT_REASON = 0x00004402, |
149 | VM_EXIT_INTR_INFO = 0x00004404, | 153 | VM_EXIT_INTR_INFO = 0x00004404, |
@@ -248,6 +252,8 @@ enum vmcs_field { | |||
248 | #define EXIT_REASON_MSR_READ 31 | 252 | #define EXIT_REASON_MSR_READ 31 |
249 | #define EXIT_REASON_MSR_WRITE 32 | 253 | #define EXIT_REASON_MSR_WRITE 32 |
250 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | 254 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 |
255 | #define EXIT_REASON_MONITOR_INSTRUCTION 39 | ||
256 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
251 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | 257 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 |
252 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | 258 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 |
253 | #define EXIT_REASON_APIC_ACCESS 44 | 259 | #define EXIT_REASON_APIC_ACCESS 44 |
@@ -358,6 +364,7 @@ enum vmcs_field { | |||
358 | #define VMX_EPTP_UC_BIT (1ull << 8) | 364 | #define VMX_EPTP_UC_BIT (1ull << 8) |
359 | #define VMX_EPTP_WB_BIT (1ull << 14) | 365 | #define VMX_EPTP_WB_BIT (1ull << 14) |
360 | #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) | 366 | #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) |
367 | #define VMX_EPT_1GB_PAGE_BIT (1ull << 17) | ||
361 | #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) | 368 | #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) |
362 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) | 369 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) |
363 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) | 370 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) |
@@ -370,7 +377,7 @@ enum vmcs_field { | |||
370 | #define VMX_EPT_READABLE_MASK 0x1ull | 377 | #define VMX_EPT_READABLE_MASK 0x1ull |
371 | #define VMX_EPT_WRITABLE_MASK 0x2ull | 378 | #define VMX_EPT_WRITABLE_MASK 0x2ull |
372 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull | 379 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull |
373 | #define VMX_EPT_IGMT_BIT (1ull << 6) | 380 | #define VMX_EPT_IPAT_BIT (1ull << 6) |
374 | 381 | ||
375 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul | 382 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul |
376 | 383 | ||
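The pause-loop-exiting additions work as a set: the secondary control turns the feature on, PLE_GAP/PLE_WINDOW tune when a spinning guest is forced out, and EXIT_REASON_PAUSE_INSTRUCTION identifies the resulting exit. A hedged sketch of how a hypervisor might program it; vmcs_write32() and the has_ple flag stand in for the VMM's own helpers, and the gap/window numbers are illustrative defaults:

/* Sketch only: enable PLE when the secondary control is available. */
static void setup_ple(u32 *secondary_exec_ctrl, bool has_ple)
{
	if (!has_ple)
		return;

	*secondary_exec_ctrl |= SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	vmcs_write32(PLE_GAP, 128);	/* max cycles between PAUSEs in one spin loop */
	vmcs_write32(PLE_WINDOW, 4096);	/* spin length before forcing a VM exit */
	/* A guest stuck on PAUSE now exits with EXIT_REASON_PAUSE_INSTRUCTION. */
}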
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 2c756fd4ab0e..519b54327d75 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -26,7 +26,7 @@ struct x86_init_mpparse { | |||
26 | void (*smp_read_mpc_oem)(struct mpc_table *mpc); | 26 | void (*smp_read_mpc_oem)(struct mpc_table *mpc); |
27 | void (*mpc_oem_pci_bus)(struct mpc_bus *m); | 27 | void (*mpc_oem_pci_bus)(struct mpc_bus *m); |
28 | void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); | 28 | void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); |
29 | void (*find_smp_config)(unsigned int reserve); | 29 | void (*find_smp_config)(void); |
30 | void (*get_smp_config)(unsigned int early); | 30 | void (*get_smp_config)(unsigned int early); |
31 | }; | 31 | }; |
32 | 32 | ||
@@ -91,6 +91,28 @@ struct x86_init_timers { | |||
91 | }; | 91 | }; |
92 | 92 | ||
93 | /** | 93 | /** |
94 | * struct x86_init_iommu - platform specific iommu setup | ||
95 | * @iommu_init: platform specific iommu setup | ||
96 | */ | ||
97 | struct x86_init_iommu { | ||
98 | int (*iommu_init)(void); | ||
99 | }; | ||
100 | |||
101 | /** | ||
102 | * struct x86_init_pci - platform specific pci init functions | ||
103 | * @arch_init: platform specific pci arch init call | ||
104 | * @init: platform specific pci subsystem init | ||
105 | * @init_irq: platform specific pci irq init | ||
106 | * @fixup_irqs: platform specific pci irq fixup | ||
107 | */ | ||
108 | struct x86_init_pci { | ||
109 | int (*arch_init)(void); | ||
110 | int (*init)(void); | ||
111 | void (*init_irq)(void); | ||
112 | void (*fixup_irqs)(void); | ||
113 | }; | ||
114 | |||
115 | /** | ||
94 | * struct x86_init_ops - functions for platform specific setup | 116 | * struct x86_init_ops - functions for platform specific setup |
95 | * | 117 | * |
96 | */ | 118 | */ |
@@ -101,6 +123,8 @@ struct x86_init_ops { | |||
101 | struct x86_init_oem oem; | 123 | struct x86_init_oem oem; |
102 | struct x86_init_paging paging; | 124 | struct x86_init_paging paging; |
103 | struct x86_init_timers timers; | 125 | struct x86_init_timers timers; |
126 | struct x86_init_iommu iommu; | ||
127 | struct x86_init_pci pci; | ||
104 | }; | 128 | }; |
105 | 129 | ||
106 | /** | 130 | /** |
@@ -116,11 +140,16 @@ struct x86_cpuinit_ops { | |||
116 | * @calibrate_tsc: calibrate TSC | 140 | * @calibrate_tsc: calibrate TSC |
117 | * @get_wallclock: get time from HW clock like RTC etc. | 141 | * @get_wallclock: get time from HW clock like RTC etc. |
118 | * @set_wallclock: set time back to HW clock | 142 | * @set_wallclock: set time back to HW clock |
143 | * @is_untracked_pat_range: exclude from PAT logic | ||
144 | * @nmi_init: enable NMI on cpus | ||
119 | */ | 145 | */ |
120 | struct x86_platform_ops { | 146 | struct x86_platform_ops { |
121 | unsigned long (*calibrate_tsc)(void); | 147 | unsigned long (*calibrate_tsc)(void); |
122 | unsigned long (*get_wallclock)(void); | 148 | unsigned long (*get_wallclock)(void); |
123 | int (*set_wallclock)(unsigned long nowtime); | 149 | int (*set_wallclock)(unsigned long nowtime); |
150 | void (*iommu_shutdown)(void); | ||
151 | bool (*is_untracked_pat_range)(u64 start, u64 end); | ||
152 | void (*nmi_init)(void); | ||
124 | }; | 153 | }; |
125 | 154 | ||
126 | extern struct x86_init_ops x86_init; | 155 | extern struct x86_init_ops x86_init; |
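A platform opts into the new hooks by assigning into x86_init/x86_platform during early setup, as the existing oem and timers members already do. A hedged sketch with hypothetical platform functions (the my_plat_* names are invented):

#include <asm/x86_init.h>

static int __init my_plat_pci_init(void)
{
	/* hypothetical platform-specific PCI bring-up; 0 means handled */
	return 0;
}

static void my_plat_nmi_init(void)
{
	/* hypothetical: enable NMI delivery on secondary cpus */
}

static void __init my_plat_setup_ops(void)
{
	x86_init.pci.init = my_plat_pci_init;		/* override default PCI init */
	x86_platform.nmi_init = my_plat_nmi_init;	/* new per-platform NMI hook */
}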
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index d5b7e90c0edf..396ff4cc8ed4 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h | |||
@@ -37,31 +37,4 @@ | |||
37 | extern struct shared_info *HYPERVISOR_shared_info; | 37 | extern struct shared_info *HYPERVISOR_shared_info; |
38 | extern struct start_info *xen_start_info; | 38 | extern struct start_info *xen_start_info; |
39 | 39 | ||
40 | enum xen_domain_type { | ||
41 | XEN_NATIVE, /* running on bare hardware */ | ||
42 | XEN_PV_DOMAIN, /* running in a PV domain */ | ||
43 | XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ | ||
44 | }; | ||
45 | |||
46 | #ifdef CONFIG_XEN | ||
47 | extern enum xen_domain_type xen_domain_type; | ||
48 | #else | ||
49 | #define xen_domain_type XEN_NATIVE | ||
50 | #endif | ||
51 | |||
52 | #define xen_domain() (xen_domain_type != XEN_NATIVE) | ||
53 | #define xen_pv_domain() (xen_domain() && \ | ||
54 | xen_domain_type == XEN_PV_DOMAIN) | ||
55 | #define xen_hvm_domain() (xen_domain() && \ | ||
56 | xen_domain_type == XEN_HVM_DOMAIN) | ||
57 | |||
58 | #ifdef CONFIG_XEN_DOM0 | ||
59 | #include <xen/interface/xen.h> | ||
60 | |||
61 | #define xen_initial_domain() (xen_pv_domain() && \ | ||
62 | xen_start_info->flags & SIF_INITDOMAIN) | ||
63 | #else /* !CONFIG_XEN_DOM0 */ | ||
64 | #define xen_initial_domain() (0) | ||
65 | #endif /* CONFIG_XEN_DOM0 */ | ||
66 | |||
67 | #endif /* _ASM_X86_XEN_HYPERVISOR_H */ | 40 | #endif /* _ASM_X86_XEN_HYPERVISOR_H */ |
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 727acc152344..ddc04ccad03b 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -27,9 +27,11 @@ | |||
27 | extern unsigned int xstate_size; | 27 | extern unsigned int xstate_size; |
28 | extern u64 pcntxt_mask; | 28 | extern u64 pcntxt_mask; |
29 | extern struct xsave_struct *init_xstate_buf; | 29 | extern struct xsave_struct *init_xstate_buf; |
30 | extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; | ||
30 | 31 | ||
31 | extern void xsave_cntxt_init(void); | 32 | extern void xsave_cntxt_init(void); |
32 | extern void xsave_init(void); | 33 | extern void xsave_init(void); |
34 | extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); | ||
33 | extern int init_fpu(struct task_struct *child); | 35 | extern int init_fpu(struct task_struct *child); |
34 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, | 36 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, |
35 | void __user *fpstate, | 37 | void __user *fpstate, |