Diffstat (limited to 'arch/x86')
65 files changed, 739 insertions, 553 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1d14cc6b79ad..c9866b0b77d8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -81,7 +81,7 @@ config X86
81 | select CLKEVT_I8253 | 81 | select CLKEVT_I8253 |
82 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 82 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
83 | select GENERIC_IOMAP | 83 | select GENERIC_IOMAP |
84 | select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC | 84 | select DCACHE_WORD_ACCESS |
85 | 85 | ||
86 | config INSTRUCTION_DECODER | 86 | config INSTRUCTION_DECODER |
87 | def_bool (KPROBES || PERF_EVENTS) | 87 | def_bool (KPROBES || PERF_EVENTS) |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 968dbe24a255..41a7237606a3 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -129,6 +129,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
129 | KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | 129 | KBUILD_CFLAGS += -fno-asynchronous-unwind-tables |
130 | # prevent gcc from generating any FP code by mistake | 130 | # prevent gcc from generating any FP code by mistake |
131 | KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) | 131 | KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) |
132 | KBUILD_CFLAGS += $(call cc-option,-mno-avx,) | ||
132 | 133 | ||
133 | KBUILD_CFLAGS += $(mflags-y) | 134 | KBUILD_CFLAGS += $(mflags-y) |
134 | KBUILD_AFLAGS += $(mflags-y) | 135 | KBUILD_AFLAGS += $(mflags-y) |
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 4be406abeefd..36b62bc52638 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
14 | 14 | ||
15 | export LDFLAGS | 15 | export LDFLAGS |
16 | 16 | ||
17 | LDS_EXTRA := -Ui386 | ||
18 | export LDS_EXTRA | ||
19 | |||
17 | # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. | 20 | # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. |
18 | include $(srctree)/arch/x86/Makefile_32.cpu | 21 | include $(srctree)/arch/x86/Makefile_32.cpu |
19 | 22 | ||
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index a0559930a180..c85e3ac99bba 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -33,6 +33,9 @@
33 | __HEAD | 33 | __HEAD |
34 | ENTRY(startup_32) | 34 | ENTRY(startup_32) |
35 | #ifdef CONFIG_EFI_STUB | 35 | #ifdef CONFIG_EFI_STUB |
36 | jmp preferred_addr | ||
37 | |||
38 | .balign 0x10 | ||
36 | /* | 39 | /* |
37 | * We don't need the return address, so set up the stack so | 40 | * We don't need the return address, so set up the stack so |
38 | * efi_main() can find its arguments. | 41 | * efi_main() can find its arguments. |
@@ -41,12 +44,17 @@ ENTRY(startup_32)
41 | 44 | ||
42 | call efi_main | 45 | call efi_main |
43 | cmpl $0, %eax | 46 | cmpl $0, %eax |
44 | je preferred_addr | ||
45 | movl %eax, %esi | 47 | movl %eax, %esi |
46 | call 1f | 48 | jne 2f |
47 | 1: | 49 | 1: |
50 | /* EFI init failed, so hang. */ | ||
51 | hlt | ||
52 | jmp 1b | ||
53 | 2: | ||
54 | call 3f | ||
55 | 3: | ||
48 | popl %eax | 56 | popl %eax |
49 | subl $1b, %eax | 57 | subl $3b, %eax |
50 | subl BP_pref_address(%esi), %eax | 58 | subl BP_pref_address(%esi), %eax |
51 | add BP_code32_start(%esi), %eax | 59 | add BP_code32_start(%esi), %eax |
52 | leal preferred_addr(%eax), %eax | 60 | leal preferred_addr(%eax), %eax |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 558d76ce23bc..87e03a13d8e3 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -200,18 +200,28 @@ ENTRY(startup_64)
200 | * entire text+data+bss and hopefully all of memory. | 200 | * entire text+data+bss and hopefully all of memory. |
201 | */ | 201 | */ |
202 | #ifdef CONFIG_EFI_STUB | 202 | #ifdef CONFIG_EFI_STUB |
203 | pushq %rsi | 203 | /* |
204 | * The entry point for the PE/COFF executable is 0x210, so only | ||
205 | * legacy boot loaders will execute this jmp. | ||
206 | */ | ||
207 | jmp preferred_addr | ||
208 | |||
209 | .org 0x210 | ||
204 | mov %rcx, %rdi | 210 | mov %rcx, %rdi |
205 | mov %rdx, %rsi | 211 | mov %rdx, %rsi |
206 | call efi_main | 212 | call efi_main |
207 | popq %rsi | ||
208 | cmpq $0,%rax | ||
209 | je preferred_addr | ||
210 | movq %rax,%rsi | 213 | movq %rax,%rsi |
211 | call 1f | 214 | cmpq $0,%rax |
215 | jne 2f | ||
212 | 1: | 216 | 1: |
217 | /* EFI init failed, so hang. */ | ||
218 | hlt | ||
219 | jmp 1b | ||
220 | 2: | ||
221 | call 3f | ||
222 | 3: | ||
213 | popq %rax | 223 | popq %rax |
214 | subq $1b, %rax | 224 | subq $3b, %rax |
215 | subq BP_pref_address(%rsi), %rax | 225 | subq BP_pref_address(%rsi), %rax |
216 | add BP_code32_start(%esi), %eax | 226 | add BP_code32_start(%esi), %eax |
217 | leaq preferred_addr(%rax), %rax | 227 | leaq preferred_addr(%rax), %rax |
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index d3c0b0277666..fb7117a4ade1 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -403,13 +403,11 @@ static void print_absolute_symbols(void)
403 | for (i = 0; i < ehdr.e_shnum; i++) { | 403 | for (i = 0; i < ehdr.e_shnum; i++) { |
404 | struct section *sec = &secs[i]; | 404 | struct section *sec = &secs[i]; |
405 | char *sym_strtab; | 405 | char *sym_strtab; |
406 | Elf32_Sym *sh_symtab; | ||
407 | int j; | 406 | int j; |
408 | 407 | ||
409 | if (sec->shdr.sh_type != SHT_SYMTAB) { | 408 | if (sec->shdr.sh_type != SHT_SYMTAB) { |
410 | continue; | 409 | continue; |
411 | } | 410 | } |
412 | sh_symtab = sec->symtab; | ||
413 | sym_strtab = sec->link->strtab; | 411 | sym_strtab = sec->link->strtab; |
414 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { | 412 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { |
415 | Elf32_Sym *sym; | 413 | Elf32_Sym *sym; |
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index ed549767a231..24443a332083 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -205,8 +205,13 @@ int main(int argc, char ** argv)
205 | put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); | 205 | put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); |
206 | 206 | ||
207 | #ifdef CONFIG_X86_32 | 207 | #ifdef CONFIG_X86_32 |
208 | /* Address of entry point */ | 208 | /* |
209 | put_unaligned_le32(i, &buf[pe_header + 0x28]); | 209 | * Address of entry point. |
210 | * | ||
211 | * The EFI stub entry point is +16 bytes from the start of | ||
212 | * the .text section. | ||
213 | */ | ||
214 | put_unaligned_le32(i + 16, &buf[pe_header + 0x28]); | ||
210 | 215 | ||
211 | /* .text size */ | 216 | /* .text size */ |
212 | put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); | 217 | put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); |
@@ -217,9 +222,11 @@ int main(int argc, char ** argv)
217 | /* | 222 | /* |
218 | * Address of entry point. startup_32 is at the beginning and | 223 | * Address of entry point. startup_32 is at the beginning and |
219 | * the 64-bit entry point (startup_64) is always 512 bytes | 224 | * the 64-bit entry point (startup_64) is always 512 bytes |
220 | * after. | 225 | * after. The EFI stub entry point is 16 bytes after that, as |
226 | * the first instruction allows legacy loaders to jump over | ||
227 | * the EFI stub initialisation | ||
221 | */ | 228 | */ |
222 | put_unaligned_le32(i + 512, &buf[pe_header + 0x28]); | 229 | put_unaligned_le32(i + 528, &buf[pe_header + 0x28]); |
223 | 230 | ||
224 | /* .text size */ | 231 | /* .text size */ |
225 | put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); | 232 | put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index d511d951a052..07b3a68d2d29 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -119,9 +119,7 @@ static void set_brk(unsigned long start, unsigned long end)
119 | end = PAGE_ALIGN(end); | 119 | end = PAGE_ALIGN(end); |
120 | if (end <= start) | 120 | if (end <= start) |
121 | return; | 121 | return; |
122 | down_write(&current->mm->mmap_sem); | 122 | vm_brk(start, end - start); |
123 | do_brk(start, end - start); | ||
124 | up_write(&current->mm->mmap_sem); | ||
125 | } | 123 | } |
126 | 124 | ||
127 | #ifdef CORE_DUMP | 125 | #ifdef CORE_DUMP |
@@ -296,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
296 | 294 | ||
297 | /* OK, This is the point of no return */ | 295 | /* OK, This is the point of no return */ |
298 | set_personality(PER_LINUX); | 296 | set_personality(PER_LINUX); |
299 | set_thread_flag(TIF_IA32); | 297 | set_personality_ia32(false); |
300 | current->mm->context.ia32_compat = 1; | ||
301 | 298 | ||
302 | setup_new_exec(bprm); | 299 | setup_new_exec(bprm); |
303 | 300 | ||
@@ -332,9 +329,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
332 | pos = 32; | 329 | pos = 32; |
333 | map_size = ex.a_text+ex.a_data; | 330 | map_size = ex.a_text+ex.a_data; |
334 | 331 | ||
335 | down_write(&current->mm->mmap_sem); | 332 | error = vm_brk(text_addr & PAGE_MASK, map_size); |
336 | error = do_brk(text_addr & PAGE_MASK, map_size); | ||
337 | up_write(&current->mm->mmap_sem); | ||
338 | 333 | ||
339 | if (error != (text_addr & PAGE_MASK)) { | 334 | if (error != (text_addr & PAGE_MASK)) { |
340 | send_sig(SIGKILL, current, 0); | 335 | send_sig(SIGKILL, current, 0); |
@@ -373,9 +368,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
373 | if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) { | 368 | if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) { |
374 | loff_t pos = fd_offset; | 369 | loff_t pos = fd_offset; |
375 | 370 | ||
376 | down_write(&current->mm->mmap_sem); | 371 | vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); |
377 | do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); | ||
378 | up_write(&current->mm->mmap_sem); | ||
379 | bprm->file->f_op->read(bprm->file, | 372 | bprm->file->f_op->read(bprm->file, |
380 | (char __user *)N_TXTADDR(ex), | 373 | (char __user *)N_TXTADDR(ex), |
381 | ex.a_text+ex.a_data, &pos); | 374 | ex.a_text+ex.a_data, &pos); |
@@ -385,26 +378,22 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
385 | goto beyond_if; | 378 | goto beyond_if; |
386 | } | 379 | } |
387 | 380 | ||
388 | down_write(&current->mm->mmap_sem); | 381 | error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, |
389 | error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, | ||
390 | PROT_READ | PROT_EXEC, | 382 | PROT_READ | PROT_EXEC, |
391 | MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | | 383 | MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | |
392 | MAP_EXECUTABLE | MAP_32BIT, | 384 | MAP_EXECUTABLE | MAP_32BIT, |
393 | fd_offset); | 385 | fd_offset); |
394 | up_write(&current->mm->mmap_sem); | ||
395 | 386 | ||
396 | if (error != N_TXTADDR(ex)) { | 387 | if (error != N_TXTADDR(ex)) { |
397 | send_sig(SIGKILL, current, 0); | 388 | send_sig(SIGKILL, current, 0); |
398 | return error; | 389 | return error; |
399 | } | 390 | } |
400 | 391 | ||
401 | down_write(&current->mm->mmap_sem); | 392 | error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, |
402 | error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, | ||
403 | PROT_READ | PROT_WRITE | PROT_EXEC, | 393 | PROT_READ | PROT_WRITE | PROT_EXEC, |
404 | MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | | 394 | MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | |
405 | MAP_EXECUTABLE | MAP_32BIT, | 395 | MAP_EXECUTABLE | MAP_32BIT, |
406 | fd_offset + ex.a_text); | 396 | fd_offset + ex.a_text); |
407 | up_write(&current->mm->mmap_sem); | ||
408 | if (error != N_DATADDR(ex)) { | 397 | if (error != N_DATADDR(ex)) { |
409 | send_sig(SIGKILL, current, 0); | 398 | send_sig(SIGKILL, current, 0); |
410 | return error; | 399 | return error; |
@@ -476,9 +465,7 @@ static int load_aout_library(struct file *file)
476 | error_time = jiffies; | 465 | error_time = jiffies; |
477 | } | 466 | } |
478 | #endif | 467 | #endif |
479 | down_write(&current->mm->mmap_sem); | 468 | vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); |
480 | do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); | ||
481 | up_write(&current->mm->mmap_sem); | ||
482 | 469 | ||
483 | file->f_op->read(file, (char __user *)start_addr, | 470 | file->f_op->read(file, (char __user *)start_addr, |
484 | ex.a_text + ex.a_data, &pos); | 471 | ex.a_text + ex.a_data, &pos); |
@@ -490,12 +477,10 @@ static int load_aout_library(struct file *file)
490 | goto out; | 477 | goto out; |
491 | } | 478 | } |
492 | /* Now use mmap to map the library into memory. */ | 479 | /* Now use mmap to map the library into memory. */ |
493 | down_write(&current->mm->mmap_sem); | 480 | error = vm_mmap(file, start_addr, ex.a_text + ex.a_data, |
494 | error = do_mmap(file, start_addr, ex.a_text + ex.a_data, | ||
495 | PROT_READ | PROT_WRITE | PROT_EXEC, | 481 | PROT_READ | PROT_WRITE | PROT_EXEC, |
496 | MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT, | 482 | MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT, |
497 | N_TXTOFF(ex)); | 483 | N_TXTOFF(ex)); |
498 | up_write(&current->mm->mmap_sem); | ||
499 | retval = error; | 484 | retval = error; |
500 | if (error != start_addr) | 485 | if (error != start_addr) |
501 | goto out; | 486 | goto out; |
@@ -503,9 +488,7 @@ static int load_aout_library(struct file *file)
503 | len = PAGE_ALIGN(ex.a_text + ex.a_data); | 488 | len = PAGE_ALIGN(ex.a_text + ex.a_data); |
504 | bss = ex.a_text + ex.a_data + ex.a_bss; | 489 | bss = ex.a_text + ex.a_data + ex.a_bss; |
505 | if (bss > len) { | 490 | if (bss > len) { |
506 | down_write(&current->mm->mmap_sem); | 491 | error = vm_brk(start_addr + len, bss - len); |
507 | error = do_brk(start_addr + len, bss - len); | ||
508 | up_write(&current->mm->mmap_sem); | ||
509 | retval = error; | 492 | retval = error; |
510 | if (error != start_addr + len) | 493 | if (error != start_addr + len) |
511 | goto out; | 494 | goto out; |
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index b3b733262909..99480e55973d 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -43,7 +43,7 @@ extern void __add_wrong_size(void)
43 | switch (sizeof(*(ptr))) { \ | 43 | switch (sizeof(*(ptr))) { \ |
44 | case __X86_CASE_B: \ | 44 | case __X86_CASE_B: \ |
45 | asm volatile (lock #op "b %b0, %1\n" \ | 45 | asm volatile (lock #op "b %b0, %1\n" \ |
46 | : "+r" (__ret), "+m" (*(ptr)) \ | 46 | : "+q" (__ret), "+m" (*(ptr)) \ |
47 | : : "memory", "cc"); \ | 47 | : : "memory", "cc"); \ |
48 | break; \ | 48 | break; \ |
49 | case __X86_CASE_W: \ | 49 | case __X86_CASE_W: \ |
@@ -173,7 +173,7 @@ extern void __add_wrong_size(void)
173 | switch (sizeof(*(ptr))) { \ | 173 | switch (sizeof(*(ptr))) { \ |
174 | case __X86_CASE_B: \ | 174 | case __X86_CASE_B: \ |
175 | asm volatile (lock "addb %b1, %0\n" \ | 175 | asm volatile (lock "addb %b1, %0\n" \ |
176 | : "+m" (*(ptr)) : "ri" (inc) \ | 176 | : "+m" (*(ptr)) : "qi" (inc) \ |
177 | : "memory", "cc"); \ | 177 | : "memory", "cc"); \ |
178 | break; \ | 178 | break; \ |
179 | case __X86_CASE_W: \ | 179 | case __X86_CASE_W: \ |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index ed3065fd6314..4b4331d71935 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -59,7 +59,8 @@ extern int dma_supported(struct device *hwdev, u64 mask);
59 | extern int dma_set_mask(struct device *dev, u64 mask); | 59 | extern int dma_set_mask(struct device *dev, u64 mask); |
60 | 60 | ||
61 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 61 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
62 | dma_addr_t *dma_addr, gfp_t flag); | 62 | dma_addr_t *dma_addr, gfp_t flag, |
63 | struct dma_attrs *attrs); | ||
63 | 64 | ||
64 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | 65 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
65 | { | 66 | { |
@@ -111,9 +112,11 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
111 | return gfp; | 112 | return gfp; |
112 | } | 113 | } |
113 | 114 | ||
115 | #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) | ||
116 | |||
114 | static inline void * | 117 | static inline void * |
115 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | 118 | dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, |
116 | gfp_t gfp) | 119 | gfp_t gfp, struct dma_attrs *attrs) |
117 | { | 120 | { |
118 | struct dma_map_ops *ops = get_dma_ops(dev); | 121 | struct dma_map_ops *ops = get_dma_ops(dev); |
119 | void *memory; | 122 | void *memory; |
@@ -129,18 +132,21 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
129 | if (!is_device_dma_capable(dev)) | 132 | if (!is_device_dma_capable(dev)) |
130 | return NULL; | 133 | return NULL; |
131 | 134 | ||
132 | if (!ops->alloc_coherent) | 135 | if (!ops->alloc) |
133 | return NULL; | 136 | return NULL; |
134 | 137 | ||
135 | memory = ops->alloc_coherent(dev, size, dma_handle, | 138 | memory = ops->alloc(dev, size, dma_handle, |
136 | dma_alloc_coherent_gfp_flags(dev, gfp)); | 139 | dma_alloc_coherent_gfp_flags(dev, gfp), attrs); |
137 | debug_dma_alloc_coherent(dev, size, *dma_handle, memory); | 140 | debug_dma_alloc_coherent(dev, size, *dma_handle, memory); |
138 | 141 | ||
139 | return memory; | 142 | return memory; |
140 | } | 143 | } |
141 | 144 | ||
142 | static inline void dma_free_coherent(struct device *dev, size_t size, | 145 | #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) |
143 | void *vaddr, dma_addr_t bus) | 146 | |
147 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
148 | void *vaddr, dma_addr_t bus, | ||
149 | struct dma_attrs *attrs) | ||
144 | { | 150 | { |
145 | struct dma_map_ops *ops = get_dma_ops(dev); | 151 | struct dma_map_ops *ops = get_dma_ops(dev); |
146 | 152 | ||
@@ -150,8 +156,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
150 | return; | 156 | return; |
151 | 157 | ||
152 | debug_dma_free_coherent(dev, size, vaddr, bus); | 158 | debug_dma_free_coherent(dev, size, vaddr, bus); |
153 | if (ops->free_coherent) | 159 | if (ops->free) |
154 | ops->free_coherent(dev, size, vaddr, bus); | 160 | ops->free(dev, size, vaddr, bus, attrs); |
155 | } | 161 | } |
156 | 162 | ||
157 | #endif | 163 | #endif |
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index 3427b7798dbc..7ef7c3020e5c 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,9 +7,9 @@
7 | #else | 7 | #else |
8 | # ifdef __i386__ | 8 | # ifdef __i386__ |
9 | # include "posix_types_32.h" | 9 | # include "posix_types_32.h" |
10 | # elif defined(__LP64__) | 10 | # elif defined(__ILP32__) |
11 | # include "posix_types_64.h" | ||
12 | # else | ||
13 | # include "posix_types_x32.h" | 11 | # include "posix_types_x32.h" |
12 | # else | ||
13 | # include "posix_types_64.h" | ||
14 | # endif | 14 | # endif |
15 | #endif | 15 | #endif |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7284c9a6a0b5..4fa7dcceb6c0 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -974,16 +974,6 @@ extern bool cpu_has_amd_erratum(const int *);
974 | #define cpu_has_amd_erratum(x) (false) | 974 | #define cpu_has_amd_erratum(x) (false) |
975 | #endif /* CONFIG_CPU_SUP_AMD */ | 975 | #endif /* CONFIG_CPU_SUP_AMD */ |
976 | 976 | ||
977 | #ifdef CONFIG_X86_32 | ||
978 | /* | ||
979 | * disable hlt during certain critical i/o operations | ||
980 | */ | ||
981 | #define HAVE_DISABLE_HLT | ||
982 | #endif | ||
983 | |||
984 | void disable_hlt(void); | ||
985 | void enable_hlt(void); | ||
986 | |||
987 | void cpu_idle_wait(void); | 977 | void cpu_idle_wait(void); |
988 | 978 | ||
989 | extern unsigned long arch_align_stack(unsigned long sp); | 979 | extern unsigned long arch_align_stack(unsigned long sp); |
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 4a085383af27..5ca71c065eef 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -257,7 +257,7 @@ struct sigcontext {
257 | __u64 oldmask; | 257 | __u64 oldmask; |
258 | __u64 cr2; | 258 | __u64 cr2; |
259 | struct _fpstate __user *fpstate; /* zero when no FPU context */ | 259 | struct _fpstate __user *fpstate; /* zero when no FPU context */ |
260 | #ifndef __LP64__ | 260 | #ifdef __ILP32__ |
261 | __u32 __fpstate_pad; | 261 | __u32 __fpstate_pad; |
262 | #endif | 262 | #endif |
263 | __u64 reserved1[8]; | 263 | __u64 reserved1[8]; |
diff --git a/arch/x86/include/asm/siginfo.h b/arch/x86/include/asm/siginfo.h
index fc1aa5535646..34c47b3341c0 100644
--- a/arch/x86/include/asm/siginfo.h
+++ b/arch/x86/include/asm/siginfo.h
@@ -2,7 +2,13 @@
2 | #define _ASM_X86_SIGINFO_H | 2 | #define _ASM_X86_SIGINFO_H |
3 | 3 | ||
4 | #ifdef __x86_64__ | 4 | #ifdef __x86_64__ |
5 | # define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) | 5 | # ifdef __ILP32__ /* x32 */ |
6 | typedef long long __kernel_si_clock_t __attribute__((aligned(4))); | ||
7 | # define __ARCH_SI_CLOCK_T __kernel_si_clock_t | ||
8 | # define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8))) | ||
9 | # else /* x86-64 */ | ||
10 | # define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) | ||
11 | # endif | ||
6 | #endif | 12 | #endif |
7 | 13 | ||
8 | #include <asm-generic/siginfo.h> | 14 | #include <asm-generic/siginfo.h> |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8be5f54d9360..e0544597cfe7 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -557,6 +557,8 @@ struct __large_struct { unsigned long buf[100]; };
557 | 557 | ||
558 | extern unsigned long | 558 | extern unsigned long |
559 | copy_from_user_nmi(void *to, const void __user *from, unsigned long n); | 559 | copy_from_user_nmi(void *to, const void __user *from, unsigned long n); |
560 | extern __must_check long | ||
561 | strncpy_from_user(char *dst, const char __user *src, long count); | ||
560 | 562 | ||
561 | /* | 563 | /* |
562 | * movsl can be slow when source and dest are not both 8-byte aligned | 564 | * movsl can be slow when source and dest are not both 8-byte aligned |
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803cc602..8084bc73b18c 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,11 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
213 | return n; | 213 | return n; |
214 | } | 214 | } |
215 | 215 | ||
216 | long __must_check strncpy_from_user(char *dst, const char __user *src, | ||
217 | long count); | ||
218 | long __must_check __strncpy_from_user(char *dst, | ||
219 | const char __user *src, long count); | ||
220 | |||
221 | /** | 216 | /** |
222 | * strlen_user: - Get the size of a string in user space. | 217 | * strlen_user: - Get the size of a string in user space. |
223 | * @str: The string to measure. | 218 | * @str: The string to measure. |
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30971ad..fcd4b6f3ef02 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,10 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
208 | } | 208 | } |
209 | } | 209 | } |
210 | 210 | ||
211 | __must_check long | ||
212 | strncpy_from_user(char *dst, const char __user *src, long count); | ||
213 | __must_check long | ||
214 | __strncpy_from_user(char *dst, const char __user *src, long count); | ||
215 | __must_check long strnlen_user(const char __user *str, long n); | 211 | __must_check long strnlen_user(const char __user *str, long n); |
216 | __must_check long __strnlen_user(const char __user *str, long n); | 212 | __must_check long __strnlen_user(const char __user *str, long n); |
217 | __must_check long strlen_user(const char __user *str); | 213 | __must_check long strlen_user(const char __user *str); |
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 37cdc9d99bb1..4437001d8e3d 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -63,10 +63,10 @@
63 | #else | 63 | #else |
64 | # ifdef __i386__ | 64 | # ifdef __i386__ |
65 | # include <asm/unistd_32.h> | 65 | # include <asm/unistd_32.h> |
66 | # elif defined(__LP64__) | 66 | # elif defined(__ILP32__) |
67 | # include <asm/unistd_64.h> | ||
68 | # else | ||
69 | # include <asm/unistd_x32.h> | 67 | # include <asm/unistd_x32.h> |
68 | # else | ||
69 | # include <asm/unistd_64.h> | ||
70 | # endif | 70 | # endif |
71 | #endif | 71 | #endif |
72 | 72 | ||
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..e58f03b206c3
--- /dev/null
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -0,0 +1,79 @@
1 | #ifndef _ASM_WORD_AT_A_TIME_H | ||
2 | #define _ASM_WORD_AT_A_TIME_H | ||
3 | |||
4 | /* | ||
5 | * This is largely generic for little-endian machines, but the | ||
6 | * optimal byte mask counting is probably going to be something | ||
7 | * that is architecture-specific. If you have a reliably fast | ||
8 | * bit count instruction, that might be better than the multiply | ||
9 | * and shift, for example. | ||
10 | */ | ||
11 | |||
12 | #ifdef CONFIG_64BIT | ||
13 | |||
14 | /* | ||
15 | * Jan Achrenius on G+: microoptimized version of | ||
16 | * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" | ||
17 | * that works for the bytemasks without having to | ||
18 | * mask them first. | ||
19 | */ | ||
20 | static inline long count_masked_bytes(unsigned long mask) | ||
21 | { | ||
22 | return mask*0x0001020304050608ul >> 56; | ||
23 | } | ||
24 | |||
25 | #else /* 32-bit case */ | ||
26 | |||
27 | /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ | ||
28 | static inline long count_masked_bytes(long mask) | ||
29 | { | ||
30 | /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ | ||
31 | long a = (0x0ff0001+mask) >> 23; | ||
32 | /* Fix the 1 for 00 case */ | ||
33 | return a & mask; | ||
34 | } | ||
35 | |||
36 | #endif | ||
37 | |||
38 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) | ||
39 | |||
40 | /* Return the high bit set in the first byte that is a zero */ | ||
41 | static inline unsigned long has_zero(unsigned long a) | ||
42 | { | ||
43 | return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * Load an unaligned word from kernel space. | ||
48 | * | ||
49 | * In the (very unlikely) case of the word being a page-crosser | ||
50 | * and the next page not being mapped, take the exception and | ||
51 | * return zeroes in the non-existing part. | ||
52 | */ | ||
53 | static inline unsigned long load_unaligned_zeropad(const void *addr) | ||
54 | { | ||
55 | unsigned long ret, dummy; | ||
56 | |||
57 | asm( | ||
58 | "1:\tmov %2,%0\n" | ||
59 | "2:\n" | ||
60 | ".section .fixup,\"ax\"\n" | ||
61 | "3:\t" | ||
62 | "lea %2,%1\n\t" | ||
63 | "and %3,%1\n\t" | ||
64 | "mov (%1),%0\n\t" | ||
65 | "leal %2,%%ecx\n\t" | ||
66 | "andl %4,%%ecx\n\t" | ||
67 | "shll $3,%%ecx\n\t" | ||
68 | "shr %%cl,%0\n\t" | ||
69 | "jmp 2b\n" | ||
70 | ".previous\n" | ||
71 | _ASM_EXTABLE(1b, 3b) | ||
72 | :"=&r" (ret),"=&c" (dummy) | ||
73 | :"m" (*(unsigned long *)addr), | ||
74 | "i" (-sizeof(unsigned long)), | ||
75 | "i" (sizeof(unsigned long)-1)); | ||
76 | return ret; | ||
77 | } | ||
78 | |||
79 | #endif /* _ASM_WORD_AT_A_TIME_H */ | ||
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index baaca8defec8..764b66a4cf89 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -195,6 +195,5 @@ extern struct x86_msi_ops x86_msi;
195 | 195 | ||
196 | extern void x86_init_noop(void); | 196 | extern void x86_init_noop(void); |
197 | extern void x86_init_uint_noop(unsigned int unused); | 197 | extern void x86_init_uint_noop(unsigned int unused); |
198 | extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node); | ||
199 | 198 | ||
200 | #endif | 199 | #endif |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 103b6ab368d3..146a49c763a4 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags;
24 | static char temp_stack[4096]; | 24 | static char temp_stack[4096]; |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | asmlinkage void acpi_enter_s3(void) | ||
28 | { | ||
29 | acpi_enter_sleep_state(3, wake_sleep_flags); | ||
30 | } | ||
27 | /** | 31 | /** |
28 | * acpi_suspend_lowlevel - save kernel state | 32 | * acpi_suspend_lowlevel - save kernel state |
29 | * | 33 | * |
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 416d4be13fef..d68677a2a010 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -3,12 +3,16 @@
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <asm/trampoline.h> | 5 | #include <asm/trampoline.h> |
6 | #include <linux/linkage.h> | ||
6 | 7 | ||
7 | extern unsigned long saved_video_mode; | 8 | extern unsigned long saved_video_mode; |
8 | extern long saved_magic; | 9 | extern long saved_magic; |
9 | 10 | ||
10 | extern int wakeup_pmode_return; | 11 | extern int wakeup_pmode_return; |
11 | 12 | ||
13 | extern u8 wake_sleep_flags; | ||
14 | extern asmlinkage void acpi_enter_s3(void); | ||
15 | |||
12 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); | 16 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); |
13 | extern void wakeup_long64(void); | 17 | extern void wakeup_long64(void); |
14 | 18 | ||
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 13ab720573e3..72610839f03b 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,9 +74,7 @@ restore_registers:
74 | ENTRY(do_suspend_lowlevel) | 74 | ENTRY(do_suspend_lowlevel) |
75 | call save_processor_state | 75 | call save_processor_state |
76 | call save_registers | 76 | call save_registers |
77 | pushl $3 | 77 | call acpi_enter_s3 |
78 | call acpi_enter_sleep_state | ||
79 | addl $4, %esp | ||
80 | 78 | ||
81 | # In case of S3 failure, we'll emerge here. Jump | 79 | # In case of S3 failure, we'll emerge here. Jump |
82 | # to ret_point to recover | 80 | # to ret_point to recover |
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd04..014d1d28c397 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
71 | movq %rsi, saved_rsi | 71 | movq %rsi, saved_rsi |
72 | 72 | ||
73 | addq $8, %rsp | 73 | addq $8, %rsp |
74 | movl $3, %edi | 74 | call acpi_enter_s3 |
75 | xorl %eax, %eax | ||
76 | call acpi_enter_sleep_state | ||
77 | /* in case something went wrong, restore the machine status and go on */ | 75 | /* in case something went wrong, restore the machine status and go on */ |
78 | jmp resume_point | 76 | jmp resume_point |
79 | 77 | ||
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index b1e7c7f7a0af..e66311200cbd 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -477,7 +477,7 @@ error:
477 | /* allocate and map a coherent mapping */ | 477 | /* allocate and map a coherent mapping */ |
478 | static void * | 478 | static void * |
479 | gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, | 479 | gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, |
480 | gfp_t flag) | 480 | gfp_t flag, struct dma_attrs *attrs) |
481 | { | 481 | { |
482 | dma_addr_t paddr; | 482 | dma_addr_t paddr; |
483 | unsigned long align_mask; | 483 | unsigned long align_mask; |
@@ -500,7 +500,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
500 | } | 500 | } |
501 | __free_pages(page, get_order(size)); | 501 | __free_pages(page, get_order(size)); |
502 | } else | 502 | } else |
503 | return dma_generic_alloc_coherent(dev, size, dma_addr, flag); | 503 | return dma_generic_alloc_coherent(dev, size, dma_addr, flag, |
504 | attrs); | ||
504 | 505 | ||
505 | return NULL; | 506 | return NULL; |
506 | } | 507 | } |
@@ -508,7 +509,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
508 | /* free a coherent mapping */ | 509 | /* free a coherent mapping */ |
509 | static void | 510 | static void |
510 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, | 511 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, |
511 | dma_addr_t dma_addr) | 512 | dma_addr_t dma_addr, struct dma_attrs *attrs) |
512 | { | 513 | { |
513 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); | 514 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); |
514 | free_pages((unsigned long)vaddr, get_order(size)); | 515 | free_pages((unsigned long)vaddr, get_order(size)); |
@@ -700,8 +701,8 @@ static struct dma_map_ops gart_dma_ops = {
700 | .unmap_sg = gart_unmap_sg, | 701 | .unmap_sg = gart_unmap_sg, |
701 | .map_page = gart_map_page, | 702 | .map_page = gart_map_page, |
702 | .unmap_page = gart_unmap_page, | 703 | .unmap_page = gart_unmap_page, |
703 | .alloc_coherent = gart_alloc_coherent, | 704 | .alloc = gart_alloc_coherent, |
704 | .free_coherent = gart_free_coherent, | 705 | .free = gart_free_coherent, |
705 | .mapping_error = gart_mapping_error, | 706 | .mapping_error = gart_mapping_error, |
706 | }; | 707 | }; |
707 | 708 | ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 11544d8f1e97..edc24480469f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1637,9 +1637,11 @@ static int __init apic_verify(void)
1637 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | 1637 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; |
1638 | 1638 | ||
1639 | /* The BIOS may have set up the APIC at some other address */ | 1639 | /* The BIOS may have set up the APIC at some other address */ |
1640 | rdmsr(MSR_IA32_APICBASE, l, h); | 1640 | if (boot_cpu_data.x86 >= 6) { |
1641 | if (l & MSR_IA32_APICBASE_ENABLE) | 1641 | rdmsr(MSR_IA32_APICBASE, l, h); |
1642 | mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; | 1642 | if (l & MSR_IA32_APICBASE_ENABLE) |
1643 | mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; | ||
1644 | } | ||
1643 | 1645 | ||
1644 | pr_info("Found and enabled local APIC!\n"); | 1646 | pr_info("Found and enabled local APIC!\n"); |
1645 | return 0; | 1647 | return 0; |
@@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr)
1657 | * MSR. This can only be done in software for Intel P6 or later | 1659 | * MSR. This can only be done in software for Intel P6 or later |
1658 | * and AMD K7 (Model > 1) or later. | 1660 | * and AMD K7 (Model > 1) or later. |
1659 | */ | 1661 | */ |
1660 | rdmsr(MSR_IA32_APICBASE, l, h); | 1662 | if (boot_cpu_data.x86 >= 6) { |
1661 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { | 1663 | rdmsr(MSR_IA32_APICBASE, l, h); |
1662 | pr_info("Local APIC disabled by BIOS -- reenabling.\n"); | 1664 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { |
1663 | l &= ~MSR_IA32_APICBASE_BASE; | 1665 | pr_info("Local APIC disabled by BIOS -- reenabling.\n"); |
1664 | l |= MSR_IA32_APICBASE_ENABLE | addr; | 1666 | l &= ~MSR_IA32_APICBASE_BASE; |
1665 | wrmsr(MSR_IA32_APICBASE, l, h); | 1667 | l |= MSR_IA32_APICBASE_ENABLE | addr; |
1666 | enabled_via_apicbase = 1; | 1668 | wrmsr(MSR_IA32_APICBASE, l, h); |
1669 | enabled_via_apicbase = 1; | ||
1670 | } | ||
1667 | } | 1671 | } |
1668 | return apic_verify(); | 1672 | return apic_verify(); |
1669 | } | 1673 | } |
@@ -2209,10 +2213,12 @@ static void lapic_resume(void)
2209 | * FIXME! This will be wrong if we ever support suspend on | 2213 | * FIXME! This will be wrong if we ever support suspend on |
2210 | * SMP! We'll need to do this as part of the CPU restore! | 2214 | * SMP! We'll need to do this as part of the CPU restore! |
2211 | */ | 2215 | */ |
2212 | rdmsr(MSR_IA32_APICBASE, l, h); | 2216 | if (boot_cpu_data.x86 >= 6) { |
2213 | l &= ~MSR_IA32_APICBASE_BASE; | 2217 | rdmsr(MSR_IA32_APICBASE, l, h); |
2214 | l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; | 2218 | l &= ~MSR_IA32_APICBASE_BASE; |
2215 | wrmsr(MSR_IA32_APICBASE, l, h); | 2219 | l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; |
2220 | wrmsr(MSR_IA32_APICBASE, l, h); | ||
2221 | } | ||
2216 | } | 2222 | } |
2217 | 2223 | ||
2218 | maxlvt = lapic_get_maxlvt(); | 2224 | maxlvt = lapic_get_maxlvt(); |
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 899803e03214..23e75422e013 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -207,8 +207,11 @@ static void __init map_csrs(void)
207 | 207 | ||
208 | static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) | 208 | static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) |
209 | { | 209 | { |
210 | c->phys_proc_id = node; | 210 | |
211 | per_cpu(cpu_llc_id, smp_processor_id()) = node; | 211 | if (c->phys_proc_id != node) { |
212 | c->phys_proc_id = node; | ||
213 | per_cpu(cpu_llc_id, smp_processor_id()) = node; | ||
214 | } | ||
212 | } | 215 | } |
213 | 216 | ||
214 | static int __init numachip_system_init(void) | 217 | static int __init numachip_system_init(void) |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8a778db45e3a..991e315f4227 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
24 | { | 24 | { |
25 | if (x2apic_phys) | 25 | if (x2apic_phys) |
26 | return x2apic_enabled(); | 26 | return x2apic_enabled(); |
27 | else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && | ||
28 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && | ||
29 | x2apic_enabled()) { | ||
30 | printk(KERN_DEBUG "System requires x2apic physical mode\n"); | ||
31 | return 1; | ||
32 | } | ||
27 | else | 33 | else |
28 | return 0; | 34 | return 0; |
29 | } | 35 | } |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0a44b90602b0..146bb6218eec 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -26,7 +26,8 @@
26 | * contact AMD for precise details and a CPU swap. | 26 | * contact AMD for precise details and a CPU swap. |
27 | * | 27 | * |
28 | * See http://www.multimania.com/poulot/k6bug.html | 28 | * See http://www.multimania.com/poulot/k6bug.html |
29 | * http://www.amd.com/K6/k6docs/revgd.html | 29 | * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6" |
30 | * (Publication # 21266 Issue Date: August 1998) | ||
30 | * | 31 | * |
31 | * The following test is erm.. interesting. AMD neglected to up | 32 | * The following test is erm.. interesting. AMD neglected to up |
32 | * the chip setting when fixing the bug but they also tweaked some | 33 | * the chip setting when fixing the bug but they also tweaked some |
@@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
94 | "system stability may be impaired when more than 32 MB are used.\n"); | 95 | "system stability may be impaired when more than 32 MB are used.\n"); |
95 | else | 96 | else |
96 | printk(KERN_CONT "probably OK (after B9730xxxx).\n"); | 97 | printk(KERN_CONT "probably OK (after B9730xxxx).\n"); |
97 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | ||
98 | } | 98 | } |
99 | 99 | ||
100 | /* K6 with old style WHCR */ | 100 | /* K6 with old style WHCR */ |
@@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
353 | node = per_cpu(cpu_llc_id, cpu); | 353 | node = per_cpu(cpu_llc_id, cpu); |
354 | 354 | ||
355 | /* | 355 | /* |
356 | * If core numbers are inconsistent, it's likely a multi-fabric platform, | 356 | * On multi-fabric platform (e.g. Numascale NumaChip) a |
357 | * so invoke platform-specific handler | 357 | * platform-specific handler needs to be called to fixup some |
358 | * IDs of the CPU. | ||
358 | */ | 359 | */ |
359 | if (c->phys_proc_id != node) | 360 | if (x86_cpuinit.fixup_cpu_id) |
360 | x86_cpuinit.fixup_cpu_id(c, node); | 361 | x86_cpuinit.fixup_cpu_id(c, node); |
361 | 362 | ||
362 | if (!node_online(node)) { | 363 | if (!node_online(node)) { |
@@ -579,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
579 | } | 580 | } |
580 | } | 581 | } |
581 | 582 | ||
583 | /* re-enable TopologyExtensions if switched off by BIOS */ | ||
584 | if ((c->x86 == 0x15) && | ||
585 | (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && | ||
586 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { | ||
587 | u64 val; | ||
588 | |||
589 | if (!rdmsrl_amd_safe(0xc0011005, &val)) { | ||
590 | val |= 1ULL << 54; | ||
591 | wrmsrl_amd_safe(0xc0011005, val); | ||
592 | rdmsrl(0xc0011005, val); | ||
593 | if (val & (1ULL << 54)) { | ||
594 | set_cpu_cap(c, X86_FEATURE_TOPOEXT); | ||
595 | printk(KERN_INFO FW_INFO "CPU: Re-enabling " | ||
596 | "disabled Topology Extensions Support\n"); | ||
597 | } | ||
598 | } | ||
599 | } | ||
600 | |||
582 | cpu_detect_cache_sizes(c); | 601 | cpu_detect_cache_sizes(c); |
583 | 602 | ||
584 | /* Multi core CPU? */ | 603 | /* Multi core CPU? */ |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e258362a3d..cf79302198a6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1163,15 +1163,6 @@ static void dbg_restore_debug_regs(void)
1163 | #endif /* ! CONFIG_KGDB */ | 1163 | #endif /* ! CONFIG_KGDB */ |
1164 | 1164 | ||
1165 | /* | 1165 | /* |
1166 | * Prints an error where the NUMA and configured core-number mismatch and the | ||
1167 | * platform didn't override this to fix it up | ||
1168 | */ | ||
1169 | void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node) | ||
1170 | { | ||
1171 | pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id); | ||
1172 | } | ||
1173 | |||
1174 | /* | ||
1175 | * cpu_init() initializes state that is per-CPU. Some data is already | 1166 | * cpu_init() initializes state that is per-CPU. Some data is already |
1176 | * initialized (naturally) in the bootstrap process, such as the GDT | 1167 | * initialized (naturally) in the bootstrap process, such as the GDT |
1177 | * and IDT. We reload them nevertheless, this function acts as a | 1168 | * and IDT. We reload them nevertheless, this function acts as a |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 73d08ed98a64..b8f3653dddbc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
433 | /* check if @slot is already used or the index is already disabled */ | 433 | /* check if @slot is already used or the index is already disabled */ |
434 | ret = amd_get_l3_disable_slot(nb, slot); | 434 | ret = amd_get_l3_disable_slot(nb, slot); |
435 | if (ret >= 0) | 435 | if (ret >= 0) |
436 | return -EINVAL; | 436 | return -EEXIST; |
437 | 437 | ||
438 | if (index > nb->l3_cache.indices) | 438 | if (index > nb->l3_cache.indices) |
439 | return -EINVAL; | 439 | return -EINVAL; |
440 | 440 | ||
441 | /* check whether the other slot has disabled the same index already */ | 441 | /* check whether the other slot has disabled the same index already */ |
442 | if (index == amd_get_l3_disable_slot(nb, !slot)) | 442 | if (index == amd_get_l3_disable_slot(nb, !slot)) |
443 | return -EINVAL; | 443 | return -EEXIST; |
444 | 444 | ||
445 | amd_l3_disable_index(nb, cpu, slot, index); | 445 | amd_l3_disable_index(nb, cpu, slot, index); |
446 | 446 | ||
@@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
468 | err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); | 468 | err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); |
469 | if (err) { | 469 | if (err) { |
470 | if (err == -EEXIST) | 470 | if (err == -EEXIST) |
471 | printk(KERN_WARNING "L3 disable slot %d in use!\n", | 471 | pr_warning("L3 slot %d in use/index already disabled!\n", |
472 | slot); | 472 | slot); |
473 | return err; | 473 | return err; |
474 | } | 474 | } |
475 | return count; | 475 | return count; |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index ef484d9d0a25..a2dfacfd7103 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1271,6 +1271,17 @@ done:
1271 | return num ? -EINVAL : 0; | 1271 | return num ? -EINVAL : 0; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | PMU_FORMAT_ATTR(cccr, "config:0-31" ); | ||
1275 | PMU_FORMAT_ATTR(escr, "config:32-62"); | ||
1276 | PMU_FORMAT_ATTR(ht, "config:63" ); | ||
1277 | |||
1278 | static struct attribute *intel_p4_formats_attr[] = { | ||
1279 | &format_attr_cccr.attr, | ||
1280 | &format_attr_escr.attr, | ||
1281 | &format_attr_ht.attr, | ||
1282 | NULL, | ||
1283 | }; | ||
1284 | |||
1274 | static __initconst const struct x86_pmu p4_pmu = { | 1285 | static __initconst const struct x86_pmu p4_pmu = { |
1275 | .name = "Netburst P4/Xeon", | 1286 | .name = "Netburst P4/Xeon", |
1276 | .handle_irq = p4_pmu_handle_irq, | 1287 | .handle_irq = p4_pmu_handle_irq, |
@@ -1305,6 +1316,8 @@ static __initconst const struct x86_pmu p4_pmu = {
1305 | * the former idea is taken from OProfile code | 1316 | * the former idea is taken from OProfile code |
1306 | */ | 1317 | */ |
1307 | .perfctr_second_write = 1, | 1318 | .perfctr_second_write = 1, |
1319 | |||
1320 | .format_attrs = intel_p4_formats_attr, | ||
1308 | }; | 1321 | }; |
1309 | 1322 | ||
1310 | __init int p4_pmu_init(void) | 1323 | __init int p4_pmu_init(void) |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 7734bcbb5a3a..2d6e6498c176 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk)
235 | if (tsk_used_math(tsk)) { | 235 | if (tsk_used_math(tsk)) { |
236 | if (HAVE_HWFP && tsk == current) | 236 | if (HAVE_HWFP && tsk == current) |
237 | unlazy_fpu(tsk); | 237 | unlazy_fpu(tsk); |
238 | tsk->thread.fpu.last_cpu = ~0; | ||
238 | return 0; | 239 | return 0; |
239 | } | 240 | } |
240 | 241 | ||
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7943e0c21bde..3dafc6003b7c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -282,8 +282,13 @@ void fixup_irqs(void)
282 | else if (!(warned++)) | 282 | else if (!(warned++)) |
283 | set_affinity = 0; | 283 | set_affinity = 0; |
284 | 284 | ||
285 | /* | ||
286 | * We unmask if the irq was not marked masked by the | ||
287 | * core code. That respects the lazy irq disable | ||
288 | * behaviour. | ||
289 | */ | ||
285 | if (!irqd_can_move_in_process_context(data) && | 290 | if (!irqd_can_move_in_process_context(data) && |
286 | !irqd_irq_disabled(data) && chip->irq_unmask) | 291 | !irqd_irq_masked(data) && chip->irq_unmask) |
287 | chip->irq_unmask(data); | 292 | chip->irq_unmask(data); |
288 | 293 | ||
289 | raw_spin_unlock(&desc->lock); | 294 | raw_spin_unlock(&desc->lock); |
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 90fcf62854bb..1d5d31ea686b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -68,16 +68,9 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
68 | return count; | 68 | return count; |
69 | } | 69 | } |
70 | 70 | ||
71 | static int setup_data_open(struct inode *inode, struct file *file) | ||
72 | { | ||
73 | file->private_data = inode->i_private; | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | static const struct file_operations fops_setup_data = { | 71 | static const struct file_operations fops_setup_data = { |
79 | .read = setup_data_read, | 72 | .read = setup_data_read, |
80 | .open = setup_data_open, | 73 | .open = simple_open, |
81 | .llseek = default_llseek, | 74 | .llseek = default_llseek, |
82 | }; | 75 | }; |
83 | 76 | ||
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index db6720edfdd0..8bfb6146f753 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -43,6 +43,8 @@
43 | #include <linux/smp.h> | 43 | #include <linux/smp.h> |
44 | #include <linux/nmi.h> | 44 | #include <linux/nmi.h> |
45 | #include <linux/hw_breakpoint.h> | 45 | #include <linux/hw_breakpoint.h> |
46 | #include <linux/uaccess.h> | ||
47 | #include <linux/memory.h> | ||
46 | 48 | ||
47 | #include <asm/debugreg.h> | 49 | #include <asm/debugreg.h> |
48 | #include <asm/apicdef.h> | 50 | #include <asm/apicdef.h> |
@@ -741,6 +743,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
741 | regs->ip = ip; | 743 | regs->ip = ip; |
742 | } | 744 | } |
743 | 745 | ||
746 | int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) | ||
747 | { | ||
748 | int err; | ||
749 | char opc[BREAK_INSTR_SIZE]; | ||
750 | |||
751 | bpt->type = BP_BREAKPOINT; | ||
752 | err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, | ||
753 | BREAK_INSTR_SIZE); | ||
754 | if (err) | ||
755 | return err; | ||
756 | err = probe_kernel_write((char *)bpt->bpt_addr, | ||
757 | arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); | ||
758 | #ifdef CONFIG_DEBUG_RODATA | ||
759 | if (!err) | ||
760 | return err; | ||
761 | /* | ||
762 | * It is safe to call text_poke() because normal kernel execution | ||
763 | * is stopped on all cores, so long as the text_mutex is not locked. | ||
764 | */ | ||
765 | if (mutex_is_locked(&text_mutex)) | ||
766 | return -EBUSY; | ||
767 | text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, | ||
768 | BREAK_INSTR_SIZE); | ||
769 | err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); | ||
770 | if (err) | ||
771 | return err; | ||
772 | if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) | ||
773 | return -EINVAL; | ||
774 | bpt->type = BP_POKE_BREAKPOINT; | ||
775 | #endif /* CONFIG_DEBUG_RODATA */ | ||
776 | return err; | ||
777 | } | ||
778 | |||
779 | int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) | ||
780 | { | ||
781 | #ifdef CONFIG_DEBUG_RODATA | ||
782 | int err; | ||
783 | char opc[BREAK_INSTR_SIZE]; | ||
784 | |||
785 | if (bpt->type != BP_POKE_BREAKPOINT) | ||
786 | goto knl_write; | ||
787 | /* | ||
788 | * It is safe to call text_poke() because normal kernel execution | ||
789 | * is stopped on all cores, so long as the text_mutex is not locked. | ||
790 | */ | ||
791 | if (mutex_is_locked(&text_mutex)) | ||
792 | goto knl_write; | ||
793 | text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); | ||
794 | err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); | ||
795 | if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) | ||
796 | goto knl_write; | ||
797 | return err; | ||
798 | knl_write: | ||
799 | #endif /* CONFIG_DEBUG_RODATA */ | ||
800 | return probe_kernel_write((char *)bpt->bpt_addr, | ||
801 | (char *)bpt->saved_instr, BREAK_INSTR_SIZE); | ||
802 | } | ||
803 | |||
744 | struct kgdb_arch arch_kgdb_ops = { | 804 | struct kgdb_arch arch_kgdb_ops = { |
745 | /* Breakpoint instruction: */ | 805 | /* Breakpoint instruction: */ |
746 | .gdb_bpt_instr = { 0xcc }, | 806 | .gdb_bpt_instr = { 0xcc }, |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 694d801bf606..b8ba6e4a27e4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -38,6 +38,7 @@
38 | #include <asm/traps.h> | 38 | #include <asm/traps.h> |
39 | #include <asm/desc.h> | 39 | #include <asm/desc.h> |
40 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
41 | #include <asm/idle.h> | ||
41 | 42 | ||
42 | static int kvmapf = 1; | 43 | static int kvmapf = 1; |
43 | 44 | ||
@@ -253,7 +254,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
253 | kvm_async_pf_task_wait((u32)read_cr2()); | 254 | kvm_async_pf_task_wait((u32)read_cr2()); |
254 | break; | 255 | break; |
255 | case KVM_PV_REASON_PAGE_READY: | 256 | case KVM_PV_REASON_PAGE_READY: |
257 | rcu_irq_enter(); | ||
258 | exit_idle(); | ||
256 | kvm_async_pf_task_wake((u32)read_cr2()); | 259 | kvm_async_pf_task_wake((u32)read_cr2()); |
260 | rcu_irq_exit(); | ||
257 | break; | 261 | break; |
258 | } | 262 | } |
259 | } | 263 | } |
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 73465aab28f8..8a2ce8fd41c0 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
82 | { | 82 | { |
83 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 83 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
84 | 84 | ||
85 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | ||
86 | pr_warning("CPU%d: family %d not supported\n", cpu, c->x86); | ||
87 | return -1; | ||
88 | } | ||
89 | |||
90 | csig->rev = c->microcode; | 85 | csig->rev = c->microcode; |
91 | pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); | 86 | pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); |
92 | 87 | ||
@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {
380 | 375 | ||
381 | struct microcode_ops * __init init_amd_microcode(void) | 376 | struct microcode_ops * __init init_amd_microcode(void) |
382 | { | 377 | { |
378 | struct cpuinfo_x86 *c = &cpu_data(0); | ||
379 | |||
380 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | ||
381 | pr_warning("AMD CPU family 0x%x not supported\n", c->x86); | ||
382 | return NULL; | ||
383 | } | ||
384 | |||
383 | patch = (void *)get_zeroed_page(GFP_KERNEL); | 385 | patch = (void *)get_zeroed_page(GFP_KERNEL); |
384 | if (!patch) | 386 | if (!patch) |
385 | return NULL; | 387 | return NULL; |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 87a0f8688301..c9bda6d6035c 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
419 | if (err) | 419 | if (err) |
420 | return err; | 420 | return err; |
421 | 421 | ||
422 | if (microcode_init_cpu(cpu) == UCODE_ERROR) { | 422 | if (microcode_init_cpu(cpu) == UCODE_ERROR) |
423 | sysfs_remove_group(&dev->kobj, &mc_attr_group); | ||
424 | return -EINVAL; | 423 | return -EINVAL; |
425 | } | ||
426 | 424 | ||
427 | return err; | 425 | return err; |
428 | } | 426 | } |
@@ -528,11 +526,11 @@ static int __init microcode_init(void)
528 | microcode_ops = init_intel_microcode(); | 526 | microcode_ops = init_intel_microcode(); |
529 | else if (c->x86_vendor == X86_VENDOR_AMD) | 527 | else if (c->x86_vendor == X86_VENDOR_AMD) |
530 | microcode_ops = init_amd_microcode(); | 528 | microcode_ops = init_amd_microcode(); |
531 | 529 | else | |
532 | if (!microcode_ops) { | ||
533 | pr_err("no support for this CPU vendor\n"); | 530 | pr_err("no support for this CPU vendor\n"); |
531 | |||
532 | if (!microcode_ops) | ||
534 | return -ENODEV; | 533 | return -ENODEV; |
535 | } | ||
536 | 534 | ||
537 | microcode_pdev = platform_device_register_simple("microcode", -1, | 535 | microcode_pdev = platform_device_register_simple("microcode", -1, |
538 | NULL, 0); | 536 | NULL, 0); |
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 6ac5782f4d6b..d0b2fb9ccbb1 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -430,7 +430,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
430 | } | 430 | } |
431 | 431 | ||
432 | static void* calgary_alloc_coherent(struct device *dev, size_t size, | 432 | static void* calgary_alloc_coherent(struct device *dev, size_t size, |
433 | dma_addr_t *dma_handle, gfp_t flag) | 433 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) |
434 | { | 434 | { |
435 | void *ret = NULL; | 435 | void *ret = NULL; |
436 | dma_addr_t mapping; | 436 | dma_addr_t mapping; |
@@ -463,7 +463,8 @@ error: | |||
463 | } | 463 | } |
464 | 464 | ||
465 | static void calgary_free_coherent(struct device *dev, size_t size, | 465 | static void calgary_free_coherent(struct device *dev, size_t size, |
466 | void *vaddr, dma_addr_t dma_handle) | 466 | void *vaddr, dma_addr_t dma_handle, |
467 | struct dma_attrs *attrs) | ||
467 | { | 468 | { |
468 | unsigned int npages; | 469 | unsigned int npages; |
469 | struct iommu_table *tbl = find_iommu_table(dev); | 470 | struct iommu_table *tbl = find_iommu_table(dev); |
@@ -476,8 +477,8 @@ static void calgary_free_coherent(struct device *dev, size_t size, | |||
476 | } | 477 | } |
477 | 478 | ||
478 | static struct dma_map_ops calgary_dma_ops = { | 479 | static struct dma_map_ops calgary_dma_ops = { |
479 | .alloc_coherent = calgary_alloc_coherent, | 480 | .alloc = calgary_alloc_coherent, |
480 | .free_coherent = calgary_free_coherent, | 481 | .free = calgary_free_coherent, |
481 | .map_sg = calgary_map_sg, | 482 | .map_sg = calgary_map_sg, |
482 | .unmap_sg = calgary_unmap_sg, | 483 | .unmap_sg = calgary_unmap_sg, |
483 | .map_page = calgary_map_page, | 484 | .map_page = calgary_map_page, |
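The Calgary hunks track a tree-wide dma_map_ops interface change: .alloc_coherent/.free_coherent become .alloc/.free, and both callbacks gain a trailing struct dma_attrs *attrs argument. A self-contained C model of the new shape of the ops table (the struct below is a simplified stand-in, not the real <linux/dma-mapping.h> definition):

    #include <stddef.h>

    struct dma_attrs;                       /* opaque, as in the kernel */
    typedef unsigned long long dma_addr_t;  /* stand-in type for the model */

    struct dma_map_ops_model {
        void *(*alloc)(void *dev, size_t size, dma_addr_t *handle,
                       unsigned gfp, struct dma_attrs *attrs);
        void  (*free)(void *dev, size_t size, void *vaddr,
                      dma_addr_t handle, struct dma_attrs *attrs);
    };

Implementations that have no use for attrs, such as the Calgary ones above, simply ignore the extra argument.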
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 28e5e06fcba4..3003250ac51d 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -96,7 +96,8 @@ void __init pci_iommu_alloc(void) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 98 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
99 | dma_addr_t *dma_addr, gfp_t flag) | 99 | dma_addr_t *dma_addr, gfp_t flag, |
100 | struct dma_attrs *attrs) | ||
100 | { | 101 | { |
101 | unsigned long dma_mask; | 102 | unsigned long dma_mask; |
102 | struct page *page; | 103 | struct page *page; |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 3af4af810c07..f96050685b46 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -75,7 +75,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, | |||
75 | } | 75 | } |
76 | 76 | ||
77 | static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, | 77 | static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, |
78 | dma_addr_t dma_addr) | 78 | dma_addr_t dma_addr, struct dma_attrs *attrs) |
79 | { | 79 | { |
80 | free_pages((unsigned long)vaddr, get_order(size)); | 80 | free_pages((unsigned long)vaddr, get_order(size)); |
81 | } | 81 | } |
@@ -96,8 +96,8 @@ static void nommu_sync_sg_for_device(struct device *dev, | |||
96 | } | 96 | } |
97 | 97 | ||
98 | struct dma_map_ops nommu_dma_ops = { | 98 | struct dma_map_ops nommu_dma_ops = { |
99 | .alloc_coherent = dma_generic_alloc_coherent, | 99 | .alloc = dma_generic_alloc_coherent, |
100 | .free_coherent = nommu_free_coherent, | 100 | .free = nommu_free_coherent, |
101 | .map_sg = nommu_map_sg, | 101 | .map_sg = nommu_map_sg, |
102 | .map_page = nommu_map_page, | 102 | .map_page = nommu_map_page, |
103 | .sync_single_for_device = nommu_sync_single_for_device, | 103 | .sync_single_for_device = nommu_sync_single_for_device, |
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 8f972cbddef0..6c483ba98b9c 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -15,21 +15,30 @@ | |||
15 | int swiotlb __read_mostly; | 15 | int swiotlb __read_mostly; |
16 | 16 | ||
17 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 17 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
18 | dma_addr_t *dma_handle, gfp_t flags) | 18 | dma_addr_t *dma_handle, gfp_t flags, |
19 | struct dma_attrs *attrs) | ||
19 | { | 20 | { |
20 | void *vaddr; | 21 | void *vaddr; |
21 | 22 | ||
22 | vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags); | 23 | vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags, |
24 | attrs); | ||
23 | if (vaddr) | 25 | if (vaddr) |
24 | return vaddr; | 26 | return vaddr; |
25 | 27 | ||
26 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); | 28 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); |
27 | } | 29 | } |
28 | 30 | ||
31 | static void x86_swiotlb_free_coherent(struct device *dev, size_t size, | ||
32 | void *vaddr, dma_addr_t dma_addr, | ||
33 | struct dma_attrs *attrs) | ||
34 | { | ||
35 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | ||
36 | } | ||
37 | |||
29 | static struct dma_map_ops swiotlb_dma_ops = { | 38 | static struct dma_map_ops swiotlb_dma_ops = { |
30 | .mapping_error = swiotlb_dma_mapping_error, | 39 | .mapping_error = swiotlb_dma_mapping_error, |
31 | .alloc_coherent = x86_swiotlb_alloc_coherent, | 40 | .alloc = x86_swiotlb_alloc_coherent, |
32 | .free_coherent = swiotlb_free_coherent, | 41 | .free = x86_swiotlb_free_coherent, |
33 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | 42 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, |
34 | .sync_single_for_device = swiotlb_sync_single_for_device, | 43 | .sync_single_for_device = swiotlb_sync_single_for_device, |
35 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 44 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
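swiotlb_free_coherent() keeps its old four-argument signature, so the hunk above adds x86_swiotlb_free_coherent() purely as an adapter that drops the new attrs parameter before forwarding. The same thin-wrapper pattern applies whenever only one side of an interface grows a parameter; a generic sketch (legacy_free is a placeholder name, not a kernel function):

    /* Adapter sketch: satisfy the five-argument .free callback with a
     * backend that never learned about attrs. */
    static void free_adapter(void *dev, size_t size, void *vaddr,
                             dma_addr_t handle, struct dma_attrs *attrs)
    {
        (void)attrs;                    /* backend has no use for it */
        legacy_free(dev, size, vaddr, handle);
    }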
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index a33afaa5ddb7..1d92a5ab6e8b 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -362,34 +362,10 @@ void (*pm_idle)(void); | |||
362 | EXPORT_SYMBOL(pm_idle); | 362 | EXPORT_SYMBOL(pm_idle); |
363 | #endif | 363 | #endif |
364 | 364 | ||
365 | #ifdef CONFIG_X86_32 | ||
366 | /* | ||
367 | * This halt magic was a workaround for ancient floppy DMA | ||
368 | * wreckage. It should be safe to remove. | ||
369 | */ | ||
370 | static int hlt_counter; | ||
371 | void disable_hlt(void) | ||
372 | { | ||
373 | hlt_counter++; | ||
374 | } | ||
375 | EXPORT_SYMBOL(disable_hlt); | ||
376 | |||
377 | void enable_hlt(void) | ||
378 | { | ||
379 | hlt_counter--; | ||
380 | } | ||
381 | EXPORT_SYMBOL(enable_hlt); | ||
382 | |||
383 | static inline int hlt_use_halt(void) | ||
384 | { | ||
385 | return (!hlt_counter && boot_cpu_data.hlt_works_ok); | ||
386 | } | ||
387 | #else | ||
388 | static inline int hlt_use_halt(void) | 365 | static inline int hlt_use_halt(void) |
389 | { | 366 | { |
390 | return 1; | 367 | return 1; |
391 | } | 368 | } |
392 | #endif | ||
393 | 369 | ||
394 | #ifndef CONFIG_SMP | 370 | #ifndef CONFIG_SMP |
395 | static inline void play_dead(void) | 371 | static inline void play_dead(void) |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index f386dc49f988..7515cf0e1805 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -216,9 +216,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) | |||
216 | current_thread_info()->sig_on_uaccess_error = 1; | 216 | current_thread_info()->sig_on_uaccess_error = 1; |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * 0 is a valid user pointer (in the access_ok sense) on 32-bit and | 219 | * NULL is a valid user pointer (in the access_ok sense) on 32-bit and |
220 | * 64-bit, so we don't need to special-case it here. For all the | 220 | * 64-bit, so we don't need to special-case it here. For all the |
221 | * vsyscalls, 0 means "don't write anything" not "write it at | 221 | * vsyscalls, NULL means "don't write anything" not "write it at |
222 | * address 0". | 222 | * address 0". |
223 | */ | 223 | */ |
224 | ret = -EFAULT; | 224 | ret = -EFAULT; |
@@ -247,7 +247,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) | |||
247 | 247 | ||
248 | ret = sys_getcpu((unsigned __user *)regs->di, | 248 | ret = sys_getcpu((unsigned __user *)regs->di, |
249 | (unsigned __user *)regs->si, | 249 | (unsigned __user *)regs->si, |
250 | 0); | 250 | NULL); |
251 | break; | 251 | break; |
252 | } | 252 | } |
253 | 253 | ||
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index e9f265fd79ae..9cf71d0b2d37 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = { | |||
93 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { | 93 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { |
94 | .early_percpu_clock_init = x86_init_noop, | 94 | .early_percpu_clock_init = x86_init_noop, |
95 | .setup_percpu_clockev = setup_secondary_APIC_clock, | 95 | .setup_percpu_clockev = setup_secondary_APIC_clock, |
96 | .fixup_cpu_id = x86_default_fixup_cpu_id, | ||
97 | }; | 96 | }; |
98 | 97 | ||
99 | static void default_nmi_init(void) { }; | 98 | static void default_nmi_init(void) { }; |
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index a73f0c104813..2e88438ffd83 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c | |||
@@ -369,7 +369,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) | |||
369 | case MSR_CORE_PERF_FIXED_CTR_CTRL: | 369 | case MSR_CORE_PERF_FIXED_CTR_CTRL: |
370 | if (pmu->fixed_ctr_ctrl == data) | 370 | if (pmu->fixed_ctr_ctrl == data) |
371 | return 0; | 371 | return 0; |
372 | if (!(data & 0xfffffffffffff444)) { | 372 | if (!(data & 0xfffffffffffff444ull)) { |
373 | reprogram_fixed_counters(pmu, data); | 373 | reprogram_fixed_counters(pmu, data); |
374 | return 0; | 374 | return 0; |
375 | } | 375 | } |
@@ -459,17 +459,17 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) | |||
459 | pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1); | 459 | pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1); |
460 | 460 | ||
461 | if (pmu->version == 1) { | 461 | if (pmu->version == 1) { |
462 | pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1; | 462 | pmu->nr_arch_fixed_counters = 0; |
463 | return; | 463 | } else { |
464 | pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), | ||
465 | X86_PMC_MAX_FIXED); | ||
466 | pmu->counter_bitmask[KVM_PMC_FIXED] = | ||
467 | ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; | ||
464 | } | 468 | } |
465 | 469 | ||
466 | pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), | 470 | pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | |
467 | X86_PMC_MAX_FIXED); | 471 | (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED); |
468 | pmu->counter_bitmask[KVM_PMC_FIXED] = | 472 | pmu->global_ctrl_mask = ~pmu->global_ctrl; |
469 | ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; | ||
470 | pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1) | ||
471 | | (((1ull << pmu->nr_arch_fixed_counters) - 1) | ||
472 | << X86_PMC_IDX_FIXED)); | ||
473 | } | 473 | } |
474 | 474 | ||
475 | void kvm_pmu_init(struct kvm_vcpu *vcpu) | 475 | void kvm_pmu_init(struct kvm_vcpu *vcpu) |
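Two details in the pmu.c hunks are easy to miss: the 0xfffffffffffff444 literal needs the ull suffix so the reserved-bit test happens at 64-bit width, and global_ctrl/global_ctrl_mask are now derived uniformly for both version-1 and version-2+ PMUs. The layout is one enable bit per general-purpose counter starting at bit 0 and one per fixed counter starting at X86_PMC_IDX_FIXED (bit 32). A small self-contained C program reproducing the arithmetic for 4 GP and 3 fixed counters (example counts only):

    #include <stdio.h>
    #include <stdint.h>

    #define X86_PMC_IDX_FIXED 32

    int main(void)
    {
        unsigned nr_gp = 4, nr_fixed = 3;   /* example values */
        uint64_t global_ctrl =
            ((1ULL << nr_gp) - 1) |
            (((1ULL << nr_fixed) - 1) << X86_PMC_IDX_FIXED);
        uint64_t global_ctrl_mask = ~global_ctrl;

        printf("global_ctrl      = %#018llx\n",
               (unsigned long long)global_ctrl);
        printf("global_ctrl_mask = %#018llx\n",
               (unsigned long long)global_ctrl_mask);
        return 0;
    }
    /* Prints 0x000000070000000f and its complement. */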
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 280751c84724..4ff0ab9bc3c8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2210,9 +2210,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
2210 | msr = find_msr_entry(vmx, msr_index); | 2210 | msr = find_msr_entry(vmx, msr_index); |
2211 | if (msr) { | 2211 | if (msr) { |
2212 | msr->data = data; | 2212 | msr->data = data; |
2213 | if (msr - vmx->guest_msrs < vmx->save_nmsrs) | 2213 | if (msr - vmx->guest_msrs < vmx->save_nmsrs) { |
2214 | preempt_disable(); | ||
2214 | kvm_set_shared_msr(msr->index, msr->data, | 2215 | kvm_set_shared_msr(msr->index, msr->data, |
2215 | msr->mask); | 2216 | msr->mask); |
2217 | preempt_enable(); | ||
2218 | } | ||
2216 | break; | 2219 | break; |
2217 | } | 2220 | } |
2218 | ret = kvm_set_msr_common(vcpu, msr_index, data); | 2221 | ret = kvm_set_msr_common(vcpu, msr_index, data); |
@@ -3906,7 +3909,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
3906 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); | 3909 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); |
3907 | 3910 | ||
3908 | vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; | 3911 | vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; |
3912 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
3909 | vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ | 3913 | vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ |
3914 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | ||
3910 | vmx_set_cr4(&vmx->vcpu, 0); | 3915 | vmx_set_cr4(&vmx->vcpu, 0); |
3911 | vmx_set_efer(&vmx->vcpu, 0); | 3916 | vmx_set_efer(&vmx->vcpu, 0); |
3912 | vmx_fpu_activate(&vmx->vcpu); | 3917 | vmx_fpu_activate(&vmx->vcpu); |
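The vmx_set_msr() hunk wraps kvm_set_shared_msr() in preempt_disable()/preempt_enable() because the shared-MSR bookkeeping is per-CPU: if the task were preempted and migrated between reading the per-CPU state and writing the MSR, the update could land on the wrong CPU. The pattern, as a kernel-style sketch rather than the exact vmx.c code:

    /* Per-CPU bookkeeping plus a hardware register write must happen
     * on one CPU, so preemption is held off around the pair. */
    preempt_disable();
    kvm_set_shared_msr(msr->index, msr->data, msr->mask);
    preempt_enable();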
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4044ce0bf7c1..91a5e989abcf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -6336,13 +6336,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, | |||
6336 | if (npages && !old.rmap) { | 6336 | if (npages && !old.rmap) { |
6337 | unsigned long userspace_addr; | 6337 | unsigned long userspace_addr; |
6338 | 6338 | ||
6339 | down_write(¤t->mm->mmap_sem); | 6339 | userspace_addr = vm_mmap(NULL, 0, |
6340 | userspace_addr = do_mmap(NULL, 0, | ||
6341 | npages * PAGE_SIZE, | 6340 | npages * PAGE_SIZE, |
6342 | PROT_READ | PROT_WRITE, | 6341 | PROT_READ | PROT_WRITE, |
6343 | map_flags, | 6342 | map_flags, |
6344 | 0); | 6343 | 0); |
6345 | up_write(¤t->mm->mmap_sem); | ||
6346 | 6344 | ||
6347 | if (IS_ERR((void *)userspace_addr)) | 6345 | if (IS_ERR((void *)userspace_addr)) |
6348 | return PTR_ERR((void *)userspace_addr); | 6346 | return PTR_ERR((void *)userspace_addr); |
@@ -6366,10 +6364,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
6366 | if (!user_alloc && !old.user_alloc && old.rmap && !npages) { | 6364 | if (!user_alloc && !old.user_alloc && old.rmap && !npages) { |
6367 | int ret; | 6365 | int ret; |
6368 | 6366 | ||
6369 | down_write(¤t->mm->mmap_sem); | 6367 | ret = vm_munmap(old.userspace_addr, |
6370 | ret = do_munmap(current->mm, old.userspace_addr, | ||
6371 | old.npages * PAGE_SIZE); | 6368 | old.npages * PAGE_SIZE); |
6372 | up_write(¤t->mm->mmap_sem); | ||
6373 | if (ret < 0) | 6369 | if (ret < 0) |
6374 | printk(KERN_WARNING | 6370 | printk(KERN_WARNING |
6375 | "kvm_vm_ioctl_set_memory_region: " | 6371 | "kvm_vm_ioctl_set_memory_region: " |
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 25feb1ae71c5..b1e6c4b2e8eb 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c | |||
@@ -379,8 +379,8 @@ err_out: | |||
379 | return; | 379 | return; |
380 | } | 380 | } |
381 | 381 | ||
382 | /* Decode moffset16/32/64 */ | 382 | /* Decode moffset16/32/64. Return 0 if failed */ |
383 | static void __get_moffset(struct insn *insn) | 383 | static int __get_moffset(struct insn *insn) |
384 | { | 384 | { |
385 | switch (insn->addr_bytes) { | 385 | switch (insn->addr_bytes) { |
386 | case 2: | 386 | case 2: |
@@ -397,15 +397,19 @@ static void __get_moffset(struct insn *insn) | |||
397 | insn->moffset2.value = get_next(int, insn); | 397 | insn->moffset2.value = get_next(int, insn); |
398 | insn->moffset2.nbytes = 4; | 398 | insn->moffset2.nbytes = 4; |
399 | break; | 399 | break; |
400 | default: /* opnd_bytes must be modified manually */ | ||
401 | goto err_out; | ||
400 | } | 402 | } |
401 | insn->moffset1.got = insn->moffset2.got = 1; | 403 | insn->moffset1.got = insn->moffset2.got = 1; |
402 | 404 | ||
405 | return 1; | ||
406 | |||
403 | err_out: | 407 | err_out: |
404 | return; | 408 | return 0; |
405 | } | 409 | } |
406 | 410 | ||
407 | /* Decode imm v32(Iz) */ | 411 | /* Decode imm v32(Iz). Return 0 if failed */ |
408 | static void __get_immv32(struct insn *insn) | 412 | static int __get_immv32(struct insn *insn) |
409 | { | 413 | { |
410 | switch (insn->opnd_bytes) { | 414 | switch (insn->opnd_bytes) { |
411 | case 2: | 415 | case 2: |
@@ -417,14 +421,18 @@ static void __get_immv32(struct insn *insn) | |||
417 | insn->immediate.value = get_next(int, insn); | 421 | insn->immediate.value = get_next(int, insn); |
418 | insn->immediate.nbytes = 4; | 422 | insn->immediate.nbytes = 4; |
419 | break; | 423 | break; |
424 | default: /* opnd_bytes must be modified manually */ | ||
425 | goto err_out; | ||
420 | } | 426 | } |
421 | 427 | ||
428 | return 1; | ||
429 | |||
422 | err_out: | 430 | err_out: |
423 | return; | 431 | return 0; |
424 | } | 432 | } |
425 | 433 | ||
426 | /* Decode imm v64(Iv/Ov) */ | 434 | /* Decode imm v64(Iv/Ov), Return 0 if failed */ |
427 | static void __get_immv(struct insn *insn) | 435 | static int __get_immv(struct insn *insn) |
428 | { | 436 | { |
429 | switch (insn->opnd_bytes) { | 437 | switch (insn->opnd_bytes) { |
430 | case 2: | 438 | case 2: |
@@ -441,15 +449,18 @@ static void __get_immv(struct insn *insn) | |||
441 | insn->immediate2.value = get_next(int, insn); | 449 | insn->immediate2.value = get_next(int, insn); |
442 | insn->immediate2.nbytes = 4; | 450 | insn->immediate2.nbytes = 4; |
443 | break; | 451 | break; |
452 | default: /* opnd_bytes must be modified manually */ | ||
453 | goto err_out; | ||
444 | } | 454 | } |
445 | insn->immediate1.got = insn->immediate2.got = 1; | 455 | insn->immediate1.got = insn->immediate2.got = 1; |
446 | 456 | ||
457 | return 1; | ||
447 | err_out: | 458 | err_out: |
448 | return; | 459 | return 0; |
449 | } | 460 | } |
450 | 461 | ||
451 | /* Decode ptr16:16/32(Ap) */ | 462 | /* Decode ptr16:16/32(Ap) */ |
452 | static void __get_immptr(struct insn *insn) | 463 | static int __get_immptr(struct insn *insn) |
453 | { | 464 | { |
454 | switch (insn->opnd_bytes) { | 465 | switch (insn->opnd_bytes) { |
455 | case 2: | 466 | case 2: |
@@ -462,14 +473,17 @@ static void __get_immptr(struct insn *insn) | |||
462 | break; | 473 | break; |
463 | case 8: | 474 | case 8: |
464 | /* ptr16:64 is not exist (no segment) */ | 475 | /* ptr16:64 is not exist (no segment) */ |
465 | return; | 476 | return 0; |
477 | default: /* opnd_bytes must be modified manually */ | ||
478 | goto err_out; | ||
466 | } | 479 | } |
467 | insn->immediate2.value = get_next(unsigned short, insn); | 480 | insn->immediate2.value = get_next(unsigned short, insn); |
468 | insn->immediate2.nbytes = 2; | 481 | insn->immediate2.nbytes = 2; |
469 | insn->immediate1.got = insn->immediate2.got = 1; | 482 | insn->immediate1.got = insn->immediate2.got = 1; |
470 | 483 | ||
484 | return 1; | ||
471 | err_out: | 485 | err_out: |
472 | return; | 486 | return 0; |
473 | } | 487 | } |
474 | 488 | ||
475 | /** | 489 | /** |
@@ -489,7 +503,8 @@ void insn_get_immediate(struct insn *insn) | |||
489 | insn_get_displacement(insn); | 503 | insn_get_displacement(insn); |
490 | 504 | ||
491 | if (inat_has_moffset(insn->attr)) { | 505 | if (inat_has_moffset(insn->attr)) { |
492 | __get_moffset(insn); | 506 | if (!__get_moffset(insn)) |
507 | goto err_out; | ||
493 | goto done; | 508 | goto done; |
494 | } | 509 | } |
495 | 510 | ||
@@ -517,16 +532,20 @@ void insn_get_immediate(struct insn *insn) | |||
517 | insn->immediate2.nbytes = 4; | 532 | insn->immediate2.nbytes = 4; |
518 | break; | 533 | break; |
519 | case INAT_IMM_PTR: | 534 | case INAT_IMM_PTR: |
520 | __get_immptr(insn); | 535 | if (!__get_immptr(insn)) |
536 | goto err_out; | ||
521 | break; | 537 | break; |
522 | case INAT_IMM_VWORD32: | 538 | case INAT_IMM_VWORD32: |
523 | __get_immv32(insn); | 539 | if (!__get_immv32(insn)) |
540 | goto err_out; | ||
524 | break; | 541 | break; |
525 | case INAT_IMM_VWORD: | 542 | case INAT_IMM_VWORD: |
526 | __get_immv(insn); | 543 | if (!__get_immv(insn)) |
544 | goto err_out; | ||
527 | break; | 545 | break; |
528 | default: | 546 | default: |
529 | break; | 547 | /* Here, insn must have an immediate, but failed */ |
548 | goto err_out; | ||
530 | } | 549 | } |
531 | if (inat_has_second_immediate(insn->attr)) { | 550 | if (inat_has_second_immediate(insn->attr)) { |
532 | insn->immediate2.value = get_next(char, insn); | 551 | insn->immediate2.value = get_next(char, insn); |
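The insn.c hunks convert __get_moffset()/__get_immv32()/__get_immv()/__get_immptr() from void to int so that an unexpected addr_bytes/opnd_bytes value, which previously fell through the switch silently, is reported back to insn_get_immediate(); the caller then bails out via err_out instead of marking the immediate as decoded. The control-flow pattern, reduced to a self-contained sketch:

    #include <stdio.h>

    /* Return 1 on success, 0 if the operand size is not handled. */
    static int get_field(int opnd_bytes)
    {
        switch (opnd_bytes) {
        case 2: case 4: case 8:
            return 1;       /* ...consume bytes, fill in the field... */
        default:
            return 0;       /* size must be fixed up by the caller */
        }
    }

    int main(void)
    {
        if (!get_field(3))
            fprintf(stderr, "decode failed, insn left unmarked\n");
        return 0;
    }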
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index 97be9cb54483..d6ae30bbd7bb 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <linux/highmem.h> | 7 | #include <linux/highmem.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | 9 | ||
10 | #include <asm/word-at-a-time.h> | ||
11 | |||
10 | /* | 12 | /* |
11 | * best effort, GUP based copy_from_user() that is NMI-safe | 13 | * best effort, GUP based copy_from_user() that is NMI-safe |
12 | */ | 14 | */ |
@@ -41,3 +43,104 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
41 | return len; | 43 | return len; |
42 | } | 44 | } |
43 | EXPORT_SYMBOL_GPL(copy_from_user_nmi); | 45 | EXPORT_SYMBOL_GPL(copy_from_user_nmi); |
46 | |||
47 | static inline unsigned long count_bytes(unsigned long mask) | ||
48 | { | ||
49 | mask = (mask - 1) & ~mask; | ||
50 | mask >>= 7; | ||
51 | return count_masked_bytes(mask); | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Do a strncpy, return length of string without final '\0'. | ||
56 | * 'count' is the user-supplied count (return 'count' if we | ||
57 | * hit it), 'max' is the address space maximum (and we return | ||
58 | * -EFAULT if we hit it). | ||
59 | */ | ||
60 | static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) | ||
61 | { | ||
62 | long res = 0; | ||
63 | |||
64 | /* | ||
65 | * Truncate 'max' to the user-specified limit, so that | ||
66 | * we only have one limit we need to check in the loop | ||
67 | */ | ||
68 | if (max > count) | ||
69 | max = count; | ||
70 | |||
71 | while (max >= sizeof(unsigned long)) { | ||
72 | unsigned long c; | ||
73 | |||
74 | /* Fall back to byte-at-a-time if we get a page fault */ | ||
75 | if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) | ||
76 | break; | ||
77 | /* This can write a few bytes past the NUL character, but that's ok */ | ||
78 | *(unsigned long *)(dst+res) = c; | ||
79 | c = has_zero(c); | ||
80 | if (c) | ||
81 | return res + count_bytes(c); | ||
82 | res += sizeof(unsigned long); | ||
83 | max -= sizeof(unsigned long); | ||
84 | } | ||
85 | |||
86 | while (max) { | ||
87 | char c; | ||
88 | |||
89 | if (unlikely(__get_user(c,src+res))) | ||
90 | return -EFAULT; | ||
91 | dst[res] = c; | ||
92 | if (!c) | ||
93 | return res; | ||
94 | res++; | ||
95 | max--; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * Uhhuh. We hit 'max'. But was that the user-specified maximum | ||
100 | * too? If so, that's ok - we got as much as the user asked for. | ||
101 | */ | ||
102 | if (res >= count) | ||
103 | return res; | ||
104 | |||
105 | /* | ||
106 | * Nope: we hit the address space limit, and we still had more | ||
107 | * characters the caller would have wanted. That's an EFAULT. | ||
108 | */ | ||
109 | return -EFAULT; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
114 | * @dst: Destination address, in kernel space. This buffer must be at | ||
115 | * least @count bytes long. | ||
116 | * @src: Source address, in user space. | ||
117 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
118 | * | ||
119 | * Copies a NUL-terminated string from userspace to kernel space. | ||
120 | * | ||
121 | * On success, returns the length of the string (not including the trailing | ||
122 | * NUL). | ||
123 | * | ||
124 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
125 | * copied). | ||
126 | * | ||
127 | * If @count is smaller than the length of the string, copies @count bytes | ||
128 | * and returns @count. | ||
129 | */ | ||
130 | long | ||
131 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
132 | { | ||
133 | unsigned long max_addr, src_addr; | ||
134 | |||
135 | if (unlikely(count <= 0)) | ||
136 | return 0; | ||
137 | |||
138 | max_addr = current_thread_info()->addr_limit.seg; | ||
139 | src_addr = (unsigned long)src; | ||
140 | if (likely(src_addr < max_addr)) { | ||
141 | unsigned long max = max_addr - src_addr; | ||
142 | return do_strncpy_from_user(dst, src, count, max); | ||
143 | } | ||
144 | return -EFAULT; | ||
145 | } | ||
146 | EXPORT_SYMBOL(strncpy_from_user); | ||
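The new generic strncpy_from_user() copies a word at a time and uses the has_zero()/count_masked_bytes() helpers from <asm/word-at-a-time.h> to locate the terminating NUL inside a word. The underlying trick is the classic byte-wise zero detect: (x - 0x01..01) & ~x & 0x80..80 sets the high bit of each byte that was zero, and the lowest set bit identifies the first zero byte. The kernel's count_bytes() turns that into a byte index without a bit-scan instruction; the self-contained demonstration below uses __builtin_ctzll (a GCC/Clang builtin) instead, and assumes a little-endian host, as on x86:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define ONES  0x0101010101010101ULL
    #define HIGHS 0x8080808080808080ULL

    static uint64_t has_zero(uint64_t x)
    {
        return (x - ONES) & ~x & HIGHS;     /* high bit set per zero byte */
    }

    /* Number of bytes before the first zero byte of a little-endian word. */
    static unsigned bytes_before_nul(uint64_t word)
    {
        uint64_t mask = has_zero(word);
        return mask ? (unsigned)__builtin_ctzll(mask) / 8 : 8;
    }

    int main(void)
    {
        const char s[] = "abc";             /* 3 bytes then the NUL */
        uint64_t w = 0;
        memcpy(&w, s, sizeof(s));           /* little-endian load */
        printf("bytes before NUL: %u\n", bytes_before_nul(w));
        return 0;                           /* prints 3 */
    }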
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index d9b094ca7aaa..ef2a6a5d78e3 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -33,93 +33,6 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon | |||
33 | __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) | 33 | __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Copy a null terminated string from userspace. | ||
37 | */ | ||
38 | |||
39 | #define __do_strncpy_from_user(dst, src, count, res) \ | ||
40 | do { \ | ||
41 | int __d0, __d1, __d2; \ | ||
42 | might_fault(); \ | ||
43 | __asm__ __volatile__( \ | ||
44 | " testl %1,%1\n" \ | ||
45 | " jz 2f\n" \ | ||
46 | "0: lodsb\n" \ | ||
47 | " stosb\n" \ | ||
48 | " testb %%al,%%al\n" \ | ||
49 | " jz 1f\n" \ | ||
50 | " decl %1\n" \ | ||
51 | " jnz 0b\n" \ | ||
52 | "1: subl %1,%0\n" \ | ||
53 | "2:\n" \ | ||
54 | ".section .fixup,\"ax\"\n" \ | ||
55 | "3: movl %5,%0\n" \ | ||
56 | " jmp 2b\n" \ | ||
57 | ".previous\n" \ | ||
58 | _ASM_EXTABLE(0b,3b) \ | ||
59 | : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ | ||
60 | "=&D" (__d2) \ | ||
61 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ | ||
62 | : "memory"); \ | ||
63 | } while (0) | ||
64 | |||
65 | /** | ||
66 | * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. | ||
67 | * @dst: Destination address, in kernel space. This buffer must be at | ||
68 | * least @count bytes long. | ||
69 | * @src: Source address, in user space. | ||
70 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
71 | * | ||
72 | * Copies a NUL-terminated string from userspace to kernel space. | ||
73 | * Caller must check the specified block with access_ok() before calling | ||
74 | * this function. | ||
75 | * | ||
76 | * On success, returns the length of the string (not including the trailing | ||
77 | * NUL). | ||
78 | * | ||
79 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
80 | * copied). | ||
81 | * | ||
82 | * If @count is smaller than the length of the string, copies @count bytes | ||
83 | * and returns @count. | ||
84 | */ | ||
85 | long | ||
86 | __strncpy_from_user(char *dst, const char __user *src, long count) | ||
87 | { | ||
88 | long res; | ||
89 | __do_strncpy_from_user(dst, src, count, res); | ||
90 | return res; | ||
91 | } | ||
92 | EXPORT_SYMBOL(__strncpy_from_user); | ||
93 | |||
94 | /** | ||
95 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
96 | * @dst: Destination address, in kernel space. This buffer must be at | ||
97 | * least @count bytes long. | ||
98 | * @src: Source address, in user space. | ||
99 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
100 | * | ||
101 | * Copies a NUL-terminated string from userspace to kernel space. | ||
102 | * | ||
103 | * On success, returns the length of the string (not including the trailing | ||
104 | * NUL). | ||
105 | * | ||
106 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
107 | * copied). | ||
108 | * | ||
109 | * If @count is smaller than the length of the string, copies @count bytes | ||
110 | * and returns @count. | ||
111 | */ | ||
112 | long | ||
113 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
114 | { | ||
115 | long res = -EFAULT; | ||
116 | if (access_ok(VERIFY_READ, src, 1)) | ||
117 | __do_strncpy_from_user(dst, src, count, res); | ||
118 | return res; | ||
119 | } | ||
120 | EXPORT_SYMBOL(strncpy_from_user); | ||
121 | |||
122 | /* | ||
123 | * Zero Userspace | 36 | * Zero Userspace |
124 | */ | 37 | */ |
125 | 38 | ||
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index b7c2849ffb66..0d0326f388c0 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c | |||
@@ -9,55 +9,6 @@ | |||
9 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Copy a null terminated string from userspace. | ||
13 | */ | ||
14 | |||
15 | #define __do_strncpy_from_user(dst,src,count,res) \ | ||
16 | do { \ | ||
17 | long __d0, __d1, __d2; \ | ||
18 | might_fault(); \ | ||
19 | __asm__ __volatile__( \ | ||
20 | " testq %1,%1\n" \ | ||
21 | " jz 2f\n" \ | ||
22 | "0: lodsb\n" \ | ||
23 | " stosb\n" \ | ||
24 | " testb %%al,%%al\n" \ | ||
25 | " jz 1f\n" \ | ||
26 | " decq %1\n" \ | ||
27 | " jnz 0b\n" \ | ||
28 | "1: subq %1,%0\n" \ | ||
29 | "2:\n" \ | ||
30 | ".section .fixup,\"ax\"\n" \ | ||
31 | "3: movq %5,%0\n" \ | ||
32 | " jmp 2b\n" \ | ||
33 | ".previous\n" \ | ||
34 | _ASM_EXTABLE(0b,3b) \ | ||
35 | : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ | ||
36 | "=&D" (__d2) \ | ||
37 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ | ||
38 | : "memory"); \ | ||
39 | } while (0) | ||
40 | |||
41 | long | ||
42 | __strncpy_from_user(char *dst, const char __user *src, long count) | ||
43 | { | ||
44 | long res; | ||
45 | __do_strncpy_from_user(dst, src, count, res); | ||
46 | return res; | ||
47 | } | ||
48 | EXPORT_SYMBOL(__strncpy_from_user); | ||
49 | |||
50 | long | ||
51 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
52 | { | ||
53 | long res = -EFAULT; | ||
54 | if (access_ok(VERIFY_READ, src, 1)) | ||
55 | return __strncpy_from_user(dst, src, count); | ||
56 | return res; | ||
57 | } | ||
58 | EXPORT_SYMBOL(strncpy_from_user); | ||
59 | |||
60 | /* | ||
61 | * Zero Userspace | 12 | * Zero Userspace |
62 | */ | 13 | */ |
63 | 14 | ||
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index 66870223f8c5..877b9a1b2152 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S | |||
@@ -18,17 +18,17 @@ | |||
18 | * r9d : hlen = skb->len - skb->data_len | 18 | * r9d : hlen = skb->len - skb->data_len |
19 | */ | 19 | */ |
20 | #define SKBDATA %r8 | 20 | #define SKBDATA %r8 |
21 | 21 | #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */ | |
22 | sk_load_word_ind: | ||
23 | .globl sk_load_word_ind | ||
24 | |||
25 | add %ebx,%esi /* offset += X */ | ||
26 | # test %esi,%esi /* if (offset < 0) goto bpf_error; */ | ||
27 | js bpf_error | ||
28 | 22 | ||
29 | sk_load_word: | 23 | sk_load_word: |
30 | .globl sk_load_word | 24 | .globl sk_load_word |
31 | 25 | ||
26 | test %esi,%esi | ||
27 | js bpf_slow_path_word_neg | ||
28 | |||
29 | sk_load_word_positive_offset: | ||
30 | .globl sk_load_word_positive_offset | ||
31 | |||
32 | mov %r9d,%eax # hlen | 32 | mov %r9d,%eax # hlen |
33 | sub %esi,%eax # hlen - offset | 33 | sub %esi,%eax # hlen - offset |
34 | cmp $3,%eax | 34 | cmp $3,%eax |
@@ -37,16 +37,15 @@ sk_load_word: | |||
37 | bswap %eax /* ntohl() */ | 37 | bswap %eax /* ntohl() */ |
38 | ret | 38 | ret |
39 | 39 | ||
40 | |||
41 | sk_load_half_ind: | ||
42 | .globl sk_load_half_ind | ||
43 | |||
44 | add %ebx,%esi /* offset += X */ | ||
45 | js bpf_error | ||
46 | |||
47 | sk_load_half: | 40 | sk_load_half: |
48 | .globl sk_load_half | 41 | .globl sk_load_half |
49 | 42 | ||
43 | test %esi,%esi | ||
44 | js bpf_slow_path_half_neg | ||
45 | |||
46 | sk_load_half_positive_offset: | ||
47 | .globl sk_load_half_positive_offset | ||
48 | |||
50 | mov %r9d,%eax | 49 | mov %r9d,%eax |
51 | sub %esi,%eax # hlen - offset | 50 | sub %esi,%eax # hlen - offset |
52 | cmp $1,%eax | 51 | cmp $1,%eax |
@@ -55,14 +54,15 @@ sk_load_half: | |||
55 | rol $8,%ax # ntohs() | 54 | rol $8,%ax # ntohs() |
56 | ret | 55 | ret |
57 | 56 | ||
58 | sk_load_byte_ind: | ||
59 | .globl sk_load_byte_ind | ||
60 | add %ebx,%esi /* offset += X */ | ||
61 | js bpf_error | ||
62 | |||
63 | sk_load_byte: | 57 | sk_load_byte: |
64 | .globl sk_load_byte | 58 | .globl sk_load_byte |
65 | 59 | ||
60 | test %esi,%esi | ||
61 | js bpf_slow_path_byte_neg | ||
62 | |||
63 | sk_load_byte_positive_offset: | ||
64 | .globl sk_load_byte_positive_offset | ||
65 | |||
66 | cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ | 66 | cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ |
67 | jle bpf_slow_path_byte | 67 | jle bpf_slow_path_byte |
68 | movzbl (SKBDATA,%rsi),%eax | 68 | movzbl (SKBDATA,%rsi),%eax |
@@ -73,25 +73,21 @@ sk_load_byte: | |||
73 | * | 73 | * |
74 | * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf) | 74 | * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf) |
75 | * Must preserve A accumulator (%eax) | 75 | * Must preserve A accumulator (%eax) |
76 | * Inputs : %esi is the offset value, already known positive | 76 | * Inputs : %esi is the offset value |
77 | */ | 77 | */ |
78 | ENTRY(sk_load_byte_msh) | 78 | sk_load_byte_msh: |
79 | CFI_STARTPROC | 79 | .globl sk_load_byte_msh |
80 | test %esi,%esi | ||
81 | js bpf_slow_path_byte_msh_neg | ||
82 | |||
83 | sk_load_byte_msh_positive_offset: | ||
84 | .globl sk_load_byte_msh_positive_offset | ||
80 | cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */ | 85 | cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */ |
81 | jle bpf_slow_path_byte_msh | 86 | jle bpf_slow_path_byte_msh |
82 | movzbl (SKBDATA,%rsi),%ebx | 87 | movzbl (SKBDATA,%rsi),%ebx |
83 | and $15,%bl | 88 | and $15,%bl |
84 | shl $2,%bl | 89 | shl $2,%bl |
85 | ret | 90 | ret |
86 | CFI_ENDPROC | ||
87 | ENDPROC(sk_load_byte_msh) | ||
88 | |||
89 | bpf_error: | ||
90 | # force a return 0 from jit handler | ||
91 | xor %eax,%eax | ||
92 | mov -8(%rbp),%rbx | ||
93 | leaveq | ||
94 | ret | ||
95 | 91 | ||
96 | /* rsi contains offset and can be scratched */ | 92 | /* rsi contains offset and can be scratched */ |
97 | #define bpf_slow_path_common(LEN) \ | 93 | #define bpf_slow_path_common(LEN) \ |
@@ -138,3 +134,67 @@ bpf_slow_path_byte_msh: | |||
138 | shl $2,%al | 134 | shl $2,%al |
139 | xchg %eax,%ebx | 135 | xchg %eax,%ebx |
140 | ret | 136 | ret |
137 | |||
138 | #define sk_negative_common(SIZE) \ | ||
139 | push %rdi; /* save skb */ \ | ||
140 | push %r9; \ | ||
141 | push SKBDATA; \ | ||
142 | /* rsi already has offset */ \ | ||
143 | mov $SIZE,%ecx; /* size */ \ | ||
144 | call bpf_internal_load_pointer_neg_helper; \ | ||
145 | test %rax,%rax; \ | ||
146 | pop SKBDATA; \ | ||
147 | pop %r9; \ | ||
148 | pop %rdi; \ | ||
149 | jz bpf_error | ||
150 | |||
151 | |||
152 | bpf_slow_path_word_neg: | ||
153 | cmp SKF_MAX_NEG_OFF, %esi /* test range */ | ||
154 | jl bpf_error /* offset lower -> error */ | ||
155 | sk_load_word_negative_offset: | ||
156 | .globl sk_load_word_negative_offset | ||
157 | sk_negative_common(4) | ||
158 | mov (%rax), %eax | ||
159 | bswap %eax | ||
160 | ret | ||
161 | |||
162 | bpf_slow_path_half_neg: | ||
163 | cmp SKF_MAX_NEG_OFF, %esi | ||
164 | jl bpf_error | ||
165 | sk_load_half_negative_offset: | ||
166 | .globl sk_load_half_negative_offset | ||
167 | sk_negative_common(2) | ||
168 | mov (%rax),%ax | ||
169 | rol $8,%ax | ||
170 | movzwl %ax,%eax | ||
171 | ret | ||
172 | |||
173 | bpf_slow_path_byte_neg: | ||
174 | cmp SKF_MAX_NEG_OFF, %esi | ||
175 | jl bpf_error | ||
176 | sk_load_byte_negative_offset: | ||
177 | .globl sk_load_byte_negative_offset | ||
178 | sk_negative_common(1) | ||
179 | movzbl (%rax), %eax | ||
180 | ret | ||
181 | |||
182 | bpf_slow_path_byte_msh_neg: | ||
183 | cmp SKF_MAX_NEG_OFF, %esi | ||
184 | jl bpf_error | ||
185 | sk_load_byte_msh_negative_offset: | ||
186 | .globl sk_load_byte_msh_negative_offset | ||
187 | xchg %eax,%ebx /* dont lose A , X is about to be scratched */ | ||
188 | sk_negative_common(1) | ||
189 | movzbl (%rax),%eax | ||
190 | and $15,%al | ||
191 | shl $2,%al | ||
192 | xchg %eax,%ebx | ||
193 | ret | ||
194 | |||
195 | bpf_error: | ||
196 | # force a return 0 from jit handler | ||
197 | xor %eax,%eax | ||
198 | mov -8(%rbp),%rbx | ||
199 | leaveq | ||
200 | ret | ||
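The bpf_jit.S rework gives every load helper two entry points: a checked one (sk_load_*) that tests the sign of the offset in %esi, and a *_positive_offset fast entry. New *_negative_offset slow paths bound the offset against SKF_MAX_NEG_OFF (-0x200000, i.e. SKF_LL_OFF) and fall back to bpf_internal_load_pointer_neg_helper(). The old *_ind stubs disappear because the companion bpf_jit_comp.c change below folds the X-register addition into a lea before the call. Ignoring the positive-offset slow path taken for short packets, the runtime dispatch reduces to this C model:

    /* Model of the entry-point selection done inside the helpers. */
    #define SKF_LL_OFF (-0x200000)

    enum path { FAST_POSITIVE, SLOW_NEGATIVE, ERROR };

    static enum path classify(int offset)
    {
        if (offset >= 0)
            return FAST_POSITIVE;   /* direct load from skb->data */
        if (offset >= SKF_LL_OFF)
            return SLOW_NEGATIVE;   /* helper-based ancillary load */
        return ERROR;               /* out of range: filter returns 0 */
    }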
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5671752f8d9c..0597f95b6da6 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -30,7 +30,10 @@ int bpf_jit_enable __read_mostly; | |||
30 | * assembly code in arch/x86/net/bpf_jit.S | 30 | * assembly code in arch/x86/net/bpf_jit.S |
31 | */ | 31 | */ |
32 | extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; | 32 | extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; |
33 | extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[]; | 33 | extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[]; |
34 | extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[]; | ||
35 | extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; | ||
36 | extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[]; | ||
34 | 37 | ||
35 | static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) | 38 | static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) |
36 | { | 39 | { |
@@ -117,6 +120,8 @@ static inline void bpf_flush_icache(void *start, void *end) | |||
117 | set_fs(old_fs); | 120 | set_fs(old_fs); |
118 | } | 121 | } |
119 | 122 | ||
123 | #define CHOOSE_LOAD_FUNC(K, func) \ | ||
124 | ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) | ||
120 | 125 | ||
121 | void bpf_jit_compile(struct sk_filter *fp) | 126 | void bpf_jit_compile(struct sk_filter *fp) |
122 | { | 127 | { |
@@ -289,7 +294,7 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
289 | EMIT2(0x24, K & 0xFF); /* and imm8,%al */ | 294 | EMIT2(0x24, K & 0xFF); /* and imm8,%al */ |
290 | } else if (K >= 0xFFFF0000) { | 295 | } else if (K >= 0xFFFF0000) { |
291 | EMIT2(0x66, 0x25); /* and imm16,%ax */ | 296 | EMIT2(0x66, 0x25); /* and imm16,%ax */ |
292 | EMIT2(K, 2); | 297 | EMIT(K, 2); |
293 | } else { | 298 | } else { |
294 | EMIT1_off32(0x25, K); /* and imm32,%eax */ | 299 | EMIT1_off32(0x25, K); /* and imm32,%eax */ |
295 | } | 300 | } |
@@ -473,44 +478,46 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
473 | #endif | 478 | #endif |
474 | break; | 479 | break; |
475 | case BPF_S_LD_W_ABS: | 480 | case BPF_S_LD_W_ABS: |
476 | func = sk_load_word; | 481 | func = CHOOSE_LOAD_FUNC(K, sk_load_word); |
477 | common_load: seen |= SEEN_DATAREF; | 482 | common_load: seen |= SEEN_DATAREF; |
478 | if ((int)K < 0) { | ||
479 | /* Abort the JIT because __load_pointer() is needed. */ | ||
480 | goto out; | ||
481 | } | ||
482 | t_offset = func - (image + addrs[i]); | 483 | t_offset = func - (image + addrs[i]); |
483 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ | 484 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ |
484 | EMIT1_off32(0xe8, t_offset); /* call */ | 485 | EMIT1_off32(0xe8, t_offset); /* call */ |
485 | break; | 486 | break; |
486 | case BPF_S_LD_H_ABS: | 487 | case BPF_S_LD_H_ABS: |
487 | func = sk_load_half; | 488 | func = CHOOSE_LOAD_FUNC(K, sk_load_half); |
488 | goto common_load; | 489 | goto common_load; |
489 | case BPF_S_LD_B_ABS: | 490 | case BPF_S_LD_B_ABS: |
490 | func = sk_load_byte; | 491 | func = CHOOSE_LOAD_FUNC(K, sk_load_byte); |
491 | goto common_load; | 492 | goto common_load; |
492 | case BPF_S_LDX_B_MSH: | 493 | case BPF_S_LDX_B_MSH: |
493 | if ((int)K < 0) { | 494 | func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); |
494 | /* Abort the JIT because __load_pointer() is needed. */ | ||
495 | goto out; | ||
496 | } | ||
497 | seen |= SEEN_DATAREF | SEEN_XREG; | 495 | seen |= SEEN_DATAREF | SEEN_XREG; |
498 | t_offset = sk_load_byte_msh - (image + addrs[i]); | 496 | t_offset = func - (image + addrs[i]); |
499 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ | 497 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ |
500 | EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ | 498 | EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ |
501 | break; | 499 | break; |
502 | case BPF_S_LD_W_IND: | 500 | case BPF_S_LD_W_IND: |
503 | func = sk_load_word_ind; | 501 | func = sk_load_word; |
504 | common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; | 502 | common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; |
505 | t_offset = func - (image + addrs[i]); | 503 | t_offset = func - (image + addrs[i]); |
506 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ | 504 | if (K) { |
505 | if (is_imm8(K)) { | ||
506 | EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */ | ||
507 | } else { | ||
508 | EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */ | ||
509 | EMIT(K, 4); | ||
510 | } | ||
511 | } else { | ||
512 | EMIT2(0x89,0xde); /* mov %ebx,%esi */ | ||
513 | } | ||
507 | EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ | 514 | EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ |
508 | break; | 515 | break; |
509 | case BPF_S_LD_H_IND: | 516 | case BPF_S_LD_H_IND: |
510 | func = sk_load_half_ind; | 517 | func = sk_load_half; |
511 | goto common_load_ind; | 518 | goto common_load_ind; |
512 | case BPF_S_LD_B_IND: | 519 | case BPF_S_LD_B_IND: |
513 | func = sk_load_byte_ind; | 520 | func = sk_load_byte; |
514 | goto common_load_ind; | 521 | goto common_load_ind; |
515 | case BPF_S_JMP_JA: | 522 | case BPF_S_JMP_JA: |
516 | t_offset = addrs[i + K] - addrs[i]; | 523 | t_offset = addrs[i + K] - addrs[i]; |
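On the JIT side, CHOOSE_LOAD_FUNC picks one of three helper entry points from the constant K at JIT-compile time: positive offsets get the *_positive_offset fast entry, negative offsets within the ancillary range get the *_negative_offset entry, and anything below SKF_LL_OFF keeps the generic checked entry, which fails at runtime. The same hunk also fixes an operand-emission slip for "and imm16,%ax": EMIT2(K, 2) emitted two single bytes (K & 0xff and a literal 2) where EMIT(K, 2) emits K as one 16-bit value. A runnable model of the selection macro:

    #include <stdio.h>

    #define SKF_LL_OFF (-0x200000)

    static const char *load_word_positive_offset = "positive";
    static const char *load_word_negative_offset = "negative";
    static const char *load_word                 = "generic (checked)";

    #define CHOOSE_LOAD_FUNC(K, func) \
        ((int)(K) < 0 ? ((int)(K) >= SKF_LL_OFF ? func##_negative_offset \
                                                : func) \
                      : func##_positive_offset)

    int main(void)
    {
        printf("%s\n", CHOOSE_LOAD_FUNC(14, load_word));        /* positive */
        printf("%s\n", CHOOSE_LOAD_FUNC(-0x1000, load_word));   /* negative */
        printf("%s\n", CHOOSE_LOAD_FUNC(-0x300000, load_word)); /* generic  */
        return 0;
    }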
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c index 66d377e334f7..646e3b5b4bb6 100644 --- a/arch/x86/platform/geode/net5501.c +++ b/arch/x86/platform/geode/net5501.c | |||
@@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = { | |||
63 | .name = "net5501:1", | 63 | .name = "net5501:1", |
64 | .gpio = 6, | 64 | .gpio = 6, |
65 | .default_trigger = "default-on", | 65 | .default_trigger = "default-on", |
66 | .active_low = 1, | 66 | .active_low = 0, |
67 | }, | 67 | }, |
68 | }; | 68 | }; |
69 | 69 | ||
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e0a37233c0af..e31bcd8f2eee 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -805,7 +805,7 @@ void intel_scu_devices_create(void) | |||
805 | } else | 805 | } else |
806 | i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1); | 806 | i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1); |
807 | } | 807 | } |
808 | intel_scu_notifier_post(SCU_AVAILABLE, 0L); | 808 | intel_scu_notifier_post(SCU_AVAILABLE, NULL); |
809 | } | 809 | } |
810 | EXPORT_SYMBOL_GPL(intel_scu_devices_create); | 810 | EXPORT_SYMBOL_GPL(intel_scu_devices_create); |
811 | 811 | ||
@@ -814,7 +814,7 @@ void intel_scu_devices_destroy(void) | |||
814 | { | 814 | { |
815 | int i; | 815 | int i; |
816 | 816 | ||
817 | intel_scu_notifier_post(SCU_DOWN, 0L); | 817 | intel_scu_notifier_post(SCU_DOWN, NULL); |
818 | 818 | ||
819 | for (i = 0; i < ipc_next_dev; i++) | 819 | for (i = 0; i < ipc_next_dev; i++) |
820 | platform_device_del(ipc_devs[i]); | 820 | platform_device_del(ipc_devs[i]); |
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 47936830968c..218cdb16163c 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c | |||
@@ -225,13 +225,13 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
225 | fix_processor_context(); | 225 | fix_processor_context(); |
226 | 226 | ||
227 | do_fpu_end(); | 227 | do_fpu_end(); |
228 | x86_platform.restore_sched_clock_state(); | ||
228 | mtrr_bp_restore(); | 229 | mtrr_bp_restore(); |
229 | } | 230 | } |
230 | 231 | ||
231 | /* Needed by apm.c */ | 232 | /* Needed by apm.c */ |
232 | void restore_processor_state(void) | 233 | void restore_processor_state(void) |
233 | { | 234 | { |
234 | x86_platform.restore_sched_clock_state(); | ||
235 | __restore_processor_state(&saved_context); | 235 | __restore_processor_state(&saved_context); |
236 | } | 236 | } |
237 | #ifdef CONFIG_X86_32 | 237 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h new file mode 100644 index 000000000000..7d01b8c56c00 --- /dev/null +++ b/arch/x86/um/asm/barrier.h | |||
@@ -0,0 +1,75 @@ | |||
1 | #ifndef _ASM_UM_BARRIER_H_ | ||
2 | #define _ASM_UM_BARRIER_H_ | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | #include <asm/nops.h> | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/irqflags.h> | ||
12 | |||
13 | /* | ||
14 | * Force strict CPU ordering. | ||
15 | * And yes, this is required on UP too when we're talking | ||
16 | * to devices. | ||
17 | */ | ||
18 | #ifdef CONFIG_X86_32 | ||
19 | |||
20 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
21 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
22 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
23 | |||
24 | #else /* CONFIG_X86_32 */ | ||
25 | |||
26 | #define mb() asm volatile("mfence" : : : "memory") | ||
27 | #define rmb() asm volatile("lfence" : : : "memory") | ||
28 | #define wmb() asm volatile("sfence" : : : "memory") | ||
29 | |||
30 | #endif /* CONFIG_X86_32 */ | ||
31 | |||
32 | #define read_barrier_depends() do { } while (0) | ||
33 | |||
34 | #ifdef CONFIG_SMP | ||
35 | |||
36 | #define smp_mb() mb() | ||
37 | #ifdef CONFIG_X86_PPRO_FENCE | ||
38 | #define smp_rmb() rmb() | ||
39 | #else /* CONFIG_X86_PPRO_FENCE */ | ||
40 | #define smp_rmb() barrier() | ||
41 | #endif /* CONFIG_X86_PPRO_FENCE */ | ||
42 | |||
43 | #ifdef CONFIG_X86_OOSTORE | ||
44 | #define smp_wmb() wmb() | ||
45 | #else /* CONFIG_X86_OOSTORE */ | ||
46 | #define smp_wmb() barrier() | ||
47 | #endif /* CONFIG_X86_OOSTORE */ | ||
48 | |||
49 | #define smp_read_barrier_depends() read_barrier_depends() | ||
50 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | ||
51 | |||
52 | #else /* CONFIG_SMP */ | ||
53 | |||
54 | #define smp_mb() barrier() | ||
55 | #define smp_rmb() barrier() | ||
56 | #define smp_wmb() barrier() | ||
57 | #define smp_read_barrier_depends() do { } while (0) | ||
58 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
59 | |||
60 | #endif /* CONFIG_SMP */ | ||
61 | |||
62 | /* | ||
63 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
64 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | ||
65 | * code region. | ||
66 | * | ||
67 | * (Could use an alternative three way for this if there was one.) | ||
68 | */ | ||
69 | static inline void rdtsc_barrier(void) | ||
70 | { | ||
71 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
72 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
73 | } | ||
74 | |||
75 | #endif | ||
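The new um barrier.h carries over only the memory-ordering primitives from the old shared system.h (removed below). Typical use pairs a write barrier on the producer side with a read barrier on the consumer side so a "data then flag" publication is observed in order; a hedged kernel-style sketch using the macros defined above (shared_data, data_ready, compute and consume are illustrative names, not anything from this header):

    /* Producer */
    shared_data = compute();
    smp_wmb();              /* make the data visible before the flag */
    data_ready = 1;

    /* Consumer */
    if (data_ready) {
        smp_rmb();          /* order the flag read before the data read */
        consume(shared_data);
    }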
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h deleted file mode 100644 index a459fd9b7598..000000000000 --- a/arch/x86/um/asm/system.h +++ /dev/null | |||
@@ -1,135 +0,0 @@ | |||
1 | #ifndef _ASM_X86_SYSTEM_H_ | ||
2 | #define _ASM_X86_SYSTEM_H_ | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | #include <asm/nops.h> | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/irqflags.h> | ||
12 | |||
13 | /* entries in ARCH_DLINFO: */ | ||
14 | #ifdef CONFIG_IA32_EMULATION | ||
15 | # define AT_VECTOR_SIZE_ARCH 2 | ||
16 | #else | ||
17 | # define AT_VECTOR_SIZE_ARCH 1 | ||
18 | #endif | ||
19 | |||
20 | extern unsigned long arch_align_stack(unsigned long sp); | ||
21 | |||
22 | void default_idle(void); | ||
23 | |||
24 | /* | ||
25 | * Force strict CPU ordering. | ||
26 | * And yes, this is required on UP too when we're talking | ||
27 | * to devices. | ||
28 | */ | ||
29 | #ifdef CONFIG_X86_32 | ||
30 | /* | ||
31 | * Some non-Intel clones support out of order store. wmb() ceases to be a | ||
32 | * nop for these. | ||
33 | */ | ||
34 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
35 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
36 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
37 | #else | ||
38 | #define mb() asm volatile("mfence":::"memory") | ||
39 | #define rmb() asm volatile("lfence":::"memory") | ||
40 | #define wmb() asm volatile("sfence" ::: "memory") | ||
41 | #endif | ||
42 | |||
43 | /** | ||
44 | * read_barrier_depends - Flush all pending reads that subsequents reads | ||
45 | * depend on. | ||
46 | * | ||
47 | * No data-dependent reads from memory-like regions are ever reordered | ||
48 | * over this barrier. All reads preceding this primitive are guaranteed | ||
49 | * to access memory (but not necessarily other CPUs' caches) before any | ||
50 | * reads following this primitive that depend on the data return by | ||
51 | * any of the preceding reads. This primitive is much lighter weight than | ||
52 | * rmb() on most CPUs, and is never heavier weight than is | ||
53 | * rmb(). | ||
54 | * | ||
55 | * These ordering constraints are respected by both the local CPU | ||
56 | * and the compiler. | ||
57 | * | ||
58 | * Ordering is not guaranteed by anything other than these primitives, | ||
59 | * not even by data dependencies. See the documentation for | ||
60 | * memory_barrier() for examples and URLs to more information. | ||
61 | * | ||
62 | * For example, the following code would force ordering (the initial | ||
63 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
64 | * | ||
65 | * <programlisting> | ||
66 | * CPU 0 CPU 1 | ||
67 | * | ||
68 | * b = 2; | ||
69 | * memory_barrier(); | ||
70 | * p = &b; q = p; | ||
71 | * read_barrier_depends(); | ||
72 | * d = *q; | ||
73 | * </programlisting> | ||
74 | * | ||
75 | * because the read of "*q" depends on the read of "p" and these | ||
76 | * two reads are separated by a read_barrier_depends(). However, | ||
77 | * the following code, with the same initial values for "a" and "b": | ||
78 | * | ||
79 | * <programlisting> | ||
80 | * CPU 0 CPU 1 | ||
81 | * | ||
82 | * a = 2; | ||
83 | * memory_barrier(); | ||
84 | * b = 3; y = b; | ||
85 | * read_barrier_depends(); | ||
86 | * x = a; | ||
87 | * </programlisting> | ||
88 | * | ||
89 | * does not enforce ordering, since there is no data dependency between | ||
90 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
91 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
92 | * in cases like this where there are no data dependencies. | ||
93 | **/ | ||
94 | |||
95 | #define read_barrier_depends() do { } while (0) | ||
96 | |||
97 | #ifdef CONFIG_SMP | ||
98 | #define smp_mb() mb() | ||
99 | #ifdef CONFIG_X86_PPRO_FENCE | ||
100 | # define smp_rmb() rmb() | ||
101 | #else | ||
102 | # define smp_rmb() barrier() | ||
103 | #endif | ||
104 | #ifdef CONFIG_X86_OOSTORE | ||
105 | # define smp_wmb() wmb() | ||
106 | #else | ||
107 | # define smp_wmb() barrier() | ||
108 | #endif | ||
109 | #define smp_read_barrier_depends() read_barrier_depends() | ||
110 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | ||
111 | #else | ||
112 | #define smp_mb() barrier() | ||
113 | #define smp_rmb() barrier() | ||
114 | #define smp_wmb() barrier() | ||
115 | #define smp_read_barrier_depends() do { } while (0) | ||
116 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
117 | #endif | ||
118 | |||
119 | /* | ||
120 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
121 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | ||
122 | * code region. | ||
123 | * | ||
124 | * (Could use an alternative three way for this if there was one.) | ||
125 | */ | ||
126 | static inline void rdtsc_barrier(void) | ||
127 | { | ||
128 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
129 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
130 | } | ||
131 | |||
132 | extern void *_switch_to(void *prev, void *next, void *last); | ||
133 | #define switch_to(prev, next, last) prev = _switch_to(prev, next, last) | ||
134 | |||
135 | #endif | ||
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index b132ade26f77..a8f8844b8d32 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -261,7 +261,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
261 | 261 | ||
262 | static bool __init xen_check_mwait(void) | 262 | static bool __init xen_check_mwait(void) |
263 | { | 263 | { |
264 | #ifdef CONFIG_ACPI | 264 | #if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \ |
265 | !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) | ||
265 | struct xen_platform_op op = { | 266 | struct xen_platform_op op = { |
266 | .cmd = XENPF_set_processor_pminfo, | 267 | .cmd = XENPF_set_processor_pminfo, |
267 | .u.set_pminfo.id = -1, | 268 | .u.set_pminfo.id = -1, |
@@ -349,7 +350,6 @@ static void __init xen_init_cpuid_mask(void) | |||
349 | /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ | 350 | /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ |
350 | if ((cx & xsave_mask) != xsave_mask) | 351 | if ((cx & xsave_mask) != xsave_mask) |
351 | cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ | 352 | cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ |
352 | |||
353 | if (xen_check_mwait()) | 353 | if (xen_check_mwait()) |
354 | cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); | 354 | cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); |
355 | } | 355 | } |
@@ -967,7 +967,7 @@ void xen_setup_shared_info(void) | |||
967 | xen_setup_mfn_list_list(); | 967 | xen_setup_mfn_list_list(); |
968 | } | 968 | } |
969 | 969 | ||
970 | /* This is called once we have the cpu_possible_map */ | 970 | /* This is called once we have the cpu_possible_mask */ |
971 | void xen_setup_vcpu_info_placement(void) | 971 | void xen_setup_vcpu_info_placement(void) |
972 | { | 972 | { |
973 | int cpu; | 973 | int cpu; |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 988828b479ed..b8e279479a6b 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1859,6 +1859,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1859 | #endif /* CONFIG_X86_64 */ | 1859 | #endif /* CONFIG_X86_64 */ |
1860 | 1860 | ||
1861 | static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; | 1861 | static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; |
1862 | static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss; | ||
1862 | 1863 | ||
1863 | static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | 1864 | static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) |
1864 | { | 1865 | { |
@@ -1899,7 +1900,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
1899 | * We just don't map the IO APIC - all access is via | 1900 | * We just don't map the IO APIC - all access is via |
1900 | * hypercalls. Keep the address in the pte for reference. | 1901 | * hypercalls. Keep the address in the pte for reference. |
1901 | */ | 1902 | */ |
1902 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); | 1903 | pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL); |
1903 | break; | 1904 | break; |
1904 | #endif | 1905 | #endif |
1905 | 1906 | ||
@@ -2064,6 +2065,7 @@ void __init xen_init_mmu_ops(void) | |||
2064 | pv_mmu_ops = xen_mmu_ops; | 2065 | pv_mmu_ops = xen_mmu_ops; |
2065 | 2066 | ||
2066 | memset(dummy_mapping, 0xff, PAGE_SIZE); | 2067 | memset(dummy_mapping, 0xff, PAGE_SIZE); |
2068 | memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE); | ||
2067 | } | 2069 | } |
2068 | 2070 | ||
2069 | /* Protected by xen_reservation_lock. */ | 2071 | /* Protected by xen_reservation_lock. */ |
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index b480d4207a4c..967633ad98c4 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c | |||
@@ -12,8 +12,8 @@ int xen_swiotlb __read_mostly; | |||
12 | 12 | ||
13 | static struct dma_map_ops xen_swiotlb_dma_ops = { | 13 | static struct dma_map_ops xen_swiotlb_dma_ops = { |
14 | .mapping_error = xen_swiotlb_dma_mapping_error, | 14 | .mapping_error = xen_swiotlb_dma_mapping_error, |
15 | .alloc_coherent = xen_swiotlb_alloc_coherent, | 15 | .alloc = xen_swiotlb_alloc_coherent, |
16 | .free_coherent = xen_swiotlb_free_coherent, | 16 | .free = xen_swiotlb_free_coherent, |
17 | .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, | 17 | .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, |
18 | .sync_single_for_device = xen_swiotlb_sync_single_for_device, | 18 | .sync_single_for_device = xen_swiotlb_sync_single_for_device, |
19 | .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, | 19 | .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 02900e8ce26c..0503c0c493a9 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -59,7 +59,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) | |||
59 | 59 | ||
60 | static void __cpuinit cpu_bringup(void) | 60 | static void __cpuinit cpu_bringup(void) |
61 | { | 61 | { |
62 | int cpu = smp_processor_id(); | 62 | int cpu; |
63 | 63 | ||
64 | cpu_init(); | 64 | cpu_init(); |
65 | touch_softlockup_watchdog(); | 65 | touch_softlockup_watchdog(); |
@@ -178,6 +178,7 @@ static void __init xen_fill_possible_map(void) | |||
178 | static void __init xen_filter_cpu_maps(void) | 178 | static void __init xen_filter_cpu_maps(void) |
179 | { | 179 | { |
180 | int i, rc; | 180 | int i, rc; |
181 | unsigned int subtract = 0; | ||
181 | 182 | ||
182 | if (!xen_initial_domain()) | 183 | if (!xen_initial_domain()) |
183 | return; | 184 | return; |
@@ -192,8 +193,22 @@ static void __init xen_filter_cpu_maps(void) | |||
192 | } else { | 193 | } else { |
193 | set_cpu_possible(i, false); | 194 | set_cpu_possible(i, false); |
194 | set_cpu_present(i, false); | 195 | set_cpu_present(i, false); |
196 | subtract++; | ||
195 | } | 197 | } |
196 | } | 198 | } |
199 | #ifdef CONFIG_HOTPLUG_CPU | ||
200 | /* This is akin to using 'nr_cpus' on the Linux command line. | ||
201 | * Which is OK as when we use 'dom0_max_vcpus=X' we can only | ||
202 | * have up to X, while nr_cpu_ids is greater than X. This | ||
203 | * normally is not a problem, except when CPU hotplugging | ||
204 | * is involved and then there might be more than X CPUs | ||
205 | * in the guest - which will not work as there is no | ||
206 | * hypercall to expand the max number of VCPUs an already | ||
207 | * running guest has. So cap it up to X. */ | ||
208 | if (subtract) | ||
209 | nr_cpu_ids = nr_cpu_ids - subtract; | ||
210 | #endif | ||
211 | |||
197 | } | 212 | } |
198 | 213 | ||
199 | static void __init xen_smp_prepare_boot_cpu(void) | 214 | static void __init xen_smp_prepare_boot_cpu(void) |
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 79d7362ad6d1..3e45aa000718 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S | |||
@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct) | |||
96 | 96 | ||
97 | /* check for unmasked and pending */ | 97 | /* check for unmasked and pending */ |
98 | cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending | 98 | cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending |
99 | jz 1f | 99 | jnz 1f |
100 | 2: call check_events | 100 | 2: call check_events |
101 | 1: | 101 | 1: |
102 | ENDPATCH(xen_restore_fl_direct) | 102 | ENDPATCH(xen_restore_fl_direct) |
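The xen-asm.S change inverts a branch: the cmpw tests whether the vcpu_info bytes read as "unmasked and pending" (0x0001), and the old jz skipped the check_events call in exactly the case that needed it, so events that arrived while interrupts were masked could be left undelivered after xen_restore_fl_direct re-enabled them. The intended logic, as a C sketch of intent rather than the exact field layout:

    /* After restoring the "interrupts enabled" state: */
    if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending)
        check_events();     /* deliver anything that arrived while masked */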