Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/bug.h	2
-rw-r--r--	arch/arm/include/asm/cacheflush.h	2
-rw-r--r--	arch/arm64/include/asm/bug.h	2
-rw-r--r--	arch/blackfin/include/asm/bug.h	4
-rw-r--r--	arch/blackfin/include/asm/flat.h	3
-rw-r--r--	arch/blackfin/kernel/flat.c	4
-rw-r--r--	arch/h8300/include/asm/flat.h	2
-rw-r--r--	arch/m68k/include/asm/flat.h	3
-rw-r--r--	arch/mn10300/include/asm/bug.h	2
-rw-r--r--	arch/parisc/include/asm/bug.h	6
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hash.h	1
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h	1
-rw-r--r--	arch/powerpc/include/asm/book3s/64/radix.h	1
-rw-r--r--	arch/powerpc/include/asm/bug.h	8
-rw-r--r--	arch/powerpc/include/asm/pgtable.h	7
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	28
-rw-r--r--	arch/powerpc/kernel/idle_book3s.S	15
-rw-r--r--	arch/powerpc/mm/mem.c	1
-rw-r--r--	arch/powerpc/mm/pgtable-hash64.c	44
-rw-r--r--	arch/powerpc/mm/pgtable-radix.c	28
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	8
-rw-r--r--	arch/powerpc/platforms/powernv/opal.c	2
-rw-r--r--	arch/s390/include/asm/bug.h	4
-rw-r--r--	arch/sh/include/asm/bug.h	4
-rw-r--r--	arch/sparc/include/asm/trap_block.h	1
-rw-r--r--	arch/sparc/kernel/pci_sun4v.c	12
-rw-r--r--	arch/sparc/kernel/smp_64.c	185
-rw-r--r--	arch/sparc/kernel/sun4v_ivec.S	15
-rw-r--r--	arch/sparc/kernel/traps_64.c	1
-rw-r--r--	arch/x86/Kconfig.debug	1
-rw-r--r--	arch/x86/boot/Makefile	5
-rw-r--r--	arch/x86/configs/i386_defconfig	3
-rw-r--r--	arch/x86/configs/x86_64_defconfig	3
-rw-r--r--	arch/x86/events/core.c	4
-rw-r--r--	arch/x86/events/intel/core.c	164
-rw-r--r--	arch/x86/events/intel/cstate.c	26
-rw-r--r--	arch/x86/events/intel/ds.c	22
-rw-r--r--	arch/x86/events/intel/lbr.c	4
-rw-r--r--	arch/x86/events/perf_event.h	2
-rw-r--r--	arch/x86/include/asm/bug.h	4
-rw-r--r--	arch/x86/include/asm/io.h	4
-rw-r--r--	arch/x86/include/asm/kprobes.h	8
-rw-r--r--	arch/x86/include/asm/mmu_context.h	2
-rw-r--r--	arch/x86/include/asm/paravirt_types.h	16
-rw-r--r--	arch/x86/include/asm/processor.h	2
-rw-r--r--	arch/x86/kernel/acpi/boot.c	8
-rw-r--r--	arch/x86/kernel/apic/io_apic.c	2
-rw-r--r--	arch/x86/kernel/cpu/amd.c	1
-rw-r--r--	arch/x86/kernel/devicetree.c	3
-rw-r--r--	arch/x86/kvm/Kconfig	2
-rw-r--r--	arch/x86/kvm/hyperv.c	7
-rw-r--r--	arch/x86/kvm/vmx.c	46
-rw-r--r--	arch/x86/kvm/x86.c	4
-rw-r--r--	arch/x86/math-emu/Makefile	4
-rw-r--r--	arch/x86/math-emu/fpu_emu.h	2
-rw-r--r--	arch/x86/math-emu/reg_compare.c	16
-rw-r--r--	arch/x86/platform/intel-mid/device_libs/platform_max7315.c	6
-rw-r--r--	arch/x86/platform/uv/tlb_uv.c	29
58 files changed, 573 insertions, 223 deletions
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 4e6e88a6b2f4..2244a94ed9c9 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -37,7 +37,7 @@ do { \
37 ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ 37 ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
38 "2:\t.asciz " #__file "\n" \ 38 "2:\t.asciz " #__file "\n" \
39 ".popsection\n" \ 39 ".popsection\n" \
40 ".pushsection __bug_table,\"a\"\n" \ 40 ".pushsection __bug_table,\"aw\"\n" \
41 ".align 2\n" \ 41 ".align 2\n" \
42 "3:\t.word 1b, 2b\n" \ 42 "3:\t.word 1b, 2b\n" \
43 "\t.hword " #__line ", 0\n" \ 43 "\t.hword " #__line ", 0\n" \
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d69bebf697e7..74504b154256 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 	void (*dma_unmap_area)(const void *, size_t, int);
 
 	void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
 
 /*
  * Select the calling method
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 366448eb0fb7..a02a57186f56 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -36,7 +36,7 @@
 #ifdef CONFIG_GENERIC_BUG
 
 #define __BUG_ENTRY(flags) 				\
-		".pushsection __bug_table,\"a\"\n\t"	\
+		".pushsection __bug_table,\"aw\"\n\t"	\
 		".align 2\n\t"				\
 		"0:	.long 1f - 0b\n\t"		\
 _BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 8d9b1eba89c4..76b2e82ee730 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -21,7 +21,7 @@
 #define _BUG_OR_WARN(flags)						\
 	asm volatile(							\
 		"1:	.hword	%0\n"					\
-		"	.section __bug_table,\"a\",@progbits\n"		\
+		"	.section __bug_table,\"aw\",@progbits\n"	\
 		"2:	.long	1b\n"					\
 		"	.long	%1\n"					\
 		"	.short	%2\n"					\
@@ -38,7 +38,7 @@
 #define _BUG_OR_WARN(flags)						\
 	asm volatile(							\
 		"1:	.hword	%0\n"					\
-		"	.section __bug_table,\"a\",@progbits\n"		\
+		"	.section __bug_table,\"aw\",@progbits\n"	\
 		"2:	.long	1b\n"					\
 		"	.short	%1\n"					\
 		"	.org	2b + %2\n"				\
diff --git a/arch/blackfin/include/asm/flat.h b/arch/blackfin/include/asm/flat.h
index 296d7f56fbfd..f1d6ba7afbf2 100644
--- a/arch/blackfin/include/asm/flat.h
+++ b/arch/blackfin/include/asm/flat.h
@@ -44,8 +44,7 @@ flat_get_relocate_addr (unsigned long relval)
44 return relval & 0x03ffffff; /* Mask out top 6 bits */ 44 return relval & 0x03ffffff; /* Mask out top 6 bits */
45} 45}
46 46
47static inline int flat_set_persistent(unsigned long relval, 47static inline int flat_set_persistent(u32 relval, u32 *persistent)
48 unsigned long *persistent)
49{ 48{
50 int type = (relval >> 26) & 7; 49 int type = (relval >> 26) & 7;
51 if (type == 3) { 50 if (type == 3) {
diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c
index d29ab6a2e909..8ebc54daaa8e 100644
--- a/arch/blackfin/kernel/flat.c
+++ b/arch/blackfin/kernel/flat.c
@@ -32,7 +32,7 @@ unsigned long bfin_get_addr_from_rp(u32 *ptr,
 		break;
 
 	case FLAT_BFIN_RELOC_TYPE_32_BIT:
-		pr_debug("*ptr = %lx", get_unaligned(ptr));
+		pr_debug("*ptr = %x", get_unaligned(ptr));
 		val = get_unaligned(ptr);
 		break;
 
@@ -77,7 +77,7 @@ void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval)
 
 	case FLAT_BFIN_RELOC_TYPE_32_BIT:
 		put_unaligned(addr, ptr);
-		pr_debug("new ptr =%lx", get_unaligned(ptr));
+		pr_debug("new ptr =%x", get_unaligned(ptr));
 		break;
 	}
 }
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
index 18d024251738..7e0bd6fa1532 100644
--- a/arch/h8300/include/asm/flat.h
+++ b/arch/h8300/include/asm/flat.h
@@ -24,7 +24,7 @@ static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
 				  u32 *addr, u32 *persistent)
 {
 	u32 val = get_unaligned((__force u32 *)rp);
-	if (!(flags & FLAT_FLAG_GOTPIC)
+	if (!(flags & FLAT_FLAG_GOTPIC))
 		val &= 0x00ffffff;
 	*addr = val;
 	return 0;
diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h
index 48b62790fe70..b2a41f5b3890 100644
--- a/arch/m68k/include/asm/flat.h
+++ b/arch/m68k/include/asm/flat.h
@@ -30,8 +30,7 @@ static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
 }
 #define flat_get_relocate_addr(rel)	(rel)
 
-static inline int flat_set_persistent(unsigned long relval,
-				      unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
 {
 	return 0;
 }
diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h
index aa6a38886391..811414fb002d 100644
--- a/arch/mn10300/include/asm/bug.h
+++ b/arch/mn10300/include/asm/bug.h
@@ -21,7 +21,7 @@ do { \
 	asm volatile(							\
 		"	syscall 15			\n"		\
 		"0:					\n"		\
-		"	.section __bug_table,\"a\"	\n"		\
+		"	.section __bug_table,\"aw\"	\n"		\
 		"	.long 0b,%0,%1			\n"		\
 		"	.previous			\n"		\
 		:							\
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index d2742273a685..07ea467f22fc 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -27,7 +27,7 @@
 	do {								\
 		asm volatile("\n"					\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
-			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.pushsection __bug_table,\"aw\"\n"	\
 			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
 			     "\t.short %c1, %c2\n"			\
 			     "\t.org 2b+%c3\n"				\
@@ -50,7 +50,7 @@
 	do {								\
 		asm volatile("\n"					\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
-			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.pushsection __bug_table,\"aw\"\n"	\
 			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
 			     "\t.short %c1, %c2\n"			\
 			     "\t.org 2b+%c3\n"				\
@@ -64,7 +64,7 @@
 	do {								\
 		asm volatile("\n"					\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
-			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.pushsection __bug_table,\"aw\"\n"	\
 			     "2:\t" ASM_WORD_INSN "1b\n"		\
 			     "\t.short %c0\n"				\
 			     "\t.org 2b+%c1\n"				\
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0ce513f2926f..36fc7bfe9e11 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -91,6 +91,7 @@ static inline int hash__pgd_bad(pgd_t pgd)
 }
 #ifdef CONFIG_STRICT_KERNEL_RWX
 extern void hash__mark_rodata_ro(void);
+extern void hash__mark_initmem_nx(void);
 #endif
 
 extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index c0737c86a362..d1da415e283c 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1192,5 +1192,6 @@ static inline const int pud_pfn(pud_t pud)
 	BUILD_BUG();
 	return 0;
 }
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 487709ff6875..544440b5aff3 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -118,6 +118,7 @@
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
 extern void radix__mark_rodata_ro(void);
+extern void radix__mark_initmem_nx(void);
 #endif
 
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 0151af6c2a50..87fcc1948817 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -18,7 +18,7 @@
18#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#ifdef CONFIG_DEBUG_BUGVERBOSE 19#ifdef CONFIG_DEBUG_BUGVERBOSE
20.macro EMIT_BUG_ENTRY addr,file,line,flags 20.macro EMIT_BUG_ENTRY addr,file,line,flags
21 .section __bug_table,"a" 21 .section __bug_table,"aw"
225001: PPC_LONG \addr, 5002f 225001: PPC_LONG \addr, 5002f
23 .short \line, \flags 23 .short \line, \flags
24 .org 5001b+BUG_ENTRY_SIZE 24 .org 5001b+BUG_ENTRY_SIZE
@@ -29,7 +29,7 @@
29.endm 29.endm
30#else 30#else
31.macro EMIT_BUG_ENTRY addr,file,line,flags 31.macro EMIT_BUG_ENTRY addr,file,line,flags
32 .section __bug_table,"a" 32 .section __bug_table,"aw"
335001: PPC_LONG \addr 335001: PPC_LONG \addr
34 .short \flags 34 .short \flags
35 .org 5001b+BUG_ENTRY_SIZE 35 .org 5001b+BUG_ENTRY_SIZE
@@ -42,14 +42,14 @@
42 sizeof(struct bug_entry), respectively */ 42 sizeof(struct bug_entry), respectively */
43#ifdef CONFIG_DEBUG_BUGVERBOSE 43#ifdef CONFIG_DEBUG_BUGVERBOSE
44#define _EMIT_BUG_ENTRY \ 44#define _EMIT_BUG_ENTRY \
45 ".section __bug_table,\"a\"\n" \ 45 ".section __bug_table,\"aw\"\n" \
46 "2:\t" PPC_LONG "1b, %0\n" \ 46 "2:\t" PPC_LONG "1b, %0\n" \
47 "\t.short %1, %2\n" \ 47 "\t.short %1, %2\n" \
48 ".org 2b+%3\n" \ 48 ".org 2b+%3\n" \
49 ".previous\n" 49 ".previous\n"
50#else 50#else
51#define _EMIT_BUG_ENTRY \ 51#define _EMIT_BUG_ENTRY \
52 ".section __bug_table,\"a\"\n" \ 52 ".section __bug_table,\"aw\"\n" \
53 "2:\t" PPC_LONG "1b\n" \ 53 "2:\t" PPC_LONG "1b\n" \
54 "\t.short %2\n" \ 54 "\t.short %2\n" \
55 ".org 2b+%3\n" \ 55 ".org 2b+%3\n" \
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dd01212935ac..afae9a336136 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -80,6 +80,13 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mark_initmem_nx(void);
+#else
+static inline void mark_initmem_nx(void) { }
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6d8354d79ef..9029afd1fa2a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -824,7 +824,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
  * r3 volatile parameter and return value for status
  * r4-r10 volatile input and output value
  * r11 volatile hypercall number and output value
- * r12 volatile
+ * r12 volatile input and output value
  * r13-r31 nonvolatile
  * LR nonvolatile
  * CTR volatile
@@ -834,25 +834,26 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
  * Other registers nonvolatile
  *
  * The intersection of volatile registers that don't contain possible
- * inputs is: r12, cr0, xer, ctr. We may use these as scratch regs
- * upon entry without saving.
+ * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
+ * without saving, though xer is not a good idea to use, as hardware may
+ * interpret some bits so it may be costly to change them.
  */
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 	/*
 	 * There is a little bit of juggling to get syscall and hcall
-	 * working well. Save r10 in ctr to be restored in case it is a
-	 * hcall.
+	 * working well. Save r13 in ctr to avoid using SPRG scratch
+	 * register.
 	 *
 	 * Userspace syscalls have already saved the PPR, hcalls must save
 	 * it before setting HMT_MEDIUM.
 	 */
 #define SYSCALL_KVMTEST						\
-	mr	r12,r13;					\
+	mtctr	r13;						\
 	GET_PACA(r13);						\
-	mtctr	r10;						\
+	std	r10,PACA_EXGEN+EX_R10(r13);			\
 	KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
 	HMT_MEDIUM;						\
-	mr	r9,r12;						\
+	mfctr	r9;
 
 #else
 #define SYSCALL_KVMTEST						\
@@ -935,8 +936,8 @@ EXC_VIRT_END(system_call, 0x4c00, 0x100)
  * This is a hcall, so register convention is as above, with these
  * differences:
  * r13 = PACA
- * r12 = orig r13
- * ctr = orig r10
+ * ctr = orig r13
+ * orig r10 saved in PACA
  */
 TRAMP_KVM_BEGIN(do_kvm_0xc00)
 	/*
@@ -944,14 +945,13 @@ TRAMP_KVM_BEGIN(do_kvm_0xc00)
 	 * HMT_MEDIUM. That allows the KVM code to save that value into the
 	 * guest state (it is the guest's PPR value).
 	 */
-	OPT_GET_SPR(r0, SPRN_PPR, CPU_FTR_HAS_PPR)
+	OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
 	HMT_MEDIUM
-	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r0, CPU_FTR_HAS_PPR)
+	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
 	mfctr	r10
-	SET_SCRATCH0(r12)
+	SET_SCRATCH0(r10)
 	std	r9,PACA_EXGEN+EX_R9(r13)
 	mfcr	r9
-	std	r10,PACA_EXGEN+EX_R10(r13)
 	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
 #endif
 
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5adb390e773b..516ebef905c0 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -30,6 +30,7 @@
  * Use unused space in the interrupt stack to save and restore
  * registers for winkle support.
  */
+#define _MMCR0	GPR0
 #define _SDR1	GPR3
 #define _PTCR	GPR3
 #define _RPR	GPR4
@@ -272,6 +273,14 @@ power_enter_stop:
 	b 	pnv_wakeup_noloss
 
 .Lhandle_esl_ec_set:
+	/*
+	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
+	 * state-loss idle. Saving and restoring MMCR0 over idle is a
+	 * workaround.
+	 */
+	mfspr	r4,SPRN_MMCR0
+	std	r4,_MMCR0(r1)
+
 /*
  * Check if the requested state is a deep idle state.
  */
@@ -450,10 +459,14 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 pnv_restore_hyp_resource_arch300:
 	/*
 	 * Workaround for POWER9, if we lost resources, the ERAT
-	 * might have been mixed up and needs flushing.
+	 * might have been mixed up and needs flushing. We also need
+	 * to reload MMCR0 (see comment above).
 	 */
 	blt	cr3,1f
 	PPC_INVALIDATE_ERAT
+	ld	r1,PACAR1(r13)
+	ld	r4,_MMCR0(r1)
+	mtspr	SPRN_MMCR0,r4
 1:
 	/*
 	 * POWER ISA 3. Use PSSCR to determine if we
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8541f18694a4..46b4e67d2372 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -402,6 +402,7 @@ void __init mem_init(void)
 void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
+	mark_initmem_nx();
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 188b4107584d..443a2c66a304 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -425,33 +425,51 @@ int hash__has_transparent_hugepage(void)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void hash__mark_rodata_ro(void)
+static bool hash__change_memory_range(unsigned long start, unsigned long end,
+				      unsigned long newpp)
 {
-	unsigned long start = (unsigned long)_stext;
-	unsigned long end = (unsigned long)__init_begin;
 	unsigned long idx;
 	unsigned int step, shift;
-	unsigned long newpp = PP_RXXX;
 
 	shift = mmu_psize_defs[mmu_linear_psize].shift;
 	step = 1 << shift;
 
-	start = ((start + step - 1) >> shift) << shift;
-	end = (end >> shift) << shift;
+	start = ALIGN_DOWN(start, step);
+	end = ALIGN(end, step); // aligns up
 
-	pr_devel("marking ro start %lx, end %lx, step %x\n",
-		 start, end, step);
+	if (start >= end)
+		return false;
 
-	if (start == end) {
-		pr_warn("could not set rodata ro, relocate the start"
-			" of the kernel to a 0x%x boundary\n", step);
-		return;
-	}
+	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
+		 start, end, newpp, step);
 
 	for (idx = start; idx < end; idx += step)
 		/* Not sure if we can do much with the return value */
 		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
 							mmu_kernel_ssize);
 
+	return true;
+}
+
+void hash__mark_rodata_ro(void)
+{
+	unsigned long start, end;
+
+	start = (unsigned long)_stext;
+	end = (unsigned long)__init_begin;
+
+	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
+}
+
+void hash__mark_initmem_nx(void)
+{
+	unsigned long start, end, pp;
+
+	start = (unsigned long)__init_begin;
+	end = (unsigned long)__init_end;
+
+	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+
+	WARN_ON(!hash__change_memory_range(start, end, pp));
 }
 #endif
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 8c13e4282308..5cc50d47ce3f 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -112,10 +112,9 @@ set_the_pte:
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void radix__mark_rodata_ro(void)
+void radix__change_memory_range(unsigned long start, unsigned long end,
+				unsigned long clear)
 {
-	unsigned long start = (unsigned long)_stext;
-	unsigned long end = (unsigned long)__init_begin;
 	unsigned long idx;
 	pgd_t *pgdp;
 	pud_t *pudp;
@@ -125,7 +124,8 @@ void radix__mark_rodata_ro(void)
 	start = ALIGN_DOWN(start, PAGE_SIZE);
 	end = PAGE_ALIGN(end); // aligns up
 
-	pr_devel("marking ro start %lx, end %lx\n", start, end);
+	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
+		 start, end, clear);
 
 	for (idx = start; idx < end; idx += PAGE_SIZE) {
 		pgdp = pgd_offset_k(idx);
@@ -147,11 +147,29 @@ void radix__mark_rodata_ro(void)
 		if (!ptep)
 			continue;
 update_the_pte:
-		radix__pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
+		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
 	}
 
 	radix__flush_tlb_kernel_range(start, end);
 }
+
+void radix__mark_rodata_ro(void)
+{
+	unsigned long start, end;
+
+	start = (unsigned long)_stext;
+	end = (unsigned long)__init_begin;
+
+	radix__change_memory_range(start, end, _PAGE_WRITE);
+}
+
+void radix__mark_initmem_nx(void)
+{
+	unsigned long start = (unsigned long)__init_begin;
+	unsigned long end = (unsigned long)__init_end;
+
+	radix__change_memory_range(start, end, _PAGE_EXEC);
+}
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
 static inline void __meminit print_mapping(unsigned long start,
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 5c0b795d656c..0736e94c7615 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -505,4 +505,12 @@ void mark_rodata_ro(void)
 	else
 		hash__mark_rodata_ro();
 }
+
+void mark_initmem_nx(void)
+{
+	if (radix_enabled())
+		radix__mark_initmem_nx();
+	else
+		hash__mark_initmem_nx();
+}
 #endif
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 9b87abb178f0..cad6b57ce494 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -78,7 +78,7 @@ void opal_configure_cores(void)
 	 *  ie. Host hash supports hash guests
 	 *      Host radix supports hash/radix guests
 	 */
-	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+	if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
 		reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
 		if (early_radix_enabled())
 			reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
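The opal.c hunk swaps cpu_has_feature() for early_cpu_has_feature(). A hedged reading of why: this code runs early enough in boot that the patched (static-key style) fast path behind cpu_has_feature() may not be initialized yet, so only a direct bitmask lookup is reliable. A rough model of the split, where everything except the two function names is an assumption made for illustration:

#include <stdbool.h>

#define CPU_FTR_ARCH_300 (1ULL << 40)       /* illustrative bit, not the real one */

static unsigned long long cur_cpu_features; /* filled in very early */
static bool feature_keys_initialized;       /* flipped much later in boot */

/* Always safe: a plain load and mask test. */
static bool early_cpu_has_feature(unsigned long long ftr)
{
	return (cur_cpu_features & ftr) != 0;
}

/* Fast path; in the real kernel this is a runtime-patched branch and is
 * not valid before the keys are set up, which is the failure the patch
 * avoids by calling the early variant instead. */
static bool cpu_has_feature(unsigned long long ftr)
{
	if (!feature_keys_initialized)
		return false;                   /* stale/undefined answer modeled as false */
	return early_cpu_has_feature(ftr);
}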
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 1bbd9dbfe4e0..ce9cc123988b 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -14,7 +14,7 @@
14 ".section .rodata.str,\"aMS\",@progbits,1\n" \ 14 ".section .rodata.str,\"aMS\",@progbits,1\n" \
15 "2: .asciz \""__FILE__"\"\n" \ 15 "2: .asciz \""__FILE__"\"\n" \
16 ".previous\n" \ 16 ".previous\n" \
17 ".section __bug_table,\"a\"\n" \ 17 ".section __bug_table,\"aw\"\n" \
18 "3: .long 1b-3b,2b-3b\n" \ 18 "3: .long 1b-3b,2b-3b\n" \
19 " .short %0,%1\n" \ 19 " .short %0,%1\n" \
20 " .org 3b+%2\n" \ 20 " .org 3b+%2\n" \
@@ -30,7 +30,7 @@
30 asm volatile( \ 30 asm volatile( \
31 "0: j 0b+2\n" \ 31 "0: j 0b+2\n" \
32 "1:\n" \ 32 "1:\n" \
33 ".section __bug_table,\"a\"\n" \ 33 ".section __bug_table,\"aw\"\n" \
34 "2: .long 1b-2b\n" \ 34 "2: .long 1b-2b\n" \
35 " .short %0\n" \ 35 " .short %0\n" \
36 " .org 2b+%1\n" \ 36 " .org 2b+%1\n" \
diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
index c9828f785ca0..5b5086367639 100644
--- a/arch/sh/include/asm/bug.h
+++ b/arch/sh/include/asm/bug.h
@@ -24,14 +24,14 @@
  */
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define _EMIT_BUG_ENTRY				\
-	"\t.pushsection __bug_table,\"a\"\n"	\
+	"\t.pushsection __bug_table,\"aw\"\n"	\
 	"2:\t.long 1b, %O1\n"			\
 	"\t.short %O2, %O3\n"			\
 	"\t.org 2b+%O4\n"			\
 	"\t.popsection\n"
 #else
 #define _EMIT_BUG_ENTRY				\
-	"\t.pushsection __bug_table,\"a\"\n"	\
+	"\t.pushsection __bug_table,\"aw\"\n"	\
 	"2:\t.long 1b\n"			\
 	"\t.short %O3\n"			\
 	"\t.org 2b+%O4\n"			\
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index ec9c04de3664..ff05992dae7a 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
 void init_cur_cpu_trap(struct thread_info *);
 void setup_tba(void);
 extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
 
 unsigned long real_hard_smp_processor_id(void);
 
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 24f21c726dfa..f10e2f712394 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -673,12 +673,14 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static int dma_4v_supported(struct device *dev, u64 device_mask)
 {
 	struct iommu *iommu = dev->archdata.iommu;
-	u64 dma_addr_mask;
+	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
-		dma_addr_mask = iommu->atu->dma_addr_mask;
-	else
-		dma_addr_mask = iommu->dma_addr_mask;
+	if (device_mask > DMA_BIT_MASK(32)) {
+		if (iommu->atu)
+			dma_addr_mask = iommu->atu->dma_addr_mask;
+		else
+			return 0;
+	}
 
 	if ((device_mask & dma_addr_mask) == dma_addr_mask)
 		return 1;
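The dma_4v_supported() rework above closes a hole: without an ATU, the old code compared a wide device mask against the narrow 32-bit IOMMU mask, and the subset test passed anyway. A standalone demonstration of that failure mode (plain C, kernel types replaced with stdint equivalents; the macro mirrors the kernel's DMA_BIT_MASK semantics):

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t device_mask = DMA_BIT_MASK(64);   /* device claims 64-bit DMA */
	uint64_t iommu_mask  = DMA_BIT_MASK(32);   /* no ATU present */

	/* Old logic: the subset test alone wrongly reports support (1). */
	printf("old: %d\n", (device_mask & iommu_mask) == iommu_mask);

	/* New logic: a >32-bit request without an ATU is refused outright. */
	int supported = 1;
	if (device_mask > DMA_BIT_MASK(32))
		supported = 0;                     /* iommu->atu == NULL */
	printf("new: %d\n", supported);
	return 0;
}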
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index fdf31040a7dc..3218bc43302e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -622,22 +622,48 @@ retry:
 	}
 }
 
-/* Multi-cpu list version.  */
+#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
+#define	MONDO_USEC_WAIT_MIN		2
+#define	MONDO_USEC_WAIT_MAX		100
+#define	MONDO_RETRY_LIMIT		500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * the mondo until all cpus have received, or cpus are truly stuck
+ * unable to receive mondo, and we timeout.
+ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
+ * perform guest service, such as PCIe error handling. Consider the
+ * service time, 1 second overall wait is reasonable for 1 cpu.
+ * Here two in-between mondo check wait time are defined: 2 usec for
+ * single cpu quick turn around and up to 100usec for large cpu count.
+ * Deliver mondo to large number of cpus could take longer, we adjusts
+ * the retry count as long as target cpus are making forward progress.
+ */
 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 {
-	int retries, this_cpu, prev_sent, i, saw_cpu_error;
+	int this_cpu, tot_cpus, prev_sent, i, rem;
+	int usec_wait, retries, tot_retries;
+	u16 first_cpu = 0xffff;
+	unsigned long xc_rcvd = 0;
 	unsigned long status;
+	int ecpuerror_id = 0;
+	int enocpu_id = 0;
 	u16 *cpu_list;
+	u16 cpu;
 
 	this_cpu = smp_processor_id();
-
 	cpu_list = __va(tb->cpu_list_pa);
-
-	saw_cpu_error = 0;
-	retries = 0;
+	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+	if (usec_wait > MONDO_USEC_WAIT_MAX)
+		usec_wait = MONDO_USEC_WAIT_MAX;
+	retries = tot_retries = 0;
+	tot_cpus = cnt;
 	prev_sent = 0;
+
 	do {
-		int forward_progress, n_sent;
+		int n_sent, mondo_delivered, target_cpu_busy;
 
 		status = sun4v_cpu_mondo_send(cnt,
 					      tb->cpu_list_pa,
@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 
 		/* HV_EOK means all cpus received the xcall, we're done.  */
 		if (likely(status == HV_EOK))
-			break;
+			goto xcall_done;
+
+		/* If not these non-fatal errors, panic */
+		if (unlikely((status != HV_EWOULDBLOCK) &&
+			     (status != HV_ECPUERROR) &&
+			     (status != HV_ENOCPU)))
+			goto fatal_errors;
 
 		/* First, see if we made any forward progress.
 		 *
+		 * Go through the cpu_list, count the target cpus that have
+		 * received our mondo (n_sent), and those that did not (rem).
+		 * Re-pack cpu_list with the cpus remain to be retried in the
+		 * front - this simplifies tracking the truly stalled cpus.
+		 *
 		 * The hypervisor indicates successful sends by setting
 		 * cpu list entries to the value 0xffff.
+		 *
+		 * EWOULDBLOCK means some target cpus did not receive the
+		 * mondo and retry usually helps.
+		 *
+		 * ECPUERROR means at least one target cpu is in error state,
+		 * it's usually safe to skip the faulty cpu and retry.
+		 *
+		 * ENOCPU means one of the target cpu doesn't belong to the
+		 * domain, perhaps offlined which is unexpected, but not
+		 * fatal and it's okay to skip the offlined cpu.
 		 */
+		rem = 0;
 		n_sent = 0;
 		for (i = 0; i < cnt; i++) {
-			if (likely(cpu_list[i] == 0xffff))
+			cpu = cpu_list[i];
+			if (likely(cpu == 0xffff)) {
 				n_sent++;
+			} else if ((status == HV_ECPUERROR) &&
+				   (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+				ecpuerror_id = cpu + 1;
+			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+				enocpu_id = cpu + 1;
+			} else {
+				cpu_list[rem++] = cpu;
+			}
 		}
 
-		forward_progress = 0;
-		if (n_sent > prev_sent)
-			forward_progress = 1;
+		/* No cpu remained, we're done. */
+		if (rem == 0)
+			break;
 
-		prev_sent = n_sent;
+		/* Otherwise, update the cpu count for retry. */
+		cnt = rem;
 
-		/* If we get a HV_ECPUERROR, then one or more of the cpus
-		 * in the list are in error state.  Use the cpu_state()
-		 * hypervisor call to find out which cpus are in error state.
+		/* Record the overall number of mondos received by the
+		 * first of the remaining cpus.
 		 */
-		if (unlikely(status == HV_ECPUERROR)) {
-			for (i = 0; i < cnt; i++) {
-				long err;
-				u16 cpu;
+		if (first_cpu != cpu_list[0]) {
+			first_cpu = cpu_list[0];
+			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+		}
 
-				cpu = cpu_list[i];
-				if (cpu == 0xffff)
-					continue;
+		/* Was any mondo delivered successfully? */
+		mondo_delivered = (n_sent > prev_sent);
+		prev_sent = n_sent;
 
-				err = sun4v_cpu_state(cpu);
-				if (err == HV_CPU_STATE_ERROR) {
-					saw_cpu_error = (cpu + 1);
-					cpu_list[i] = 0xffff;
-				}
-			}
-		} else if (unlikely(status != HV_EWOULDBLOCK))
-			goto fatal_mondo_error;
+		/* or, was any target cpu busy processing other mondos? */
+		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
 
-		/* Don't bother rewriting the CPU list, just leave the
-		 * 0xffff and non-0xffff entries in there and the
-		 * hypervisor will do the right thing.
-		 *
-		 * Only advance timeout state if we didn't make any
-		 * forward progress.
+		/* Retry count is for no progress. If we're making progress,
+		 * reset the retry count.
 		 */
-		if (unlikely(!forward_progress)) {
-			if (unlikely(++retries > 10000))
-				goto fatal_mondo_timeout;
-
-			/* Delay a little bit to let other cpus catch up
-			 * on their cpu mondo queue work.
-			 */
-			udelay(2 * cnt);
+		if (likely(mondo_delivered || target_cpu_busy)) {
+			tot_retries += retries;
+			retries = 0;
+		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+			goto fatal_mondo_timeout;
 		}
-	} while (1);
 
-	if (unlikely(saw_cpu_error))
-		goto fatal_mondo_cpu_error;
+		/* Delay a little bit to let other cpus catch up on
+		 * their cpu mondo queue work.
+		 */
+		if (!mondo_delivered)
+			udelay(usec_wait);
 
-	return;
+		retries++;
+	} while (1);
 
-fatal_mondo_cpu_error:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
-	       "(including %d) were in error state\n",
-	       this_cpu, saw_cpu_error - 1);
+xcall_done:
+	if (unlikely(ecpuerror_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+			this_cpu, ecpuerror_id - 1);
+	} else if (unlikely(enocpu_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+			this_cpu, enocpu_id - 1);
+	}
 	return;
 
+fatal_errors:
+	/* fatal errors include bad alignment, etc */
+	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+		this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+	panic("Unexpected SUN4V mondo error %lu\n", status);
+
 fatal_mondo_timeout:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
-	       " progress after %d retries.\n",
-	       this_cpu, retries);
-	goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
-	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
-	       this_cpu, status);
-	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
-	       "mondo_block_pa(%lx)\n",
-	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
-	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
-	for (i = 0; i < cnt; i++)
-		printk("%u ", cpu_list[i]);
-	printk("]\n");
+	/* some cpus being non-responsive to the cpu mondo */
+	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+		this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+	panic("SUN4V mondo timeout panic\n");
 }
 
 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
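The heart of the new hypervisor_xcall_deliver() is its progress accounting: the retry budget is only consumed when neither a fresh delivery (n_sent advanced) nor receiver-side activity (the head cpu's cpu_mondo_counter advanced) is observed. A condensed, hedged model of just that policy, with the sun4v hypervisor machinery stubbed away and names chosen for illustration:

#include <stdbool.h>

#define MONDO_RETRY_LIMIT 500000

struct xcall_state {
	int n_sent, prev_sent;                       /* entries acked this round / last round */
	unsigned long head_rcvd, head_rcvd_last;     /* head cpu's mondo receive counter */
	int retries, tot_retries;
};

/* Returns true to keep retrying, false when the no-progress budget runs out. */
static bool account_progress(struct xcall_state *s)
{
	bool delivered     = s->n_sent > s->prev_sent;
	bool receiver_busy = s->head_rcvd_last < s->head_rcvd;

	s->prev_sent      = s->n_sent;
	s->head_rcvd_last = s->head_rcvd;

	if (delivered || receiver_busy) {
		/* Forward progress: bank the spent retries and start over. */
		s->tot_retries += s->retries;
		s->retries = 0;
		return true;
	}
	/* No progress of either kind: burn the budget. */
	return ++s->retries <= MONDO_RETRY_LIMIT;
}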
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e9c199..34631995859a 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
 	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 
+	/* Get smp_processor_id() into %g3 */
+	sethi	%hi(trap_block), %g5
+	or	%g5, %lo(trap_block), %g5
+	sub	%g4, %g5, %g3
+	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+	/* Increment cpu_mondo_counter[smp_processor_id()] */
+	sethi	%hi(cpu_mondo_counter), %g5
+	or	%g5, %lo(cpu_mondo_counter), %g5
+	sllx	%g3, 3, %g3
+	add	%g5, %g3, %g5
+	ldx	[%g5], %g3
+	add	%g3, 1, %g3
+	stx	%g3, [%g5]
+
 	/* Get CPU mondo queue base phys address into %g7.  */
 	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
 
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 196ee5eb4d48..ad31af1dd726 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
 	}
 }
 
+u64 cpu_mondo_counter[NR_CPUS] = {0};
 struct trap_per_cpu trap_block[NR_CPUS];
 EXPORT_SYMBOL(trap_block);
 
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index fcb7604172ce..cd20ca0b4043 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -348,6 +348,7 @@ config X86_DEBUG_FPU
 
 config PUNIT_ATOM_DEBUG
 	tristate "ATOM Punit debug driver"
+	depends on PCI
 	select DEBUG_FS
 	select IOSF_MBI
 	---help---
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 0d810fb15eac..d88a2fddba8c 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -73,12 +73,13 @@ UBSAN_SANITIZE := n
 $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
 
 quiet_cmd_image = BUILD   $@
+silent_redirect_image = >/dev/null
 cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
-			       $(obj)/zoffset.h $@
+			       $(obj)/zoffset.h $@ $($(quiet)redirect_image)
 
 $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
 	$(call if_changed,image)
-	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+	@$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
 OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
 $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 6cf79e1a6830..0eb9f92f3717 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,5 +1,4 @@
 # CONFIG_64BIT is not set
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -125,7 +124,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_FILTER=y
 CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
 CONFIG_NF_NAT=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
 CONFIG_IP_NF_MANGLE=y
@@ -255,7 +253,6 @@ CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=y
 CONFIG_USB_PRINTER=y
 CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
 CONFIG_EDAC=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index de45f57b410d..4a4b16e56d35 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -124,7 +123,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_FILTER=y
 CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
 CONFIG_NF_NAT=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
 CONFIG_IP_NF_MANGLE=y
@@ -251,7 +249,6 @@ CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=y
 CONFIG_USB_PRINTER=y
 CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
 CONFIG_EDAC=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index ff1ea2fb9705..8e3db8f642a7 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -191,8 +191,8 @@ static void release_pmc_hardware(void) {}
 
 static bool check_hw_exists(void)
 {
-	u64 val, val_fail, val_new= ~0;
-	int i, reg, reg_fail, ret = 0;
+	u64 val, val_fail = -1, val_new= ~0;
+	int i, reg, reg_fail = -1, ret = 0;
 	int bios_fail = 0;
 	int reg_safe = -1;
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index aa62437d1aa1..98b0f0729527 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1708,6 +1708,120 @@ static __initconst const u64 glm_hw_cache_extra_regs
 	},
 };
 
+static __initconst const u64 glp_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
+			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+			[C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+			[C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
+			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
+			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+	},
+};
+
+static __initconst const u64 glp_hw_cache_extra_regs
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
+						  GLM_LLC_ACCESS,
+			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
+						  GLM_LLC_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
+						  GLM_LLC_ACCESS,
+			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
+						  GLM_LLC_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+};
+
 #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
 #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
 #define KNL_MCDRAM_LOCAL	BIT_ULL(21)
@@ -3016,6 +3130,9 @@ static int hsw_hw_config(struct perf_event *event)
 	return 0;
 }
 
+static struct event_constraint counter0_constraint =
+			INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
+
 static struct event_constraint counter2_constraint =
 			EVENT_CONSTRAINT(0, 0x4, 0);
 
@@ -3037,6 +3154,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	return c;
 }
 
+static struct event_constraint *
+glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			  struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	/* :ppp means to do reduced skid PEBS which is PMC0 only. */
+	if (event->attr.precise_ip == 3)
+		return &counter0_constraint;
+
+	c = intel_get_event_constraints(cpuc, idx, event);
+
+	return c;
+}
+
 /*
  * Broadwell:
  *
@@ -3265,10 +3397,8 @@ static void intel_pmu_cpu_dying(int cpu)
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
 				 bool sched_in)
 {
-	if (x86_pmu.pebs_active)
-		intel_pmu_pebs_sched_task(ctx, sched_in);
-	if (x86_pmu.lbr_nr)
-		intel_pmu_lbr_sched_task(ctx, sched_in);
+	intel_pmu_pebs_sched_task(ctx, sched_in);
+	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -3838,6 +3968,32 @@ __init int intel_pmu_init(void)
3838 pr_cont("Goldmont events, "); 3968 pr_cont("Goldmont events, ");
3839 break; 3969 break;
3840 3970
3971 case INTEL_FAM6_ATOM_GEMINI_LAKE:
3972 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
3973 sizeof(hw_cache_event_ids));
3974 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
3975 sizeof(hw_cache_extra_regs));
3976
3977 intel_pmu_lbr_init_skl();
3978
3979 x86_pmu.event_constraints = intel_slm_event_constraints;
3980 x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
3981 x86_pmu.extra_regs = intel_glm_extra_regs;
3982 /*
3983 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
3984 * for precise cycles.
3985 */
3986 x86_pmu.pebs_aliases = NULL;
3987 x86_pmu.pebs_prec_dist = true;
3988 x86_pmu.lbr_pt_coexist = true;
3989 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
3990 x86_pmu.get_event_constraints = glp_get_event_constraints;
3991 x86_pmu.cpu_events = glm_events_attrs;
3992 /* Goldmont Plus has 4-wide pipeline */
3993 event_attr_td_total_slots_scale_glm.event_str = "4";
3994 pr_cont("Goldmont plus events, ");
3995 break;
3996
3841 case INTEL_FAM6_WESTMERE: 3997 case INTEL_FAM6_WESTMERE:
3842 case INTEL_FAM6_WESTMERE_EP: 3998 case INTEL_FAM6_WESTMERE_EP:
3843 case INTEL_FAM6_WESTMERE_EX: 3999 case INTEL_FAM6_WESTMERE_EX:
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 238ae3248ba5..4cf100ff2a37 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -40,16 +40,16 @@
  * Model specific counters:
  *	MSR_CORE_C1_RES: CORE C1 Residency Counter
  *			 perf code: 0x00
- *			 Available model: SLM,AMT
+ *			 Available model: SLM,AMT,GLM
  *			 Scope: Core (each processor core has a MSR)
  *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *			       perf code: 0x01
- *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
  *			       Scope: Core
  *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
  *			       perf code: 0x02
  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- *					       SKL,KNL
+ *					       SKL,KNL,GLM
  *			       Scope: Core
  *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *			       perf code: 0x03
@@ -57,16 +57,17 @@
  *			       Scope: Core
  *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *			       perf code: 0x00
- *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL
+ *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
  *			       Scope: Package (physical package)
  *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *			       perf code: 0x01
  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
+ *					      GLM
  *			       Scope: Package (physical package)
  *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *			       perf code: 0x02
  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- *					       SKL,KNL
+ *					       SKL,KNL,GLM
  *			       Scope: Package (physical package)
  *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *			       perf code: 0x03
@@ -82,7 +83,7 @@
  *			       Scope: Package (physical package)
  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *			       perf code: 0x06
- *			       Available model: HSW ULT only
+ *			       Available model: HSW ULT, GLM
  *			       Scope: Package (physical package)
  *
  */
@@ -504,6 +505,17 @@ static const struct cstate_model knl_cstates __initconst = {
 };
 
 
+static const struct cstate_model glm_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
+				  BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+				  BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
 
 #define X86_CSTATES_MODEL(model, states)				\
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@@ -546,6 +558,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
+
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
 	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index c6d23ffe422d..a322fed5f8ed 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -606,12 +606,6 @@ static inline void intel_pmu_drain_pebs_buffer(void)
 	x86_pmu.drain_pebs(&regs);
 }
 
-void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
-{
-	if (!sched_in)
-		intel_pmu_drain_pebs_buffer();
-}
-
 /*
  * PEBS
  */
@@ -651,6 +645,12 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
 	EVENT_CONSTRAINT_END
 };
 
+struct event_constraint intel_glp_pebs_event_constraints[] = {
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
+	EVENT_CONSTRAINT_END
+};
+
 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x100b, 0xf),	/* MEM_INST_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),	/* MEM_UNCORE_RETIRED.* */
@@ -816,6 +816,14 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
816 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); 816 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
817} 817}
818 818
819void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
820{
821 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
822
823 if (!sched_in && pebs_needs_sched_cb(cpuc))
824 intel_pmu_drain_pebs_buffer();
825}
826
819static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) 827static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
820{ 828{
821 struct debug_store *ds = cpuc->ds; 829 struct debug_store *ds = cpuc->ds;
@@ -889,6 +897,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
889 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { 897 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
890 ds->pebs_event_reset[hwc->idx] = 898 ds->pebs_event_reset[hwc->idx] =
891 (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; 899 (u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
900 } else {
901 ds->pebs_event_reset[hwc->idx] = 0;
892 } 902 }
893} 903}
894 904
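
The last ds.c hunk is a stale-state fix: pebs_event_reset is per counter slot, and only the auto-reload path ever wrote it, so an event without auto-reload that inherited a slot kept the previous owner's reset value. A generic sketch of the pattern, with all names invented for illustration:

#define NSLOTS 8

static unsigned long long slot_reset[NSLOTS];

static void program_slot(int idx, int auto_reload,
			 unsigned long long period)
{
	if (auto_reload)
		slot_reset[idx] = (unsigned long long)-period;
	else
		slot_reset[idx] = 0;	/* clear, don't inherit the old owner's value */
}
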
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index eb261656a320..955457a30197 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -380,8 +380,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
380 380
381void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) 381void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
382{ 382{
383 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
383 struct x86_perf_task_context *task_ctx; 384 struct x86_perf_task_context *task_ctx;
384 385
386 if (!cpuc->lbr_users)
387 return;
388
385 /* 389 /*
386 * If LBR callstack feature is enabled and the stack was saved when 390 * If LBR callstack feature is enabled and the stack was saved when
387 * the task was scheduled out, restore the stack. Otherwise flush 391 * the task was scheduled out, restore the stack. Otherwise flush
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 53728eea1bed..476aec3a4cab 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -879,6 +879,8 @@ extern struct event_constraint intel_slm_pebs_event_constraints[];
879 879
880extern struct event_constraint intel_glm_pebs_event_constraints[]; 880extern struct event_constraint intel_glm_pebs_event_constraints[];
881 881
882extern struct event_constraint intel_glp_pebs_event_constraints[];
883
882extern struct event_constraint intel_nehalem_pebs_event_constraints[]; 884extern struct event_constraint intel_nehalem_pebs_event_constraints[];
883 885
884extern struct event_constraint intel_westmere_pebs_event_constraints[]; 886extern struct event_constraint intel_westmere_pebs_event_constraints[];
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 39e702d90cdb..aa6b2023d8f8 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -35,7 +35,7 @@
35#define _BUG_FLAGS(ins, flags) \ 35#define _BUG_FLAGS(ins, flags) \
36do { \ 36do { \
37 asm volatile("1:\t" ins "\n" \ 37 asm volatile("1:\t" ins "\n" \
38 ".pushsection __bug_table,\"a\"\n" \ 38 ".pushsection __bug_table,\"aw\"\n" \
39 "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ 39 "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
40 "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \ 40 "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
41 "\t.word %c1" "\t# bug_entry::line\n" \ 41 "\t.word %c1" "\t# bug_entry::line\n" \
@@ -52,7 +52,7 @@ do { \
52#define _BUG_FLAGS(ins, flags) \ 52#define _BUG_FLAGS(ins, flags) \
53do { \ 53do { \
54 asm volatile("1:\t" ins "\n" \ 54 asm volatile("1:\t" ins "\n" \
55 ".pushsection __bug_table,\"a\"\n" \ 55 ".pushsection __bug_table,\"aw\"\n" \
56 "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ 56 "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
57 "\t.word %c0" "\t# bug_entry::flags\n" \ 57 "\t.word %c0" "\t# bug_entry::flags\n" \
58 "\t.org 2b+%c1\n" \ 58 "\t.org 2b+%c1\n" \
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 7afb0e2f07f4..48febf07e828 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -328,13 +328,13 @@ static inline unsigned type in##bwl##_p(int port) \
328static inline void outs##bwl(int port, const void *addr, unsigned long count) \ 328static inline void outs##bwl(int port, const void *addr, unsigned long count) \
329{ \ 329{ \
330 asm volatile("rep; outs" #bwl \ 330 asm volatile("rep; outs" #bwl \
331 : "+S"(addr), "+c"(count) : "d"(port)); \ 331 : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
332} \ 332} \
333 \ 333 \
334static inline void ins##bwl(int port, void *addr, unsigned long count) \ 334static inline void ins##bwl(int port, void *addr, unsigned long count) \
335{ \ 335{ \
336 asm volatile("rep; ins" #bwl \ 336 asm volatile("rep; ins" #bwl \
337 : "+D"(addr), "+c"(count) : "d"(port)); \ 337 : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
338} 338}
339 339
340BUILDIO(b, b, char) 340BUILDIO(b, b, char)
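
The io.h hunk adds a "memory" clobber to the string I/O helpers: rep ins stores into the buffer behind the compiler's back and rep outs reads from it, so without the clobber GCC may keep buffer contents cached in registers or reorder memory accesses across the asm. A freestanding sketch of the same idiom (not the kernel macro itself):

static inline void insw_example(unsigned short port, void *buf,
				unsigned long count)
{
	asm volatile("rep; insw"
		     : "+D" (buf), "+c" (count)	/* advanced by the instruction */
		     : "d" (port)		/* port number in %dx */
		     : "memory");		/* buf changes outside GCC's view */
}
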
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 34b984c60790..6cf65437b5e5 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -52,10 +52,10 @@ typedef u8 kprobe_opcode_t;
52#define flush_insn_slot(p) do { } while (0) 52#define flush_insn_slot(p) do { } while (0)
53 53
54/* optinsn template addresses */ 54/* optinsn template addresses */
55extern __visible kprobe_opcode_t optprobe_template_entry; 55extern __visible kprobe_opcode_t optprobe_template_entry[];
56extern __visible kprobe_opcode_t optprobe_template_val; 56extern __visible kprobe_opcode_t optprobe_template_val[];
57extern __visible kprobe_opcode_t optprobe_template_call; 57extern __visible kprobe_opcode_t optprobe_template_call[];
58extern __visible kprobe_opcode_t optprobe_template_end; 58extern __visible kprobe_opcode_t optprobe_template_end[];
59#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE) 59#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
60#define MAX_OPTINSN_SIZE \ 60#define MAX_OPTINSN_SIZE \
61 (((unsigned long)&optprobe_template_end - \ 61 (((unsigned long)&optprobe_template_end - \
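
Turning the optprobe template externs into incomplete array types matters because the symbols are defined in assembly: declared as single objects, the compiler may assume each is exactly sizeof(kprobe_opcode_t) and treat end-minus-start arithmetic as out of bounds, an assumption newer GCCs will optimize on. A small sketch with hypothetical label names:

extern char asm_tmpl_start[];	/* defined in a .S file, size unknown to C */
extern char asm_tmpl_end[];

static unsigned long asm_tmpl_size(void)
{
	/* Valid with array declarations; suspect with single-object ones. */
	return (unsigned long)(asm_tmpl_end - asm_tmpl_start);
}
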
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index ecfcb6643c9b..265c907d7d4c 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -293,7 +293,7 @@ static inline unsigned long __get_current_cr3_fast(void)
293 unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd); 293 unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
294 294
295 /* For now, be very restrictive about when this can be called. */ 295 /* For now, be very restrictive about when this can be called. */
296 VM_WARN_ON(in_nmi() || !in_atomic()); 296 VM_WARN_ON(in_nmi() || preemptible());
297 297
298 VM_BUG_ON(cr3 != __read_cr3()); 298 VM_BUG_ON(cr3 != __read_cr3());
299 return cr3; 299 return cr3;
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index cb976bab6299..9ffc36bfe4cd 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
84 */ 84 */
85 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, 85 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
86 unsigned long addr, unsigned len); 86 unsigned long addr, unsigned len);
87}; 87} __no_randomize_layout;
88 88
89 89
90struct pv_lazy_ops { 90struct pv_lazy_ops {
@@ -92,12 +92,12 @@ struct pv_lazy_ops {
92 void (*enter)(void); 92 void (*enter)(void);
93 void (*leave)(void); 93 void (*leave)(void);
94 void (*flush)(void); 94 void (*flush)(void);
95}; 95} __no_randomize_layout;
96 96
97struct pv_time_ops { 97struct pv_time_ops {
98 unsigned long long (*sched_clock)(void); 98 unsigned long long (*sched_clock)(void);
99 unsigned long long (*steal_clock)(int cpu); 99 unsigned long long (*steal_clock)(int cpu);
100}; 100} __no_randomize_layout;
101 101
102struct pv_cpu_ops { 102struct pv_cpu_ops {
103 /* hooks for various privileged instructions */ 103 /* hooks for various privileged instructions */
@@ -176,7 +176,7 @@ struct pv_cpu_ops {
176 176
177 void (*start_context_switch)(struct task_struct *prev); 177 void (*start_context_switch)(struct task_struct *prev);
178 void (*end_context_switch)(struct task_struct *next); 178 void (*end_context_switch)(struct task_struct *next);
179}; 179} __no_randomize_layout;
180 180
181struct pv_irq_ops { 181struct pv_irq_ops {
182 /* 182 /*
@@ -199,7 +199,7 @@ struct pv_irq_ops {
199#ifdef CONFIG_X86_64 199#ifdef CONFIG_X86_64
200 void (*adjust_exception_frame)(void); 200 void (*adjust_exception_frame)(void);
201#endif 201#endif
202}; 202} __no_randomize_layout;
203 203
204struct pv_mmu_ops { 204struct pv_mmu_ops {
205 unsigned long (*read_cr2)(void); 205 unsigned long (*read_cr2)(void);
@@ -305,7 +305,7 @@ struct pv_mmu_ops {
305 an mfn. We can tell which is which from the index. */ 305 an mfn. We can tell which is which from the index. */
306 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, 306 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
307 phys_addr_t phys, pgprot_t flags); 307 phys_addr_t phys, pgprot_t flags);
308}; 308} __no_randomize_layout;
309 309
310struct arch_spinlock; 310struct arch_spinlock;
311#ifdef CONFIG_SMP 311#ifdef CONFIG_SMP
@@ -322,7 +322,7 @@ struct pv_lock_ops {
322 void (*kick)(int cpu); 322 void (*kick)(int cpu);
323 323
324 struct paravirt_callee_save vcpu_is_preempted; 324 struct paravirt_callee_save vcpu_is_preempted;
325}; 325} __no_randomize_layout;
326 326
327/* This contains all the paravirt structures: we get a convenient 327/* This contains all the paravirt structures: we get a convenient
328 * number for each function using the offset which we use to indicate 328 * number for each function using the offset which we use to indicate
@@ -334,7 +334,7 @@ struct paravirt_patch_template {
334 struct pv_irq_ops pv_irq_ops; 334 struct pv_irq_ops pv_irq_ops;
335 struct pv_mmu_ops pv_mmu_ops; 335 struct pv_mmu_ops pv_mmu_ops;
336 struct pv_lock_ops pv_lock_ops; 336 struct pv_lock_ops pv_lock_ops;
337}; 337} __no_randomize_layout;
338 338
339extern struct pv_info pv_info; 339extern struct pv_info pv_info;
340extern struct pv_init_ops pv_init_ops; 340extern struct pv_init_ops pv_init_ops;
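
The __no_randomize_layout annotations tie into structure layout randomization: with the RANDSTRUCT GCC plugin, structs consisting entirely of function pointers are shuffled by default, and the paravirt ops tables must opt out because their member offsets are baked into generated and patched code. A rough sketch of the convention, with the macro guard recalled from the plugin-era compiler headers rather than from this diff:

#ifdef RANDSTRUCT_PLUGIN
#define __randomize_layout	__attribute__((randomize_layout))
#define __no_randomize_layout	__attribute__((no_randomize_layout))
#else
#define __randomize_layout
#define __no_randomize_layout
#endif

struct fixed_ops {
	void (*enter)(void);
	void (*leave)(void);
} __no_randomize_layout;	/* offsets must stay stable for patching */
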
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6a79547e8ee0..028245e1c42b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -129,7 +129,7 @@ struct cpuinfo_x86 {
129 /* Index into per_cpu list: */ 129 /* Index into per_cpu list: */
130 u16 cpu_index; 130 u16 cpu_index;
131 u32 microcode; 131 u32 microcode;
132}; 132} __randomize_layout;
133 133
134struct cpuid_regs { 134struct cpuid_regs {
135 u32 eax, ebx, ecx, edx; 135 u32 eax, ebx, ecx, edx;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6bb680671088..7491e73d9253 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -347,6 +347,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
347 struct mpc_intsrc mp_irq; 347 struct mpc_intsrc mp_irq;
348 348
349 /* 349 /*
350 * Check bus_irq boundary.
351 */
352 if (bus_irq >= NR_IRQS_LEGACY) {
353 pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
354 return;
355 }
356
357 /*
350 * Convert 'gsi' to 'ioapic.pin'. 358 * Convert 'gsi' to 'ioapic.pin'.
351 */ 359 */
352 ioapic = mp_find_ioapic(gsi); 360 ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b4f5f73febdb..237e9c2341c7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2093,7 +2093,7 @@ static inline void __init check_timer(void)
2093 int idx; 2093 int idx;
2094 idx = find_irq_entry(apic1, pin1, mp_INT); 2094 idx = find_irq_entry(apic1, pin1, mp_INT);
2095 if (idx != -1 && irq_trigger(idx)) 2095 if (idx != -1 && irq_trigger(idx))
2096 unmask_ioapic_irq(irq_get_chip_data(0)); 2096 unmask_ioapic_irq(irq_get_irq_data(0));
2097 } 2097 }
2098 irq_domain_deactivate_irq(irq_data); 2098 irq_domain_deactivate_irq(irq_data);
2099 irq_domain_activate_irq(irq_data); 2099 irq_domain_activate_irq(irq_data);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bb5abe8f5fd4..3b9e220621f8 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -134,6 +134,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
134 134
135 n = K6_BUG_LOOP; 135 n = K6_BUG_LOOP;
136 f_vide = vide; 136 f_vide = vide;
137 OPTIMIZER_HIDE_VAR(f_vide);
137 d = rdtsc(); 138 d = rdtsc();
138 while (n--) 139 while (n--)
139 f_vide(); 140 f_vide();
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 3fe45f84ced4..cbf1f6ba39a8 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -235,8 +235,7 @@ static void __init dtb_add_ioapic(struct device_node *dn)
235 235
236 ret = of_address_to_resource(dn, 0, &r); 236 ret = of_address_to_resource(dn, 0, &r);
237 if (ret) { 237 if (ret) {
238 printk(KERN_ERR "Can't obtain address from node %s.\n", 238 printk(KERN_ERR "Can't obtain address from device node %pOF.\n", dn);
239 dn->full_name);
240 return; 239 return;
241 } 240 }
242 mp_register_ioapic(++ioapic_id, r.start, gsi_top, &cfg); 241 mp_register_ioapic(++ioapic_id, r.start, gsi_top, &cfg);
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 760433b2574a..2688c7dc5323 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,7 +22,7 @@ config KVM
22 depends on HAVE_KVM 22 depends on HAVE_KVM
23 depends on HIGH_RES_TIMERS 23 depends on HIGH_RES_TIMERS
24 # for TASKSTATS/TASK_DELAY_ACCT: 24 # for TASKSTATS/TASK_DELAY_ACCT:
25 depends on NET 25 depends on NET && MULTIUSER
26 select PREEMPT_NOTIFIERS 26 select PREEMPT_NOTIFIERS
27 select MMU_NOTIFIER 27 select MMU_NOTIFIER
28 select ANON_INODES 28 select ANON_INODES
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 2695a34fa1c5..337b6d2730fa 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -649,9 +649,10 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
649 } 649 }
650 650
651 if ((stimer->config & HV_STIMER_ENABLE) && 651 if ((stimer->config & HV_STIMER_ENABLE) &&
652 stimer->count) 652 stimer->count) {
653 stimer_start(stimer); 653 if (!stimer->msg_pending)
654 else 654 stimer_start(stimer);
655 } else
655 stimer_cleanup(stimer); 656 stimer_cleanup(stimer);
656 } 657 }
657 } 658 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 84e62acf2dd8..29fd8af5c347 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -198,7 +198,8 @@ struct loaded_vmcs {
198 struct vmcs *vmcs; 198 struct vmcs *vmcs;
199 struct vmcs *shadow_vmcs; 199 struct vmcs *shadow_vmcs;
200 int cpu; 200 int cpu;
201 int launched; 201 bool launched;
202 bool nmi_known_unmasked;
202 struct list_head loaded_vmcss_on_cpu_link; 203 struct list_head loaded_vmcss_on_cpu_link;
203}; 204};
204 205
@@ -2326,6 +2327,11 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2326 __vmx_load_host_state(to_vmx(vcpu)); 2327 __vmx_load_host_state(to_vmx(vcpu));
2327} 2328}
2328 2329
2330static bool emulation_required(struct kvm_vcpu *vcpu)
2331{
2332 return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2333}
2334
2329static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); 2335static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2330 2336
2331/* 2337/*
@@ -2363,6 +2369,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2363 2369
2364static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 2370static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2365{ 2371{
2372 unsigned long old_rflags = vmx_get_rflags(vcpu);
2373
2366 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); 2374 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2367 to_vmx(vcpu)->rflags = rflags; 2375 to_vmx(vcpu)->rflags = rflags;
2368 if (to_vmx(vcpu)->rmode.vm86_active) { 2376 if (to_vmx(vcpu)->rmode.vm86_active) {
@@ -2370,6 +2378,9 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2370 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 2378 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2371 } 2379 }
2372 vmcs_writel(GUEST_RFLAGS, rflags); 2380 vmcs_writel(GUEST_RFLAGS, rflags);
2381
2382 if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2383 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2373} 2384}
2374 2385
2375static u32 vmx_get_pkru(struct kvm_vcpu *vcpu) 2386static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
@@ -3857,11 +3868,6 @@ static __init int alloc_kvm_area(void)
3857 return 0; 3868 return 0;
3858} 3869}
3859 3870
3860static bool emulation_required(struct kvm_vcpu *vcpu)
3861{
3862 return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3863}
3864
3865static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, 3871static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3866 struct kvm_segment *save) 3872 struct kvm_segment *save)
3867{ 3873{
@@ -5510,10 +5516,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
5510{ 5516{
5511 struct vcpu_vmx *vmx = to_vmx(vcpu); 5517 struct vcpu_vmx *vmx = to_vmx(vcpu);
5512 5518
5513 if (!is_guest_mode(vcpu)) { 5519 ++vcpu->stat.nmi_injections;
5514 ++vcpu->stat.nmi_injections; 5520 vmx->loaded_vmcs->nmi_known_unmasked = false;
5515 vmx->nmi_known_unmasked = false;
5516 }
5517 5521
5518 if (vmx->rmode.vm86_active) { 5522 if (vmx->rmode.vm86_active) {
5519 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) 5523 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
@@ -5527,16 +5531,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
5527 5531
5528static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) 5532static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
5529{ 5533{
5530 if (to_vmx(vcpu)->nmi_known_unmasked) 5534 struct vcpu_vmx *vmx = to_vmx(vcpu);
5535 bool masked;
5536
5537 if (vmx->loaded_vmcs->nmi_known_unmasked)
5531 return false; 5538 return false;
5532 return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; 5539 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5540 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5541 return masked;
5533} 5542}
5534 5543
5535static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 5544static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5536{ 5545{
5537 struct vcpu_vmx *vmx = to_vmx(vcpu); 5546 struct vcpu_vmx *vmx = to_vmx(vcpu);
5538 5547
5539 vmx->nmi_known_unmasked = !masked; 5548 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5540 if (masked) 5549 if (masked)
5541 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 5550 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5542 GUEST_INTR_STATE_NMI); 5551 GUEST_INTR_STATE_NMI);
@@ -8736,7 +8745,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
8736 8745
8737 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; 8746 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
8738 8747
8739 if (vmx->nmi_known_unmasked) 8748 if (vmx->loaded_vmcs->nmi_known_unmasked)
8740 return; 8749 return;
8741 /* 8750 /*
8742 * Can't use vmx->exit_intr_info since we're not sure what 8751 * Can't use vmx->exit_intr_info since we're not sure what
@@ -8760,7 +8769,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
8760 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 8769 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
8761 GUEST_INTR_STATE_NMI); 8770 GUEST_INTR_STATE_NMI);
8762 else 8771 else
8763 vmx->nmi_known_unmasked = 8772 vmx->loaded_vmcs->nmi_known_unmasked =
8764 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) 8773 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
8765 & GUEST_INTR_STATE_NMI); 8774 & GUEST_INTR_STATE_NMI);
8766} 8775}
@@ -10488,6 +10497,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10488{ 10497{
10489 struct vmcs12 *vmcs12; 10498 struct vmcs12 *vmcs12;
10490 struct vcpu_vmx *vmx = to_vmx(vcpu); 10499 struct vcpu_vmx *vmx = to_vmx(vcpu);
10500 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
10491 u32 exit_qual; 10501 u32 exit_qual;
10492 int ret; 10502 int ret;
10493 10503
@@ -10512,6 +10522,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10512 * for misconfigurations which will anyway be caught by the processor 10522 * for misconfigurations which will anyway be caught by the processor
10513 * when using the merged vmcs02. 10523 * when using the merged vmcs02.
10514 */ 10524 */
10525 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
10526 nested_vmx_failValid(vcpu,
10527 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
10528 goto out;
10529 }
10530
10515 if (vmcs12->launch_state == launch) { 10531 if (vmcs12->launch_state == launch) {
10516 nested_vmx_failValid(vcpu, 10532 nested_vmx_failValid(vcpu,
10517 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS 10533 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5b8f07889f6a..82a63c59f77b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -597,8 +597,8 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
597 (unsigned long *)&vcpu->arch.regs_avail)) 597 (unsigned long *)&vcpu->arch.regs_avail))
598 return true; 598 return true;
599 599
600 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; 600 gfn = (kvm_read_cr3(vcpu) & ~31ul) >> PAGE_SHIFT;
601 offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); 601 offset = (kvm_read_cr3(vcpu) & ~31ul) & (PAGE_SIZE - 1);
602 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), 602 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
603 PFERR_USER_MASK | PFERR_WRITE_MASK); 603 PFERR_USER_MASK | PFERR_WRITE_MASK);
604 if (r < 0) 604 if (r < 0)
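
The x86.c hunk is a 32-vs-64-bit masking fix: kvm_read_cr3() returns an unsigned long, and ~31u is a 32-bit constant that zero-extends to 0x00000000ffffffe0, silently clearing the upper half of the address, while ~31ul keeps all 64 mask bits. A standalone demonstration, assuming an LP64 target:

#include <stdio.h>

int main(void)
{
	unsigned long cr3 = 0x000000012345f000ul;	/* PDPT base above 4GiB */

	printf("cr3 & ~31u  = %#lx\n", cr3 & ~31u);	/* 0x2345f000: high bits lost */
	printf("cr3 & ~31ul = %#lx\n", cr3 & ~31ul);	/* 0x12345f000: correct */
	return 0;
}
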
diff --git a/arch/x86/math-emu/Makefile b/arch/x86/math-emu/Makefile
index 9b0c63b60302..1b2dac174321 100644
--- a/arch/x86/math-emu/Makefile
+++ b/arch/x86/math-emu/Makefile
@@ -5,8 +5,8 @@
5#DEBUG = -DDEBUGGING 5#DEBUG = -DDEBUGGING
6DEBUG = 6DEBUG =
7PARANOID = -DPARANOID 7PARANOID = -DPARANOID
8EXTRA_CFLAGS := $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION) 8ccflags-y += $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
9EXTRA_AFLAGS := $(PARANOID) 9asflags-y += $(PARANOID)
10 10
11# From 'C' language sources: 11# From 'C' language sources:
12C_OBJS =fpu_entry.o errors.o \ 12C_OBJS =fpu_entry.o errors.o \
diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
index afbc4d805d66..c9c320dccca1 100644
--- a/arch/x86/math-emu/fpu_emu.h
+++ b/arch/x86/math-emu/fpu_emu.h
@@ -157,7 +157,7 @@ extern u_char const data_sizes_16[32];
157 157
158#define signbyte(a) (((u_char *)(a))[9]) 158#define signbyte(a) (((u_char *)(a))[9])
159#define getsign(a) (signbyte(a) & 0x80) 159#define getsign(a) (signbyte(a) & 0x80)
160#define setsign(a,b) { if (b) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; } 160#define setsign(a,b) { if ((b) != 0) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; }
161#define copysign(a,b) { if (getsign(a)) signbyte(b) |= 0x80; \ 161#define copysign(a,b) { if (getsign(a)) signbyte(b) |= 0x80; \
162 else signbyte(b) &= 0x7f; } 162 else signbyte(b) &= 0x7f; }
163#define changesign(a) { signbyte(a) ^= 0x80; } 163#define changesign(a) { signbyte(a) ^= 0x80; }
diff --git a/arch/x86/math-emu/reg_compare.c b/arch/x86/math-emu/reg_compare.c
index b77360fdbf4a..19b33b50adfa 100644
--- a/arch/x86/math-emu/reg_compare.c
+++ b/arch/x86/math-emu/reg_compare.c
@@ -168,7 +168,7 @@ static int compare(FPU_REG const *b, int tagb)
168/* This function requires that st(0) is not empty */ 168/* This function requires that st(0) is not empty */
169int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag) 169int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
170{ 170{
171 int f = 0, c; 171 int f, c;
172 172
173 c = compare(loaded_data, loaded_tag); 173 c = compare(loaded_data, loaded_tag);
174 174
@@ -189,12 +189,12 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
189 case COMP_No_Comp: 189 case COMP_No_Comp:
190 f = SW_C3 | SW_C2 | SW_C0; 190 f = SW_C3 | SW_C2 | SW_C0;
191 break; 191 break;
192#ifdef PARANOID
193 default: 192 default:
193#ifdef PARANOID
194 EXCEPTION(EX_INTERNAL | 0x121); 194 EXCEPTION(EX_INTERNAL | 0x121);
195#endif /* PARANOID */
195 f = SW_C3 | SW_C2 | SW_C0; 196 f = SW_C3 | SW_C2 | SW_C0;
196 break; 197 break;
197#endif /* PARANOID */
198 } 198 }
199 setcc(f); 199 setcc(f);
200 if (c & COMP_Denormal) { 200 if (c & COMP_Denormal) {
@@ -205,7 +205,7 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
205 205
206static int compare_st_st(int nr) 206static int compare_st_st(int nr)
207{ 207{
208 int f = 0, c; 208 int f, c;
209 FPU_REG *st_ptr; 209 FPU_REG *st_ptr;
210 210
211 if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) { 211 if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
@@ -235,12 +235,12 @@ static int compare_st_st(int nr)
235 case COMP_No_Comp: 235 case COMP_No_Comp:
236 f = SW_C3 | SW_C2 | SW_C0; 236 f = SW_C3 | SW_C2 | SW_C0;
237 break; 237 break;
238#ifdef PARANOID
239 default: 238 default:
239#ifdef PARANOID
240 EXCEPTION(EX_INTERNAL | 0x122); 240 EXCEPTION(EX_INTERNAL | 0x122);
241#endif /* PARANOID */
241 f = SW_C3 | SW_C2 | SW_C0; 242 f = SW_C3 | SW_C2 | SW_C0;
242 break; 243 break;
243#endif /* PARANOID */
244 } 244 }
245 setcc(f); 245 setcc(f);
246 if (c & COMP_Denormal) { 246 if (c & COMP_Denormal) {
@@ -283,12 +283,12 @@ static int compare_i_st_st(int nr)
283 case COMP_No_Comp: 283 case COMP_No_Comp:
284 f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF; 284 f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF;
285 break; 285 break;
286#ifdef PARANOID
287 default: 286 default:
287#ifdef PARANOID
288 EXCEPTION(EX_INTERNAL | 0x122); 288 EXCEPTION(EX_INTERNAL | 0x122);
289#endif /* PARANOID */
289 f = 0; 290 f = 0;
290 break; 291 break;
291#endif /* PARANOID */
292 } 292 }
293 FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f; 293 FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f;
294 if (c & COMP_Denormal) { 294 if (c & COMP_Denormal) {
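
All three reg_compare.c hunks fix the same latent bug: with the default: label inside #ifdef PARANOID, a non-PARANOID build had no default case at all, so f could be used unassigned (previously papered over by the f = 0 initializer the diff also removes). Moving only the diagnostic inside the guard keeps every path assigning f. A skeleton of the corrected shape, with a hypothetical error hook:

extern void report_internal_error(void);	/* hypothetical diagnostic hook */

static int status_flags(int c)
{
	int f;

	switch (c) {
	case 0:
		f = 0x01;
		break;
	default:
#ifdef PARANOID
		report_internal_error();	/* only the diagnostic is conditional */
#endif
		f = 0x45;	/* unordered-style fallback, always assigned */
		break;
	}
	return f;
}
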
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
index 6e075afa7877..58337b2bc682 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -38,8 +38,10 @@ static void __init *max7315_platform_data(void *info)
38 */ 38 */
39 strcpy(i2c_info->type, "max7315"); 39 strcpy(i2c_info->type, "max7315");
40 if (nr++) { 40 if (nr++) {
41 sprintf(base_pin_name, "max7315_%d_base", nr); 41 snprintf(base_pin_name, sizeof(base_pin_name),
42 sprintf(intr_pin_name, "max7315_%d_int", nr); 42 "max7315_%d_base", nr);
43 snprintf(intr_pin_name, sizeof(intr_pin_name),
44 "max7315_%d_int", nr);
43 } else { 45 } else {
44 strcpy(base_pin_name, "max7315_base"); 46 strcpy(base_pin_name, "max7315_base");
45 strcpy(intr_pin_name, "max7315_int"); 47 strcpy(intr_pin_name, "max7315_int");
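
The max7315 hunk swaps unbounded sprintf for snprintf: once nr needs more digits, "max7315_%d_base" can exceed the destination array, and snprintf truncates (always NUL-terminating for size > 0) instead of overrunning. A minimal illustration with an invented buffer size:

#include <stdio.h>

int main(void)
{
	char base_pin_name[16];		/* size assumed for illustration */
	int nr = 12;

	snprintf(base_pin_name, sizeof(base_pin_name),
		 "max7315_%d_base", nr);	/* bounded, never overruns */
	puts(base_pin_name);
	return 0;
}
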
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index d4a61ddf9e62..3e4bdb442fbc 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -40,7 +40,6 @@ static int timeout_base_ns[] = {
40static int timeout_us; 40static int timeout_us;
41static bool nobau = true; 41static bool nobau = true;
42static int nobau_perm; 42static int nobau_perm;
43static cycles_t congested_cycles;
44 43
45/* tunables: */ 44/* tunables: */
46static int max_concurr = MAX_BAU_CONCURRENT; 45static int max_concurr = MAX_BAU_CONCURRENT;
@@ -829,10 +828,10 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
829 if ((completion_status == FLUSH_COMPLETE) && (try == 1)) { 828 if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
830 bcp->period_requests++; 829 bcp->period_requests++;
831 bcp->period_time += elapsed; 830 bcp->period_time += elapsed;
832 if ((elapsed > congested_cycles) && 831 if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
833 (bcp->period_requests > bcp->cong_reps) && 832 (bcp->period_requests > bcp->cong_reps) &&
834 ((bcp->period_time / bcp->period_requests) > 833 ((bcp->period_time / bcp->period_requests) >
835 congested_cycles)) { 834 usec_2_cycles(bcp->cong_response_us))) {
836 stat->s_congested++; 835 stat->s_congested++;
837 disable_for_period(bcp, stat); 836 disable_for_period(bcp, stat);
838 } 837 }
@@ -2222,14 +2221,17 @@ static int __init uv_bau_init(void)
2222 else if (is_uv1_hub()) 2221 else if (is_uv1_hub())
2223 ops = uv1_bau_ops; 2222 ops = uv1_bau_ops;
2224 2223
2224 nuvhubs = uv_num_possible_blades();
2225 if (nuvhubs < 2) {
2226 pr_crit("UV: BAU disabled - insufficient hub count\n");
2227 goto err_bau_disable;
2228 }
2229
2225 for_each_possible_cpu(cur_cpu) { 2230 for_each_possible_cpu(cur_cpu) {
2226 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu); 2231 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
2227 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu)); 2232 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
2228 } 2233 }
2229 2234
2230 nuvhubs = uv_num_possible_blades();
2231 congested_cycles = usec_2_cycles(congested_respns_us);
2232
2233 uv_base_pnode = 0x7fffffff; 2235 uv_base_pnode = 0x7fffffff;
2234 for (uvhub = 0; uvhub < nuvhubs; uvhub++) { 2236 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
2235 cpus = uv_blade_nr_possible_cpus(uvhub); 2237 cpus = uv_blade_nr_possible_cpus(uvhub);
@@ -2242,9 +2244,8 @@ static int __init uv_bau_init(void)
2242 enable_timeouts(); 2244 enable_timeouts();
2243 2245
2244 if (init_per_cpu(nuvhubs, uv_base_pnode)) { 2246 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
2245 set_bau_off(); 2247 pr_crit("UV: BAU disabled - per CPU init failed\n");
2246 nobau_perm = 1; 2248 goto err_bau_disable;
2247 return 0;
2248 } 2249 }
2249 2250
2250 vector = UV_BAU_MESSAGE; 2251 vector = UV_BAU_MESSAGE;
@@ -2270,6 +2271,16 @@ static int __init uv_bau_init(void)
2270 } 2271 }
2271 2272
2272 return 0; 2273 return 0;
2274
2275err_bau_disable:
2276
2277 for_each_possible_cpu(cur_cpu)
2278 free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
2279
2280 set_bau_off();
2281 nobau_perm = 1;
2282
2283 return -EINVAL;
2273} 2284}
2274core_initcall(uv_bau_init); 2285core_initcall(uv_bau_init);
2275fs_initcall(uv_ptc_init); 2286fs_initcall(uv_ptc_init);
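
The uv_bau_init() rework converges both failure cases (too few hubs, per-CPU init failure) onto a single err_bau_disable label that frees the per-CPU cpumasks before permanently disabling BAU, instead of returning success with half-built state. The general shape of that unwind, with all names invented:

#include <stdlib.h>

#define NCPUS 4

static void *mask[NCPUS];

static int feature_init(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		mask[cpu] = calloc(1, 64);
		if (!mask[cpu])
			goto err_disable;
	}

	/* ... later setup steps that may also fail and take the same path ... */

	return 0;

err_disable:
	for (cpu = 0; cpu < NCPUS; cpu++) {
		free(mask[cpu]);	/* free(NULL) is a no-op */
		mask[cpu] = NULL;
	}
	return -1;
}
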