author		Linus Torvalds <torvalds@linux-foundation.org>	2016-08-05 09:00:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-05 09:00:54 -0400
commit		2cfd716d2777489db54a237f466a1c42700879c6 (patch)
tree		a6d3a676c38f2e2dc7bd13d012ad1d986a14a5bb
parent		755b20f49220683bc2469f4d956dee39101440aa (diff)
parent		eea8148c69f3aecbf297b12943a591467a1fb432 (diff)
Merge tag 'powerpc-4.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull more powerpc updates from Michael Ellerman:
 "These were delayed for various reasons, so I let them sit in next a
  bit longer, rather than including them in my first pull request.

  Fixes:
   - Fix early access to cpu_spec relocation from Benjamin Herrenschmidt
   - Fix incorrect event codes in power9-event-list from Madhavan Srinivasan
   - Move register_process_table() out of ppc_md from Michael Ellerman

  Use jump_label for [cpu|mmu]_has_feature():
   - Add mmu_early_init_devtree() from Michael Ellerman
   - Move disable_radix handling into mmu_early_init_devtree() from Michael Ellerman
   - Do hash device tree scanning earlier from Michael Ellerman
   - Do radix device tree scanning earlier from Michael Ellerman
   - Do feature patching before MMU init from Michael Ellerman
   - Check features don't change after patching from Michael Ellerman
   - Make MMU_FTR_RADIX a MMU family feature from Aneesh Kumar K.V
   - Convert mmu_has_feature() to returning bool from Michael Ellerman
   - Convert cpu_has_feature() to returning bool from Michael Ellerman
   - Define radix_enabled() in one place & use static inline from Michael Ellerman
   - Add early_[cpu|mmu]_has_feature() from Michael Ellerman
   - Convert early cpu/mmu feature check to use the new helpers from Aneesh Kumar K.V
   - jump_label: Make it possible for arches to invoke jump_label_init() earlier from Kevin Hao
   - Call jump_label_init() in apply_feature_fixups() from Aneesh Kumar K.V
   - Remove mfvtb() from Kevin Hao
   - Move cpu_has_feature() to a separate file from Kevin Hao
   - Add kconfig option to use jump labels for cpu/mmu_has_feature() from Michael Ellerman
   - Add option to use jump label for cpu_has_feature() from Kevin Hao
   - Add option to use jump label for mmu_has_feature() from Kevin Hao
   - Catch usage of cpu/mmu_has_feature() before jump label init from Aneesh Kumar K.V
   - Annotate jump label assembly from Michael Ellerman

  TLB flush enhancements from Aneesh Kumar K.V:
   - radix: Implement tlb mmu gather flush efficiently
   - Add helper for finding SLBE LLP encoding
   - Use hugetlb flush functions
   - Drop multiple definition of mm_is_core_local
   - radix: Add tlb flush of THP ptes
   - radix: Rename function and drop unused arg
   - radix/hugetlb: Add helper for finding page size
   - hugetlb: Add flush_hugetlb_tlb_range
   - remove flush_tlb_page_nohash

  Add new ptrace regsets from Anshuman Khandual and Simon Guo:
   - elf: Add powerpc specific core note sections
   - Add the function flush_tmregs_to_thread
   - Enable in transaction NT_PRFPREG ptrace requests
   - Enable in transaction NT_PPC_VMX ptrace requests
   - Enable in transaction NT_PPC_VSX ptrace requests
   - Adapt gpr32_get, gpr32_set functions for transaction
   - Enable support for NT_PPC_CGPR
   - Enable support for NT_PPC_CFPR
   - Enable support for NT_PPC_CVMX
   - Enable support for NT_PPC_CVSX
   - Enable support for TM SPR state
   - Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR
   - Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
   - Enable support for EBB registers
   - Enable support for Performance Monitor registers"

* tag 'powerpc-4.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (48 commits)
  powerpc/mm: Move register_process_table() out of ppc_md
  powerpc/perf: Fix incorrect event codes in power9-event-list
  powerpc/32: Fix early access to cpu_spec relocation
  powerpc/ptrace: Enable support for Performance Monitor registers
  powerpc/ptrace: Enable support for EBB registers
  powerpc/ptrace: Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
  powerpc/ptrace: Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR
  powerpc/ptrace: Enable support for TM SPR state
  powerpc/ptrace: Enable support for NT_PPC_CVSX
  powerpc/ptrace: Enable support for NT_PPC_CVMX
  powerpc/ptrace: Enable support for NT_PPC_CFPR
  powerpc/ptrace: Enable support for NT_PPC_CGPR
  powerpc/ptrace: Adapt gpr32_get, gpr32_set functions for transaction
  powerpc/ptrace: Enable in transaction NT_PPC_VSX ptrace requests
  powerpc/ptrace: Enable in transaction NT_PPC_VMX ptrace requests
  powerpc/ptrace: Enable in transaction NT_PRFPREG ptrace requests
  powerpc/process: Add the function flush_tmregs_to_thread
  elf: Add powerpc specific core note sections
  powerpc/mm: remove flush_tlb_page_nohash
  powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
  ...
-rw-r--r--	arch/powerpc/Kconfig.debug	19
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hugetlb-radix.h	15
-rw-r--r--	arch/powerpc/include/asm/book3s/64/mmu-hash.h	10
-rw-r--r--	arch/powerpc/include/asm/book3s/64/mmu.h	16
-rw-r--r--	arch/powerpc/include/asm/book3s/64/tlbflush-hash.h	5
-rw-r--r--	arch/powerpc/include/asm/book3s/64/tlbflush-radix.h	16
-rw-r--r--	arch/powerpc/include/asm/book3s/64/tlbflush.h	27
-rw-r--r--	arch/powerpc/include/asm/cacheflush.h	1
-rw-r--r--	arch/powerpc/include/asm/cpu_has_feature.h	53
-rw-r--r--	arch/powerpc/include/asm/cputable.h	15
-rw-r--r--	arch/powerpc/include/asm/cputime.h	1
-rw-r--r--	arch/powerpc/include/asm/dbell.h	1
-rw-r--r--	arch/powerpc/include/asm/dcr-native.h	1
-rw-r--r--	arch/powerpc/include/asm/hugetlb.h	2
-rw-r--r--	arch/powerpc/include/asm/jump_label.h	4
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s_64.h	3
-rw-r--r--	arch/powerpc/include/asm/machdep.h	2
-rw-r--r--	arch/powerpc/include/asm/mman.h	1
-rw-r--r--	arch/powerpc/include/asm/mmu.h	98
-rw-r--r--	arch/powerpc/include/asm/reg.h	9
-rw-r--r--	arch/powerpc/include/asm/switch_to.h	8
-rw-r--r--	arch/powerpc/include/asm/time.h	3
-rw-r--r--	arch/powerpc/include/asm/tlb.h	13
-rw-r--r--	arch/powerpc/include/asm/tlbflush.h	1
-rw-r--r--	arch/powerpc/include/asm/xor.h	1
-rw-r--r--	arch/powerpc/include/uapi/asm/elf.h	5
-rw-r--r--	arch/powerpc/kernel/align.c	1
-rw-r--r--	arch/powerpc/kernel/cputable.c	37
-rw-r--r--	arch/powerpc/kernel/entry_64.S	2
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	8
-rw-r--r--	arch/powerpc/kernel/idle_book3s.S	2
-rw-r--r--	arch/powerpc/kernel/irq.c	1
-rw-r--r--	arch/powerpc/kernel/paca.c	2
-rw-r--r--	arch/powerpc/kernel/process.c	21
-rw-r--r--	arch/powerpc/kernel/prom.c	17
-rw-r--r--	arch/powerpc/kernel/ptrace.c	1532
-rw-r--r--	arch/powerpc/kernel/setup-common.c	1
-rw-r--r--	arch/powerpc/kernel/setup_32.c	1
-rw-r--r--	arch/powerpc/kernel/setup_64.c	10
-rw-r--r--	arch/powerpc/kernel/smp.c	1
-rw-r--r--	arch/powerpc/lib/feature-fixups.c	39
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	8
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	53
-rw-r--r--	arch/powerpc/mm/hugetlbpage-radix.c	39
-rw-r--r--	arch/powerpc/mm/init_64.c	22
-rw-r--r--	arch/powerpc/mm/pgtable-book3s64.c	7
-rw-r--r--	arch/powerpc/mm/pgtable-radix.c	7
-rw-r--r--	arch/powerpc/mm/pgtable.c	2
-rw-r--r--	arch/powerpc/mm/tlb-radix.c	90
-rw-r--r--	arch/powerpc/mm/tlb_hash32.c	11
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	6
-rw-r--r--	arch/powerpc/perf/power9-events-list.h	6
-rw-r--r--	arch/powerpc/platforms/cell/pervasive.c	1
-rw-r--r--	arch/powerpc/xmon/ppc-dis.c	1
-rw-r--r--	include/uapi/linux/elf.h	13
-rw-r--r--	kernel/jump_label.c	3
-rw-r--r--	mm/hugetlb.c	10
57 files changed, 2074 insertions, 210 deletions
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 171047822b56..63292f64b25a 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -60,6 +60,25 @@ config CODE_PATCHING_SELFTEST
 	depends on DEBUG_KERNEL
 	default n
 
+config JUMP_LABEL_FEATURE_CHECKS
+	bool "Enable use of jump label for cpu/mmu_has_feature()"
+	depends on JUMP_LABEL
+	default y
+	help
+	  Selecting this options enables use of jump labels for some internal
+	  feature checks. This should generate more optimal code for those
+	  checks.
+
+config JUMP_LABEL_FEATURE_CHECK_DEBUG
+	bool "Do extra check on feature fixup calls"
+	depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS
+	default n
+	help
+	  This tries to catch incorrect usage of cpu_has_feature() and
+	  mmu_has_feature() in the code.
+
+	  If you don't know what this means, say N.
+
 config FTR_FIXUP_SELFTEST
 	bool "Run self-tests of the feature-fixup code"
 	depends on DEBUG_KERNEL
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
index 60f47649306f..c45189aa7476 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
@@ -11,4 +11,19 @@ extern unsigned long
 radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 				unsigned long len, unsigned long pgoff,
 				unsigned long flags);
+
+static inline int hstate_get_psize(struct hstate *hstate)
+{
+	unsigned long shift;
+
+	shift = huge_page_shift(hstate);
+	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+		return MMU_PAGE_2M;
+	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
+		return MMU_PAGE_1G;
+	else {
+		WARN(1, "Wrong huge page shift\n");
+		return mmu_virtual_psize;
+	}
+}
 #endif
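
hstate_get_psize() recovers the MMU page-size index from a hugepage's shift by matching it against the mmu_psize_defs[] table. A stand-alone sketch of the same lookup, with illustrative shift values standing in for the kernel's table:

#include <stdio.h>

/* Stand-ins for the kernel's page-size indices and mmu_psize_defs[] shifts;
 * the values here are illustrative, not the kernel's actual table. */
enum { MMU_PAGE_4K, MMU_PAGE_2M, MMU_PAGE_1G, MMU_PAGE_COUNT };

static const unsigned int psize_shift[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = 12,
	[MMU_PAGE_2M] = 21,
	[MMU_PAGE_1G] = 30,
};

/* Same shape as hstate_get_psize(): hugepage shift -> page-size index. */
static int shift_to_psize(unsigned long shift)
{
	if (shift == psize_shift[MMU_PAGE_2M])
		return MMU_PAGE_2M;
	if (shift == psize_shift[MMU_PAGE_1G])
		return MMU_PAGE_1G;
	fprintf(stderr, "Wrong huge page shift\n");
	return MMU_PAGE_4K;	/* models the mmu_virtual_psize fallback */
}

int main(void)
{
	printf("shift 21 -> %d (MMU_PAGE_2M)\n", shift_to_psize(21));
	printf("shift 30 -> %d (MMU_PAGE_1G)\n", shift_to_psize(30));
	return 0;
}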
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 5eaf86ac143d..287a656ceb57 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -24,6 +24,7 @@
 #include <asm/book3s/64/pgtable.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * SLB
@@ -190,6 +191,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 	BUG();
 }
 
+static inline unsigned long get_sllp_encoding(int psize)
+{
+	unsigned long sllp;
+
+	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
+		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
+	return sllp;
+}
+
 #endif /* __ASSEMBLY__ */
 
 /*
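
get_sllp_encoding() folds a segment's L bit and LP bits down into the 3-bit LLP value consumed elsewhere (e.g. by compute_tlbie_rb() in kvm_book3s_64.h below). Assuming SLB_VSID_L = 0x100 and SLB_VSID_LP = 0x030, as defined in this header at the time (quoted as assumptions for this sketch), the arithmetic can be checked in user space:

#include <stdio.h>

#define SLB_VSID_L	0x100UL	/* L: bit 8 (assumed value, see above) */
#define SLB_VSID_LP	0x030UL	/* LP: bits 4-5 (assumed value) */

/* Same arithmetic as get_sllp_encoding(): L lands in bit 2, LP in bits 1:0. */
static unsigned long sllp_to_llp(unsigned long sllp)
{
	return ((sllp & SLB_VSID_L) >> 6) | ((sllp & SLB_VSID_LP) >> 4);
}

int main(void)
{
	/* e.g. sllp = L | LP01 = 0x110 yields LLP = 0b101 = 0x5 */
	printf("LLP = %#lx\n", sllp_to_llp(0x110UL));
	return 0;
}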
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index d4eda6420523..8afb0e00f7d9 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -23,13 +23,6 @@ struct mmu_psize_def {
 };
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
-#ifdef CONFIG_PPC_RADIX_MMU
-#define radix_enabled() mmu_has_feature(MMU_FTR_RADIX)
-#else
-#define radix_enabled() (0)
-#endif
-
-
 #endif /* __ASSEMBLY__ */
 
 /* 64-bit classic hash table MMU */
@@ -107,6 +100,9 @@ extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
 
 /* MMU initialization */
+void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
+void radix__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
@@ -132,11 +128,15 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 					      phys_addr_t first_memblock_size)
 {
-	if (radix_enabled())
+	if (early_radix_enabled())
 		return radix__setup_initial_memory_limit(first_memblock_base,
 						   first_memblock_size);
 	return hash__setup_initial_memory_limit(first_memblock_base,
 					   first_memblock_size);
 }
+
+extern int (*register_process_table)(unsigned long base, unsigned long page_size,
+				     unsigned long tbl_size);
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index f12ddf5e8de5..2f6373144e2c 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -75,11 +75,6 @@ static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
 {
 }
 
-static inline void hash__flush_tlb_page_nohash(struct vm_area_struct *vma,
-					       unsigned long vmaddr)
-{
-}
-
 static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
 					 unsigned long start, unsigned long end)
 {
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 00703e7e4c94..65037762b120 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -10,26 +10,32 @@ static inline int mmu_get_ap(int psize)
 	return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start, unsigned long end);
+extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+					 unsigned long end, int psize);
+extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+				       unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				   unsigned long end);
 extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-					 unsigned long ap, int nid);
 extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+					      int psize);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-				   unsigned long ap, int nid);
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+					int psize);
 #else
 #define radix__flush_tlb_mm(mm)		radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)	radix__local_flush_tlb_page(vma,addr)
-#define radix___flush_tlb_page(mm,addr,p,i)	radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_page_psize(mm,addr,p)	radix__local_flush_tlb_page_psize(mm,addr,p)
 #define radix__flush_tlb_pwc(tlb, addr)	radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 96e5769b18b0..72b925f97bab 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -7,6 +7,25 @@
 #include <asm/book3s/64/tlbflush-hash.h>
 #include <asm/book3s/64/tlbflush-radix.h>
 
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
+				       unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_pmd_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
@@ -38,14 +57,6 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 	return hash__local_flush_tlb_page(vma, vmaddr);
 }
 
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-{
-	if (radix_enabled())
-		return radix__flush_tlb_page(vma, vmaddr);
-	return hash__flush_tlb_page_nohash(vma, vmaddr);
-}
-
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	if (radix_enabled())
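
Both new wrappers follow this header's established shape: one static inline that tests radix_enabled() and dispatches to the radix__ or hash__ backend. A stand-alone model of the idiom (the backend names and the boolean flag are placeholders for the kernel's jump-label-backed radix_enabled()):

#include <stdbool.h>
#include <stdio.h>

static bool radix_enabled_model = true;	/* models radix_enabled() */

/* Placeholder backends standing in for the radix__/hash__ range flushes. */
static void radix_flush_range(unsigned long start, unsigned long end)
{
	printf("radix flush %#lx-%#lx\n", start, end);
}

static void hash_flush_range(unsigned long start, unsigned long end)
{
	printf("hash flush %#lx-%#lx\n", start, end);
}

/* The dispatch idiom of flush_pmd_tlb_range()/flush_hugetlb_tlb_range(). */
static inline void flush_range(unsigned long start, unsigned long end)
{
	if (radix_enabled_model) {
		radix_flush_range(start, end);
		return;
	}
	hash_flush_range(start, end);
}

int main(void)
{
	flush_range(0x1000, 0x2000);	/* takes the radix path */
	radix_enabled_model = false;
	flush_range(0x1000, 0x2000);	/* takes the hash path */
	return 0;
}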
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 69fb16d7a811..b77f0364df94 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -11,6 +11,7 @@
 
 #include <linux/mm.h>
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * No cache flushing is required when address mappings are changed,
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
new file mode 100644
index 000000000000..2ef55f8968a2
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_POWERPC_CPUFEATURES_H
+#define __ASM_POWERPC_CPUFEATURES_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+#include <asm/cputable.h>
+
+static inline bool early_cpu_has_feature(unsigned long feature)
+{
+	return !!((CPU_FTRS_ALWAYS & feature) ||
+		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_CPU_FTR_KEYS	64
+
+extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
+
+static __always_inline bool cpu_has_feature(unsigned long feature)
+{
+	int i;
+
+	BUILD_BUG_ON(!__builtin_constant_p(feature));
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		printk("Warning! cpu_has_feature() used prior to jump label init!\n");
+		dump_stack();
+		return early_cpu_has_feature(feature);
+	}
+#endif
+
+	if (CPU_FTRS_ALWAYS & feature)
+		return true;
+
+	if (!(CPU_FTRS_POSSIBLE & feature))
+		return false;
+
+	i = __builtin_ctzl(feature);
+	return static_branch_likely(&cpu_feature_keys[i]);
+}
+#else
+static inline bool cpu_has_feature(unsigned long feature)
+{
+	return early_cpu_has_feature(feature);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_POWERPC_CPUFEATURE_H */
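
The fast path above depends on the feature mask being a compile-time constant: the CPU_FTRS_ALWAYS/CPU_FTRS_POSSIBLE tests constant-fold away, and __builtin_ctzl() maps the remaining single-bit mask to an index into the static-key array, which cpu_feature_keys_init() later prunes (see cputable.c further down). A user-space model of that bit-to-key mapping, with plain bools standing in for static keys and made-up feature bits:

#include <stdbool.h>
#include <stdio.h>

#define NUM_KEYS 64

/* Illustrative single-bit feature masks (not real CPU_FTR_* values). */
#define FTR_FOO	(1UL << 3)
#define FTR_BAR	(1UL << 7)

/* Stand-in for the static_key_true array; every key starts out "true". */
static bool feature_keys[NUM_KEYS] = { [0 ... NUM_KEYS - 1] = true };

/* Models cpu_feature_keys_init(): disable the key of every absent feature. */
static void feature_keys_init(unsigned long cpu_features)
{
	for (int i = 0; i < NUM_KEYS; i++)
		if (!(cpu_features & (1UL << i)))
			feature_keys[i] = false;
}

/* Models the jump-label fast path: single-bit mask -> key index. */
static inline bool has_feature(unsigned long feature)
{
	return feature_keys[__builtin_ctzl(feature)];
}

int main(void)
{
	feature_keys_init(FTR_FOO);	/* this "CPU" only has FTR_FOO */
	printf("FOO=%d BAR=%d\n", has_feature(FTR_FOO), has_feature(FTR_BAR));
	return 0;
}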
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index df4fb5faba43..82026b419341 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -2,6 +2,7 @@
 #define __ASM_POWERPC_CPUTABLE_H
 
 
+#include <linux/types.h>
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 #include <uapi/asm/cputable.h>
@@ -122,6 +123,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 
 extern const char *powerpc_base_platform;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+extern void cpu_feature_keys_init(void);
+#else
+static inline void cpu_feature_keys_init(void) { }
+#endif
+
 /* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
 enum {
 	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
@@ -576,14 +583,6 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline int cpu_has_feature(unsigned long feature)
-{
-	return (CPU_FTRS_ALWAYS & feature) ||
-	       (CPU_FTRS_POSSIBLE
-		& cur_cpu_spec->cpu_features
-		& feature);
-}
-
 #define HBP_NUM 1
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 2dfd4fc41f3e..4f60db074725 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -28,6 +28,7 @@ static inline void setup_cputime_one_jiffy(void) { }
 #include <asm/div64.h>
 #include <asm/time.h>
 #include <asm/param.h>
+#include <asm/cpu_has_feature.h>
 
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 5fa6b20eba10..378167377065 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -16,6 +16,7 @@
 #include <linux/threads.h>
 
 #include <asm/ppc-opcode.h>
+#include <asm/cpu_has_feature.h>
 
 #define PPC_DBELL_MSG_BRDCAST	(0x04000000)
 #define PPC_DBELL_TYPE(x)	(((x) & 0xf) << (63-36))
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 4efc11dacb98..4a2beef74277 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -24,6 +24,7 @@
 
 #include <linux/spinlock.h>
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 typedef struct {
 	unsigned int base;
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index e2d9f4996e5c..c5517f463ec7 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -147,7 +147,7 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 	pte_t pte;
 	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
-	flush_tlb_page(vma, addr);
+	flush_hugetlb_page(vma, addr);
 }
 
 static inline int huge_pte_none(pte_t pte)
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 9af103a23975..9a287e0ac8b1 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -22,7 +22,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 "nop\n\t"
+		 "nop # arch_static_branch\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 		 ".popsection \n\t"
@@ -36,7 +36,7 @@ l_yes:
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 "b %l[l_yes]\n\t"
+		 "b %l[l_yes] # arch_static_branch_jump\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 		 ".popsection \n\t"
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 1f4497fb5b83..88d17b4ea9c8 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -181,8 +181,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 
 	switch (b_psize) {
 	case MMU_PAGE_4K:
-		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
-			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
+		sllp = get_sllp_encoding(a_psize);
 		rb |= sllp << 5;	/*  AP field */
 		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
 		break;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 76f5398e7152..0420b388dd83 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -219,8 +219,6 @@ struct machdep_calls {
 #ifdef CONFIG_ARCH_RANDOM
 	int (*get_random_seed)(unsigned long *v);
 #endif
-	int (*register_process_table)(unsigned long base, unsigned long page_size,
-				      unsigned long tbl_size);
 };
 
 extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index fc420cedecae..30922f699341 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -13,6 +13,7 @@
 
 #include <asm/cputable.h>
 #include <linux/mm.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 54471228f7b8..e2fb408f8398 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -12,7 +12,7 @@
  */
 
 /*
- * First half is MMU families
+ * MMU families
  */
 #define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
 #define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
@@ -21,9 +21,13 @@
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
 #define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)
 
+/* Radix page table supported and enabled */
+#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)
+
 /*
- * This is individual features
+ * Individual features below.
  */
+
 /*
  * We need to clear top 16bits of va (from the remaining 64 bits )in
  * tlbie* instructions
@@ -93,11 +97,6 @@
  */
 #define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)
 
-/*
- * Radix page table available
- */
-#define MMU_FTR_RADIX			ASM_CONST(0x80000000)
-
 /* MMU feature bit sets for various CPUs */
 #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
 	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
@@ -113,6 +112,7 @@
 #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
 				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
 #ifndef __ASSEMBLY__
+#include <linux/bug.h>
 #include <asm/cputable.h>
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
@@ -131,20 +131,71 @@ enum {
 		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
 		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
 #ifdef CONFIG_PPC_RADIX_MMU
-		MMU_FTR_RADIX |
+		MMU_FTR_TYPE_RADIX |
 #endif
 		0,
 };
 
-static inline int mmu_has_feature(unsigned long feature)
+static inline bool early_mmu_has_feature(unsigned long feature)
 {
-	return (MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_MMU_FTR_KEYS	32
+
+extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
+
+extern void mmu_feature_keys_init(void);
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
+{
+	int i;
+
+	BUILD_BUG_ON(!__builtin_constant_p(feature));
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
+		dump_stack();
+		return early_mmu_has_feature(feature);
+	}
+#endif
+
+	if (!(MMU_FTRS_POSSIBLE & feature))
+		return false;
+
+	i = __builtin_ctzl(feature);
+	return static_branch_likely(&mmu_feature_keys[i]);
 }
 
 static inline void mmu_clear_feature(unsigned long feature)
 {
+	int i;
+
+	i = __builtin_ctzl(feature);
 	cur_cpu_spec->mmu_features &= ~feature;
+	static_branch_disable(&mmu_feature_keys[i]);
 }
+#else
+
+static inline void mmu_feature_keys_init(void)
+{
+
+}
+
+static inline bool mmu_has_feature(unsigned long feature)
+{
+	return early_mmu_has_feature(feature);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+	cur_cpu_spec->mmu_features &= ~feature;
+}
+#endif /* CONFIG_JUMP_LABEL */
 
 extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 
@@ -164,6 +215,28 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 }
 #endif /* !CONFIG_DEBUG_VM */
 
+#ifdef CONFIG_PPC_RADIX_MMU
+static inline bool radix_enabled(void)
+{
+	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+
+static inline bool early_radix_enabled(void)
+{
+	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+#else
+static inline bool radix_enabled(void)
+{
+	return false;
+}
+
+static inline bool early_radix_enabled(void)
+{
+	return false;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
@@ -210,6 +283,7 @@ extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				       phys_addr_t first_memblock_size);
+static inline void mmu_early_init_devtree(void) { }
 #endif /* __ASSEMBLY__ */
 #endif
 
@@ -230,9 +304,5 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 #  include <asm/mmu-8xx.h>
 #endif
 
-#ifndef radix_enabled
-#define radix_enabled() (0)
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 40f3615bf940..f69f40f1519a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1256,15 +1256,6 @@ static inline void msr_check_and_clear(unsigned long bits)
 	__msr_check_and_clear(bits);
 }
 
-static inline unsigned long mfvtb (void)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		return mfspr(SPRN_VTB);
-#endif
-	return 0;
-}
-
 #ifdef __powerpc64__
 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
 #define mftb()		({unsigned long rval;				\
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 17c8380673a6..0a74ebe934e1 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -75,6 +75,14 @@ static inline void disable_kernel_spe(void)
 static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void flush_tmregs_to_thread(struct task_struct *);
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *t)
+{
+}
+#endif
+
 static inline void clear_task_ebb(struct task_struct *t)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 09211640a0e0..b240666b7bc1 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 
 #include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
 
 /* time.c */
 extern unsigned long tb_ticks_per_jiffy;
@@ -103,7 +104,7 @@ static inline u64 get_vtb(void)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		return mfvtb();
+		return mfspr(SPRN_VTB);
 #endif
 	return 0;
 }
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 20733fa518ae..f6f68f73e858 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -46,5 +46,18 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 #endif
 }
 
+#ifdef CONFIG_SMP
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+	return cpumask_subset(mm_cpumask(mm),
+			      topology_sibling_cpumask(smp_processor_id()));
+}
+#else
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+	return 1;
+}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_TLB_H */
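
mm_is_core_local() asks whether every CPU the mm has run on is a hardware thread of the current core (i.e. whether the mm's CPU mask is a subset of the current core's sibling mask), so callers can use a cheaper core-local TLB flush instead of a broadcast one. The subset test reduces to plain mask arithmetic, modeled here with single-word bitmasks:

#include <stdbool.h>
#include <stdio.h>

/* cpumask_subset(a, b) is just "a has no bits outside b". */
static bool mask_subset(unsigned long src, unsigned long dst)
{
	return (src & ~dst) == 0;
}

int main(void)
{
	unsigned long core_siblings = 0x0fUL;	/* CPUs 0-3 share a core */
	unsigned long mm_cpus_local = 0x05UL;	/* mm ran on CPUs 0 and 2 */
	unsigned long mm_cpus_remote = 0x15UL;	/* ... and also on CPU 4 */

	printf("local:  %d\n", mask_subset(mm_cpus_local, core_siblings));  /* 1 */
	printf("remote: %d\n", mask_subset(mm_cpus_remote, core_siblings)); /* 0 */
	return 0;
}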
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 1b38eea28e5a..13dbcd41885e 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -54,7 +54,6 @@ extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
54#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) 54#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
55#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i) 55#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
56#endif 56#endif
57#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
58 57
59#elif defined(CONFIG_PPC_STD_MMU_32) 58#elif defined(CONFIG_PPC_STD_MMU_32)
60 59
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
index 0abb97f3be10..a36c2069d8ed 100644
--- a/arch/powerpc/include/asm/xor.h
+++ b/arch/powerpc/include/asm/xor.h
@@ -23,6 +23,7 @@
 #ifdef CONFIG_ALTIVEC
 
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		   unsigned long *v2_in);
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index c2d21d11c2d2..3a9e44c45c78 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -91,6 +91,11 @@
 
 #define ELF_NGREG	48	/* includes nip, msr, lr, etc. */
 #define ELF_NFPREG	33	/* includes fpscr */
+#define ELF_NVMX	34	/* includes all vector registers */
+#define ELF_NVSX	32	/* includes all VSX registers */
+#define ELF_NTMSPRREG	3	/* include tfhar, tfiar, texasr */
+#define ELF_NEBB	3	/* includes ebbrr, ebbhr, bescr */
+#define ELF_NPMU	5	/* includes siar, sdar, sier, mmcr2, mmcr0 */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index c7097f933114..033f3385fa49 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -26,6 +26,7 @@
 #include <asm/emulated_ops.h>
 #include <asm/switch_to.h>
 #include <asm/disassemble.h>
+#include <asm/cpu_has_feature.h>
 
 struct aligninfo {
 	unsigned char len;
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index d81f826d1029..74248ab18e98 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -15,6 +15,7 @@
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/jump_label.h>
 
 #include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
@@ -2224,3 +2225,39 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 
 	return NULL;
 }
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
+			[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(cpu_feature_keys);
+
+void __init cpu_feature_keys_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_CPU_FTR_KEYS; i++) {
+		unsigned long f = 1ul << i;
+
+		if (!(cur_cpu_spec->cpu_features & f))
+			static_branch_disable(&cpu_feature_keys[i]);
+	}
+}
+
+struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
+			[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(mmu_feature_keys);
+
+void __init mmu_feature_keys_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
+		unsigned long f = 1ul << i;
+
+		if (!(cur_cpu_spec->mmu_features & f))
+			static_branch_disable(&mmu_feature_keys[i]);
+	}
+}
+#endif
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index fcb2887f5a33..6b8bc0dd09d4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -532,7 +532,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
 	b	2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 BEGIN_FTR_SECTION
 	clrrdi	r6,r8,28	/* get its ESID */
 	clrrdi	r9,r1,28	/* get current sp ESID */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 694def6c9d61..41091fdf9bd8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -940,7 +940,7 @@ BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
 	b	handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 	.align	7
 	.globl	h_data_storage_common
@@ -971,7 +971,7 @@ BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
 	b	handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
 
@@ -1392,7 +1392,7 @@ slb_miss_realmode:
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
 	bl	slb_allocate_realmode
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif
 	/* All done -- return from exception. */
 
@@ -1406,7 +1406,7 @@ BEGIN_MMU_FTR_SECTION
 	beq-	2f
 FTR_SECTION_ELSE
 	b	2f
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 .machine push
 .machine "power4"
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 8a56a51fc0cb..ba79d15f4ddd 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -572,7 +572,7 @@ common_exit:
 
 BEGIN_MMU_FTR_SECTION
 	b	no_segments
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	/* Restore SLB from PACA */
 	ld	r8,PACA_SLBSHADOWPTR(r13)
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac910d9982df..08887cf2b20e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -75,6 +75,7 @@
 #endif
 #define CREATE_TRACE_POINTS
 #include <asm/trace.h>
+#include <asm/cpu_has_feature.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 93dae296b6be..fa20060ff7a5 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
 	 * if we do a GET_PACA() before the feature fixups have been
 	 * applied
 	 */
-	if (cpu_has_feature(CPU_FTR_HVMODE))
+	if (early_cpu_has_feature(CPU_FTR_HVMODE))
 		mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
 	mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a8cca88e972f..58ccf86415b4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -58,6 +58,7 @@
 #include <asm/code-patching.h>
 #include <asm/exec.h>
 #include <asm/livepatch.h>
+#include <asm/cpu_has_feature.h>
 
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
@@ -1073,6 +1074,26 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 #endif
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+	/*
+	 * Process self tracing is not yet supported through
+	 * ptrace interface. Ptrace generic code should have
+	 * prevented this from happening in the first place.
+	 * Warn once here with the message, if some how it
+	 * is attempted.
+	 */
+	WARN_ONCE(tsk == current,
+		"Not expecting ptrace on self: TM regs may be incorrect\n");
+
+	/*
+	 * If task is not current, it should have been flushed
+	 * already to it's thread_struct during __switch_to().
+	 */
+}
+#endif
+
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
 {
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index bae3db791150..b0245bed6f54 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -170,7 +170,7 @@ static struct ibm_pa_feature {
 	 */
 	{CPU_FTR_TM_COMP, 0, 0,
 	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
-	{0, MMU_FTR_RADIX, 0, 0,		40, 0, 0},
+	{0, MMU_FTR_TYPE_RADIX, 0, 0,		40, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -647,14 +647,6 @@ static void __init early_reserve_mem(void)
 #endif
 }
 
-static bool disable_radix;
-static int __init parse_disable_radix(char *p)
-{
-	disable_radix = true;
-	return 0;
-}
-early_param("disable_radix", parse_disable_radix);
-
 void __init early_init_devtree(void *params)
 {
 	phys_addr_t limit;
@@ -744,11 +736,8 @@ void __init early_init_devtree(void *params)
 	 */
 	spinning_secondaries = boot_cpu_count - 1;
 #endif
-	/*
-	 * now fixup radix MMU mode based on kernel command line
-	 */
-	if (disable_radix)
-		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
+
+	mmu_early_init_devtree();
 
 #ifdef CONFIG_PPC_POWERNV
 	/* Scan and build the list of machine check recoverable ranges */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 134bee9ac664..4f3c5756cc09 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -64,6 +64,10 @@ struct pt_regs_offset {
 	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
 #define REG_OFFSET_END {.name = NULL, .offset = 0}
 
+#define TVSO(f)	(offsetof(struct thread_vr_state, f))
+#define TFSO(f)	(offsetof(struct thread_fp_state, f))
+#define TSO(f)	(offsetof(struct thread_struct, f))
+
 static const struct pt_regs_offset regoffset_table[] = {
 	GPR_OFFSET_NAME(0),
 	GPR_OFFSET_NAME(1),
@@ -181,6 +185,26 @@ static int set_user_msr(struct task_struct *task, unsigned long msr)
 	return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static unsigned long get_user_ckpt_msr(struct task_struct *task)
+{
+	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
+}
+
+static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
+{
+	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
+	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
+	return 0;
+}
+
+static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
+{
+	task->thread.ckpt_regs.trap = trap & 0xfff0;
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PPC64
 static int get_user_dscr(struct task_struct *task, unsigned long *data)
 {
@@ -358,6 +382,29 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 	return ret;
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which returns the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ *	u64	fpr[32];
+ *	u64	fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determines the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ *	!defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   void *kbuf, void __user *ubuf)
@@ -368,14 +415,31 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 #endif
 	flush_fp_to_thread(target);
 
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+	/* copy to local buffer then write that out */
+	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+		flush_altivec_to_thread(target);
+		flush_tmregs_to_thread(target);
+		for (i = 0; i < 32 ; i++)
+			buf[i] = target->thread.TS_TRANS_FPR(i);
+		buf[32] = target->thread.transact_fp.fpscr;
+	} else {
+		for (i = 0; i < 32 ; i++)
+			buf[i] = target->thread.TS_FPR(i);
+		buf[32] = target->thread.fp_state.fpscr;
+	}
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
+
+#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	/* copy to local buffer then write that out */
 	for (i = 0; i < 32 ; i++)
 		buf[i] = target->thread.TS_FPR(i);
 	buf[32] = target->thread.fp_state.fpscr;
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
 
-#else
+#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
 		     offsetof(struct thread_fp_state, fpr[32]));
 
@@ -384,6 +448,29 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 #endif
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which sets the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ *	u64	fpr[32];
+ *	u64	fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determines the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ *	!defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   const void *kbuf, const void __user *ubuf)
@@ -394,7 +481,27 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 #endif
 	flush_fp_to_thread(target);
 
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+	/* copy to local buffer then write that out */
+	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+	if (i)
+		return i;
+
+	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+		flush_altivec_to_thread(target);
+		flush_tmregs_to_thread(target);
+		for (i = 0; i < 32 ; i++)
+			target->thread.TS_TRANS_FPR(i) = buf[i];
+		target->thread.transact_fp.fpscr = buf[32];
+	} else {
+		for (i = 0; i < 32 ; i++)
+			target->thread.TS_FPR(i) = buf[i];
+		target->thread.fp_state.fpscr = buf[32];
+	}
+	return 0;
+#endif
+
+#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	/* copy to local buffer then write that out */
 	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 	if (i)
@@ -403,7 +510,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		target->thread.TS_FPR(i) = buf[i];
 	target->thread.fp_state.fpscr = buf[32];
 	return 0;
-#else
+#endif
+
+#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
 		     offsetof(struct thread_fp_state, fpr[32]));
 
@@ -433,10 +542,28 @@ static int vr_active(struct task_struct *target,
433 return target->thread.used_vr ? regset->n : 0; 542 return target->thread.used_vr ? regset->n : 0;
434} 543}
435 544
545/*
546 * When the transaction is active, 'transact_vr' holds the current running
547 * value of all the VMX registers and 'vr_state' holds the last checkpointed
548 * value of all the VMX registers for the current transaction to fall back
549 * on in case it aborts. When transaction is not active 'vr_state' holds
550 * the current running state of all the VMX registers. So this function,
551 * which gets the current running values of all the VMX registers, needs to
552 * know whether any transaction is active or not.
553 *
554 * Userspace interface buffer layout:
555 *
556 * struct data {
557 * vector128 vr[32];
558 * vector128 vscr;
559 * vector128 vrsave;
560 * };
561 */
436static int vr_get(struct task_struct *target, const struct user_regset *regset, 562static int vr_get(struct task_struct *target, const struct user_regset *regset,
437 unsigned int pos, unsigned int count, 563 unsigned int pos, unsigned int count,
438 void *kbuf, void __user *ubuf) 564 void *kbuf, void __user *ubuf)
439{ 565{
566 struct thread_vr_state *addr;
440 int ret; 567 int ret;
441 568
442 flush_altivec_to_thread(target); 569 flush_altivec_to_thread(target);
@@ -444,8 +571,19 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
444 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != 571 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
445 offsetof(struct thread_vr_state, vr[32])); 572 offsetof(struct thread_vr_state, vr[32]));
446 573
574#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
575 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
576 flush_fp_to_thread(target);
577 flush_tmregs_to_thread(target);
578 addr = &target->thread.transact_vr;
579 } else {
580 addr = &target->thread.vr_state;
581 }
582#else
583 addr = &target->thread.vr_state;
584#endif
447 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 585 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
448 &target->thread.vr_state, 0, 586 addr, 0,
449 33 * sizeof(vector128)); 587 33 * sizeof(vector128));
450 if (!ret) { 588 if (!ret) {
451 /* 589 /*
@@ -456,7 +594,16 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
456 u32 word; 594 u32 word;
457 } vrsave; 595 } vrsave;
458 memset(&vrsave, 0, sizeof(vrsave)); 596 memset(&vrsave, 0, sizeof(vrsave));
597
598#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
599 if (MSR_TM_ACTIVE(target->thread.regs->msr))
600 vrsave.word = target->thread.transact_vrsave;
601 else
602 vrsave.word = target->thread.vrsave;
603#else
459 vrsave.word = target->thread.vrsave; 604 vrsave.word = target->thread.vrsave;
605#endif
606
460 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, 607 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
461 33 * sizeof(vector128), -1); 608 33 * sizeof(vector128), -1);
462 } 609 }
@@ -464,10 +611,28 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
464 return ret; 611 return ret;
465} 612}
466 613
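vr_get()'s 34-slot layout, with only the low-order word of the trailing vrsave slot populated, can be read the same way. A sketch under the same assumptions (stopped tracee, NT_PPC_VMX available from elf.h):

#include <elf.h>		/* NT_PPC_VMX */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* vr[0..31], then vscr, then vrsave: 34 16-byte slots in total */
struct ppc_vmx_buf {
	uint8_t slot[34][16];
};

static long read_vmx(pid_t pid, struct ppc_vmx_buf *buf)
{
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(*buf) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_VMX, &iov);
}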
614/*
615 * When the transaction is active, 'transact_vr' holds the current running
616 * value of all the VMX registers and 'vr_state' holds the last checkpointed
617 * value of all the VMX registers for the current transaction to fall back
618 * on in case it aborts. When transaction is not active 'vr_state' holds
619 * the current running state of all the VMX registers. So this function,
620 * which sets the current running values of all the VMX registers, needs to
621 * know whether any transaction is active or not.
622 *
623 * Userspace interface buffer layout:
624 *
625 * struct data {
626 * vector128 vr[32];
627 * vector128 vscr;
628 * vector128 vrsave;
629 * };
630 */
467static int vr_set(struct task_struct *target, const struct user_regset *regset, 631static int vr_set(struct task_struct *target, const struct user_regset *regset,
468 unsigned int pos, unsigned int count, 632 unsigned int pos, unsigned int count,
469 const void *kbuf, const void __user *ubuf) 633 const void *kbuf, const void __user *ubuf)
470{ 634{
635 struct thread_vr_state *addr;
471 int ret; 636 int ret;
472 637
473 flush_altivec_to_thread(target); 638 flush_altivec_to_thread(target);
@@ -475,8 +640,19 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
475 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != 640 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
476 offsetof(struct thread_vr_state, vr[32])); 641 offsetof(struct thread_vr_state, vr[32]));
477 642
643#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
644 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
645 flush_fp_to_thread(target);
646 flush_tmregs_to_thread(target);
647 addr = &target->thread.transact_vr;
648 } else {
649 addr = &target->thread.vr_state;
650 }
651#else
652 addr = &target->thread.vr_state;
653#endif
478 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 654 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
479 &target->thread.vr_state, 0, 655 addr, 0,
480 33 * sizeof(vector128)); 656 33 * sizeof(vector128));
481 if (!ret && count > 0) { 657 if (!ret && count > 0) {
482 /* 658 /*
@@ -487,11 +663,28 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
487 u32 word; 663 u32 word;
488 } vrsave; 664 } vrsave;
489 memset(&vrsave, 0, sizeof(vrsave)); 665 memset(&vrsave, 0, sizeof(vrsave));
666
667#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
668 if (MSR_TM_ACTIVE(target->thread.regs->msr))
669 vrsave.word = target->thread.transact_vrsave;
670 else
671 vrsave.word = target->thread.vrsave;
672#else
490 vrsave.word = target->thread.vrsave; 673 vrsave.word = target->thread.vrsave;
674#endif
491 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave, 675 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
492 33 * sizeof(vector128), -1); 676 33 * sizeof(vector128), -1);
493 if (!ret) 677 if (!ret) {
678
679#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
680 if (MSR_TM_ACTIVE(target->thread.regs->msr))
681 target->thread.transact_vrsave = vrsave.word;
682 else
683 target->thread.vrsave = vrsave.word;
684#else
494 target->thread.vrsave = vrsave.word; 685 target->thread.vrsave = vrsave.word;
686#endif
687 }
495 } 688 }
496 689
497 return ret; 690 return ret;
@@ -512,6 +705,21 @@ static int vsr_active(struct task_struct *target,
512 return target->thread.used_vsr ? regset->n : 0; 705 return target->thread.used_vsr ? regset->n : 0;
513} 706}
514 707
708/*
709 * When the transaction is active, 'transact_fp' holds the current running
710 * value of all FPR registers and 'fp_state' holds the last checkpointed
711 * value of all FPR registers for the current transaction. When transaction
712 * is not active 'fp_state' holds the current running state of all the FPR
713 * registers. So this function, which returns the current running values of
714 * all the VSX registers, needs to know whether any transaction is active
715 * or not.
716 *
717 * Userspace interface buffer layout:
718 *
719 * struct data {
720 * u64 vsx[32];
721 * };
722 */
515static int vsr_get(struct task_struct *target, const struct user_regset *regset, 723static int vsr_get(struct task_struct *target, const struct user_regset *regset,
516 unsigned int pos, unsigned int count, 724 unsigned int pos, unsigned int count,
517 void *kbuf, void __user *ubuf) 725 void *kbuf, void __user *ubuf)
@@ -519,16 +727,47 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
519 u64 buf[32]; 727 u64 buf[32];
520 int ret, i; 728 int ret, i;
521 729
730#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
731 flush_fp_to_thread(target);
732 flush_altivec_to_thread(target);
733 flush_tmregs_to_thread(target);
734#endif
522 flush_vsx_to_thread(target); 735 flush_vsx_to_thread(target);
523 736
737#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
738 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
739 for (i = 0; i < 32 ; i++)
740 buf[i] = target->thread.
741 transact_fp.fpr[i][TS_VSRLOWOFFSET];
742 } else {
743 for (i = 0; i < 32 ; i++)
744 buf[i] = target->thread.
745 fp_state.fpr[i][TS_VSRLOWOFFSET];
746 }
747#else
524 for (i = 0; i < 32 ; i++) 748 for (i = 0; i < 32 ; i++)
525 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; 749 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
750#endif
526 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 751 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
527 buf, 0, 32 * sizeof(double)); 752 buf, 0, 32 * sizeof(double));
528 753
529 return ret; 754 return ret;
530} 755}
531 756
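Since vsr_get() exposes only the low doubleword of each of the 32 VSRs, the whole regset is a flat array of 32 u64s. A hedged sketch of the tracer side (NT_PPC_VSX assumed available from elf.h, tracee stopped):

#include <elf.h>		/* NT_PPC_VSX */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long read_vsx(pid_t pid, uint64_t vsx[32])
{
	struct iovec iov = { .iov_base = vsx, .iov_len = 32 * sizeof(uint64_t) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_VSX, &iov);
}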
757/*
758 * When the transaction is active, 'transact_fp' holds the current running
759 * value of all FPR registers and 'fp_state' holds the last checkpointed
760 * value of all FPR registers for the current transaction. When transaction
761 * is not active 'fp_state' holds the current running state of all the FPR
762 * registers. So this function, which sets the current running values of all
763 * the VSX registers, needs to know whether any transaction is active or not.
764 *
765 * Userspace interface buffer layout:
766 *
767 * struct data {
768 * u64 vsx[32];
769 * };
770 */
532static int vsr_set(struct task_struct *target, const struct user_regset *regset, 771static int vsr_set(struct task_struct *target, const struct user_regset *regset,
533 unsigned int pos, unsigned int count, 772 unsigned int pos, unsigned int count,
534 const void *kbuf, const void __user *ubuf) 773 const void *kbuf, const void __user *ubuf)
@@ -536,12 +775,30 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
536 u64 buf[32]; 775 u64 buf[32];
537 int ret,i; 776 int ret,i;
538 777
778#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
779 flush_fp_to_thread(target);
780 flush_altivec_to_thread(target);
781 flush_tmregs_to_thread(target);
782#endif
539 flush_vsx_to_thread(target); 783 flush_vsx_to_thread(target);
540 784
541 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 785 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
542 buf, 0, 32 * sizeof(double)); 786 buf, 0, 32 * sizeof(double));
787
788#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
789 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
790 for (i = 0; i < 32 ; i++)
791 target->thread.transact_fp.
792 fpr[i][TS_VSRLOWOFFSET] = buf[i];
793 } else {
794 for (i = 0; i < 32 ; i++)
795 target->thread.fp_state.
796 fpr[i][TS_VSRLOWOFFSET] = buf[i];
797 }
798#else
543 for (i = 0; i < 32 ; i++) 799 for (i = 0; i < 32 ; i++)
544 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; 800 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
801#endif
545 802
546 803
547 return ret; 804 return ret;
@@ -614,8 +871,1030 @@ static int evr_set(struct task_struct *target, const struct user_regset *regset,
614} 871}
615#endif /* CONFIG_SPE */ 872#endif /* CONFIG_SPE */
616 873
874#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
875/**
876 * tm_cgpr_active - get active number of registers in CGPR
877 * @target: The target task.
878 * @regset: The user regset structure.
879 *
880 * This function checks for the active number of available
881 * registers in the transaction checkpointed GPR category.
882 */
883static int tm_cgpr_active(struct task_struct *target,
884 const struct user_regset *regset)
885{
886 if (!cpu_has_feature(CPU_FTR_TM))
887 return -ENODEV;
888
889 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
890 return 0;
891
892 return regset->n;
893}
894
895/**
896 * tm_cgpr_get - get CGPR registers
897 * @target: The target task.
898 * @regset: The user regset structure.
899 * @pos: The buffer position.
900 * @count: Number of bytes to copy.
901 * @kbuf: Kernel buffer to copy from.
902 * @ubuf: User buffer to copy into.
903 *
904 * This function gets transaction checkpointed GPR registers.
905 *
906 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
907 * GPR register values for the current transaction to fall back on if it
908 * aborts in between. This function gets those checkpointed GPR registers.
909 * The userspace interface buffer layout is as follows.
910 *
911 * struct data {
912 * struct pt_regs ckpt_regs;
913 * };
914 */
915static int tm_cgpr_get(struct task_struct *target,
916 const struct user_regset *regset,
917 unsigned int pos, unsigned int count,
918 void *kbuf, void __user *ubuf)
919{
920 int ret;
921
922 if (!cpu_has_feature(CPU_FTR_TM))
923 return -ENODEV;
924
925 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
926 return -ENODATA;
927
928 flush_fp_to_thread(target);
929 flush_altivec_to_thread(target);
930 flush_tmregs_to_thread(target);
931
932 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
933 &target->thread.ckpt_regs,
934 0, offsetof(struct pt_regs, msr));
935 if (!ret) {
936 unsigned long msr = get_user_ckpt_msr(target);
937
938 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
939 offsetof(struct pt_regs, msr),
940 offsetof(struct pt_regs, msr) +
941 sizeof(msr));
942 }
943
944 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
945 offsetof(struct pt_regs, msr) + sizeof(long));
946
947 if (!ret)
948 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
949 &target->thread.ckpt_regs.orig_gpr3,
950 offsetof(struct pt_regs, orig_gpr3),
951 sizeof(struct pt_regs));
952 if (!ret)
953 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
954 sizeof(struct pt_regs), -1);
955
956 return ret;
957}
617 958
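tm_cgpr_get() refuses with -ENODATA outside a transaction, which a tracer can use to distinguish "no transaction" from a real failure. A sketch, assuming an elf.h that already defines NT_PPC_TM_CGPR (the note type introduced by this series) and using the regset's GPR shape of 48 longs (the kernel's ELF_NGREG):

#include <elf.h>
#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Returns 1 if checkpointed GPRs were read, 0 if no transaction, -1 on error */
static int read_ckpt_gprs(pid_t pid, uint64_t ckpt[48])
{
	struct iovec iov = { .iov_base = ckpt, .iov_len = 48 * sizeof(uint64_t) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CGPR, &iov) == 0)
		return 1;

	return errno == ENODATA ? 0 : -1;
}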
618/* 959/*
960 * tm_cgpr_set - set the CGPR registers
961 * @target: The target task.
962 * @regset: The user regset structure.
963 * @pos: The buffer position.
964 * @count: Number of bytes to copy.
965 * @kbuf: Kernel buffer to copy into.
966 * @ubuf: User buffer to copy from.
967 *
968 * This function sets the in-transaction checkpointed GPR registers.
969 *
970 * When the transaction is active, 'ckpt_regs' holds the checkpointed
971 * GPR register values for the current transaction to fall back on if it
972 * aborts in between. This function sets those checkpointed GPR registers.
973 * The userspace interface buffer layout is as follows.
974 *
975 * struct data {
976 * struct pt_regs ckpt_regs;
977 * };
978 */
979static int tm_cgpr_set(struct task_struct *target,
980 const struct user_regset *regset,
981 unsigned int pos, unsigned int count,
982 const void *kbuf, const void __user *ubuf)
983{
984 unsigned long reg;
985 int ret;
986
987 if (!cpu_has_feature(CPU_FTR_TM))
988 return -ENODEV;
989
990 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
991 return -ENODATA;
992
993 flush_fp_to_thread(target);
994 flush_altivec_to_thread(target);
995 flush_tmregs_to_thread(target);
996
997 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
998 &target->thread.ckpt_regs,
999 0, PT_MSR * sizeof(reg));
1000
1001 if (!ret && count > 0) {
1002 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
1003 PT_MSR * sizeof(reg),
1004 (PT_MSR + 1) * sizeof(reg));
1005 if (!ret)
1006 ret = set_user_ckpt_msr(target, reg);
1007 }
1008
1009 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
1010 offsetof(struct pt_regs, msr) + sizeof(long));
1011
1012 if (!ret)
1013 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1014 &target->thread.ckpt_regs.orig_gpr3,
1015 PT_ORIG_R3 * sizeof(reg),
1016 (PT_MAX_PUT_REG + 1) * sizeof(reg));
1017
1018 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
1019 ret = user_regset_copyin_ignore(
1020 &pos, &count, &kbuf, &ubuf,
1021 (PT_MAX_PUT_REG + 1) * sizeof(reg),
1022 PT_TRAP * sizeof(reg));
1023
1024 if (!ret && count > 0) {
1025 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
1026 PT_TRAP * sizeof(reg),
1027 (PT_TRAP + 1) * sizeof(reg));
1028 if (!ret)
1029 ret = set_user_ckpt_trap(target, reg);
1030 }
1031
1032 if (!ret)
1033 ret = user_regset_copyin_ignore(
1034 &pos, &count, &kbuf, &ubuf,
1035 (PT_TRAP + 1) * sizeof(reg), -1);
1036
1037 return ret;
1038}
1039
1040/**
1041 * tm_cfpr_active - get active number of registers in CFPR
1042 * @target: The target task.
1043 * @regset: The user regset structure.
1044 *
1045 * This function checks for the active number of available
1046 * registers in the transaction checkpointed FPR category.
1047 */
1048static int tm_cfpr_active(struct task_struct *target,
1049 const struct user_regset *regset)
1050{
1051 if (!cpu_has_feature(CPU_FTR_TM))
1052 return -ENODEV;
1053
1054 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1055 return 0;
1056
1057 return regset->n;
1058}
1059
1060/**
1061 * tm_cfpr_get - get CFPR registers
1062 * @target: The target task.
1063 * @regset: The user regset structure.
1064 * @pos: The buffer position.
1065 * @count: Number of bytes to copy.
1066 * @kbuf: Kernel buffer to copy from.
1067 * @ubuf: User buffer to copy into.
1068 *
1069 * This function gets the in-transaction checkpointed FPR registers.
1070 *
1071 * When the transaction is active 'fp_state' holds the checkpointed
1072 * values for the current transaction to fall back on if it aborts
1073 * in between. This function gets those checkpointed FPR registers.
1074 * The userspace interface buffer layout is as follows.
1075 *
1076 * struct data {
1077 * u64 fpr[32];
1078 * u64 fpscr;
1079 * };
1080 */
1081static int tm_cfpr_get(struct task_struct *target,
1082 const struct user_regset *regset,
1083 unsigned int pos, unsigned int count,
1084 void *kbuf, void __user *ubuf)
1085{
1086 u64 buf[33];
1087 int i;
1088
1089 if (!cpu_has_feature(CPU_FTR_TM))
1090 return -ENODEV;
1091
1092 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1093 return -ENODATA;
1094
1095 flush_fp_to_thread(target);
1096 flush_altivec_to_thread(target);
1097 flush_tmregs_to_thread(target);
1098
1099 /* copy to local buffer then write that out */
1100 for (i = 0; i < 32 ; i++)
1101 buf[i] = target->thread.TS_FPR(i);
1102 buf[32] = target->thread.fp_state.fpscr;
1103 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1104}
1105
1106/**
1107 * tm_cfpr_set - set CFPR registers
1108 * @target: The target task.
1109 * @regset: The user regset structure.
1110 * @pos: The buffer position.
1111 * @count: Number of bytes to copy.
1112 * @kbuf: Kernel buffer to copy into.
1113 * @ubuf: User buffer to copy from.
1114 *
1115 * This function sets the in-transaction checkpointed FPR registers.
1116 *
1117 * When the transaction is active 'fp_state' holds the checkpointed
1118 * FPR register values for the current transaction to fall back on
1119 * if it aborts in between. This function sets these checkpointed
1120 * FPR registers. The userspace interface buffer layout is as follows.
1121 *
1122 * struct data {
1123 * u64 fpr[32];
1124 * u64 fpscr;
1125 * };
1126 */
1127static int tm_cfpr_set(struct task_struct *target,
1128 const struct user_regset *regset,
1129 unsigned int pos, unsigned int count,
1130 const void *kbuf, const void __user *ubuf)
1131{
1132 u64 buf[33];
1133 int i;
1134
1135 if (!cpu_has_feature(CPU_FTR_TM))
1136 return -ENODEV;
1137
1138 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1139 return -ENODATA;
1140
1141 flush_fp_to_thread(target);
1142 flush_altivec_to_thread(target);
1143 flush_tmregs_to_thread(target);
1144
1145 /* copy to local buffer then write that out */
1146 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1147 if (i)
1148 return i;
1149 for (i = 0; i < 32 ; i++)
1150 target->thread.TS_FPR(i) = buf[i];
1151 target->thread.fp_state.fpscr = buf[32];
1152 return 0;
1153}
1154
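The set side is symmetric: PTRACE_SETREGSET with the same 33-u64 buffer updates the checkpointed FPRs, and likewise fails with ENODATA when the tracee has no active transaction. A sketch (NT_PPC_TM_CFPR assumed defined by the headers in use):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Same shape as the running FPR regset: u64 fpr[32]; u64 fpscr; */
struct ppc_cfpr_buf {
	uint64_t fpr[32];
	uint64_t fpscr;
};

static long write_ckpt_fprs(pid_t pid, const struct ppc_cfpr_buf *buf)
{
	struct iovec iov = {
		.iov_base = (void *)buf,
		.iov_len = sizeof(*buf),
	};

	return ptrace(PTRACE_SETREGSET, pid, NT_PPC_TM_CFPR, &iov);
}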
1155/**
1156 * tm_cvmx_active - get active number of registers in CVMX
1157 * @target: The target task.
1158 * @regset: The user regset structure.
1159 *
1160 * This function checks for the active number of available
1161 * registers in the checkpointed VMX category.
1162 */
1163static int tm_cvmx_active(struct task_struct *target,
1164 const struct user_regset *regset)
1165{
1166 if (!cpu_has_feature(CPU_FTR_TM))
1167 return -ENODEV;
1168
1169 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1170 return 0;
1171
1172 return regset->n;
1173}
1174
1175/**
1176 * tm_cvmx_get - get CVMX registers
1177 * @target: The target task.
1178 * @regset: The user regset structure.
1179 * @pos: The buffer position.
1180 * @count: Number of bytes to copy.
1181 * @kbuf: Kernel buffer to copy from.
1182 * @ubuf: User buffer to copy into.
1183 *
1184 * This function gets the in-transaction checkpointed VMX registers.
1185 *
1186 * When the transaction is active 'vr_state' and 'vrsave' hold
1187 * the checkpointed values for the current transaction to fall
1188 * back on if it aborts in between. The userspace interface buffer
1189 * layout is as follows.
1190 *
1191 * struct data {
1192 * vector128 vr[32];
1193 * vector128 vscr;
1194 * vector128 vrsave;
1195 * };
1196 */
1197static int tm_cvmx_get(struct task_struct *target,
1198 const struct user_regset *regset,
1199 unsigned int pos, unsigned int count,
1200 void *kbuf, void __user *ubuf)
1201{
1202 int ret;
1203
1204 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1205
1206 if (!cpu_has_feature(CPU_FTR_TM))
1207 return -ENODEV;
1208
1209 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1210 return -ENODATA;
1211
1212 /* Flush the state */
1213 flush_fp_to_thread(target);
1214 flush_altivec_to_thread(target);
1215 flush_tmregs_to_thread(target);
1216
1217 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1218 &target->thread.vr_state, 0,
1219 33 * sizeof(vector128));
1220 if (!ret) {
1221 /*
1222 * Copy out only the low-order word of vrsave.
1223 */
1224 union {
1225 elf_vrreg_t reg;
1226 u32 word;
1227 } vrsave;
1228 memset(&vrsave, 0, sizeof(vrsave));
1229 vrsave.word = target->thread.vrsave;
1230 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1231 33 * sizeof(vector128), -1);
1232 }
1233
1234 return ret;
1235}
1236
1237/**
1238 * tm_cvmx_set - set CVMX registers
1239 * @target: The target task.
1240 * @regset: The user regset structure.
1241 * @pos: The buffer position.
1242 * @count: Number of bytes to copy.
1243 * @kbuf: Kernel buffer to copy into.
1244 * @ubuf: User buffer to copy from.
1245 *
1246 * This function sets the in-transaction checkpointed VMX registers.
1247 *
1248 * When the transaction is active 'vr_state' and 'vrsave' hold
1249 * the checkpointed values for the current transaction to fall
1250 * back on if it aborts in between. The userspace interface buffer
1251 * layout is as follows.
1252 *
1253 * struct data {
1254 * vector128 vr[32];
1255 * vector128 vscr;
1256 * vector128 vrsave;
1257 * };
1258 */
1259static int tm_cvmx_set(struct task_struct *target,
1260 const struct user_regset *regset,
1261 unsigned int pos, unsigned int count,
1262 const void *kbuf, const void __user *ubuf)
1263{
1264 int ret;
1265
1266 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1267
1268 if (!cpu_has_feature(CPU_FTR_TM))
1269 return -ENODEV;
1270
1271 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1272 return -ENODATA;
1273
1274 flush_fp_to_thread(target);
1275 flush_altivec_to_thread(target);
1276 flush_tmregs_to_thread(target);
1277
1278 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1279 &target->thread.vr_state, 0,
1280 33 * sizeof(vector128));
1281 if (!ret && count > 0) {
1282 /*
1283 * We use only the low-order word of vrsave.
1284 */
1285 union {
1286 elf_vrreg_t reg;
1287 u32 word;
1288 } vrsave;
1289 memset(&vrsave, 0, sizeof(vrsave));
1290 vrsave.word = target->thread.vrsave;
1291 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1292 33 * sizeof(vector128), -1);
1293 if (!ret)
1294 target->thread.vrsave = vrsave.word;
1295 }
1296
1297 return ret;
1298}
1299
1300/**
1301 * tm_cvsx_active - get active number of registers in CVSX
1302 * @target: The target task.
1303 * @regset: The user regset structure.
1304 *
1305 * This function checks for the active number of available
1306 * registers in the transaction checkpointed VSX category.
1307 */
1308static int tm_cvsx_active(struct task_struct *target,
1309 const struct user_regset *regset)
1310{
1311 if (!cpu_has_feature(CPU_FTR_TM))
1312 return -ENODEV;
1313
1314 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1315 return 0;
1316
1317 flush_vsx_to_thread(target);
1318 return target->thread.used_vsr ? regset->n : 0;
1319}
1320
1321/**
1322 * tm_cvsx_get - get CVSX registers
1323 * @target: The target task.
1324 * @regset: The user regset structure.
1325 * @pos: The buffer position.
1326 * @count: Number of bytes to copy.
1327 * @kbuf: Kernel buffer to copy from.
1328 * @ubuf: User buffer to copy into.
1329 *
1330 * This function gets the in-transaction checkpointed VSX registers.
1331 *
1332 * When the transaction is active 'fp_state' holds the checkpointed
1333 * values for the current transaction to fall back on if it aborts
1334 * in between. This function gets those checkpointed VSX registers.
1335 * The userspace interface buffer layout is as follows.
1336 *
1337 * struct data {
1338 * u64 vsx[32];
1339 * };
1340 */
1341static int tm_cvsx_get(struct task_struct *target,
1342 const struct user_regset *regset,
1343 unsigned int pos, unsigned int count,
1344 void *kbuf, void __user *ubuf)
1345{
1346 u64 buf[32];
1347 int ret, i;
1348
1349 if (!cpu_has_feature(CPU_FTR_TM))
1350 return -ENODEV;
1351
1352 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1353 return -ENODATA;
1354
1355 /* Flush the state */
1356 flush_fp_to_thread(target);
1357 flush_altivec_to_thread(target);
1358 flush_tmregs_to_thread(target);
1359 flush_vsx_to_thread(target);
1360
1361 for (i = 0; i < 32 ; i++)
1362 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
1363 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1364 buf, 0, 32 * sizeof(double));
1365
1366 return ret;
1367}
1368
1369/**
1370 * tm_cvsx_set - set CVSX registers
1371 * @target: The target task.
1372 * @regset: The user regset structure.
1373 * @pos: The buffer position.
1374 * @count: Number of bytes to copy.
1375 * @kbuf: Kernel buffer to copy into.
1376 * @ubuf: User buffer to copy from.
1377 *
1378 * This function sets the in-transaction checkpointed VSX registers.
1379 *
1380 * When the transaction is active 'fp_state' holds the checkpointed
1381 * VSX register values for the current transaction to fall back on
1382 * if it aborts in between. This function sets these checkpointed
1383 * VSX registers. The userspace interface buffer layout is as follows.
1384 *
1385 * struct data {
1386 * u64 vsx[32];
1387 * };
1388 */
1389static int tm_cvsx_set(struct task_struct *target,
1390 const struct user_regset *regset,
1391 unsigned int pos, unsigned int count,
1392 const void *kbuf, const void __user *ubuf)
1393{
1394 u64 buf[32];
1395 int ret, i;
1396
1397 if (!cpu_has_feature(CPU_FTR_TM))
1398 return -ENODEV;
1399
1400 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1401 return -ENODATA;
1402
1403 /* Flush the state */
1404 flush_fp_to_thread(target);
1405 flush_altivec_to_thread(target);
1406 flush_tmregs_to_thread(target);
1407 flush_vsx_to_thread(target);
1408
1409 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1410 buf, 0, 32 * sizeof(double));
1411 for (i = 0; i < 32 ; i++)
1412 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1413
1414 return ret;
1415}
1416
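The checkpointed VSX note has the same flat 32-u64 shape as NT_PPC_VSX. A sketch of reading it (NT_PPC_TM_CVSX assumed defined by the headers in use):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long read_ckpt_vsx(pid_t pid, uint64_t vsx[32])
{
	struct iovec iov = { .iov_base = vsx, .iov_len = 32 * sizeof(uint64_t) };

	/* Fails with ENODATA when the tracee is not in a transaction */
	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CVSX, &iov);
}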
1417/**
1418 * tm_spr_active - get active number of registers in TM SPR
1419 * @target: The target task.
1420 * @regset: The user regset structure.
1421 *
1422 * This function checks the active number of available
1423 * registers in the transactional memory SPR category.
1424 */
1425static int tm_spr_active(struct task_struct *target,
1426 const struct user_regset *regset)
1427{
1428 if (!cpu_has_feature(CPU_FTR_TM))
1429 return -ENODEV;
1430
1431 return regset->n;
1432}
1433
1434/**
1435 * tm_spr_get - get the TM related SPR registers
1436 * @target: The target task.
1437 * @regset: The user regset structure.
1438 * @pos: The buffer position.
1439 * @count: Number of bytes to copy.
1440 * @kbuf: Kernel buffer to copy from.
1441 * @ubuf: User buffer to copy into.
1442 *
1443 * This function gets transactional memory related SPR registers.
1444 * The userspace interface buffer layout is as follows.
1445 *
1446 * struct {
1447 * u64 tm_tfhar;
1448 * u64 tm_texasr;
1449 * u64 tm_tfiar;
1450 * };
1451 */
1452static int tm_spr_get(struct task_struct *target,
1453 const struct user_regset *regset,
1454 unsigned int pos, unsigned int count,
1455 void *kbuf, void __user *ubuf)
1456{
1457 int ret;
1458
1459 /* Build tests */
1460 BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1461 BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1462 BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1463
1464 if (!cpu_has_feature(CPU_FTR_TM))
1465 return -ENODEV;
1466
1467 /* Flush the states */
1468 flush_fp_to_thread(target);
1469 flush_altivec_to_thread(target);
1470 flush_tmregs_to_thread(target);
1471
1472 /* TFHAR register */
1473 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1474 &target->thread.tm_tfhar, 0, sizeof(u64));
1475
1476 /* TEXASR register */
1477 if (!ret)
1478 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1479 &target->thread.tm_texasr, sizeof(u64),
1480 2 * sizeof(u64));
1481
1482 /* TFIAR register */
1483 if (!ret)
1484 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1485 &target->thread.tm_tfiar,
1486 2 * sizeof(u64), 3 * sizeof(u64));
1487 return ret;
1488}
1489
1490/**
1491 * tm_spr_set - set the TM related SPR registers
1492 * @target: The target task.
1493 * @regset: The user regset structure.
1494 * @pos: The buffer position.
1495 * @count: Number of bytes to copy.
1496 * @kbuf: Kernel buffer to copy into.
1497 * @ubuf: User buffer to copy from.
1498 *
1499 * This function sets transactional memory related SPR registers.
1500 * The userspace interface buffer layout is as follows.
1501 *
1502 * struct {
1503 * u64 tm_tfhar;
1504 * u64 tm_texasr;
1505 * u64 tm_tfiar;
1506 * };
1507 */
1508static int tm_spr_set(struct task_struct *target,
1509 const struct user_regset *regset,
1510 unsigned int pos, unsigned int count,
1511 const void *kbuf, const void __user *ubuf)
1512{
1513 int ret;
1514
1515 /* Build tests */
1516 BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1517 BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1518 BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1519
1520 if (!cpu_has_feature(CPU_FTR_TM))
1521 return -ENODEV;
1522
1523 /* Flush the states */
1524 flush_fp_to_thread(target);
1525 flush_altivec_to_thread(target);
1526 flush_tmregs_to_thread(target);
1527
1528 /* TFHAR register */
1529 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1530 &target->thread.tm_tfhar, 0, sizeof(u64));
1531
1532 /* TEXASR register */
1533 if (!ret)
1534 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1535 &target->thread.tm_texasr, sizeof(u64),
1536 2 * sizeof(u64));
1537
1538 /* TFIAR register */
1539 if (!ret)
1540 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1541 &target->thread.tm_tfiar,
1542 2 * sizeof(u64), 3 * sizeof(u64));
1543 return ret;
1544}
1545
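Unlike the checkpointed regsets, tm_spr_active() does not require a live transaction, so this note is readable whenever the CPU has TM. A sketch following the documented three-u64 layout (NT_PPC_TM_SPR assumed defined):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Matches the documented layout: TFHAR, TEXASR, TFIAR */
struct ppc_tm_sprs {
	uint64_t tm_tfhar;
	uint64_t tm_texasr;
	uint64_t tm_tfiar;
};

static long read_tm_sprs(pid_t pid, struct ppc_tm_sprs *sprs)
{
	struct iovec iov = { .iov_base = sprs, .iov_len = sizeof(*sprs) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
}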
1546static int tm_tar_active(struct task_struct *target,
1547 const struct user_regset *regset)
1548{
1549 if (!cpu_has_feature(CPU_FTR_TM))
1550 return -ENODEV;
1551
1552 if (MSR_TM_ACTIVE(target->thread.regs->msr))
1553 return regset->n;
1554
1555 return 0;
1556}
1557
1558static int tm_tar_get(struct task_struct *target,
1559 const struct user_regset *regset,
1560 unsigned int pos, unsigned int count,
1561 void *kbuf, void __user *ubuf)
1562{
1563 int ret;
1564
1565 if (!cpu_has_feature(CPU_FTR_TM))
1566 return -ENODEV;
1567
1568 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1569 return -ENODATA;
1570
1571 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1572 &target->thread.tm_tar, 0, sizeof(u64));
1573 return ret;
1574}
1575
1576static int tm_tar_set(struct task_struct *target,
1577 const struct user_regset *regset,
1578 unsigned int pos, unsigned int count,
1579 const void *kbuf, const void __user *ubuf)
1580{
1581 int ret;
1582
1583 if (!cpu_has_feature(CPU_FTR_TM))
1584 return -ENODEV;
1585
1586 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1587 return -ENODATA;
1588
1589 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1590 &target->thread.tm_tar, 0, sizeof(u64));
1591 return ret;
1592}
1593
1594static int tm_ppr_active(struct task_struct *target,
1595 const struct user_regset *regset)
1596{
1597 if (!cpu_has_feature(CPU_FTR_TM))
1598 return -ENODEV;
1599
1600 if (MSR_TM_ACTIVE(target->thread.regs->msr))
1601 return regset->n;
1602
1603 return 0;
1604}
1605
1606
1607static int tm_ppr_get(struct task_struct *target,
1608 const struct user_regset *regset,
1609 unsigned int pos, unsigned int count,
1610 void *kbuf, void __user *ubuf)
1611{
1612 int ret;
1613
1614 if (!cpu_has_feature(CPU_FTR_TM))
1615 return -ENODEV;
1616
1617 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1618 return -ENODATA;
1619
1620 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1621 &target->thread.tm_ppr, 0, sizeof(u64));
1622 return ret;
1623}
1624
1625static int tm_ppr_set(struct task_struct *target,
1626 const struct user_regset *regset,
1627 unsigned int pos, unsigned int count,
1628 const void *kbuf, const void __user *ubuf)
1629{
1630 int ret;
1631
1632 if (!cpu_has_feature(CPU_FTR_TM))
1633 return -ENODEV;
1634
1635 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1636 return -ENODATA;
1637
1638 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1639 &target->thread.tm_ppr, 0, sizeof(u64));
1640 return ret;
1641}
1642
1643static int tm_dscr_active(struct task_struct *target,
1644 const struct user_regset *regset)
1645{
1646 if (!cpu_has_feature(CPU_FTR_TM))
1647 return -ENODEV;
1648
1649 if (MSR_TM_ACTIVE(target->thread.regs->msr))
1650 return regset->n;
1651
1652 return 0;
1653}
1654
1655static int tm_dscr_get(struct task_struct *target,
1656 const struct user_regset *regset,
1657 unsigned int pos, unsigned int count,
1658 void *kbuf, void __user *ubuf)
1659{
1660 int ret;
1661
1662 if (!cpu_has_feature(CPU_FTR_TM))
1663 return -ENODEV;
1664
1665 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1666 return -ENODATA;
1667
1668 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1669 &target->thread.tm_dscr, 0, sizeof(u64));
1670 return ret;
1671}
1672
1673static int tm_dscr_set(struct task_struct *target,
1674 const struct user_regset *regset,
1675 unsigned int pos, unsigned int count,
1676 const void *kbuf, const void __user *ubuf)
1677{
1678 int ret;
1679
1680 if (!cpu_has_feature(CPU_FTR_TM))
1681 return -ENODEV;
1682
1683 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1684 return -ENODATA;
1685
1686 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1687 &target->thread.tm_dscr, 0, sizeof(u64));
1688 return ret;
1689}
1690#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1691
1692#ifdef CONFIG_PPC64
1693static int ppr_get(struct task_struct *target,
1694 const struct user_regset *regset,
1695 unsigned int pos, unsigned int count,
1696 void *kbuf, void __user *ubuf)
1697{
1698 int ret;
1699
1700 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1701 &target->thread.ppr, 0, sizeof(u64));
1702 return ret;
1703}
1704
1705static int ppr_set(struct task_struct *target,
1706 const struct user_regset *regset,
1707 unsigned int pos, unsigned int count,
1708 const void *kbuf, const void __user *ubuf)
1709{
1710 int ret;
1711
1712 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1713 &target->thread.ppr, 0, sizeof(u64));
1714 return ret;
1715}
1716
1717static int dscr_get(struct task_struct *target,
1718 const struct user_regset *regset,
1719 unsigned int pos, unsigned int count,
1720 void *kbuf, void __user *ubuf)
1721{
1722 int ret;
1723
1724 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1725 &target->thread.dscr, 0, sizeof(u64));
1726 return ret;
1727}
1728static int dscr_set(struct task_struct *target,
1729 const struct user_regset *regset,
1730 unsigned int pos, unsigned int count,
1731 const void *kbuf, const void __user *ubuf)
1732{
1733 int ret;
1734
1735 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1736 &target->thread.dscr, 0, sizeof(u64));
1737 return ret;
1738}
1739#endif
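PPR, DSCR, TAR and the checkpointed CTAR/CPPR/CDSCR notes are all single-u64 regsets, so one helper covers them. A sketch (the NT_PPC_* constants assumed available from elf.h):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long read_u64_regset(pid_t pid, int note_type, uint64_t *val)
{
	struct iovec iov = { .iov_base = val, .iov_len = sizeof(*val) };

	return ptrace(PTRACE_GETREGSET, pid, note_type, &iov);
}

/* e.g. read_u64_regset(pid, NT_PPC_PPR, &ppr);
 *      read_u64_regset(pid, NT_PPC_DSCR, &dscr); */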
1740#ifdef CONFIG_PPC_BOOK3S_64
1741static int tar_get(struct task_struct *target,
1742 const struct user_regset *regset,
1743 unsigned int pos, unsigned int count,
1744 void *kbuf, void __user *ubuf)
1745{
1746 int ret;
1747
1748 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1749 &target->thread.tar, 0, sizeof(u64));
1750 return ret;
1751}
1752static int tar_set(struct task_struct *target,
1753 const struct user_regset *regset,
1754 unsigned int pos, unsigned int count,
1755 const void *kbuf, const void __user *ubuf)
1756{
1757 int ret;
1758
1759 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1760 &target->thread.tar, 0, sizeof(u64));
1761 return ret;
1762}
1763
1764static int ebb_active(struct task_struct *target,
1765 const struct user_regset *regset)
1766{
1767 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1768 return -ENODEV;
1769
1770 if (target->thread.used_ebb)
1771 return regset->n;
1772
1773 return 0;
1774}
1775
1776static int ebb_get(struct task_struct *target,
1777 const struct user_regset *regset,
1778 unsigned int pos, unsigned int count,
1779 void *kbuf, void __user *ubuf)
1780{
1781 /* Build tests */
1782 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1783 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1784
1785 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1786 return -ENODEV;
1787
1788 if (!target->thread.used_ebb)
1789 return -ENODATA;
1790
1791 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1792 &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1793}
1794
1795static int ebb_set(struct task_struct *target,
1796 const struct user_regset *regset,
1797 unsigned int pos, unsigned int count,
1798 const void *kbuf, const void __user *ubuf)
1799{
1800 int ret = 0;
1801
1802 /* Build tests */
1803 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1804 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1805
1806 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1807 return -ENODEV;
1808
1809 if (target->thread.used_ebb)
1810 return -ENODATA;
1811
1812 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1813 &target->thread.ebbrr, 0, sizeof(unsigned long));
1814
1815 if (!ret)
1816 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1817 &target->thread.ebbhr, sizeof(unsigned long),
1818 2 * sizeof(unsigned long));
1819
1820 if (!ret)
1821 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1822 &target->thread.bescr,
1823 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1824
1825 return ret;
1826}
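ebb_get()/ebb_set() move EBBRR, EBBHR and BESCR as three consecutive longs and report ENODATA while the tracee has never used EBB. A sketch (NT_PPC_EBB assumed defined by the headers in use):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* EBBRR, EBBHR, BESCR in the order ebb_get() copies them out */
struct ppc_ebb_regs {
	uint64_t ebbrr;
	uint64_t ebbhr;
	uint64_t bescr;
};

static long read_ebb(pid_t pid, struct ppc_ebb_regs *ebb)
{
	struct iovec iov = { .iov_base = ebb, .iov_len = sizeof(*ebb) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_EBB, &iov);
}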
1827static int pmu_active(struct task_struct *target,
1828 const struct user_regset *regset)
1829{
1830 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1831 return -ENODEV;
1832
1833 return regset->n;
1834}
1835
1836static int pmu_get(struct task_struct *target,
1837 const struct user_regset *regset,
1838 unsigned int pos, unsigned int count,
1839 void *kbuf, void __user *ubuf)
1840{
1841 /* Build tests */
1842 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1843 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1844 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1845 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1846
1847 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1848 return -ENODEV;
1849
1850 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1851 &target->thread.siar, 0,
1852 5 * sizeof(unsigned long));
1853}
1854
1855static int pmu_set(struct task_struct *target,
1856 const struct user_regset *regset,
1857 unsigned int pos, unsigned int count,
1858 const void *kbuf, const void __user *ubuf)
1859{
1860 int ret = 0;
1861
1862 /* Build tests */
1863 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1864 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1865 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1866 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1867
1868 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1869 return -ENODEV;
1870
1871 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1872 &target->thread.siar, 0,
1873 sizeof(unsigned long));
1874
1875 if (!ret)
1876 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1877 &target->thread.sdar, sizeof(unsigned long),
1878 2 * sizeof(unsigned long));
1879
1880 if (!ret)
1881 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1882 &target->thread.sier, 2 * sizeof(unsigned long),
1883 3 * sizeof(unsigned long));
1884
1885 if (!ret)
1886 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1887 &target->thread.mmcr2, 3 * sizeof(unsigned long),
1888 4 * sizeof(unsigned long));
1889
1890 if (!ret)
1891 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1892 &target->thread.mmcr0, 4 * sizeof(unsigned long),
1893 5 * sizeof(unsigned long));
1894 return ret;
1895}
1896#endif
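pmu_get() relies on SIAR, SDAR, SIER, MMCR2 and MMCR0 being laid out consecutively in thread_struct (the BUILD_BUG_ONs above enforce that), so the note body is five u64s in that order. A sketch (NT_PPC_PMU assumed defined):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct ppc_pmu_regs {
	uint64_t siar;
	uint64_t sdar;
	uint64_t sier;
	uint64_t mmcr2;
	uint64_t mmcr0;
};

static long read_pmu(pid_t pid, struct ppc_pmu_regs *pmu)
{
	struct iovec iov = { .iov_base = pmu, .iov_len = sizeof(*pmu) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_PMU, &iov);
}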
1897/*
619 * These are our native regset flavors. 1898 * These are our native regset flavors.
620 */ 1899 */
621enum powerpc_regset { 1900enum powerpc_regset {
@@ -630,6 +1909,25 @@ enum powerpc_regset {
630#ifdef CONFIG_SPE 1909#ifdef CONFIG_SPE
631 REGSET_SPE, 1910 REGSET_SPE,
632#endif 1911#endif
1912#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1913 REGSET_TM_CGPR, /* TM checkpointed GPR registers */
1914 REGSET_TM_CFPR, /* TM checkpointed FPR registers */
1915 REGSET_TM_CVMX, /* TM checkpointed VMX registers */
1916 REGSET_TM_CVSX, /* TM checkpointed VSX registers */
1917 REGSET_TM_SPR, /* TM specific SPR registers */
1918 REGSET_TM_CTAR, /* TM checkpointed TAR register */
1919 REGSET_TM_CPPR, /* TM checkpointed PPR register */
1920 REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
1921#endif
1922#ifdef CONFIG_PPC64
1923 REGSET_PPR, /* PPR register */
1924 REGSET_DSCR, /* DSCR register */
1925#endif
1926#ifdef CONFIG_PPC_BOOK3S_64
1927 REGSET_TAR, /* TAR register */
1928 REGSET_EBB, /* EBB registers */
1929 REGSET_PMR, /* Performance Monitor Registers */
1930#endif
633}; 1931};
634 1932
635static const struct user_regset native_regsets[] = { 1933static const struct user_regset native_regsets[] = {
@@ -664,6 +1962,77 @@ static const struct user_regset native_regsets[] = {
664 .active = evr_active, .get = evr_get, .set = evr_set 1962 .active = evr_active, .get = evr_get, .set = evr_set
665 }, 1963 },
666#endif 1964#endif
1965#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1966 [REGSET_TM_CGPR] = {
1967 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1968 .size = sizeof(long), .align = sizeof(long),
1969 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1970 },
1971 [REGSET_TM_CFPR] = {
1972 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1973 .size = sizeof(double), .align = sizeof(double),
1974 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1975 },
1976 [REGSET_TM_CVMX] = {
1977 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1978 .size = sizeof(vector128), .align = sizeof(vector128),
1979 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1980 },
1981 [REGSET_TM_CVSX] = {
1982 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1983 .size = sizeof(double), .align = sizeof(double),
1984 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1985 },
1986 [REGSET_TM_SPR] = {
1987 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1988 .size = sizeof(u64), .align = sizeof(u64),
1989 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1990 },
1991 [REGSET_TM_CTAR] = {
1992 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1993 .size = sizeof(u64), .align = sizeof(u64),
1994 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1995 },
1996 [REGSET_TM_CPPR] = {
1997 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1998 .size = sizeof(u64), .align = sizeof(u64),
1999 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2000 },
2001 [REGSET_TM_CDSCR] = {
2002 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2003 .size = sizeof(u64), .align = sizeof(u64),
2004 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2005 },
2006#endif
2007#ifdef CONFIG_PPC64
2008 [REGSET_PPR] = {
2009 .core_note_type = NT_PPC_PPR, .n = 1,
2010 .size = sizeof(u64), .align = sizeof(u64),
2011 .get = ppr_get, .set = ppr_set
2012 },
2013 [REGSET_DSCR] = {
2014 .core_note_type = NT_PPC_DSCR, .n = 1,
2015 .size = sizeof(u64), .align = sizeof(u64),
2016 .get = dscr_get, .set = dscr_set
2017 },
2018#endif
2019#ifdef CONFIG_PPC_BOOK3S_64
2020 [REGSET_TAR] = {
2021 .core_note_type = NT_PPC_TAR, .n = 1,
2022 .size = sizeof(u64), .align = sizeof(u64),
2023 .get = tar_get, .set = tar_set
2024 },
2025 [REGSET_EBB] = {
2026 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2027 .size = sizeof(u64), .align = sizeof(u64),
2028 .active = ebb_active, .get = ebb_get, .set = ebb_set
2029 },
2030 [REGSET_PMR] = {
2031 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
2032 .size = sizeof(u64), .align = sizeof(u64),
2033 .active = pmu_active, .get = pmu_get, .set = pmu_set
2034 },
2035#endif
667}; 2036};
668 2037
669static const struct user_regset_view user_ppc_native_view = { 2038static const struct user_regset_view user_ppc_native_view = {
@@ -674,24 +2043,35 @@ static const struct user_regset_view user_ppc_native_view = {
674#ifdef CONFIG_PPC64 2043#ifdef CONFIG_PPC64
675#include <linux/compat.h> 2044#include <linux/compat.h>
676 2045
677static int gpr32_get(struct task_struct *target, 2046static int gpr32_get_common(struct task_struct *target,
678 const struct user_regset *regset, 2047 const struct user_regset *regset,
679 unsigned int pos, unsigned int count, 2048 unsigned int pos, unsigned int count,
680 void *kbuf, void __user *ubuf) 2049 void *kbuf, void __user *ubuf, bool tm_active)
681{ 2050{
682 const unsigned long *regs = &target->thread.regs->gpr[0]; 2051 const unsigned long *regs = &target->thread.regs->gpr[0];
2052 const unsigned long *ckpt_regs;
683 compat_ulong_t *k = kbuf; 2053 compat_ulong_t *k = kbuf;
684 compat_ulong_t __user *u = ubuf; 2054 compat_ulong_t __user *u = ubuf;
685 compat_ulong_t reg; 2055 compat_ulong_t reg;
686 int i; 2056 int i;
687 2057
688 if (target->thread.regs == NULL) 2058#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
689 return -EIO; 2059 ckpt_regs = &target->thread.ckpt_regs.gpr[0];
2060#endif
2061 if (tm_active) {
2062 regs = ckpt_regs;
2063 } else {
2064 if (target->thread.regs == NULL)
2065 return -EIO;
690 2066
691 if (!FULL_REGS(target->thread.regs)) { 2067 if (!FULL_REGS(target->thread.regs)) {
692 /* We have a partial register set. Fill 14-31 with bogus values */ 2068 /*
693 for (i = 14; i < 32; i++) 2069 * We have a partial register set.
694 target->thread.regs->gpr[i] = NV_REG_POISON; 2070 * Fill 14-31 with bogus values.
2071 */
2072 for (i = 14; i < 32; i++)
2073 target->thread.regs->gpr[i] = NV_REG_POISON;
2074 }
695 } 2075 }
696 2076
697 pos /= sizeof(reg); 2077 pos /= sizeof(reg);
@@ -731,20 +2111,31 @@ static int gpr32_get(struct task_struct *target,
731 PT_REGS_COUNT * sizeof(reg), -1); 2111 PT_REGS_COUNT * sizeof(reg), -1);
732} 2112}
733 2113
734static int gpr32_set(struct task_struct *target, 2114static int gpr32_set_common(struct task_struct *target,
735 const struct user_regset *regset, 2115 const struct user_regset *regset,
736 unsigned int pos, unsigned int count, 2116 unsigned int pos, unsigned int count,
737 const void *kbuf, const void __user *ubuf) 2117 const void *kbuf, const void __user *ubuf, bool tm_active)
738{ 2118{
739 unsigned long *regs = &target->thread.regs->gpr[0]; 2119 unsigned long *regs = &target->thread.regs->gpr[0];
2120 unsigned long *ckpt_regs;
740 const compat_ulong_t *k = kbuf; 2121 const compat_ulong_t *k = kbuf;
741 const compat_ulong_t __user *u = ubuf; 2122 const compat_ulong_t __user *u = ubuf;
742 compat_ulong_t reg; 2123 compat_ulong_t reg;
743 2124
744 if (target->thread.regs == NULL) 2125#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
745 return -EIO; 2126 ckpt_regs = &target->thread.ckpt_regs.gpr[0];
2127#endif
746 2128
747 CHECK_FULL_REGS(target->thread.regs); 2129 if (tm_active) {
2130 regs = ckpt_regs;
2131 } else {
2132 regs = &target->thread.regs->gpr[0];
2133
2134 if (target->thread.regs == NULL)
2135 return -EIO;
2136
2137 CHECK_FULL_REGS(target->thread.regs);
2138 }
748 2139
749 pos /= sizeof(reg); 2140 pos /= sizeof(reg);
750 count /= sizeof(reg); 2141 count /= sizeof(reg);
@@ -804,6 +2195,40 @@ static int gpr32_set(struct task_struct *target,
804 (PT_TRAP + 1) * sizeof(reg), -1); 2195 (PT_TRAP + 1) * sizeof(reg), -1);
805} 2196}
806 2197
2198#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2199static int tm_cgpr32_get(struct task_struct *target,
2200 const struct user_regset *regset,
2201 unsigned int pos, unsigned int count,
2202 void *kbuf, void __user *ubuf)
2203{
2204 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
2205}
2206
2207static int tm_cgpr32_set(struct task_struct *target,
2208 const struct user_regset *regset,
2209 unsigned int pos, unsigned int count,
2210 const void *kbuf, const void __user *ubuf)
2211{
2212 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
2213}
2214#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2215
2216static int gpr32_get(struct task_struct *target,
2217 const struct user_regset *regset,
2218 unsigned int pos, unsigned int count,
2219 void *kbuf, void __user *ubuf)
2220{
2221 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
2222}
2223
2224static int gpr32_set(struct task_struct *target,
2225 const struct user_regset *regset,
2226 unsigned int pos, unsigned int count,
2227 const void *kbuf, const void __user *ubuf)
2228{
2229 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
2230}
2231
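Through these wrappers a 32-bit tracee exposes the same NT_PPC_TM_CGPR note, but the handlers above transfer 32-bit (compat_ulong_t) slots. A tracer-side sketch, assuming the kernel's ELF_NGREG of 48 for the GPR-shaped regsets and an NT_PPC_TM_CGPR definition:

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#define PPC_ELF_NGREG	48	/* matches the kernel's ELF_NGREG */

static long read_ckpt_gprs32(pid_t pid, uint32_t regs[PPC_ELF_NGREG])
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len = PPC_ELF_NGREG * sizeof(uint32_t),
	};

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CGPR, &iov);
}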
807/* 2232/*
808 * These are the regset flavors matching the CONFIG_PPC32 native set. 2233 * These are the regset flavors matching the CONFIG_PPC32 native set.
809 */ 2234 */
@@ -832,6 +2257,73 @@ static const struct user_regset compat_regsets[] = {
832 .active = evr_active, .get = evr_get, .set = evr_set 2257 .active = evr_active, .get = evr_get, .set = evr_set
833 }, 2258 },
834#endif 2259#endif
2260#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2261 [REGSET_TM_CGPR] = {
2262 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2263 .size = sizeof(long), .align = sizeof(long),
2264 .active = tm_cgpr_active,
2265 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2266 },
2267 [REGSET_TM_CFPR] = {
2268 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2269 .size = sizeof(double), .align = sizeof(double),
2270 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2271 },
2272 [REGSET_TM_CVMX] = {
2273 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2274 .size = sizeof(vector128), .align = sizeof(vector128),
2275 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2276 },
2277 [REGSET_TM_CVSX] = {
2278 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2279 .size = sizeof(double), .align = sizeof(double),
2280 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2281 },
2282 [REGSET_TM_SPR] = {
2283 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2284 .size = sizeof(u64), .align = sizeof(u64),
2285 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2286 },
2287 [REGSET_TM_CTAR] = {
2288 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2289 .size = sizeof(u64), .align = sizeof(u64),
2290 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2291 },
2292 [REGSET_TM_CPPR] = {
2293 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2294 .size = sizeof(u64), .align = sizeof(u64),
2295 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2296 },
2297 [REGSET_TM_CDSCR] = {
2298 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2299 .size = sizeof(u64), .align = sizeof(u64),
2300 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2301 },
2302#endif
2303#ifdef CONFIG_PPC64
2304 [REGSET_PPR] = {
2305 .core_note_type = NT_PPC_PPR, .n = 1,
2306 .size = sizeof(u64), .align = sizeof(u64),
2307 .get = ppr_get, .set = ppr_set
2308 },
2309 [REGSET_DSCR] = {
2310 .core_note_type = NT_PPC_DSCR, .n = 1,
2311 .size = sizeof(u64), .align = sizeof(u64),
2312 .get = dscr_get, .set = dscr_set
2313 },
2314#endif
2315#ifdef CONFIG_PPC_BOOK3S_64
2316 [REGSET_TAR] = {
2317 .core_note_type = NT_PPC_TAR, .n = 1,
2318 .size = sizeof(u64), .align = sizeof(u64),
2319 .get = tar_get, .set = tar_set
2320 },
2321 [REGSET_EBB] = {
2322 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2323 .size = sizeof(u64), .align = sizeof(u64),
2324 .active = ebb_active, .get = ebb_get, .set = ebb_set
2325 },
2326#endif
835}; 2327};
836 2328
837static const struct user_regset_view user_ppc_compat_view = { 2329static const struct user_regset_view user_ppc_compat_view = {
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 714b4ba7ab86..dba265c586df 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -66,6 +66,7 @@
66#include <asm/hugetlb.h> 66#include <asm/hugetlb.h>
67#include <asm/livepatch.h> 67#include <asm/livepatch.h>
68#include <asm/mmu_context.h> 68#include <asm/mmu_context.h>
69#include <asm/cpu_has_feature.h>
69 70
70#include "setup.h" 71#include "setup.h"
71 72
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 00f57754407e..c3e861df4b20 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -37,6 +37,7 @@
37#include <asm/serial.h> 37#include <asm/serial.h>
38#include <asm/udbg.h> 38#include <asm/udbg.h>
39#include <asm/code-patching.h> 39#include <asm/code-patching.h>
40#include <asm/cpu_has_feature.h>
40 41
41#define DBG(fmt...) 42#define DBG(fmt...)
42 43
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d8216aed22b7..eafb9a79e011 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -227,8 +227,8 @@ static void __init configure_exceptions(void)
227 opal_configure_cores(); 227 opal_configure_cores();
228 228
229 /* Enable AIL if supported, and we are in hypervisor mode */ 229 /* Enable AIL if supported, and we are in hypervisor mode */
230 if (cpu_has_feature(CPU_FTR_HVMODE) && 230 if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
231 cpu_has_feature(CPU_FTR_ARCH_207S)) { 231 early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
232 unsigned long lpcr = mfspr(SPRN_LPCR); 232 unsigned long lpcr = mfspr(SPRN_LPCR);
233 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 233 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
234 } 234 }
@@ -298,12 +298,12 @@ void __init early_setup(unsigned long dt_ptr)
298 */ 298 */
299 configure_exceptions(); 299 configure_exceptions();
300 300
301 /* Initialize the hash table or TLB handling */
302 early_init_mmu();
303
304 /* Apply all the dynamic patching */ 301 /* Apply all the dynamic patching */
305 apply_feature_fixups(); 302 apply_feature_fixups();
306 303
304 /* Initialize the hash table or TLB handling */
305 early_init_mmu();
306
307 /* 307 /*
308 * At this point, we can let interrupts switch to virtual mode 308 * At this point, we can let interrupts switch to virtual mode
309 * (the MMU has been setup), so adjust the MSR in the PACA to 309 * (the MMU has been setup), so adjust the MSR in the PACA to
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5a1f015ea9f3..25a39052bf6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -55,6 +55,7 @@
55#include <asm/debug.h> 55#include <asm/debug.h>
56#include <asm/kexec.h> 56#include <asm/kexec.h>
57#include <asm/asm-prototypes.h> 57#include <asm/asm-prototypes.h>
58#include <asm/cpu_has_feature.h>
58 59
59#ifdef DEBUG 60#ifdef DEBUG
60#include <asm/udbg.h> 61#include <asm/udbg.h>
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index defb2998b818..74145f02ad41 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/jump_label.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/string.h> 18#include <linux/string.h>
18#include <linux/init.h> 19#include <linux/init.h>
@@ -152,9 +153,18 @@ static void do_final_fixups(void)
 #endif
 }
 
-void apply_feature_fixups(void)
+static unsigned long __initdata saved_cpu_features;
+static unsigned int __initdata saved_mmu_features;
+#ifdef CONFIG_PPC64
+static unsigned long __initdata saved_firmware_features;
+#endif
+
+void __init apply_feature_fixups(void)
 {
-	struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);
+	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));
+
+	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
+	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;
 
 	/*
 	 * Apply the CPU-specific and firmware specific fixups to kernel text
@@ -173,11 +183,36 @@ void apply_feature_fixups(void)
 			 PTRRELOC(&__stop___lwsync_fixup));
 
 #ifdef CONFIG_PPC64
+	saved_firmware_features = powerpc_firmware_features;
 	do_feature_fixups(powerpc_firmware_features,
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
 	do_final_fixups();
+
+	/*
+	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
+	 * checks to take on their correct polarity based on the current set of
+	 * CPU/MMU features.
+	 */
+	jump_label_init();
+	cpu_feature_keys_init();
+	mmu_feature_keys_init();
+}
+
+static int __init check_features(void)
+{
+	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
+	     "CPU features changed after feature patching!\n");
+	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
+	     "MMU features changed after feature patching!\n");
+#ifdef CONFIG_PPC64
+	WARN(saved_firmware_features != powerpc_firmware_features,
+	     "Firmware features changed after feature patching!\n");
+#endif
+
+	return 0;
 }
+late_initcall(check_features);
 
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
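/*
 * Illustrative sketch (not part of the patch): the general shape of the
 * jump-label backed feature check that the jump_label_init() and
 * cpu/mmu_feature_keys_init() calls above enable.  Once the keys are
 * initialised, a feature test compiles down to a single patched branch
 * instead of a load/mask/test.  NUM_FEATURE_KEYS and feature_keys below are
 * simplified stand-ins, not the real powerpc key arrays.
 */
#include <linux/jump_label.h>

#define NUM_FEATURE_KEYS	64	/* hypothetical: one key per feature bit */

static struct static_key_true feature_keys[NUM_FEATURE_KEYS] = {
	[0 ... NUM_FEATURE_KEYS - 1] = STATIC_KEY_TRUE_INIT
};

static __always_inline bool has_feature(unsigned long feature)
{
	/* each feature flag is a single bit; index the key by bit number */
	return static_branch_likely(&feature_keys[__builtin_ctzl(feature)]);
}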
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 88ce7d212320..0e4e9654bd2c 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -72,8 +72,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 	/* clear out bits after (52) [0....52.....63] */
 	va &= ~((1ul << (64 - 52)) - 1);
 	va |= ssize << 8;
-	sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
-		((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+	sllp = get_sllp_encoding(apsize);
 	va |= sllp << 5;
 	asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
 		     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -122,8 +121,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 	/* clear out bits after(52) [0....52.....63] */
 	va &= ~((1ul << (64 - 52)) - 1);
 	va |= ssize << 8;
-	sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
-		((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+	sllp = get_sllp_encoding(apsize);
 	va |= sllp << 5;
 	asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
 		     : : "r"(va) : "memory");
@@ -749,5 +747,5 @@ void __init hpte_init_native(void)
 	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
 
 	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		ppc_md.register_process_table = native_register_proc_table;
+		register_process_table = native_register_proc_table;
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b78b5d211278..0821556e16f4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -363,11 +363,6 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 	return 0;
 }
 
-static void __init htab_init_seg_sizes(void)
-{
-	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
-}
-
 static int __init get_idx_from_shift(unsigned int shift)
 {
 	int idx = -1;
@@ -539,7 +534,7 @@ static bool might_have_hea(void)
 
 #endif /* #ifdef CONFIG_PPC_64K_PAGES */
 
-static void __init htab_init_page_sizes(void)
+static void __init htab_scan_page_sizes(void)
 {
 	int rc;
 
@@ -554,17 +549,23 @@ static void __init htab_init_page_sizes(void)
 	 * Try to find the available page sizes in the device-tree
 	 */
 	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
-	if (rc != 0) /* Found */
-		goto found;
-
-	/*
-	 * Not in the device-tree, let's fallback on known size
-	 * list for 16M capable GP & GR
-	 */
-	if (mmu_has_feature(MMU_FTR_16M_PAGE))
+	if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
+		/*
+		 * Nothing in the device-tree, but the CPU supports 16M pages,
+		 * so let's fallback on a known size list for 16M capable CPUs.
+		 */
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
-found:
+	}
+
+#ifdef CONFIG_HUGETLB_PAGE
+	/* Reserve 16G huge page memory sections for huge pages */
+	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+#endif /* CONFIG_HUGETLB_PAGE */
+}
+
+static void __init htab_init_page_sizes(void)
+{
 	if (!debug_pagealloc_enabled()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
@@ -630,11 +631,6 @@ found:
 	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
 #endif
 	       );
-
-#ifdef CONFIG_HUGETLB_PAGE
-	/* Reserve 16G huge page memory sections for huge pages */
-	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
-#endif /* CONFIG_HUGETLB_PAGE */
 }
 
 static int __init htab_dt_scan_pftsize(unsigned long node,
@@ -759,12 +755,6 @@ static void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
-	/* Initialize segment sizes */
-	htab_init_seg_sizes();
-
-	/* Initialize page sizes */
-	htab_init_page_sizes();
-
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 		mmu_kernel_ssize = MMU_SEGSIZE_1T;
 		mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -885,8 +875,19 @@ static void __init htab_initialize(void)
 #undef KB
 #undef MB
 
+void __init hash__early_init_devtree(void)
+{
+	/* Initialize segment sizes */
+	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+
+	/* Initialize page sizes */
+	htab_scan_page_sizes();
+}
+
 void __init hash__early_init_mmu(void)
 {
+	htab_init_page_sizes();
+
 	/*
 	 * initialize page table size
 	 */
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 1e11559e1aac..35254a678456 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -5,39 +5,34 @@
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
 #include <asm/mman.h>
+#include <asm/tlb.h>
 
 void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	unsigned long ap, shift;
+	int psize;
 	struct hstate *hstate = hstate_file(vma->vm_file);
 
-	shift = huge_page_shift(hstate);
-	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
-		ap = mmu_get_ap(MMU_PAGE_2M);
-	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
-		ap = mmu_get_ap(MMU_PAGE_1G);
-	else {
-		WARN(1, "Wrong huge page shift\n");
-		return ;
-	}
-	radix___flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
+	psize = hstate_get_psize(hstate);
+	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
 void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	unsigned long ap, shift;
+	int psize;
 	struct hstate *hstate = hstate_file(vma->vm_file);
 
-	shift = huge_page_shift(hstate);
-	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
-		ap = mmu_get_ap(MMU_PAGE_2M);
-	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
-		ap = mmu_get_ap(MMU_PAGE_1G);
-	else {
-		WARN(1, "Wrong huge page shift\n");
-		return ;
-	}
-	radix___local_flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
+	psize = hstate_get_psize(hstate);
+	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
+}
+
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				    unsigned long end)
+{
+	int psize;
+	struct hstate *hstate = hstate_file(vma->vm_file);
+
+	psize = hstate_get_psize(hstate);
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
 }
 
 /*
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 33709bdb0419..16ada1eb7e26 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -411,3 +411,25 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
 EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
 
 #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+static bool disable_radix;
+static int __init parse_disable_radix(char *p)
+{
+	disable_radix = true;
+	return 0;
+}
+early_param("disable_radix", parse_disable_radix);
+
+void __init mmu_early_init_devtree(void)
+{
+	/* Disable radix mode based on kernel command line. */
+	if (disable_radix)
+		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+
+	if (early_radix_enabled())
+		radix__early_init_devtree();
+	else
+		hash__early_init_devtree();
+}
+#endif /* CONFIG_PPC_STD_MMU_64 */
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 670318766545..34079302cc17 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -14,6 +14,9 @@
 #include "mmu_decl.h"
 #include <trace/events/thp.h>
 
+int (*register_process_table)(unsigned long base, unsigned long page_size,
+			      unsigned long tbl_size);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * This is called when relaxing access to a hugepage. It's also called in the page
@@ -33,7 +36,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
 		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
@@ -66,7 +69,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
 	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
 	 * to prevent a parallel THP split work as expected.
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 003ff48a11b6..af897d91d09f 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -171,7 +171,7 @@ redo:
 	 * of process table here. But our linear mapping also enable us to use
 	 * physical address here.
 	 */
-	ppc_md.register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
+	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
 	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
 }
 
@@ -198,7 +198,7 @@ static void __init radix_init_partition_table(void)
 
 void __init radix_init_native(void)
 {
-	ppc_md.register_process_table = native_register_process_table;
+	register_process_table = native_register_process_table;
 }
 
 static int __init get_idx_from_shift(unsigned int shift)
@@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
 	return 1;
 }
 
-static void __init radix_init_page_sizes(void)
+void __init radix__early_init_devtree(void)
 {
 	int rc;
 
@@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
 	__pte_frag_nr = H_PTE_FRAG_NR;
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
-	radix_init_page_sizes();
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 		radix_init_native();
 		lpcr = mfspr(SPRN_LPCR);
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 88a307504b5a..0b6fb244d0a1 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -225,7 +225,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 		if (!is_vm_hugetlb_page(vma))
 			assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(ptep, entry);
-		flush_tlb_page_nohash(vma, address);
+		flush_tlb_page(vma, address);
 	}
 	return changed;
 }
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index e1f22700fb16..48df05ef5231 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -140,10 +140,11 @@ void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
 
-void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-			    unsigned long ap, int nid)
+void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+				       int psize)
 {
 	unsigned long pid;
+	unsigned long ap = mmu_get_ap(psize);
 
 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
@@ -159,18 +160,12 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
 	if (vma && is_vm_hugetlb_page(vma))
 		return __local_flush_hugetlb_page(vma, vmaddr);
 #endif
-	radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			       mmu_get_ap(mmu_virtual_psize), 0);
+	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
+					  mmu_virtual_psize);
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_page);
 
 #ifdef CONFIG_SMP
-static int mm_is_core_local(struct mm_struct *mm)
-{
-	return cpumask_subset(mm_cpumask(mm),
-			      topology_sibling_cpumask(smp_processor_id()));
-}
-
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned long pid;
@@ -221,10 +216,11 @@ no_context:
 }
 EXPORT_SYMBOL(radix__flush_tlb_pwc);
 
-void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-			unsigned long ap, int nid)
+void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+				 int psize)
 {
 	unsigned long pid;
+	unsigned long ap = mmu_get_ap(psize);
 
 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
@@ -250,8 +246,8 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	if (vma && is_vm_hugetlb_page(vma))
 		return flush_hugetlb_page(vma, vmaddr);
 #endif
-	radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			mmu_get_ap(mmu_virtual_psize), 0);
+	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
+				    mmu_virtual_psize);
 }
 EXPORT_SYMBOL(radix__flush_tlb_page);
 
@@ -299,8 +295,65 @@ static int radix_get_mmu_psize(int page_size)
 
 void radix__tlb_flush(struct mmu_gather *tlb)
 {
+	int psize = 0;
 	struct mm_struct *mm = tlb->mm;
-	radix__flush_tlb_mm(mm);
+	int page_size = tlb->page_size;
+
+	psize = radix_get_mmu_psize(page_size);
+	/*
+	 * if page size is not something we understand, do a full mm flush
+	 */
+	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
+		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
+	else
+		radix__flush_tlb_mm(mm);
+}
+
+#define TLB_FLUSH_ALL -1UL
+/*
+ * Number of pages above which we will do a bcast tlbie. Just a
+ * number at this point copied from x86
+ */
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+
+void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+				  unsigned long end, int psize)
+{
+	unsigned long pid;
+	unsigned long addr;
+	int local = mm_is_core_local(mm);
+	unsigned long ap = mmu_get_ap(psize);
+	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
+
+
+	preempt_disable();
+	pid = mm ? mm->context.id : 0;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto err_out;
+
+	if (end == TLB_FLUSH_ALL ||
+	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
+		if (local)
+			_tlbiel_pid(pid, RIC_FLUSH_TLB);
+		else
+			_tlbie_pid(pid, RIC_FLUSH_TLB);
+		goto err_out;
+	}
+	for (addr = start; addr < end; addr += page_size) {
+
+		if (local)
+			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+		else {
+			if (lock_tlbie)
+				raw_spin_lock(&native_tlbie_lock);
+			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+			if (lock_tlbie)
+				raw_spin_unlock(&native_tlbie_lock);
+		}
+	}
+err_out:
+	preempt_enable();
 }
 
 void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
@@ -340,3 +393,10 @@ void radix__flush_tlb_lpid(unsigned long lpid)
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 EXPORT_SYMBOL(radix__flush_tlb_lpid);
+
+void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
+}
+EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
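/*
 * Illustrative sketch (not part of the patch): the heuristic that
 * radix__flush_tlb_range_psize() above applies.  Ranges longer than
 * tlb_single_page_flush_ceiling pages (33, a tuning value copied from x86)
 * take one full-PID flush instead of one tlbie per page, since per-page
 * invalidates stop paying off beyond a small range.
 */
#include <linux/types.h>

static bool range_wants_full_flush(unsigned long start, unsigned long end,
				   unsigned long page_size)
{
	const unsigned long ceiling = 33;	/* tlb_single_page_flush_ceiling */

	return end == -1UL || (end - start) > ceiling * page_size;
}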
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 558e30cce33e..702d7689d714 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -49,17 +49,6 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 EXPORT_SYMBOL(flush_hash_entry);
 
 /*
- * Called by ptep_set_access_flags, must flush on CPUs for which the
- * DSI handler can't just "fixup" the TLB on a write fault
- */
-void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
-{
-	if (Hash != 0)
-		return;
-	_tlbie(addr);
-}
-
-/*
  * Called at the end of a mmu_gather operation to make sure the
  * TLB flush is completely done.
  */
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index f4668488512c..050badc0ebd3 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -215,12 +215,6 @@ EXPORT_SYMBOL(local_flush_tlb_page);
 
 static DEFINE_RAW_SPINLOCK(tlbivax_lock);
 
-static int mm_is_core_local(struct mm_struct *mm)
-{
-	return cpumask_subset(mm_cpumask(mm),
-			      topology_sibling_cpumask(smp_processor_id()));
-}
-
 struct tlb_flush_param {
 	unsigned long addr;
 	unsigned int pid;
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index cda6fcb809ca..6447dc1c3d89 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -34,15 +34,15 @@ EVENT(PM_L1_ICACHE_MISS, 0x200fd)
 /* Instruction Demand sectors wriittent into IL1 */
 EVENT(PM_L1_DEMAND_WRITE,		0x0408c)
 /* Instruction prefetch written into IL1 */
-EVENT(PM_IC_PREF_WRITE,			0x0408e)
+EVENT(PM_IC_PREF_WRITE,			0x0488c)
 /* The data cache was reloaded from local core's L3 due to a demand load */
 EVENT(PM_DATA_FROM_L3,			0x4c042)
 /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
 EVENT(PM_DATA_FROM_L3MISS,		0x300fe)
 /* All successful D-side store dispatches for this thread */
-EVENT(PM_L2_ST,				0x16081)
+EVENT(PM_L2_ST,				0x16880)
 /* All successful D-side store dispatches for this thread that were L2 Miss */
-EVENT(PM_L2_ST_MISS,			0x26081)
+EVENT(PM_L2_ST_MISS,			0x26880)
 /* Total HW L3 prefetches(Load+store) */
 EVENT(PM_L3_PREF_ALL,			0x4e052)
 /* Data PTEG reload */
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index d17e98bc0c10..e7d075077cb0 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/reg.h>
 #include <asm/cell-regs.h>
+#include <asm/cpu_has_feature.h>
 
 #include "pervasive.h"
 
diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
index 89098f320ad5..ee9891734149 100644
--- a/arch/powerpc/xmon/ppc-dis.c
+++ b/arch/powerpc/xmon/ppc-dis.c
@@ -20,6 +20,7 @@ along with this file; see the file COPYING. If not, write to the Free
 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
 
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 #include "nonstdio.h"
 #include "ansidecl.h"
 #include "ppc.h"
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 70b172ba41ce..b59ee077a596 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -382,6 +382,19 @@ typedef struct elf64_shdr {
 #define NT_PPC_VMX	0x100		/* PowerPC Altivec/VMX registers */
 #define NT_PPC_SPE	0x101		/* PowerPC SPE/EVR registers */
 #define NT_PPC_VSX	0x102		/* PowerPC VSX registers */
+#define NT_PPC_TAR	0x103		/* Target Address Register */
+#define NT_PPC_PPR	0x104		/* Program Priority Register */
+#define NT_PPC_DSCR	0x105		/* Data Stream Control Register */
+#define NT_PPC_EBB	0x106		/* Event Based Branch Registers */
+#define NT_PPC_PMU	0x107		/* Performance Monitor Registers */
+#define NT_PPC_TM_CGPR	0x108		/* TM checkpointed GPR Registers */
+#define NT_PPC_TM_CFPR	0x109		/* TM checkpointed FPR Registers */
+#define NT_PPC_TM_CVMX	0x10a		/* TM checkpointed VMX Registers */
+#define NT_PPC_TM_CVSX	0x10b		/* TM checkpointed VSX Registers */
+#define NT_PPC_TM_SPR	0x10c		/* TM Special Purpose Registers */
+#define NT_PPC_TM_CTAR	0x10d		/* TM checkpointed Target Address Register */
+#define NT_PPC_TM_CPPR	0x10e		/* TM checkpointed Program Priority Register */
+#define NT_PPC_TM_CDSCR	0x10f		/* TM checkpointed Data Stream Control Register */
 #define NT_386_TLS	0x200		/* i386 TLS slots (struct user_desc) */
 #define NT_386_IOPERM	0x201		/* x86 io permission bitmap (1=deny) */
 #define NT_X86_XSTATE	0x202		/* x86 extended state using xsave */
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 20400055f177..93ad6c1fb9b6 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -288,6 +288,9 @@ void __init jump_label_init(void)
 	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
 	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
 
+	if (static_key_initialized)
+		return;
+
 	jump_label_lock();
 	jump_label_sort_entries(iter_start, iter_stop);
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef968306fd5b..b9aa1b0b38b0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3942,6 +3942,14 @@ same_page:
 	return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -4002,7 +4010,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * once we release i_mmap_rwsem, another task can do the final put_page
 	 * and that page table be reused and filled with junk.
 	 */
-	flush_tlb_range(vma, start, end);
+	flush_hugetlb_tlb_range(vma, start, end);
 	mmu_notifier_invalidate_range(mm, start, end);
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
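/*
 * Illustrative sketch (not part of the patch): how an architecture overrides
 * the generic fallback added above.  Defining the guard macro in its
 * asm/hugetlb.h before mm/hugetlb.c is built routes hugetlb range flushes to
 * an arch-specific helper; the radix__ call mirrors the helper added in
 * hugetlbpage-radix.c earlier in this series, and the radix_enabled() check
 * is a simplification of the real powerpc header.
 */
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
	else
		flush_tlb_range(vma, start, end);	/* hash fallback */
}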