-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h                     1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h                     3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h                   16
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h                       13
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h                    16
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h                     4
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h                         2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h                               12
-rw-r--r--  arch/powerpc/include/asm/kexec.h                                 6
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h                     1
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h                     1
-rw-r--r--  arch/powerpc/include/asm/topology.h                             10
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S                             2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S                             6
-rw-r--r--  arch/powerpc/mm/hash64_4k.c                                      4
-rw-r--r--  arch/powerpc/mm/hash64_64k.c                                     8
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c                                  1
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c                            10
-rw-r--r--  arch/powerpc/mm/init-common.c                                    4
-rw-r--r--  arch/powerpc/mm/numa.c                                           5
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c                                117
-rw-r--r--  arch/powerpc/mm/pgtable_64.c                                     4
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c                                     9
-rw-r--r--  arch/powerpc/platforms/powernv/vas-window.c                     16
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c                     4
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c                            31
-rw-r--r--  drivers/misc/ocxl/file.c                                         2
-rw-r--r--  tools/testing/selftests/powerpc/alignment/alignment_handler.c    2

28 files changed, 231 insertions(+), 79 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 30a155c0a6b0..c615abdce119 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -16,6 +16,7 @@
 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 949d691094a4..67c5475311ee 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
  * keeping the prototype consistent across the two formats.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-					 unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index, unsigned long hidx,
+					 int offset)
 {
 	return (hidx << H_PAGE_F_GIX_SHIFT) &
 		(H_PAGE_F_SECOND | H_PAGE_F_GIX);
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 338b7da468ce..3bcf269f8f55 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -45,7 +45,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
 {
 	real_pte_t rpte;
 	unsigned long *hidxp;
@@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
 	 */
 	smp_rmb();
 
-	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	hidxp = (unsigned long *)(ptep + offset);
 	rpte.hidx = *hidxp;
 	return rpte;
 }
@@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
  * expected to modify the PTE bits accordingly and commit the PTE to memory.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-					 unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index,
+					 unsigned long hidx, int offset)
 {
-	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	unsigned long *hidxp = (unsigned long *)(ptep + offset);
 
 	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
 	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
@@ -140,13 +141,18 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 }
 
 #define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
 #define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
 				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
 #else
 #define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
 #endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) +	\
+				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
 #define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
 #define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
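
Note: the new "offset" argument tells __real_pte()/pte_set_hidx() how far
past ptep the second half of the page-table page starts, since a hash PTE
may now live at PTE, PMD or PUD level. A minimal model of the layout this
assumes (illustrative only; hidx_word is a made-up name):

	/*
	 * A level with 2^n entries allocates 2 * 2^n words: the first
	 * 2^n hold PTEs, the next 2^n hold the hash-slot (hidx) bits.
	 * "offset" is the entry count of that level, e.g. PTRS_PER_PTE.
	 */
	static inline unsigned long *hidx_word(pte_t *ptep, int offset)
	{
		return (unsigned long *)(ptep + offset);
	}
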
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0920eff731b3..935adcd92a81 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -23,7 +23,8 @@
23 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT) 23 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
24#define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE) 24#define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
25 25
26#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES) 26#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
27 defined(CONFIG_PPC_64K_PAGES)
27/* 28/*
28 * only with hash 64k we need to use the second half of pmd page table 29 * only with hash 64k we need to use the second half of pmd page table
29 * to store pointer to deposited pgtable_t 30 * to store pointer to deposited pgtable_t
@@ -33,6 +34,16 @@
33#define H_PMD_CACHE_INDEX H_PMD_INDEX_SIZE 34#define H_PMD_CACHE_INDEX H_PMD_INDEX_SIZE
34#endif 35#endif
35/* 36/*
37 * We store the slot details in the second half of page table.
38 * Increase the pud level table so that hugetlb ptes can be stored
39 * at pud level.
40 */
41#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
42#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE + 1)
43#else
44#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE)
45#endif
46/*
36 * Define the address range of the kernel non-linear virtual area 47 * Define the address range of the kernel non-linear virtual area
37 */ 48 */
38#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000) 49#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
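
Note: PGT_CACHE() objects are sized by shifting the entry size by the index,
so H_PUD_INDEX_SIZE + 1 simply doubles the allocation, leaving one unsigned
long of slot bits per pud_t. A toy check of that arithmetic (illustrative;
the value 9 for the index is assumed, not taken from this patch):

	unsigned long idx = 9;	/* assumed H_PUD_INDEX_SIZE */
	/* 8 << 9 = 4096 bytes of pud_t entries ... */
	unsigned long puds = sizeof(unsigned long) << idx;
	/* ... 8 << 10 = 8192 bytes once the slot words are included */
	unsigned long both = sizeof(unsigned long) << (idx + 1);
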
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 1fcfa425cefa..4746bc68d446 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
+	pgd_t *pgd;
+
 	if (radix_enabled())
 		return radix__pgd_alloc(mm);
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+
+	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	memset(pgd, 0, PGD_TABLE_SIZE);
+
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,13 +99,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
 				pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -115,7 +121,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 	 * ahead and flush the page walk cache
 	 */
 	flush_tlb_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+	pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
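
Note: pgd_alloc() now zeroes the new table. The likely motivation (an
assumption here, not stated in this hunk): with the PUD cache index change
above, pud and pgd tables can come from the same kmem cache, and a recycled
object may still carry the old pud's slot bits in its second half. A sketch
of the allocate-then-zero pattern, with a NULL check this hunk omits
(hypothetical helper, not part of the patch):

	static void *pgtable_alloc_zeroed(struct kmem_cache *cache,
					  gfp_t gfp, size_t size)
	{
		void *p = kmem_cache_alloc(cache, gfp);

		if (p)
			memset(p, 0, size);	/* don't trust recycled objects */
		return p;
	}
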
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 51017726d495..a6b9f1d74600 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
 extern unsigned long __pmd_cache_index;
+extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE  __pte_index_size
 #define PMD_INDEX_SIZE  __pmd_index_size
 #define PUD_INDEX_SIZE  __pud_index_size
 #define PGD_INDEX_SIZE  __pgd_index_size
 #define PMD_CACHE_INDEX __pmd_cache_index
+#define PUD_CACHE_INDEX __pud_cache_index
 /*
  * Because of use of pte fragments and THP, size of page table
  * are not always derived out of index size above.
@@ -348,7 +350,7 @@ extern unsigned long pci_io_base;
  */
 #ifndef __real_pte
 
-#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __real_pte(e, p, o)	((real_pte_t){(e)})
 #define __rpte_to_pte(r)	((r).pte)
 #define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
 
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 176dfb73d42c..471b2274fbeb 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -645,7 +645,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 				      EXC_HV, SOFTEN_TEST_HV, bitmask)
 
 #define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)		\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec, bitmask);\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
 	EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
 
 /*
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 88e5e8f17e98..855e17d158b1 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -30,6 +30,16 @@
 #define PACA_IRQ_PMI		0x40
 
 /*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
+#else
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
+#endif
+
+/*
  * flags for paca->irq_soft_mask
  */
 #define IRQS_ENABLED		0
@@ -244,7 +254,7 @@ static inline bool lazy_irq_pending(void)
 static inline void may_hard_irq_enable(void)
 {
 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
-	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
 		__hard_irq_enable();
 }
 
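
Note: the gate in may_hard_irq_enable() widens from PACA_IRQ_EE to
PACA_IRQ_MUST_HARD_MASK, so on Book3S a replay-pending PMI also keeps hard
interrupts disabled. A standalone model of the behavioural difference
(flag values assumed, not quoted from the header):

	enum { IRQ_EE = 0x04, IRQ_PMI = 0x40 };
	#define MUST_HARD_MASK	(IRQ_EE | IRQ_PMI)	/* Book3S */

	static int may_enable_old(unsigned int happened)
	{
		return !(happened & IRQ_EE);
	}

	static int may_enable_new(unsigned int happened)
	{
		return !(happened & MUST_HARD_MASK);
	}

	/*
	 * With a pending PMI (happened == IRQ_PMI):
	 *   may_enable_old() == 1  (wrongly re-enables hard interrupts)
	 *   may_enable_new() == 0  (keeps them hard disabled)
	 */
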
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 9dcbfa6bbb91..d8b1e8e7e035 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -140,6 +140,12 @@ static inline bool kdump_in_progress(void)
 	return false;
 }
 
+static inline void crash_ipi_callback(struct pt_regs *regs) { }
+
+static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+}
+
 #endif /* CONFIG_KEXEC_CORE */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 504a3c36ce5c..03bbd1149530 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -24,6 +24,7 @@ extern int icache_44x_need_flush;
 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index abddf5830ad5..5c5f75d005ad 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -27,6 +27,7 @@
 #else
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
 #endif
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 /*
  * Define the address range of the kernel non-linear virtual area
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 88187c285c70..593248110902 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 extern int numa_update_cpu_topology(bool cpus_locked);
 
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+	numa_cpu_lookup_table[cpu] = node;
+}
+
 static inline int early_cpu_to_node(int cpu)
 {
 	int nid;
@@ -82,6 +87,7 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 extern int start_topology_update(void);
 extern int stop_topology_update(void);
 extern int prrn_is_enabled(void);
+extern int find_and_online_cpu_nid(int cpu);
 #else
 static inline int start_topology_update(void)
 {
@@ -95,6 +101,10 @@ static inline int prrn_is_enabled(void)
 {
 	return 0;
 }
+static inline int find_and_online_cpu_nid(int cpu)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
 
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index ee832d344a5a..9b6e653e501a 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -943,6 +943,8 @@ kernel_dbg_exc:
 /*
  * An interrupt came in while soft-disabled; We mark paca->irq_happened
  * accordingly and if the interrupt is level sensitive, we hard disable
+ * it. Hard disabling (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK,
+ * so keep these in synch.
  */
 
 .macro masked_interrupt_book3e paca_irq full_mask
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 243d072a225a..3ac87e53b3da 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1426,7 +1426,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
  *   triggered and won't automatically refire.
  * - If it was a HMI we return immediately since we handled it in realmode
  *   and it won't refire.
- * - else we hard disable and return.
+ * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
  * This is called with r10 containing the value to OR to the paca field.
  */
 #define MASKED_INTERRUPT(_H)					\
@@ -1441,8 +1441,8 @@ masked_##_H##interrupt: \
 	ori	r10,r10,0xffff;					\
 	mtspr	SPRN_DEC,r10;					\
 	b	MASKED_DEC_HANDLER_LABEL;			\
-1:	andi.	r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI);		\
-	bne	2f;						\
+1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK;		\
+	beq	2f;						\
 	mfspr	r10,SPRN_##_H##SRR1;				\
 	xori	r10,r10,MSR_EE; /* clear MSR_EE */		\
 	mtspr	SPRN_##_H##SRR1,r10;				\
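
Note: the masked-interrupt test flips from a blacklist (skip the hard
disable only for doorbell/HMI) to a whitelist keyed on
PACA_IRQ_MUST_HARD_MASK. A rough C rendering of the old and new
andi./branch logic (illustrative; the function names are made up):

	static int hard_disable_old(unsigned int reason)
	{
		return !(reason & (PACA_IRQ_DBELL | PACA_IRQ_HMI));
	}

	static int hard_disable_new(unsigned int reason)
	{
		return !!(reason & PACA_IRQ_MUST_HARD_MASK);
	}
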
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 5a69b51d08a3..d573d7d07f25 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * need to add in 0x1 if it's a read-only user page
 	 */
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -117,7 +117,7 @@ repeat:
 			return -1;
 		}
 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 2253bbc6a599..e601d95c3b20 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 
 	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
 	vpn  = hpt_vpn(ea, vsid, ssize);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 	/*
 	 *None of the sub 4k page is hashed
 	 */
@@ -214,7 +214,7 @@ repeat:
 		return -1;
 	}
 
-	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
+	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
 	new_pte |= H_PAGE_HASHPTE;
 
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
@@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -327,7 +327,7 @@ repeat:
 		}
 
 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 7d07c7e17db6..cf290d415dcd 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void)
 	__pmd_index_size = H_PMD_INDEX_SIZE;
 	__pud_index_size = H_PUD_INDEX_SIZE;
 	__pgd_index_size = H_PGD_INDEX_SIZE;
+	__pud_cache_index = H_PUD_CACHE_INDEX;
 	__pmd_cache_index = H_PMD_CACHE_INDEX;
 	__pte_table_size = H_PTE_TABLE_SIZE;
 	__pmd_table_size = H_PMD_TABLE_SIZE;
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 12511f5a015f..b320f5097a06 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	unsigned long vpn;
 	unsigned long old_pte, new_pte;
 	unsigned long rflags, pa, sz;
-	long slot;
+	long slot, offset;
 
 	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
@@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	if (unlikely(mmu_psize == MMU_PAGE_16G))
+		offset = PTRS_PER_PUD;
+	else
+		offset = PTRS_PER_PMD;
+	rpte = __real_pte(__pte(old_pte), ptep, offset);
 
 	sz = ((1UL) << shift);
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 			return -1;
 		}
 
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
 	}
 
 	/*
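
Note: 16G hugepages sit at PUD level while the other hash hugepage sizes sit
at PMD level, so the hidx words are one PUD or one PMD table length past
ptep. A hypothetical helper naming the convention this series repeats
inline (not part of the patch):

	static inline int hidx_offset(int mmu_psize)
	{
		if (mmu_psize == MMU_PAGE_16G)	/* PUD-level hugepage */
			return PTRS_PER_PUD;
		return PTRS_PER_PMD;		/* e.g. 16M at PMD level */
	}
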
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index eb8c6c8c4851..2b656e67f2ea 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -100,6 +100,6 @@ void pgtable_cache_init(void)
 	 * same size as either the pgd or pmd index except with THP enabled
 	 * on book3s 64
 	 */
-	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+	if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
+		pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 314d19ab9385..edd8d0bc9364 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -143,11 +143,6 @@ static void reset_numa_cpu_lookup_table(void)
 		numa_cpu_lookup_table[cpu] = -1;
 }
 
-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
-{
-	numa_cpu_lookup_table[cpu] = node;
-}
-
 static void map_cpu_to_node(int cpu, int node)
 {
 	update_numa_cpu_lookup_table(cpu, node);
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 573a9a2ee455..2e10a964e290 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -17,9 +17,11 @@
 #include <linux/of_fdt.h>
 #include <linux/mm.h>
 #include <linux/string_helpers.h>
+#include <linux/stop_machine.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
 #include <asm/dma.h>
 #include <asm/machdep.h>
 #include <asm/mmu.h>
@@ -333,6 +335,22 @@ static void __init radix_init_pgtable(void)
 		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
+
+	/*
+	 * The init_mm context is given the first available (non-zero) PID,
+	 * which is the "guard PID" and contains no page table. PIDR should
+	 * never be set to zero because that duplicates the kernel address
+	 * space at the 0x0... offset (quadrant 0)!
+	 *
+	 * An arbitrary PID that may later be allocated by the PID allocator
+	 * for userspace processes must not be used either, because that
+	 * would cause stale user mappings for that PID on CPUs outside of
+	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
+	 *
+	 * So permanently carve out one PID for the purpose of a guard PID.
+	 */
+	init_mm.context.id = mmu_base_pid;
+	mmu_base_pid++;
 }
 
 static void __init radix_init_partition_table(void)
@@ -535,6 +553,7 @@ void __init radix__early_init_mmu(void)
 	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
 	__pud_index_size = RADIX_PUD_INDEX_SIZE;
 	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
+	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
 	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
 	__pte_table_size = RADIX_PTE_TABLE_SIZE;
 	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
@@ -579,7 +598,8 @@ void __init radix__early_init_mmu(void)
 
 	radix_init_iamr();
 	radix_init_pgtable();
-
+	/* Switch to the guard PID before turning on MMU */
+	radix__switch_mmu_context(NULL, &init_mm);
 	if (cpu_has_feature(CPU_FTR_HVMODE))
 		tlbiel_all();
 }
@@ -604,6 +624,7 @@ void radix__early_init_mmu_secondary(void)
 	}
 	radix_init_iamr();
 
+	radix__switch_mmu_context(NULL, &init_mm);
 	if (cpu_has_feature(CPU_FTR_HVMODE))
 		tlbiel_all();
 }
@@ -666,6 +687,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 	pud_clear(pud);
 }
 
+struct change_mapping_params {
+	pte_t *pte;
+	unsigned long start;
+	unsigned long end;
+	unsigned long aligned_start;
+	unsigned long aligned_end;
+};
+
+static int stop_machine_change_mapping(void *data)
+{
+	struct change_mapping_params *params =
+			(struct change_mapping_params *)data;
+
+	if (!data)
+		return -1;
+
+	spin_unlock(&init_mm.page_table_lock);
+	pte_clear(&init_mm, params->aligned_start, params->pte);
+	create_physical_mapping(params->aligned_start, params->start);
+	create_physical_mapping(params->end, params->aligned_end);
+	spin_lock(&init_mm.page_table_lock);
+	return 0;
+}
+
 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 			     unsigned long end)
 {
@@ -694,6 +739,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 	}
 }
 
+/*
+ * Helper to clear the pte and potentially split the mapping.
+ */
+static void split_kernel_mapping(unsigned long addr, unsigned long end,
+				 unsigned long size, pte_t *pte)
+{
+	unsigned long mask = ~(size - 1);
+	unsigned long aligned_start = addr & mask;
+	unsigned long aligned_end = addr + size;
+	struct change_mapping_params params;
+	bool split_region = false;
+
+	if ((end - addr) < size) {
+		/*
+		 * We're going to clear the PTE without having
+		 * flushed the mapping: time to remap and flush. If
+		 * the effects are visible outside the processor, or
+		 * if we are running in code close to the mapping we
+		 * cleared, we are in trouble.
+		 */
+		if (overlaps_kernel_text(aligned_start, addr) ||
+		    overlaps_kernel_text(end, aligned_end)) {
+			/*
+			 * Hack, just return, don't pte_clear
+			 */
+			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
+				  "text, not splitting\n", addr, end);
+			return;
+		}
+		split_region = true;
+	}
+
+	if (split_region) {
+		params.pte = pte;
+		params.start = addr;
+		params.end = end;
+		params.aligned_start = addr & ~(size - 1);
+		params.aligned_end = min_t(unsigned long, aligned_end,
+				(unsigned long)__va(memblock_end_of_DRAM()));
+		stop_machine(stop_machine_change_mapping, &params, NULL);
+		return;
+	}
+
+	pte_clear(&init_mm, addr, pte);
+}
+
 static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 			     unsigned long end)
 {
@@ -709,13 +800,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 			continue;
 
 		if (pmd_huge(*pmd)) {
-			if (!IS_ALIGNED(addr, PMD_SIZE) ||
-			    !IS_ALIGNED(next, PMD_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pmd);
+			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
 			continue;
 		}
 
@@ -740,13 +825,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
 			continue;
 
 		if (pud_huge(*pud)) {
-			if (!IS_ALIGNED(addr, PUD_SIZE) ||
-			    !IS_ALIGNED(next, PUD_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pud);
+			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
 			continue;
 		}
 
@@ -772,13 +851,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
 			continue;
 
 		if (pgd_huge(*pgd)) {
-			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
-			    !IS_ALIGNED(next, PGDIR_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pgd);
+			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
 			continue;
 		}
 
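
Note: split_kernel_mapping() performs the actual remap under stop_machine(),
so no other CPU can walk the linear mapping while a huge PTE is cleared and
re-created at a smaller size; the unlock/lock pair in
stop_machine_change_mapping() above suggests the caller holds
init_mm.page_table_lock across the walk. A minimal stop_machine() usage
sketch (illustrative names, not from this patch):

	#include <linux/stop_machine.h>

	static int do_split(void *data)
	{
		/* runs on one CPU; all others spin with IRQs disabled */
		return 0;
	}

	static void split(void)
	{
		/* third argument NULL: run on any one online CPU */
		stop_machine(do_split, NULL, NULL);
	}
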
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c9a623c2d8a2..28c980eb4422 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -82,6 +82,8 @@ unsigned long __pgd_index_size;
82EXPORT_SYMBOL(__pgd_index_size); 82EXPORT_SYMBOL(__pgd_index_size);
83unsigned long __pmd_cache_index; 83unsigned long __pmd_cache_index;
84EXPORT_SYMBOL(__pmd_cache_index); 84EXPORT_SYMBOL(__pmd_cache_index);
85unsigned long __pud_cache_index;
86EXPORT_SYMBOL(__pud_cache_index);
85unsigned long __pte_table_size; 87unsigned long __pte_table_size;
86EXPORT_SYMBOL(__pte_table_size); 88EXPORT_SYMBOL(__pte_table_size);
87unsigned long __pmd_table_size; 89unsigned long __pmd_table_size;
@@ -471,6 +473,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
471 if (old & PATB_HR) { 473 if (old & PATB_HR) {
472 asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : 474 asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
473 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 475 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
476 asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
477 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
474 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); 478 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
475 } else { 479 } else {
476 asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : 480 asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 881ebd53ffc2..9b23f12e863c 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	unsigned int psize;
 	int ssize;
 	real_pte_t rpte;
-	int i;
+	int i, offset;
 
 	i = batch->index;
 
@@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		psize = get_slice_psize(mm, addr);
 		/* Mask the address for the correct page size */
 		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+		if (unlikely(psize == MMU_PAGE_16G))
+			offset = PTRS_PER_PUD;
+		else
+			offset = PTRS_PER_PMD;
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
@@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		 * support 64k pages, this might be different from the
 		 * hardware page size encoded in the slice table. */
 		addr &= PAGE_MASK;
+		offset = PTRS_PER_PTE;
 	}
 
 
@@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	}
 	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
-	rpte = __real_pte(__pte(pte), ptep);
+	rpte = __real_pte(__pte(pte), ptep, offset);
 
 	/*
 	 * Check if we have an active batch on this CPU. If not, just
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 2b3eb01ab110..b7c53a51c31b 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
 			rc = PTR_ERR(txwin->paste_kaddr);
 			goto free_window;
 		}
+	} else {
+		/*
+		 * A user mapping must ensure that context switch issues
+		 * CP_ABORT for this thread.
+		 */
+		rc = set_thread_uses_vas();
+		if (rc)
+			goto free_window;
 	}
 
-	/*
-	 * Now that we have a send window, ensure context switch issues
-	 * CP_ABORT for this thread.
-	 */
-	rc = -EINVAL;
-	if (set_thread_uses_vas() < 0)
-		goto free_window;
-
 	set_vinst_win(vinst, txwin);
 
 	return txwin;
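
Note: besides restricting CP_ABORT setup to user-space windows, this keeps
the errno from set_thread_uses_vas() instead of overwriting it with
-EINVAL. The two error-handling shapes, side by side (illustrative):

	/* before: callee's error code discarded */
	rc = -EINVAL;
	if (set_thread_uses_vas() < 0)
		goto free_window;

	/* after: callee's error code propagated */
	rc = set_thread_uses_vas();
	if (rc)
		goto free_window;
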
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index dceb51454d8d..652d3e96b812 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -36,6 +36,7 @@
 #include <asm/xics.h>
 #include <asm/xive.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/topology.h>
 
 #include "pseries.h"
 #include "offline_states.h"
@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
 		BUG_ON(cpu_online(cpu));
 		set_cpu_present(cpu, false);
 		set_hard_smp_processor_id(cpu, -1);
+		update_numa_cpu_lookup_table(cpu, -1);
 		break;
 	}
 	if (cpu >= nr_cpu_ids)
@@ -340,8 +342,6 @@ static void pseries_remove_processor(struct device_node *np)
 	cpu_maps_update_done();
 }
 
-extern int find_and_online_cpu_nid(int cpu);
-
 static int dlpar_online_cpu(struct device_node *dn)
 {
 	int rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 81d8614e7379..5e1ef9150182 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -49,6 +49,28 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
 
 /*
+ * Enable the hotplug interrupt late because processing these events may
+ * touch other devices or systems (e.g. hugepages) that have not been
+ * initialized at the subsys stage.
+ */
+int __init init_ras_hotplug_IRQ(void)
+{
+	struct device_node *np;
+
+	/* Hotplug Events */
+	np = of_find_node_by_path("/event-sources/hot-plug-events");
+	if (np != NULL) {
+		if (dlpar_workqueue_init() == 0)
+			request_event_sources_irqs(np, ras_hotplug_interrupt,
+						   "RAS_HOTPLUG");
+		of_node_put(np);
+	}
+
+	return 0;
+}
+machine_late_initcall(pseries, init_ras_hotplug_IRQ);
+
+/*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
  */
@@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void)
 		of_node_put(np);
 	}
 
-	/* Hotplug Events */
-	np = of_find_node_by_path("/event-sources/hot-plug-events");
-	if (np != NULL) {
-		if (dlpar_workqueue_init() == 0)
-			request_event_sources_irqs(np, ras_hotplug_interrupt,
-						   "RAS_HOTPLUG");
-		of_node_put(np);
-	}
-
 	/* EPOW Events */
 	np = of_find_node_by_path("/event-sources/epow-events");
 	if (np != NULL) {
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index d9aa407db06a..2dd2db9bc1c9 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -277,7 +277,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
 	struct ocxl_context *ctx = file->private_data;
 	struct ocxl_kernel_event_header header;
 	ssize_t rc;
-	size_t used = 0;
+	ssize_t used = 0;
 	DEFINE_WAIT(event_wait);
 
 	memset(&header, 0, sizeof(header));
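
Note: afu_read() mixes "used" with ssize_t error paths, and presumably some
comparison or return value goes wrong while it is an unsigned size_t. A
small userspace demonstration of why unsigned byte counts break sign checks
(illustrative only):

	#include <stdio.h>
	#include <sys/types.h>

	int main(void)
	{
		size_t u = 0;
		ssize_t s = 0;

		u -= 1;		/* wraps to SIZE_MAX */
		s -= 1;		/* stays -1 */
		/* prints "1 1": the size_t looks huge, never negative */
		printf("%d %d\n", u > 0, s < 0);
		return 0;
	}
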
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index 39fd362415cf..0f2698f9fd6d 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -57,7 +57,7 @@ volatile int gotsig;
 
 void sighandler(int sig, siginfo_t *info, void *ctx)
 {
-	struct ucontext *ucp = ctx;
+	ucontext_t *ucp = ctx;
 
 	if (!testing) {
 		signal(sig, SIG_DFL);
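
Note: glibc 2.26 stopped exposing "struct ucontext" in its public headers;
the portable spelling is the POSIX typedef ucontext_t. A minimal handler in
the fixed style (illustrative):

	#include <signal.h>
	#include <ucontext.h>

	static void handler(int sig, siginfo_t *info, void *ctx)
	{
		ucontext_t *ucp = ctx;	/* not "struct ucontext *" */

		(void)sig; (void)info; (void)ucp;
	}
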