Diffstat (limited to 'arch/ppc64')

 arch/ppc64/kernel/bpa_iommu.c     | 6 +++++-
 arch/ppc64/kernel/kprobes.c       | 8 ++++----
 arch/ppc64/kernel/machine_kexec.c | 1 +
 arch/ppc64/mm/hash_native.c       | 5 ++---
 arch/ppc64/mm/tlb.c               | 4 +++-
 5 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index f33a7bccb0d7..507eb9d0223f 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -99,7 +99,11 @@ get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_
 		break;
 
 	default: /* not a known compile time constant */
-		BUILD_BUG_ON(1);
+		{
+			/* BUILD_BUG_ON() is not usable here */
+			extern void __get_iost_entry_bad_page_size(void);
+			__get_iost_entry_bad_page_size();
+		}
 		break;
 	}
 
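The fix above works because BUILD_BUG_ON() needs a condition the compiler can evaluate before optimization, which a switch default arm cannot provide; the patch instead uses the classic link-time assertion: declare an extern function that is defined nowhere. If constant folding removes the unreachable default arm, the call and its undefined symbol vanish; if not, the link fails loudly. A minimal standalone sketch of the pattern (the function name and page sizes below are illustrative, not taken from the kernel):

extern void bad_page_size(void);        /* deliberately never defined */

static unsigned long iopt_entries(unsigned long page_size)
{
        switch (page_size) {
        case 0x1000:                    /* 4K, compile-time constant */
                return 1ul << 20;
        case 0x10000:                   /* 64K, compile-time constant */
                return 1ul << 16;
        default:                        /* provably dead after folding */
                bad_page_size();        /* undefined reference if reachable */
                return 0;
        }
}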
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 7e80d49c589a..9c6facc24f70 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -59,9 +59,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 	/* insn must be on a special executable page on ppc64 */
 	if (!ret) {
-		up(&kprobe_mutex);
+		down(&kprobe_mutex);
 		p->ainsn.insn = get_insn_slot();
-		down(&kprobe_mutex);
+		up(&kprobe_mutex);
 		if (!p->ainsn.insn)
 			ret = -ENOMEM;
 	}
@@ -90,9 +90,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	up(&kprobe_mutex);
+	down(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	down(&kprobe_mutex);
+	up(&kprobe_mutex);
 }
 
 static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
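Both kprobes hunks fix the same inverted semaphore sequence: the old code called up() before the critical section and down() after it, releasing kprobe_mutex it did not hold and then leaving it held on exit. With 2.6-era semaphores, down() acquires and up() releases. A minimal sketch of the intended discipline, assuming a binary semaphore used as a mutex (slot_mutex and alloc_slot_locked() are made-up names; get_insn_slot() is the allocator from the patch):

static DECLARE_MUTEX(slot_mutex);       /* semaphore initialized to 1 */

static kprobe_opcode_t *alloc_slot_locked(void)
{
        kprobe_opcode_t *slot;

        down(&slot_mutex);              /* acquire; may sleep */
        slot = get_insn_slot();         /* allocator state needs the lock */
        up(&slot_mutex);                /* release */

        return slot;                    /* NULL is mapped to -ENOMEM above */
}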
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index 4775f12a013c..bf7cc4f8210f 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -205,6 +205,7 @@ static void kexec_prepare_cpus(void)
 			continue;
 
 		while (paca[i].hw_cpu_id != -1) {
+			barrier();
 			if (!cpu_possible(i)) {
 				printk("kexec: cpu %d hw_cpu_id %d is not"
 				       " possible, ignoring\n",
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index eb1bbb5b6c16..bfd385b7713c 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -343,7 +343,7 @@ static void native_flush_hash_range(unsigned long context,
 	hpte_t *hptep;
 	unsigned long hpte_v;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-	unsigned long large;
+	unsigned long large = batch->large;
 
 	local_irq_save(flags);
 
@@ -356,7 +356,6 @@ static void native_flush_hash_range(unsigned long context,
 
 		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
 		batch->vaddr[j] = va;
-		large = pte_huge(batch->pte[i]);
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
 		else
@@ -406,7 +405,7 @@ static void native_flush_hash_range(unsigned long context,
 	asm volatile("ptesync":::"memory");
 
 	for (i = 0; i < j; i++)
-		__tlbie(batch->vaddr[i], 0);
+		__tlbie(batch->vaddr[i], large);
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
 
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f0..21fbffb23a43 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
 	 */
-	if (unlikely(i != 0 && context != batch->context)) {
+	if (i != 0 && (context != batch->context ||
+	    batch->large != pte_huge(pte))) {
 		flush_tlb_pending();
 		i = 0;
 	}
@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	if (i == 0) {
 		batch->context = context;
 		batch->mm = mm;
+		batch->large = pte_huge(pte);
 	}
 	batch->pte[i] = __pte(pte);
 	batch->addr[i] = addr;
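The hash_native.c and tlb.c hunks are two halves of one fix: native_flush_hash_range() now issues its __tlbie() loop with a single large flag for the whole batch, so hpte_update() must guarantee every pte queued in a ppc64_tlb_batch shares one page size. It records pte_huge() when a batch starts and flushes the pending batch if a pte of the other size (or another context) arrives mid-stream. A simplified model of that batching rule, with an illustrative struct and a made-up 16-entry bound rather than the kernel's real types:

struct tlb_batch {
        unsigned long context;
        unsigned int  large;            /* one page-size flag per batch */
        unsigned long addr[16];
        unsigned int  index;
};

static void flush_batch(struct tlb_batch *b);   /* flushes, resets index to 0 */

static void batch_add(struct tlb_batch *b, unsigned long context,
                      unsigned long addr, unsigned int large)
{
        /*
         * The whole batch is flushed with one page size, so a pte of
         * a different size or context may not join a pending batch.
         */
        if (b->index != 0 &&
            (context != b->context || large != b->large))
                flush_batch(b);

        if (b->index == 0) {            /* starting a fresh batch */
                b->context = context;
                b->large = large;
        }
        b->addr[b->index++] = addr;

        if (b->index == 16)             /* batch is full */
                flush_batch(b);
}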