Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                         2
-rw-r--r--  arch/powerpc/kernel/kprobes.c                2
-rw-r--r--  arch/powerpc/kernel/setup_64.c              10
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c              2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c               95
-rw-r--r--  arch/powerpc/mm/numa.c                       2
-rw-r--r--  arch/powerpc/mm/stab.c                       7
-rw-r--r--  arch/powerpc/platforms/powermac/feature.c   21
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c      11
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c       12
10 files changed, 114 insertions, 50 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bb2efdd566a..db93dbc0e21 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -227,7 +227,7 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
+	int "Maximum number of CPUs (2-128)"
 	range 2 128
 	depends on SMP
 	default "32" if PPC64
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 511af54e623..5368f9c2e6b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -177,7 +177,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
 			save_previous_kprobe(kcb);
 			set_current_kprobe(p, regs, kcb);
 			kcb->kprobe_saved_msr = regs->msr;
-			p->nmissed++;
+			kprobes_inc_nmissed_count(p);
 			prepare_singlestep(p, regs);
 			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 608fee7c7e2..e3fb78397dc 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+	.dline_size = 0x80,
+	.log_dline_size = 7,
+	.iline_size = 0x80,
+	.log_iline_size = 7
+};
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
 /*
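The setup_64.c hunk above applies a common early-boot pattern: statically initialize ppc64_caches with conservative defaults (128-byte cache lines, log2 = 7) so code that runs before the device tree is parsed sees sane values, then overwrite the struct once firmware has been read. A minimal standalone sketch of that pattern, with hypothetical names (not the kernel's API):

/* Sketch: "safe defaults, refine from firmware later". */
#include <stdio.h>

struct cache_info {
	unsigned int dline_size;	/* data cache line size, bytes */
	unsigned int log_dline_size;	/* log2(dline_size) */
};

/* Default to 128-byte lines so early users see something sane. */
static struct cache_info caches = {
	.dline_size = 0x80,
	.log_dline_size = 7,
};

static void read_caches_from_firmware(struct cache_info *c)
{
	/* stand-in for parsing the device tree */
	c->dline_size = 0x40;
	c->log_dline_size = 6;
}

int main(void)
{
	printf("early boot: %u bytes/line\n", caches.dline_size);
	read_caches_from_firmware(&caches);
	printf("after firmware: %u bytes/line\n", caches.dline_size);
	return 0;
}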
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 706e8a63ced..a33583f3b0e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Handle hugepage regions */
 	if (unlikely(in_hugepage_area(mm->context, ea))) {
 		DBG_LOW(" -> huge page !\n");
-		return hash_huge_page(mm, access, ea, vsid, local);
+		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 
 	/* Get PTE and page size from page tables */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6bc9dbad7de..54131b877da 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
+struct slb_flush_info {
+	struct mm_struct *mm;
+	u16 newareas;
+};
+
 static void flush_low_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i;
 
-	asm volatile("isync" : : : "memory");
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
 
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		asm volatile("slbie %0"
 			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
 static void flush_high_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i, j;
 
-	asm volatile("isync" : : : "memory");
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
 
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
+
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
 				     :: "r" (((i << HTLB_AREA_SHIFT)
 					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
+	struct slb_flush_info fi;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
 	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 	mm->context.low_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_low_segments, &fi, 0, 1);
 
 	return 0;
 }
 
 static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
+	struct slb_flush_info fi;
 	unsigned long i;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_high_segments, &fi, 0, 1);
 
 	return 0;
 }
@@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+						  pte_t pte, int trap)
+{
+	struct page *page;
+	int i;
+
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
+			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local)
+		   unsigned long ea, unsigned long vsid, int local,
+		   unsigned long trap)
 {
 	pte_t *ptep;
 	unsigned long old_pte, new_pte;
@@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	rflags = 0x2 | (!(new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
 	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
@@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
-		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
+		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
+					 local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
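The hugetlbpage.c changes above stop smuggling a u16 bitmap through on_each_cpu()'s void * argument and instead pass a pointer to a stack-allocated slb_flush_info, so the IPI handler also learns which mm changed and can return early on unrelated CPUs. Passing &fi is safe only because on_each_cpu() is called with wait=1, so the caller's stack frame outlives every handler. A small userspace sketch of the same bundle-arguments-in-a-struct pattern, with hypothetical names and plain function calls standing in for IPIs:

#include <stdio.h>

struct flush_info {
	void *mm;		/* which address space changed */
	unsigned short newareas;	/* bitmap of segments to flush */
};

static void flush_handler(void *parm)
{
	struct flush_info *fi = parm;
	printf("flushing areas 0x%x for mm %p\n", fi->newareas, fi->mm);
}

/* stand-in for on_each_cpu(fn, arg, 0, 1): synchronous, so the
 * caller's stack argument stays valid for every invocation */
static void run_on_each_cpu(void (*fn)(void *), void *arg)
{
	for (int cpu = 0; cpu < 4; cpu++)
		fn(arg);
}

int main(void)
{
	struct flush_info fi = { .mm = (void *)0x1000, .newareas = 0x5 };
	run_on_each_cpu(flush_handler, &fi);
	return 0;
}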
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f72cf87364c..ba7a3055a9f 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
 
 	/* We didnt find a matching region, return start/end as 0 */
 	if (*start_pfn == -1UL)
-		start_pfn = 0;
+		*start_pfn = 0;
 }
 
 static inline void map_cpu_to_node(int cpu, int node)
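The one-character numa.c fix above is easy to miss: start_pfn is a pointer out-parameter, and the old code assigned to the local pointer variable itself, leaving the caller's value untouched (still -1UL). A tiny standalone illustration of the difference:

#include <stdio.h>

static void get_region(unsigned long *start_pfn)
{
	if (*start_pfn == -1UL)
		*start_pfn = 0;	/* writes through to the caller */
	/* "start_pfn = 0;" would only null the local pointer copy */
}

int main(void)
{
	unsigned long pfn = -1UL;
	get_region(&pfn);
	printf("pfn = %lu\n", pfn);	/* prints 0 */
	return 0;
}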
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index cfbb4e1f966..51e7951414e 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
 		return;
 	}
 #endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_PSERIES
-	if (platform_is_lpar()) {
-		plpar_hcall_norets(H_SET_ASR, stabreal);
-		return;
-	}
-#endif
+
 	mtspr(SPRN_ASR, stabreal);
 }
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 0d7fa00fcb0..f6e22da2a5d 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -1650,11 +1650,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable)
 			pmac_low_i2c_close(ui2c);
 			break;
 		}
+		printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+		       enable ? "En" : "Dis");
+
 		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 		DBG("write result: %d,", rc);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index c78f2b290a7..2043659ea7b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
@@ -143,10 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce, *tcep;
 	long l, limit;
 
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
-	if (npages == 1)
+	if (TCE_PAGE_FACTOR == 0 && npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
 
@@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		__get_cpu_var(tce_page) = tcep;
 	}
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
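In the iommu.c hunks, TCE_PAGE_FACTOR acts as a shift count: the log2 ratio between the kernel page size and the smaller I/O (TCE) page size, so one kernel page consumes 2^factor TCE entries and both the starting index and the count scale up. The multi-entry path now performs that scaling only after ruling out the single-page fast path, which is valid only when no scaling is needed (factor 0). Worked arithmetic under an assumed 64K-kernel-page / 4K-TCE configuration (factor 4; illustrative values, not taken from the diff):

#include <stdio.h>

int main(void)
{
	const unsigned int tce_page_factor = 4;	/* log2(64K / 4K) */
	unsigned long tcenum = 3, npages = 2;

	/* each kernel page needs 1 << factor TCE entries, so both the
	 * starting TCE index and the entry count are shifted up */
	tcenum <<= tce_page_factor;
	npages <<= tce_page_factor;
	printf("tcenum = %lu, npages = %lu\n", tcenum, npages);	/* 48, 32 */
	return 0;
}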
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a50e5f3f396..cf1bc11b334 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	if (!(vflags & HPTE_V_BOLTED))
 		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
 
-#if 1
-	{
-		int i;
-		for (i=0;i<8;i++) {
-			unsigned long w0, w1;
-			plpar_pte_read(0, hpte_group, &w0, &w1);
-			BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
-				&& (w0 & HPTE_V_VALID));
-		}
-	}
-#endif
-
 	/* Now fill in the actual HPTE */
 	/* Set CEC cookie to 0 */
 	/* Zero page = 0 */