 arch/powerpc/Kconfig                      |  2
 arch/powerpc/kernel/setup_64.c            | 10
 arch/powerpc/mm/hash_utils_64.c           |  2
 arch/powerpc/mm/hugetlbpage.c             | 95
 arch/powerpc/mm/numa.c                    |  2
 arch/powerpc/mm/stab.c                    |  7
 arch/powerpc/platforms/powermac/feature.c | 21
 arch/powerpc/platforms/pseries/iommu.c    | 11
 arch/powerpc/platforms/pseries/lpar.c     | 12
 arch/ppc/Kconfig                          |  6
 arch/ppc/kernel/smp.c                     |  4
 arch/ppc/platforms/pmac_feature.c         | 20
 drivers/macintosh/windfarm_pm81.c         |  4
 include/asm-powerpc/mmu.h                 |  3
 14 files changed, 139 insertions(+), 60 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bb2efdd566a9..db93dbc0e21a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -227,7 +227,7 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
+	int "Maximum number of CPUs (2-128)"
 	range 2 128
 	depends on SMP
 	default "32" if PPC64
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 608fee7c7e20..e3fb78397dc6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+	.dline_size = 0x80,
+	.log_dline_size = 7,
+	.iline_size = 0x80,
+	.log_iline_size = 7
+};
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
 /*
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 706e8a63ced9..a33583f3b0e7 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Handle hugepage regions */
 	if (unlikely(in_hugepage_area(mm->context, ea))) {
 		DBG_LOW(" -> huge page !\n");
-		return hash_huge_page(mm, access, ea, vsid, local);
+		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 
 	/* Get PTE and page size from page tables */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6bc9dbad7dea..54131b877da3 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
+struct slb_flush_info {
+	struct mm_struct *mm;
+	u16 newareas;
+};
+
 static void flush_low_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i;
 
-	asm volatile("isync" : : : "memory");
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
 
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		asm volatile("slbie %0"
 			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
 static void flush_high_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i, j;
 
-	asm volatile("isync" : : : "memory");
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
 
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
+
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
 				     :: "r" (((i << HTLB_AREA_SHIFT)
 					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
+	struct slb_flush_info fi;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
 	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 	mm->context.low_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_low_segments, &fi, 0, 1);
 
 	return 0;
 }
 
 static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
+	struct slb_flush_info fi;
 	unsigned long i;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_high_segments, &fi, 0, 1);
 
 	return 0;
 }
@@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+						  pte_t pte, int trap)
+{
+	struct page *page;
+	int i;
+
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
+			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local)
+		   unsigned long ea, unsigned long vsid, int local,
+		   unsigned long trap)
 {
 	pte_t *ptep;
 	unsigned long old_pte, new_pte;
@@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	rflags = 0x2 | (!(new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
 	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
@@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
-		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
+		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
+					 local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f72cf87364cb..ba7a3055a9fc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
 
 	/* We didnt find a matching region, return start/end as 0 */
 	if (*start_pfn == -1UL)
-		start_pfn = 0;
+		*start_pfn = 0;
 }
 
 static inline void map_cpu_to_node(int cpu, int node)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index cfbb4e1f966b..51e7951414e5 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
 		return;
 	}
 #endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_PSERIES
-	if (platform_is_lpar()) {
-		plpar_hcall_norets(H_SET_ASR, stabreal);
-		return;
-	}
-#endif
+
 	mtspr(SPRN_ASR, stabreal);
 }
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 0d7fa00fcb00..f6e22da2a5da 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -1650,11 +1650,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable)
 		pmac_low_i2c_close(ui2c);
 		break;
 	}
+	printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+	       enable ? "En" : "Dis");
+
 	pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 	rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 	DBG("write result: %d,", rc);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index c78f2b290a73..2043659ea7b1 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
@@ -143,10 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce, *tcep;
 	long l, limit;
 
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
-	if (npages == 1)
+	if (TCE_PAGE_FACTOR == 0 && npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
 
@@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		__get_cpu_var(tce_page) = tcep;
 	}
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a50e5f3f396d..cf1bc11b3346 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	if (!(vflags & HPTE_V_BOLTED))
 		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
 
-#if 1
-	{
-		int i;
-		for (i=0;i<8;i++) {
-			unsigned long w0, w1;
-			plpar_pte_read(0, hpte_group, &w0, &w1);
-			BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
-				&& (w0 & HPTE_V_VALID));
-		}
-	}
-#endif
-
 	/* Now fill in the actual HPTE */
 	/* Set CEC cookie to 0 */
 	/* Zero page = 0 */
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 8fa51b0a32d2..cc3f64c084c5 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -767,14 +767,14 @@ config CPM2
 	  on it (826x, 827x, 8560).
 
 config PPC_CHRP
-	bool " Common Hardware Reference Platform (CHRP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
 	default y
 
 config PPC_PMAC
-	bool " Apple PowerMac based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_INDIRECT_PCI
 	default y
@@ -785,7 +785,7 @@ config PPC_PMAC64
 	default y
 
 config PPC_PREP
-	bool " PowerPC Reference Platform (PReP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 43b8fc2ca591..becbfa397556 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -301,6 +301,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Probe platform for CPUs: always linear. */
 	num_cpus = smp_ops->probe();
+
+	if (num_cpus < 2)
+		smp_tb_synchronized = 1;
+
 	for (i = 0; i < num_cpus; ++i)
 		cpu_set(i, cpu_possible_map);
 
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index 1e69b0593162..6b7b3a150631 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -1606,11 +1606,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1680,6 +1688,8 @@ void pmac_tweak_clock_spreading(int enable)
 		pmac_low_i2c_close(ui2c);
 		break;
 	}
+	printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+	       enable ? "En" : "Dis");
 	pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 	rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 	DBG("write result: %d,", rc);
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 322c74b2687f..80ddf9776bde 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -207,7 +207,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 3 */
 	{
-		.model_id = 2,
+		.model_id = 3,
 		.itarget = 0x350000,
 		.gd = 0x08e00000,
 		.gp = 0x00566666,
@@ -219,7 +219,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 5 */
 	{
-		.model_id = 2,
+		.model_id = 5,
 		.itarget = 0x3a0000,
 		.gd = 0x15400000,
 		.gp = 0x00233333,
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index c1b4bbabbe97..29b0bb0086d3 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -220,7 +220,8 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned int local);
 struct mm_struct;
 extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
-			  unsigned long ea, unsigned long vsid, int local);
+			  unsigned long ea, unsigned long vsid, int local,
+			  unsigned long trap);
 
 extern void htab_finish_init(void);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,