From cc8b5c96a7ac7d695c2887427b1003bd33ec9c97 Mon Sep 17 00:00:00 2001 From: Michal Ostrowski Date: Fri, 2 Dec 2005 13:09:13 +1100 Subject: [PATCH] powerpc/pseries: Fix TCE building with 64k pagesize Must adjust tcenum and npages by TCE_PAGE_FACTOR to convert between 64KB pages and TCE (4K) pages. (This is done in other places, except for this one location.) Signed-off-by: Michal Ostrowski Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/platforms/pseries/iommu.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index c78f2b290a..fa1edbdcf8 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, u64 rc; union tce_entry tce; + tcenum <<= TCE_PAGE_FACTOR; + npages <<= TCE_PAGE_FACTOR; + tce.te_word = 0; tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; tce.te_rdwr = 1; @@ -143,9 +146,6 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, union tce_entry tce, *tcep; long l, limit; - tcenum <<= TCE_PAGE_FACTOR; - npages <<= TCE_PAGE_FACTOR; - if (npages == 1) return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction); @@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, __get_cpu_var(tce_page) = tcep; } + tcenum <<= TCE_PAGE_FACTOR; + npages <<= TCE_PAGE_FACTOR; + tce.te_word = 0; tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; tce.te_rdwr = 1; -- cgit v1.2.2 From e8a167accb47de528d2ffddc0f13f8e84eaa71de Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Thu, 1 Dec 2005 21:10:10 +0100 Subject: [PATCH] powerpc: correct the NR_CPUS description text Update the help text to match the allowed range. Signed-off-by: Olaf Hering Signed-off-by: Paul Mackerras --- arch/powerpc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index bb2efdd566..db93dbc0e2 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -227,7 +227,7 @@ config SMP If you don't know what to do here, say N. config NR_CPUS - int "Maximum number of CPUs (2-32)" + int "Maximum number of CPUs (2-128)" range 2 128 depends on SMP default "32" if PPC64 -- cgit v1.2.2 From 6fbb618f5da0424adcba5f666035e4772a8df526 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 5 Dec 2005 14:19:10 +1100 Subject: powerpc/pseries: Optimize IOMMU setup The previous commit will use the page-at-a-time hypervisor call for setting up IOMMU entries when we are using 64k pages and setting up one 64k page, even though that means 16 calls to the hypervisor, since the hypervisor still works on 4k pages. This optimizes this case by using the multi-page IOMMU setup hypervisor call instead. 
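For a rough sense of the cost (the numbers here are inferred from the
page sizes involved, not taken from the change itself): with 64k
kernel pages over 4k TCE pages, TCE_PAGE_FACTOR works out to 4, so the
old single-page fast path expanded into 16 hypervisor calls:

	/* Illustrative sketch only -- not code from this patch.
	 * Assumes TCE_PAGE_FACTOR == 4 (64k kernel page / 4k TCE page).
	 */
	long npages = 1;		/* one 64k kernel page to map */
	npages <<= TCE_PAGE_FACTOR;	/* now 16 TCE entries */
	/* tce_build_pSeriesLP() then makes one hypervisor call per
	 * entry; the multi-TCE path batches all 16 into far fewer. */

Since TCE_PAGE_FACTOR is a compile-time constant, 4k-page kernels keep
the old single-call fast path and the new test costs them nothing.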
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/pseries/iommu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index fa1edbdcf8..2043659ea7 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -146,7 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce, *tcep;
 	long l, limit;
 
-	if (npages == 1)
+	if (TCE_PAGE_FACTOR == 0 && npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
 
-- 
cgit v1.2.2

From b6d78157ad129b1bce87cc831d0d038e97ca5551 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Mon, 5 Dec 2005 14:39:53 +1100
Subject: ppc: Build in all three of powermac, PREP and CHRP support

This reverts commit da0825fd201a03294dbf7f8f030676d608da122c, making it
so that if you select CONFIG_PPC_MULTIPLATFORM you get support for
PMAC, PREP and CHRP built in.

The reason for not allowing PMAC, PREP and CHRP to be selected
individually for ARCH=ppc is that there is too much interdependency
between them in the platform support code. For example, CHRP uses the
PMAC nvram code. Configuring with ARCH=powerpc does allow you to
select support for PMAC and CHRP separately. Support for PREP is not
there yet but should be there soon.

Signed-off-by: Paul Mackerras
---
 arch/ppc/Kconfig | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 8fa51b0a32..cc3f64c084 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -767,14 +767,14 @@ config CPM2
 	  on it (826x, 827x, 8560).
 
 config PPC_CHRP
-	bool " Common Hardware Reference Platform (CHRP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
 	default y
 
 config PPC_PMAC
-	bool " Apple PowerMac based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_INDIRECT_PCI
 	default y
@@ -785,7 +785,7 @@ config PPC_PMAC64
 	default y
 
 config PPC_PREP
-	bool " PowerPC Reference Platform (PReP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
-- 
cgit v1.2.2

From ca507eaf32fa599a92182ce91050046e807a994c Mon Sep 17 00:00:00 2001
From: Olof Johansson
Date: Tue, 29 Nov 2005 14:04:17 -0600
Subject: [PATCH] powerpc: remove redundant code in stab init

There's never been a hardware platform that has both the pSeries/RPA
LPAR hypervisor and stab (pre-POWER4 segment management). This removes
the redundant code in stab_initialize().

Signed-off-by: Olof Johansson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/mm/stab.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index cfbb4e1f96..51e7951414 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
 		return;
 	}
 #endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_PSERIES
-	if (platform_is_lpar()) {
-		plpar_hcall_norets(H_SET_ASR, stabreal);
-		return;
-	}
-#endif
+
 	mtspr(SPRN_ASR, stabreal);
 }
-- 
cgit v1.2.2

From 6d91bb93e45857259ec80cf7393ea561dba1a1de Mon Sep 17 00:00:00 2001
From: Mike Kravetz
Date: Wed, 7 Dec 2005 13:07:23 -0800
Subject: [PATCH] powerpc/pseries: boot failures on numa if no memory on node

This bug exists in the current code and prevents machines from booting
with numa enabled if there is a node that does not contain memory.
The workaround is to boot with 'numa=off'. Looks like a simple typo.
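To spell the typo out (an illustrative fragment, not the full
function): start_pfn is an output parameter, so assigning to the
pointer itself only changes the local copy and the caller still sees
the -1UL sentinel; the fix stores through the pointer instead:

	/* illustration only */
	if (*start_pfn == -1UL)
		start_pfn = 0;	/* bug: clears the local pointer only */

	if (*start_pfn == -1UL)
		*start_pfn = 0;	/* fix: writes 0 back to the caller */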
Signed-off-by: Mike Kravetz Signed-off-by: Paul Mackerras --- arch/powerpc/mm/numa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index f72cf87364..ba7a3055a9 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn, /* We didnt find a matching region, return start/end as 0 */ if (*start_pfn == -1UL) - start_pfn = 0; + *start_pfn = 0; } static inline void map_cpu_to_node(int cpu, int node) -- cgit v1.2.2 From 325c82a029ca7ea80f8cb24815d6c9288d177190 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 8 Dec 2005 16:51:44 +1100 Subject: [PATCH] powerpc: Fix a huge page bug The 64k pages patch changed the meaning of one argument passed to the low level hash functions (from "large" it became "psize" or page size index), but one of the call sites wasn't properly updated, causing potential random weird problems with huge pages. This fixes it. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/mm/hugetlbpage.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 6bc9dbad7d..8bce515dc3 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -703,7 +703,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += (old_pte & _PAGE_F_GIX) >> 12; - if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1) + if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize, + local) == -1) old_pte &= ~_PAGE_HPTEFLAGS; } -- cgit v1.2.2 From 6184b723876cd1a374e2d1094b3c73765d4c31c1 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 8 Dec 2005 16:53:34 +1100 Subject: [PATCH] powerpc: Remove debug code in hash path Some debug code wasn't properly removed from the initial 64k pages patch, and while it's harmless, it's also slowing down significantly a very hot code path, thus it should really be removed. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/platforms/pseries/lpar.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index a50e5f3f39..cf1bc11b33 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, if (!(vflags & HPTE_V_BOLTED)) DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); -#if 1 - { - int i; - for (i=0;i<8;i++) { - unsigned long w0, w1; - plpar_pte_read(0, hpte_group, &w0, &w1); - BUG_ON (HPTE_V_COMPARE(hpte_v, w0) - && (w0 & HPTE_V_VALID)); - } - } -#endif - /* Now fill in the actual HPTE */ /* Set CEC cookie to 0 */ /* Zero page = 0 */ -- cgit v1.2.2 From 326743ead7d0c08e0da715832386660ff3823170 Mon Sep 17 00:00:00 2001 From: Michal Ostrowski Date: Thu, 8 Dec 2005 16:56:17 +1100 Subject: [PATCH] Fix windfarm model-id table model_id fields of wf_smu_sys_all_params should match the model ID they are supposed to represent (as commented). Fixes windfarm on some iMac 8,1 models. 
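For context, a hedged sketch of why the duplicate IDs broke things
(the matching loop itself is not part of this diff, so its exact
shape below is an assumption):

	/* assumed lookup pattern, for illustration only */
	for (i = 0; i < ARRAY_SIZE(wf_smu_sys_all_params); i++)
		if (wf_smu_sys_all_params[i].model_id == model_id)
			break;

	/* with three entries all claiming model_id 2, machines
	 * reporting IDs 3 and 5 never found their parameter sets */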
Signed-off-by: Michal Ostrowski
Acked-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 drivers/macintosh/windfarm_pm81.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 322c74b268..80ddf9776b 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -207,7 +207,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 3 */
 	{
-		.model_id = 2,
+		.model_id = 3,
 		.itarget = 0x350000,
 		.gd = 0x08e00000,
 		.gp = 0x00566666,
@@ -219,7 +219,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 5 */
 	{
-		.model_id = 2,
+		.model_id = 5,
 		.itarget = 0x3a0000,
 		.gd = 0x15400000,
 		.gp = 0x00233333,
-- 
cgit v1.2.2

From dabcafd3f363bacd6b89f537af27dc79128e4806 Mon Sep 17 00:00:00 2001
From: Olof Johansson
Date: Thu, 8 Dec 2005 19:40:17 -0600
Subject: [PATCH] powerpc: Set cache info defaults

Cache info is set up by walking the device tree in
initialize_cache_info(). However, icache_flush_range might be called
before that, in slb_initialize()->patch_slb_encoding, which modifies
the load immediate instructions used with SLB fault code. Not only
that, but depending on memory layout, we might take SLB faults during
unflatten_device_tree. So that fault will load an SLB entry that might
not contain the right LLP flags for the segment.

Either we can walk the flattened device tree to set up cache info, or
we can pick defaults that are known to work. Doing it in the flattened
device tree is hairier since we need to know the machine type to know
what property to look for, etc, etc. For now, it's just easier to go
with the defaults. The worst that can happen is that we waste a few
cycles doing too-small dcbst/icbi increments.

Signed-off-by: Olof Johansson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/setup_64.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 608fee7c7e..e3fb78397d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+	.dline_size = 0x80,
+	.log_dline_size = 7,
+	.iline_size = 0x80,
+	.log_iline_size = 7
+};
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
 /*
-- 
cgit v1.2.2

From cbf52afdc0eb88492cf7808cc4b4f58a46f1b1ad Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Fri, 9 Dec 2005 14:20:52 +1100
Subject: [PATCH] powerpc: Add missing icache flushes for hugepages

On most powerpc CPUs, the dcache and icache are not coherent, so
between writing and executing a page the caches must be flushed.
Userspace programs assume pages given to them by the kernel are icache
clean, so we must do this flush between the kernel clearing a page and
it being mapped into userspace for execute. We were not doing this for
hugepages; this patch corrects the situation.

We use the same lazy mechanism as we use for normal pages, delaying
the flush until userspace actually attempts to execute from the page
in question.

Tested on G5.
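To give a sense of the deferred cost (sizes assumed for a typical
ppc64 configuration of this era, 16MB huge pages over 4k base pages):
the lazy flush loop in the diff below walks HPAGE_SIZE / PAGE_SIZE
subpages, i.e. about 4096 dcache/icache flushes per huge page, which
is why it only runs on an actual instruction fault (trap 0x400):

	/* cost illustration; page size values are assumptions */
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)	/* 16M/4k = 4096 */
		__flush_dcache_icache(page_address(page + i));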
Signed-off-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/mm/hash_utils_64.c | 2 +- arch/powerpc/mm/hugetlbpage.c | 35 ++++++++++++++++++++++++++++++++++- include/asm-powerpc/mmu.h | 3 ++- 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 706e8a63ce..a33583f3b0 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) /* Handle hugepage regions */ if (unlikely(in_hugepage_area(mm->context, ea))) { DBG_LOW(" -> huge page !\n"); - return hash_huge_page(mm, access, ea, vsid, local); + return hash_huge_page(mm, access, ea, vsid, local, trap); } /* Get PTE and page size from page tables */ diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 8bce515dc3..97512b89e7 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -639,8 +639,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return -ENOMEM; } +/* + * Called by asm hashtable.S for doing lazy icache flush + */ +static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags, + pte_t pte, int trap) +{ + struct page *page; + int i; + + if (!pfn_valid(pte_pfn(pte))) + return rflags; + + page = pte_page(pte); + + /* page is dirty */ + if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) { + if (trap == 0x400) { + for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) + __flush_dcache_icache(page_address(page+i)); + set_bit(PG_arch_1, &page->flags); + } else { + rflags |= HPTE_R_N; + } + } + return rflags; +} + int hash_huge_page(struct mm_struct *mm, unsigned long access, - unsigned long ea, unsigned long vsid, int local) + unsigned long ea, unsigned long vsid, int local, + unsigned long trap) { pte_t *ptep; unsigned long old_pte, new_pte; @@ -691,6 +719,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, rflags = 0x2 | (!(new_pte & _PAGE_RW)); /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N); + if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + /* No CPU has hugepages but lacks no execute, so we + * don't need to worry about that case */ + rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte), + trap); /* Check if pte already has an hpte (case 2) */ if (unlikely(old_pte & _PAGE_HASHPTE)) { diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h index c1b4bbabbe..29b0bb0086 100644 --- a/include/asm-powerpc/mmu.h +++ b/include/asm-powerpc/mmu.h @@ -220,7 +220,8 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access, unsigned int local); struct mm_struct; extern int hash_huge_page(struct mm_struct *mm, unsigned long access, - unsigned long ea, unsigned long vsid, int local); + unsigned long ea, unsigned long vsid, int local, + unsigned long trap); extern void htab_finish_init(void); extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, -- cgit v1.2.2 From 23ed6cb9a237902cce6018a24d1993c346abddb4 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Fri, 9 Dec 2005 16:45:17 +1100 Subject: [PATCH] powerpc: Fix SLB flushing path in hugepage On ppc64, when opening a new hugepage region, we need to make sure any old normal-page SLBs for the area are flushed on all CPUs. 
There was a bug in this logic - after putting the new hugepage area masks into the thread structure, we copied it into the paca (read by the SLB miss handler) only on one CPU, not on all. This could cause incorrect SLB entries to be loaded when a multithreaded program was running simultaneously on several CPUs. This patch corrects the error, copying the context information into the PACA on all CPUs using the mm in question before flushing any existing SLB entries. Signed-off-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/mm/hugetlbpage.c | 57 +++++++++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 97512b89e7..54131b877d 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len) return 0; } +struct slb_flush_info { + struct mm_struct *mm; + u16 newareas; +}; + static void flush_low_segments(void *parm) { - u16 areas = (unsigned long) parm; + struct slb_flush_info *fi = parm; unsigned long i; - asm volatile("isync" : : : "memory"); + BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS); + + if (current->active_mm != fi->mm) + return; - BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS); + /* Only need to do anything if this CPU is working in the same + * mm as the one which has changed */ + + /* update the paca copy of the context struct */ + get_paca()->context = current->active_mm->context; + asm volatile("isync" : : : "memory"); for (i = 0; i < NUM_LOW_AREAS; i++) { - if (! (areas & (1U << i))) + if (! (fi->newareas & (1U << i))) continue; asm volatile("slbie %0" : : "r" ((i << SID_SHIFT) | SLBIE_C)); } - asm volatile("isync" : : : "memory"); } static void flush_high_segments(void *parm) { - u16 areas = (unsigned long) parm; + struct slb_flush_info *fi = parm; unsigned long i, j; - asm volatile("isync" : : : "memory"); - BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS); + BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS); + + if (current->active_mm != fi->mm) + return; + + /* Only need to do anything if this CPU is working in the same + * mm as the one which has changed */ + /* update the paca copy of the context struct */ + get_paca()->context = current->active_mm->context; + + asm volatile("isync" : : : "memory"); for (i = 0; i < NUM_HIGH_AREAS; i++) { - if (! (areas & (1U << i))) + if (! (fi->newareas & (1U << i))) continue; for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++) asm volatile("slbie %0" :: "r" (((i << HTLB_AREA_SHIFT) - + (j << SID_SHIFT)) | SLBIE_C)); + + (j << SID_SHIFT)) | SLBIE_C)); } - asm volatile("isync" : : : "memory"); } @@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area) static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas) { unsigned long i; + struct slb_flush_info fi; BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS); BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS); @@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas) mm->context.low_htlb_areas |= newareas; - /* update the paca copy of the context struct */ - get_paca()->context = mm->context; - /* the context change must make it to memory before the flush, * so that further SLB misses do the right thing. 
*/ mb(); - on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1); + + fi.mm = mm; + fi.newareas = newareas; + on_each_cpu(flush_low_segments, &fi, 0, 1); return 0; } static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas) { + struct slb_flush_info fi; unsigned long i; BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS); @@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas) /* the context change must make it to memory before the flush, * so that further SLB misses do the right thing. */ mb(); - on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1); + + fi.mm = mm; + fi.newareas = newareas; + on_each_cpu(flush_high_segments, &fi, 0, 1); return 0; } -- cgit v1.2.2 From b39f9485e6cfe1bf21b18b60fd8c631a72a1304d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 12 Dec 2005 13:13:24 +1100 Subject: [PATCH] powerpc: Fix clock spreading setting on some powermacs The code that sets the clock spreading feature of the Intrepid ASIC must not be run on some machine models or those won't boot. This fixes it. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/platforms/powermac/feature.c | 21 ++++++++++++++++----- arch/ppc/platforms/pmac_feature.c | 20 +++++++++++++++----- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 0d7fa00fcb..f6e22da2a5 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -1650,11 +1650,19 @@ void pmac_tweak_clock_spreading(int enable) */ if (macio->type == macio_intrepid) { - if (enable) - UN_OUT(UNI_N_CLOCK_SPREADING, 2); - else - UN_OUT(UNI_N_CLOCK_SPREADING, 0); - mdelay(40); + struct device_node *clock = + of_find_node_by_path("/uni-n@f8000000/hw-clock"); + if (clock && get_property(clock, "platform-do-clockspreading", + NULL)) { + printk(KERN_INFO "%sabling clock spreading on Intrepid" + " ASIC\n", enable ? "En" : "Dis"); + if (enable) + UN_OUT(UNI_N_CLOCK_SPREADING, 2); + else + UN_OUT(UNI_N_CLOCK_SPREADING, 0); + mdelay(40); + } + of_node_put(clock); } while (machine_is_compatible("PowerBook5,2") || @@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable) pmac_low_i2c_close(ui2c); break; } + printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n", + enable ? "En" : "Dis"); + pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub); rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9); DBG("write result: %d,", rc); diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c index 1e69b05931..6b7b3a1506 100644 --- a/arch/ppc/platforms/pmac_feature.c +++ b/arch/ppc/platforms/pmac_feature.c @@ -1606,11 +1606,19 @@ void pmac_tweak_clock_spreading(int enable) */ if (macio->type == macio_intrepid) { - if (enable) - UN_OUT(UNI_N_CLOCK_SPREADING, 2); - else - UN_OUT(UNI_N_CLOCK_SPREADING, 0); - mdelay(40); + struct device_node *clock = + of_find_node_by_path("/uni-n@f8000000/hw-clock"); + if (clock && get_property(clock, "platform-do-clockspreading", + NULL)) { + printk(KERN_INFO "%sabling clock spreading on Intrepid" + " ASIC\n", enable ? 
"En" : "Dis"); + if (enable) + UN_OUT(UNI_N_CLOCK_SPREADING, 2); + else + UN_OUT(UNI_N_CLOCK_SPREADING, 0); + mdelay(40); + } + of_node_put(clock); } while (machine_is_compatible("PowerBook5,2") || @@ -1680,6 +1688,8 @@ void pmac_tweak_clock_spreading(int enable) pmac_low_i2c_close(ui2c); break; } + printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n", + enable ? "En" : "Dis"); pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub); rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9); DBG("write result: %d,", rc); -- cgit v1.2.2 From ef969434005e772218c0b8086539804605070fa8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 10 Dec 2005 18:41:50 -0800 Subject: [PATCH] ppc32: set smp_tb_synchronized on UP with SMP kernel ppc32 kernel, when built with CONFIG_SMP and booted on a single CPU machine, will not properly set smp_tb_synchronized, thus causing gettimeofday() to not use the HW timebase and to be limited to jiffy resolution. This, among others, causes unacceptable pauses when launching X.org. Signed-Off-By: Johannes Berg Acked-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Paul Mackerras --- arch/ppc/kernel/smp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c index 43b8fc2ca5..becbfa3975 100644 --- a/arch/ppc/kernel/smp.c +++ b/arch/ppc/kernel/smp.c @@ -301,6 +301,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* Probe platform for CPUs: always linear. */ num_cpus = smp_ops->probe(); + + if (num_cpus < 2) + smp_tb_synchronized = 1; + for (i = 0; i < num_cpus; ++i) cpu_set(i, cpu_possible_map); -- cgit v1.2.2