From c0f7c6cb5dbb6d90e0334e62376dbc6ac3d1d315 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Fri, 3 Aug 2007 14:08:24 +1000
Subject: [POWERPC] Expand RPN field to 34 bits when using 64k pages

The real page number field in our PTEs when configured for 64kB pages
is currently 32 bits, which turns out to be not quite enough for the
resources that the eHCA driver wants to map.  This expands the RPN
field to include 2 adjacent, previously-unused bits.

Signed-off-by: Paul Mackerras
Acked-by: Benjamin Herrenschmidt
---
 include/asm-powerpc/pgtable-64k.h | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 31cbd3d7fce8..33ae9018fe72 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -49,12 +49,10 @@
 /* Shift to put page number into pte.
  *
- * That gives us a max RPN of 32 bits, which means a max of 48 bits
- * of addressable physical space.
- * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
- * 32 makes PTEs more readable for debugging for now :)
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
  */
-#define PTE_RPN_SHIFT	(32)
+#define PTE_RPN_SHIFT	(30)
 #define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
 #define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
-- cgit v1.2.2

From: Paul Mackerras
Date: Fri, 3 Aug 2007 19:16:11 +1000
Subject: [POWERPC] Fix special PTE code for secondary hash bucket

The code for mapping special 4k pages on kernels using a 64kB base
page size was missing the code for doing the RPN (real page number)
manipulation when inserting the hardware PTE in the secondary hash
bucket.  It needs the same code as has already been added to the
code that inserts the HPTE in the primary hash bucket.  This adds it.

Spotted by Ben Herrenschmidt.

Signed-off-by: Paul Mackerras
---
 arch/powerpc/mm/hash_low_64.S | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 4762ff7c14df..35eabfb50723 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -472,10 +472,12 @@ _GLOBAL(htab_call_hpte_insert1)
 	/* Now try secondary slot */
 
 	/* real page number in r5, PTE RPN value + index */
-	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+	andis.	r0,r31,_PAGE_4K_PFN@h
+	srdi	r5,r31,PTE_RPN_SHIFT
+	bne-	3f
 	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
 	add	r5,r5,r25
-	sldi	r5,r5,HW_PAGE_SHIFT
+3:	sldi	r5,r5,HW_PAGE_SHIFT
 
 	/* Calculate secondary group hash */
 	andc	r0,r27,r28
-- cgit v1.2.2
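The secondary-slot sequence fixed above is easier to follow in C.  The
following is only an illustrative rendering of what the assembly computes;
the function name and calling convention are invented for the example
(r31 holds the Linux PTE, r25 the 4kB subpage index, r5 the result):

    /* Illustrative C sketch of the fixed path (not kernel code). */
    static unsigned long hpte_real_addr(unsigned long pte, unsigned long index)
    {
            unsigned long rpn = pte >> PTE_RPN_SHIFT;       /* srdi r5,r31,... */

            if (!(pte & _PAGE_4K_PFN)) {                    /* andis. + bne- 3f */
                    /* Normal 64kB page: scale the RPN to 4kB units and
                     * select the index'th 4kB hardware subpage. */
                    rpn <<= PAGE_SHIFT - HW_PAGE_SHIFT;     /* sldi */
                    rpn += index;                           /* add r5,r5,r25 */
            }
            /* Special 4k PFN: rpn is already in 4kB units, skip scaling. */
            return rpn << HW_PAGE_SHIFT;                    /* 3: sldi */
    }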
From 683e3ab2b54a7190ec8517705880171cc8ac1d92 Mon Sep 17 00:00:00 2001
From: Andre Detsch
Date: Tue, 31 Jul 2007 09:48:11 +1000
Subject: [POWERPC] spufs: Fix affinity after introduction of node_allowed() calls

This patch fixes affinity reference point placement, which was not
being done in some situations, after the introduction of the
node_allowed() calls.  The previously used parameter, 'ctx', is just
the iterator of the preceding list_for_each_entry_reverse loop, and
its value might be invalid at the end of the loop.  Also, the right
context from which to take the information when placing the reference
ctx _is_ the reference ctx itself.

Signed-off-by: Andre Detsch
Signed-off-by: Arnd Bergmann
Signed-off-by: Jeremy Kerr
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/cell/spufs/sched.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 758a80ac080a..c784edd40ea7 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -351,7 +351,8 @@ static void aff_set_ref_point_location(struct spu_gang *gang)
 			lowest_offset = ctx->aff_offset;
 	}
 
-	gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
+	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
+					     lowest_offset);
 }
 
 static struct spu *ctx_location(struct spu *ref, int offset, int node)
-- cgit v1.2.2

From cba684f56d7e8b82b08d4372375a42d6ebeab47d Mon Sep 17 00:00:00 2001
From: Stephen Rothwell
Date: Tue, 31 Jul 2007 17:22:00 +1000
Subject: [POWERPC] ps3: Fix section mismatch in ps3/setup.c

WARNING: vmlinux.o(.text+0x605d4): Section mismatch: reference to
.init.text:.__alloc_bootmem (between '.prealloc' and '.ps3_power_save')

Signed-off-by: Stephen Rothwell
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/ps3/setup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index aa05288de64e..2952b22f1c84 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -109,7 +109,7 @@ static void ps3_panic(char *str)
 
 #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
     defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
-static void prealloc(struct ps3_prealloc *p)
+static void __init prealloc(struct ps3_prealloc *p)
 {
 	if (!p->size)
 		return;
-- cgit v1.2.2
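The warning above is the generic section-mismatch pattern: code outside
.init.text calling a boot-time-only allocator.  A minimal sketch of the rule
the fix restores — the function and variable names here are hypothetical,
only the __init/alloc_bootmem pairing matters:

    #include <linux/init.h>
    #include <linux/bootmem.h>

    static void *example_videomemory;   /* hypothetical persistent pointer */

    /* alloc_bootmem() lives in .init.text and is discarded once boot
     * completes, so every caller must itself be __init; a non-__init
     * caller both trips modpost and risks jumping into freed memory. */
    static void __init example_prealloc(void)
    {
            example_videomemory = alloc_bootmem(64 * 1024);
    }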
From 17aa3a82aa2173a22405f862c4444656f0494a3f Mon Sep 17 00:00:00 2001
From: Kevin Corry
Date: Wed, 1 Aug 2007 06:19:46 +1000
Subject: [POWERPC] Fix num_cpus calculation in smp_call_function_map()

In smp_call_function_map(), num_cpus is set to the number of online
CPUs minus one.  However, if the CPU mask does not include all CPUs
(except the one we're running on), the routine will hang in the first
while() loop until the 8 second timeout occurs.

Instead, num_cpus should be set to the number of CPUs specified in the
mask passed into the routine, after we've made any modifications to
the mask.  With this change, we can also get rid of the call to
cpus_empty() and avoid adding another pass through the bitmask.

Signed-off-by: Kevin Corry
Signed-off-by: Carl Love
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/smp.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 087c92f2a3eb..1ea43160f543 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -212,11 +212,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
 	atomic_set(&data.finished, 0);
 
 	spin_lock(&call_lock);
-	/* Must grab online cpu count with preempt disabled, otherwise
-	 * it can change. */
-	num_cpus = num_online_cpus() - 1;
-	if (!num_cpus)
-		goto done;
 
 	/* remove 'self' from the map */
 	if (cpu_isset(smp_processor_id(), map))
@@ -224,7 +219,9 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
 
 	/* sanity check the map, remove any non-online processors. */
 	cpus_and(map, map, cpu_online_map);
-	if (cpus_empty(map))
+
+	num_cpus = cpus_weight(map);
+	if (!num_cpus)
 		goto done;
 
 	call_data = &data;
-- cgit v1.2.2

From b9c3fdb0f0fe02ba33e87ef947f23cd12e6196fe Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 1 Aug 2007 11:34:38 +1000
Subject: [POWERPC] Fix parse_drconf_memory() for 64-bit start addresses

Some new machines use the "ibm,dynamic-reconfiguration-memory"
property to provide memory layout information, rather than via memory
nodes.  There is a bug in the code to parse this property for start
addresses over 4GB; we store the start address in an unsigned int,
which means we throw away the high bits and add apparently duplicate
regions.  This results in a BUG() in free_bootmem_core().  Fix it by
using an unsigned long instead.

Signed-off-by: Michael Ellerman
Signed-off-by: Paul Mackerras
---
 arch/powerpc/mm/numa.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index de45aa82d97b..c12adc3ddffd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -307,9 +307,9 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	const unsigned int *lm, *dm, *aa;
 	unsigned int ls, ld, la;
 	unsigned int n, aam, aalen;
-	unsigned long lmb_size, size;
+	unsigned long lmb_size, size, start;
 	int nid, default_nid = 0;
-	unsigned int start, ai, flags;
+	unsigned int ai, flags;
 
 	lm = of_get_property(memory, "ibm,lmb-size", &ls);
 	dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
-- cgit v1.2.2

From 3a77d291be53fbc619f14a6199b9b4cac036c476 Mon Sep 17 00:00:00 2001
From: Segher Boessenkool
Date: Thu, 2 Aug 2007 01:41:13 +1000
Subject: [POWERPC] Fix a compile warning in pci_32.c

pci_assign_resource() is declared __must_check, so do so.

Signed-off-by: Segher Boessenkool
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/pci_32.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index cd35c969bb28..04a3109ae3c6 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -581,8 +581,11 @@ pcibios_assign_resources(void)
 		if ((r->flags & IORESOURCE_UNSET) && r->end &&
 		    (!ppc_md.pcibios_enable_device_hook ||
 		     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
+			int rc;
+
 			r->flags &= ~IORESOURCE_UNSET;
-			pci_assign_resource(dev, idx);
+			rc = pci_assign_resource(dev, idx);
+			BUG_ON(rc);
 		}
 	}
-- cgit v1.2.2
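For context, __must_check is the kernel's wrapper for GCC's
warn_unused_result attribute: ignoring the result of such a function is a
compile warning, which is what consuming rc in BUG_ON() silences above.
A minimal sketch, with a hypothetical function standing in for
pci_assign_resource():

    #include <linux/kernel.h>
    #include <linux/errno.h>

    /* __must_check expands to __attribute__((warn_unused_result)). */
    static int __must_check try_claim(unsigned long start, unsigned long len)
    {
            return len ? 0 : -EINVAL;
    }

    static void demo(void)
    {
            int rc;

            try_claim(0x1000, 0x100);       /* warning: result ignored */
            rc = try_claim(0x1000, 0x100);  /* result consumed: no warning */
            BUG_ON(rc);
    }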
From 5628244059976009151d41c2798855290753d8d5 Mon Sep 17 00:00:00 2001
From: Segher Boessenkool
Date: Thu, 2 Aug 2007 01:41:14 +1000
Subject: [POWERPC] Fix a compile warning in powermac/feature.c

...by using the pci_get API instead of the deprecated old stuff.

Signed-off-by: Segher Boessenkool
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/powermac/feature.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index f29705f8047d..ba931be2175c 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -826,13 +826,15 @@ core99_ata100_enable(struct device_node *node, long value)
 
 	if (value) {
 		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
-			pdev = pci_find_slot(pbus, pid);
+			pdev = pci_get_bus_and_slot(pbus, pid);
 		if (pdev == NULL)
 			return 0;
 		rc = pci_enable_device(pdev);
+		if (rc == 0)
+			pci_set_master(pdev);
+		pci_dev_put(pdev);
 		if (rc)
 			return rc;
-		pci_set_master(pdev);
 	}
 	return 0;
 }
-- cgit v1.2.2
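Unlike pci_find_slot(), the pci_get_* interfaces return a pci_dev with its
reference count raised, which is why the patch must add pci_dev_put() before
every exit.  The usage pattern, as a sketch with a hypothetical wrapper
function:

    #include <linux/pci.h>

    /* pci_get_bus_and_slot() takes a reference on the returned device;
     * pci_dev_put() must drop it on every path out of the function. */
    static int enable_device_at(unsigned int bus, unsigned int devfn)
    {
            struct pci_dev *pdev = pci_get_bus_and_slot(bus, devfn);
            int rc;

            if (pdev == NULL)
                    return -ENODEV;

            rc = pci_enable_device(pdev);
            if (rc == 0)
                    pci_set_master(pdev);

            pci_dev_put(pdev);      /* drop the reference in all cases */
            return rc;
    }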
From 67439b76f29cb278bb3412fc873b980fc65110c9 Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Fri, 3 Aug 2007 11:55:39 +1000
Subject: [POWERPC] Fixes for the SLB shadow buffer code

On a machine with hardware 64kB pages and a kernel configured for a
64kB base page size, we need to change the vmalloc segment from 64kB
pages to 4kB pages if some driver creates a non-cacheable mapping in
the vmalloc area.  However, we never updated the SLB shadow buffer.
This fixes it.  Thanks to paulus for finding this.

Also added some write barriers to ensure the shadow buffer contents
are always consistent.

Signed-off-by: Michael Neuling
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/entry_64.S   |  3 +++
 arch/powerpc/mm/hash_utils_64.c  |  2 +-
 arch/powerpc/mm/slb.c            | 28 ++++++++++++++++++----------
 include/asm-powerpc/mmu-hash64.h |  1 +
 4 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9ef28da2c7fe..952eba6701f4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -389,8 +389,11 @@ BEGIN_FTR_SECTION
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
+	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
+	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
+	eieio
 
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bc7b0cedae5e..f1789578747a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		    mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 			get_paca()->vmalloc_sllp =
 				mmu_psize_defs[mmu_vmalloc_psize].sllp;
-			slb_flush_and_rebolt();
+			slb_vmalloc_update();
 		}
 #endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a73574..b0697017d0e8 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
-
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
+	slb_shadow_update(ea, flags, entry);
 
 	asm volatile("slbmte %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+	slb_shadow_update(get_paca()->kstack, lflags, 2);
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 695962f02059..3112ad14ad95 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -262,6 +262,7 @@ extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
+extern void slb_vmalloc_update(void);
 
 #endif /* __ASSEMBLY__ */
 
 /*
-- cgit v1.2.2
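The update protocol the patch enforces in slb_shadow_update() — invalidate,
write the new data, revalidate, with a write barrier between each step — is
the standard pattern for a buffer that another agent (here, PHYP) may read
at any instant.  In outline, as an illustrative sketch with simplified
types:

    #include <asm/system.h>     /* smp_wmb() in kernels of this era */

    struct shadow_entry { unsigned long esid, vsid; };

    /* The reader must never observe a valid ESID paired with a stale
     * VSID, so validity is torn down first and restored last. */
    static void shadow_entry_update(struct shadow_entry *e,
                                    unsigned long new_esid,
                                    unsigned long new_vsid)
    {
            e->esid = 0;            /* 1: mark the entry invalid          */
            smp_wmb();              /*    ... visible before the payload  */
            e->vsid = new_vsid;     /* 2: write the new payload           */
            smp_wmb();              /*    ... visible before revalidation */
            e->esid = new_esid;     /* 3: revalidate with the new ESID    */
            smp_wmb();              /*    publish before any slbmte       */
    }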