From cec08e7a948326b01555be6311480aa08e637de2 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 30 Apr 2008 15:41:48 +1000
Subject: [POWERPC] vmemmap fixes to use smaller pages

This changes vmemmap to use a different region (region 0xf) of the
address space, and to configure the page size of that region
dynamically at boot.

The problem with the current approach of always using 16M pages is
that it's not well suited to machines that have small amounts of
memory, such as small partitions on pseries, or PS3's.

In fact, on the PS3, failure to allocate the 16M page backing vmemmap
tends to prevent hotplugging the HV's "additional" memory, thus
limiting the available memory even more; from my experience, down to
something like 80M total, which makes it really not very usable.

The logic used by my patch to choose the vmemmap page size is:

 - If 16M pages are available and there's 1G or more RAM at boot,
   use that size.
 - Else if 64K pages are available, use that
 - Else use 4K pages

I've tested on a POWER6 (16M pages) and on an iSeries POWER3 (4K
pages) and it seems to work fine.

Note that I intend to change the way we organize the kernel regions &
SLBs so the actual region will change from 0xf back to something else
at one point, as I simplify the SLB miss handler, but that will be
for a later patch.

Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/mmu-hash64.h    |  1 +
 include/asm-powerpc/pgtable-ppc64.h | 10 +++++-----
 2 files changed, 6 insertions(+), 5 deletions(-)

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 0dff76776044..39c5c5f62bf5 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -177,6 +177,7 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
+extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
 extern int mmu_kernel_ssize;
 extern int mmu_highuser_ssize;
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 27f18695f7d6..cc6a43ba41d0 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -65,15 +65,15 @@
 
 #define VMALLOC_REGION_ID  (REGION_ID(VMALLOC_START))
 #define KERNEL_REGION_ID   (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID  (0xfUL)
 #define USER_REGION_ID     (0UL)
 
 /*
- * Defines the address of the vmemap area, in the top 16th of the
- * kernel region.
+ * Defines the address of the vmemap area, in its own region
  */
-#define VMEMMAP_BASE       (ASM_CONST(CONFIG_KERNEL_START) + \
-                            (0xfUL << (REGION_SHIFT - 4)))
-#define vmemmap            ((struct page *)VMEMMAP_BASE)
+#define VMEMMAP_BASE       (VMEMMAP_REGION_ID << REGION_SHIFT)
+#define vmemmap            ((struct page *)VMEMMAP_BASE)
+
 
 /*
  * Common bits in a linux-style PTE.  These match the bits in the
--
cgit v1.2.2


From 9c8387afdc93f90bf0241411d44e011d8d5b76df Mon Sep 17 00:00:00 2001
From: Nate Case
Date: Tue, 13 May 2008 06:14:14 +1000
Subject: [POWERPC] Fix uninitialized variable bug in copy_{to|from}_user

Calls to copy_to_user() or copy_from_user() can fail when copying N
bytes, where N is a constant no greater than 8 but not 1, 2, 4 or 8
(i.e. 3, 5, 6 or 7), because 'ret' is only set inside the switch
statement for sizes 1, 2, 4 and 8, yet it is tested after the switch
for any constant size <= 8 while still uninitialized.  This fixes it
by initializing 'ret' to 1, causing the code to fall through to the
__copy_tofrom_user call for sizes other than 1, 2, 4 or 8.
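
To make the failure mode concrete, here is a minimal user-space sketch
of the same pattern (hypothetical names: copy_small stands in for the
inatomic helpers and generic_copy for __copy_tofrom_user; this is not
the actual uaccess.h code).  With 'ret' left uninitialized, a constant
size of 3, 5, 6 or 7 skips every case label and the later test reads
garbage; initializing it to 1 guarantees the fall-through to the
generic copy routine:

    #include <string.h>

    /* Stand-in for __copy_tofrom_user(): returns the number of bytes
     * NOT copied (0 on success), mirroring the kernel convention. */
    static unsigned long generic_copy(void *to, const void *from,
                                      unsigned long n)
    {
        memcpy(to, from, n);
        return 0;
    }

    static inline unsigned long copy_small(void *to, const void *from,
                                           unsigned long n)
    {
        if (__builtin_constant_p(n) && (n <= 8)) {
            /* Before the fix this declaration was "unsigned long ret;":
             * for n == 3, 5, 6 or 7 no case below runs, so the test of
             * ret further down read an uninitialized value. */
            unsigned long ret = 1;

            switch (n) {
            case 1:
                *(unsigned char *)to = *(const unsigned char *)from;
                ret = 0;
                break;
            case 2:
                *(unsigned short *)to = *(const unsigned short *)from;
                ret = 0;
                break;
            case 4:
                *(unsigned int *)to = *(const unsigned int *)from;
                ret = 0;
                break;
            case 8:
                *(unsigned long *)to = *(const unsigned long *)from;
                ret = 0;
                break;
            }
            if (ret == 0)
                return 0;
            /* ret != 0: fall through to the generic routine */
        }
        return generic_copy(to, from, n);
    }

With the initialization in place, copy_small(dst, src, 7) always
reaches generic_copy(); without it, whether those 7 bytes were copied
at all depended on whatever value 'ret' happened to hold.
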
Signed-off-by: Dave Scidmore
Signed-off-by: Nate Case
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/uaccess.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 8e798e3758bc..1a0736f8803f 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -380,7 +380,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 		const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
-		unsigned long ret;
+		unsigned long ret = 1;
 
 		switch (n) {
 		case 1:
@@ -406,7 +406,7 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 		const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
-		unsigned long ret;
+		unsigned long ret = 1;
 
 		switch (n) {
 		case 1:
--
cgit v1.2.2


From 06a901c5621f85e07e00ac4816c7ca95620ee74a Mon Sep 17 00:00:00 2001
From: Stephen Rothwell
Date: Wed, 21 May 2008 16:24:31 +1000
Subject: [POWERPC] mpic: Fix use of uninitialized variable

Compiling ppc64_defconfig with gcc 4.3 gives these warnings:

arch/powerpc/sysdev/mpic.c: In function 'mpic_irq_get_priority':
arch/powerpc/sysdev/mpic.c:1351: warning: 'is_ipi' may be used uninitialized in this function
arch/powerpc/sysdev/mpic.c: In function 'mpic_irq_set_priority':
arch/powerpc/sysdev/mpic.c:1328: warning: 'is_ipi' may be used uninitialized in this function

It turns out that in the cases where is_ipi is uninitialized, another
variable (mpic) will be NULL and is dereferenced.  Protect against
this by returning if mpic is NULL in mpic_irq_set_priority, and by
removing mpic_irq_get_priority completely, as it has no in-tree
callers.  This has the nice side effect of making the warning go
away.

Signed-off-by: Stephen Rothwell
Acked-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/mpic.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index 943c5a3fac8a..a4d0f876b427 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -428,12 +428,11 @@ extern void mpic_init(struct mpic *mpic);
  */
 
-/* Change/Read the priority of an interrupt. Default is 8 for irqs and
+/* Change the priority of an interrupt. Default is 8 for irqs and
  * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the
  * IPI number is then the offset'ed (linux irq number mapped to the IPI)
  */
 extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
-extern unsigned int mpic_irq_get_priority(unsigned int irq);
 
 /* Setup a non-boot CPU */
 extern void mpic_setup_this_cpu(void);
--
cgit v1.2.2


From cfab3bdf8292edec19492c89520b1ad11279a648 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 28 May 2008 10:18:17 +1000
Subject: [POWERPC] Add "memory" clobber to MMIO accessors

Gcc might re-order MMIO accessors vs. surrounding consistent memory
accesses, which is a "bad thing", and could break drivers.  This
fixes it by adding a "memory" clobber to the MMIO accessors, which
should prevent gcc from doing that reordering.
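
To illustrate the kind of reordering at stake, here is a sketch of a
hypothetical driver path (the names ring_desc, mmio_write32 and
post_buffer are made up for the example; the accessor merely mirrors
the shape of the store accessors touched by this patch, and the inline
asm is PowerPC-only).  Without the "memory" clobber, gcc only knows
that the asm writes *reg, so it may move the ordinary cacheable stores
to the descriptor past the doorbell write; with the clobber, it must
assume the asm can read or write any memory, so those stores stay in
front of it:

    struct ring_desc {
        unsigned int addr;
        unsigned int len;
    };

    /* 32-bit little-endian MMIO store: sync, then a byte-reversed
     * store (stwbrx), modeled on the store accessors this patch
     * touches.  The "memory" clobber is the new part. */
    static inline void mmio_write32(volatile unsigned int *reg,
                                    unsigned int val)
    {
        __asm__ __volatile__("sync; stwbrx %1,0,%2"
                             : "=m" (*reg)
                             : "r" (val), "r" (reg)
                             : "memory");
    }

    void post_buffer(struct ring_desc *desc,
                     volatile unsigned int *doorbell,
                     unsigned int addr, unsigned int len)
    {
        desc->addr = addr;          /* plain stores to consistent memory... */
        desc->len = len;
        mmio_write32(doorbell, 1);  /* ...must not drift past this write */
    }

Because the clobber lives in the accessor, every caller gets the
ordering guarantee without adding compiler barriers of its own.
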
Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/io.h | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index e0062d73db1c..89189488e286 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -100,7 +100,7 @@ static inline type name(const volatile type __iomem *addr)	\
 {									\
 	type ret;							\
 	__asm__ __volatile__("sync;" insn ";twi 0,%0,0;isync"		\
-		: "=r" (ret) : "r" (addr), "m" (*addr));		\
+		: "=r" (ret) : "r" (addr), "m" (*addr) : "memory");	\
 	return ret;							\
 }
 
@@ -108,8 +108,8 @@ static inline void name(volatile type __iomem *addr, type val)	\
 {									\
 	__asm__ __volatile__("sync;" insn				\
-		: "=m" (*addr) : "r" (val), "r" (addr));		\
-	IO_SET_SYNC_FLAG();						\
+		: "=m" (*addr) : "r" (val), "r" (addr) : "memory");	\
+	IO_SET_SYNC_FLAG();						\
 }
 
@@ -333,7 +333,8 @@ static inline unsigned int name(unsigned int port)	\
 		"	.long	3b,5b\n"				\
 		".previous"						\
 		: "=&r" (x)						\
-		: "r" (port + _IO_BASE));				\
+		: "r" (port + _IO_BASE)					\
+		: "memory");						\
 	return x;							\
 }
 
@@ -350,7 +351,8 @@ static inline void name(unsigned int val, unsigned int port)	\
 		"	.long	0b,2b\n"				\
 		"	.long	1b,2b\n"				\
 		".previous"						\
-		: : "r" (val), "r" (port + _IO_BASE));			\
+		: : "r" (val), "r" (port + _IO_BASE)			\
+		: "memory");						\
 }
 
 __do_in_asm(_rec_inb, "lbzx")
--
cgit v1.2.2


From ce263d70e509287ee761f9bba519342f57b121ca Mon Sep 17 00:00:00 2001
From: Hollis Blanchard
Date: Wed, 21 May 2008 18:22:51 -0500
Subject: KVM: ppc: Remove duplicate function

This was left behind from some code movement.

Signed-off-by: Hollis Blanchard
Signed-off-by: Avi Kivity
---
 include/asm-powerpc/kvm_ppc.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
index b35a7e3ef978..5a21115228af 100644
--- a/include/asm-powerpc/kvm_ppc.h
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -57,6 +57,7 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
                            u64 asid, u32 flags);
--
cgit v1.2.2