author    Arnd Bergmann <arnd@arndb.de>    2006-11-20 12:45:06 -0500
committer Paul Mackerras <paulus@samba.org>    2006-12-04 04:39:59 -0500
commit    932f535dd4c83dc3eb631c2cee1dfd6ae289b88c (patch)
tree      a5fcb59665a421867de33f53b9f63ed1fefa5268 /arch/powerpc/platforms/cell
parent    5c3ecd659bd20cda214a402a3132c790cc886cd2 (diff)
[POWERPC] spufs: Always map local store non-guarded
When fixing spufs to map the 'mem' file backing store cacheable, I incorrectly set the physical mapping to use both cache-inhibited and guarded mapping, which resulted in a serious performance degradation.

Debugged-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
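For clarity, the mapping logic in spufs_mem_mmap_nopage() looks roughly like this after the patch. This is a sketch reconstructed from the hunk below, not the complete handler; the comments describe the intent stated in the commit message.

	/*
	 * Sketch (from the hunk below) of the page-protection handling
	 * after this patch.  When the context is saved, the local store
	 * backing copy lives in normal kernel memory and is mapped
	 * cacheable; when the context runs on an SPU, the real local
	 * store is mapped cache-inhibited but no longer guarded.
	 */
	if (ctx->state == SPU_STATE_SAVED) {
		/* backing store in main memory: clear the no-cache bit */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
						& ~_PAGE_NO_CACHE);
		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
	} else {
		/* physical local store: cache-inhibited, not guarded */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
						| _PAGE_NO_CACHE);
		page = pfn_to_page((ctx->spu->local_store_phys + offset)
						>> PAGE_SHIFT);
	}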
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--    arch/powerpc/platforms/cell/spufs/file.c    4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 55d7e0f4bb3b..1c1af71d19cb 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -105,11 +105,11 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
+					& ~_PAGE_NO_CACHE);
 		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
 	} else {
 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-					     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+					     | _PAGE_NO_CACHE);
 		page = pfn_to_page((ctx->spu->local_store_phys + offset)
 					>> PAGE_SHIFT);
 	}