author      Arnd Bergmann <arnd@arndb.de>          2006-10-04 11:26:16 -0400
committer   Paul Mackerras <paulus@samba.org>      2006-10-04 19:21:01 -0400
commit      ac91cb8dae061ced64e475d0d70fac4a95298819
tree        70d82bc7abea0353157a1ba9d1aa58c41a555eff /arch/powerpc
parent      6263203ed6e9ff107129a1ebe613290b342a4465
[POWERPC] spufs: use correct pg_prot for mapping SPU local store
This hopefully fixes a long-standing bug in the spu file system.
An spu context comes with local memory that can either be backed
by kernel pages or point directly to a physical SPE.
When mapping the physical SPE, that mapping needs to be cache-inhibited.
For simplicity, we used to map the kernel backing memory that way
too, but that was not only inefficient, it was also incorrect:
the same page could then be accessed simultaneously through a
cacheable and a cache-inhibited mapping, which the powerpc
architecture does not allow and which in our case caused data
inconsistencies that we had to work around with a really ugly
hack in user space.
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
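
The core of the fix, shown in full in the diff below, is that spufs_mem_mmap_nopage()
now picks the page protection per fault instead of relying on a single protection set
at mmap() time. A condensed sketch of the two cases (identifiers taken from the hunk;
_PAGE_NO_CACHE and _PAGE_GUARDED are the powerpc page attribute bits):

	/* Saved context: the local store lives in vmalloc'ed kernel
	 * memory, so clear the inhibit/guard bits and map it cacheable. */
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));

	/* Running context: the local store is the physical SPE's memory,
	 * which must be mapped cache-inhibited and guarded. */
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				| _PAGE_NO_CACHE | _PAGE_GUARDED);

Since ctx->state is only known when the fault actually happens, the protection has to
be (re)derived on each fault rather than fixed once when the file is mapped.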
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/platforms/cell/spufs/file.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 64f8b0a9b9e1..514a1d508035 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -102,12 +102,16 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
 
 	spu_acquire(ctx);
 
-	if (ctx->state == SPU_STATE_SAVED)
+	if (ctx->state == SPU_STATE_SAVED) {
+		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
 		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
-	else
+	} else {
+		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+					| _PAGE_NO_CACHE | _PAGE_GUARDED);
 		page = pfn_to_page((ctx->spu->local_store_phys + offset)
 				   >> PAGE_SHIFT);
-
+	}
 	spu_release(ctx);
 
 	if (type)
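
For context, a hypothetical user-space sequence (not part of the patch) that exercises
this fault path by mapping the "mem" file of an spufs context; the mount point and the
context name are assumptions, and 0x40000 is the 256 KiB SPE local store size:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Hypothetical paths: assumes spufs is mounted on /spu and
		 * a context directory "my_ctx" already exists. */
		int fd = open("/spu/my_ctx/mem", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		char *ls = mmap(NULL, 0x40000, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, 0);
		if (ls == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		/* The first access faults into spufs_mem_mmap_nopage(); the
		 * kernel then installs a cacheable or cache-inhibited pte
		 * depending on whether the context is saved or running on a
		 * physical SPE at that moment. */
		ls[0] = 1;
		return 0;
	}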