author     Max Filippov <jcmvbkbc@gmail.com>   2012-12-21 21:35:04 -0500
committer  Chris Zankel <chris@zankel.net>     2013-02-23 22:12:53 -0500
commit     de73b6b1bd7480301c8e8fbe58184448b1757945 (patch)
tree       9aca0e26cfa96e18e1d353777349ee30cc25718d
parent     475c32d0a1b836fc3798ea96cc334a4bfffec2cf (diff)
xtensa: avoid mmap cache aliasing
Provide an arch_get_unmapped_area function that aligns shared memory
mapping addresses to the bigger of the page size or the cache way size.
That guarantees that corresponding virtual addresses of shared mappings
are cached by the same cache sets.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
-rw-r--r--  arch/xtensa/include/asm/pgtable.h  |  4
-rw-r--r--  arch/xtensa/kernel/syscall.c       | 41
2 files changed, 45 insertions(+), 0 deletions(-)
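To see what the new COLOUR_ALIGN macro computes, here is a minimal standalone sketch of its arithmetic. The SHMLBA and PAGE_SHIFT values below are illustrative assumptions (per the commit message, the real SHMLBA is the bigger of the page size and the cache way size); only the macro body is taken from the patch.

/* sketch.c - standalone illustration of COLOUR_ALIGN (example values) */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define SHMLBA		0x20000UL		/* assumed 128 KiB way size */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x2000f000UL;	/* arbitrary search start */
	unsigned long pgoff = 3;		/* file offset, in pages */

	/* Round addr up to an SHMLBA boundary, then add the colour
	 * (cache index) bits implied by the file offset.
	 */
	printf("aligned: %#lx\n", COLOUR_ALIGN(addr, pgoff));	/* 0x20023000 */
	return 0;
}

Rounding addr up to an SHMLBA boundary and then adding the offset's colour bits makes addr and (pgoff << PAGE_SHIFT) agree modulo SHMLBA, so every shared mapping of the same page falls into the same cache sets.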
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index c90ea5bfa1b4..d7546c94da52 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -410,6 +410,10 @@ typedef pte_t *pte_addr_t;
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #include <asm-generic/pgtable.h>
 
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 54fa8425cee2..5d3f7a119ed1 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
 #include <uapi/asm/unistd.h>
 };
 
+#define COLOUR_ALIGN(addr, pgoff) \
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 {
 	unsigned long ret;
@@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
 {
 	return sys_fadvise64_64(fd, offset, len, advice);
 }
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct vm_area_struct *vmm;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+	if (!addr)
+		addr = TASK_UNMAPPED_BASE;
+
+	if (flags & MAP_SHARED)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+		/* At this point: (!vmm || addr < vmm->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return -ENOMEM;
+		if (!vmm || addr + len <= vmm->vm_start)
+			return addr;
+		addr = vmm->vm_end;
+		if (flags & MAP_SHARED)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
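If the new MAP_FIXED behaviour needs checking from userspace, a hypothetical test along these lines could exercise it: on a configuration where the cache way size exceeds the page size, a shared fixed mapping whose address colour disagrees with its file offset should now be rejected with EINVAL. The file path, hint address, and the assumption that 0x20000000 is SHMLBA-aligned are illustrative, not taken from the patch.

/* fixed_test.c - hypothetical userspace probe of the MAP_FIXED check */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/shmtest", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 4 * page) < 0)
		return 1;

	/* File offset of one page but an (assumed) SHMLBA-aligned address:
	 * the colours disagree, so the kernel should return EINVAL when
	 * SHMLBA is bigger than the page size.
	 */
	void *hint = (void *)0x20000000UL;
	void *p = mmap(hint, page, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_FIXED, fd, page);

	if (p == MAP_FAILED)
		printf("mmap rejected as expected: %s\n", strerror(errno));
	else
		printf("mmap succeeded at %p\n", p);

	close(fd);
	return 0;
}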