diff options
| -rw-r--r-- | arch/xtensa/include/asm/pgtable.h | 4 | ||||
| -rw-r--r-- | arch/xtensa/kernel/syscall.c | 41 |
2 files changed, 45 insertions, 0 deletions
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index c90ea5bfa1b4..d7546c94da52 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h | |||
| @@ -410,6 +410,10 @@ typedef pte_t *pte_addr_t; | |||
| 410 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 410 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
| 411 | #define __HAVE_ARCH_PTEP_MKDIRTY | 411 | #define __HAVE_ARCH_PTEP_MKDIRTY |
| 412 | #define __HAVE_ARCH_PTE_SAME | 412 | #define __HAVE_ARCH_PTE_SAME |
| 413 | /* We provide our own get_unmapped_area to cope with | ||
| 414 | * SHM area cache aliasing for userland. | ||
| 415 | */ | ||
| 416 | #define HAVE_ARCH_UNMAPPED_AREA | ||
| 413 | 417 | ||
| 414 | #include <asm-generic/pgtable.h> | 418 | #include <asm-generic/pgtable.h> |
| 415 | 419 | ||
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 54fa8425cee2..5d3f7a119ed1 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c | |||
| @@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= { | |||
| 36 | #include <uapi/asm/unistd.h> | 36 | #include <uapi/asm/unistd.h> |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
/* Round "addr" up to the next SHMLBA boundary, then add the cache
 * colour of the file offset (the sub-SHMLBA bits of pgoff in bytes).
 * Shared mappings placed at such addresses always map a given file
 * page at the same colour, avoiding aliasing in the virtually-indexed
 * cache.  Both arguments are evaluated exactly once.
 */
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
| 39 | asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) | 43 | asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) |
| 40 | { | 44 | { |
| 41 | unsigned long ret; | 45 | unsigned long ret; |
| @@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice, | |||
| 52 | { | 56 | { |
| 53 | return sys_fadvise64_64(fd, offset, len, advice); | 57 | return sys_fadvise64_64(fd, offset, len, advice); |
| 54 | } | 58 | } |
| 59 | |||
| 60 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
| 61 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
| 62 | { | ||
| 63 | struct vm_area_struct *vmm; | ||
| 64 | |||
| 65 | if (flags & MAP_FIXED) { | ||
| 66 | /* We do not accept a shared mapping if it would violate | ||
| 67 | * cache aliasing constraints. | ||
| 68 | */ | ||
| 69 | if ((flags & MAP_SHARED) && | ||
| 70 | ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) | ||
| 71 | return -EINVAL; | ||
| 72 | return addr; | ||
| 73 | } | ||
| 74 | |||
| 75 | if (len > TASK_SIZE) | ||
| 76 | return -ENOMEM; | ||
| 77 | if (!addr) | ||
| 78 | addr = TASK_UNMAPPED_BASE; | ||
| 79 | |||
| 80 | if (flags & MAP_SHARED) | ||
| 81 | addr = COLOUR_ALIGN(addr, pgoff); | ||
| 82 | else | ||
| 83 | addr = PAGE_ALIGN(addr); | ||
| 84 | |||
| 85 | for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { | ||
| 86 | /* At this point: (!vmm || addr < vmm->vm_end). */ | ||
| 87 | if (TASK_SIZE - len < addr) | ||
| 88 | return -ENOMEM; | ||
| 89 | if (!vmm || addr + len <= vmm->vm_start) | ||
| 90 | return addr; | ||
| 91 | addr = vmm->vm_end; | ||
| 92 | if (flags & MAP_SHARED) | ||
| 93 | addr = COLOUR_ALIGN(addr, pgoff); | ||
| 94 | } | ||
| 95 | } | ||
