Diffstat (limited to 'arch/sh/kernel/sys_sh.c')
 -rw-r--r--  arch/sh/kernel/sys_sh.c | 56
 1 files changed, 35 insertions, 21 deletions
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 917b2f32f260..b68ff705f067 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,7 +21,8 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
-
+#include <linux/module.h>
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
 
@@ -44,11 +45,16 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
 	return error;
 }
 
-#if defined(HAVE_ARCH_UNMAPPED_AREA)
+unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
+
+EXPORT_SYMBOL(shm_align_mask);
+
 /*
- * To avoid cache alias, we map the shard page with same color.
+ * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))
+#define COLOUR_ALIGN(addr, pgoff)				\
+	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
+	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
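Note on the new macro above: COLOUR_ALIGN(addr, pgoff) first rounds addr up to the aliasing boundary and then adds the colour of the file offset, so every mapping of a given page offset ends up with the same low bits within the aliasing window. The user-space sketch below only models that arithmetic for illustration; the mask value and the colour_align() helper name are assumptions, not taken from the kernel sources.

/* Minimal user-space model of the new COLOUR_ALIGN(addr, pgoff) arithmetic.
 * The mask value is an assumed example (16KB aliasing distance); the kernel
 * derives shm_align_mask from the actual cache configuration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
static unsigned long shm_align_mask = 0x3fff;	/* assumed: 16KB - 1 */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	/* Round addr up to the aliasing boundary, then offset it so the
	 * mapping's colour matches the colour of the file offset (pgoff).
	 */
	return ((addr + shm_align_mask) & ~shm_align_mask) +
	       ((pgoff << PAGE_SHIFT) & shm_align_mask);
}

int main(void)
{
	/* Two different hint addresses, same file offset: both results
	 * share the same low bits within the aliasing window (0x3000).
	 */
	printf("%#lx\n", colour_align(0x10001234UL, 3));	/* 0x10007000 */
	printf("%#lx\n", colour_align(0x20004000UL, 3));	/* 0x20007000 */
	return 0;
}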
@@ -56,43 +62,52 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
+	int do_colour_align;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
 			return -EINVAL;
 		return addr;
 	}
 
-	if (len > TASK_SIZE)
+	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
 
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
 	if (addr) {
-		if (flags & MAP_PRIVATE)
-			addr = PAGE_ALIGN(addr);
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
 		else
-			addr = COLOUR_ALIGN(addr);
+			addr = PAGE_ALIGN(addr);
+
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (len <= mm->cached_hole_size) {
+
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
 		mm->cached_hole_size = 0;
-		mm->free_area_cache = TASK_UNMAPPED_BASE;
+		start_addr = addr = TASK_UNMAPPED_BASE;
 	}
-	if (flags & MAP_PRIVATE)
-		addr = PAGE_ALIGN(mm->free_area_cache);
-	else
-		addr = COLOUR_ALIGN(mm->free_area_cache);
-	start_addr = addr;
 
 full_search:
+	if (do_colour_align)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(mm->free_area_cache);
+
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 		/* At this point: (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
+		if (unlikely(TASK_SIZE - len < addr)) {
 			/*
 			 * Start a new search - just in case we missed
 			 * some holes.
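The hunk above restructures the hint-address path: colour alignment is now wanted for any file-backed or MAP_SHARED mapping (do_colour_align), the supplied hint is aligned accordingly, and the free_area_cache/cached_hole_size bookkeeping is folded into a single if/else before full_search. A simplified stand-alone model of the alignment decision is sketched below; the helper names, the mask value and the locally defined flag constant are assumptions for illustration only, not kernel interfaces.

/* Simplified model of the hint-alignment decision in the reworked
 * arch_get_unmapped_area(). Names and constants here are assumed for
 * illustration and are not taken from the kernel sources.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MAP_SHARED	0x01	/* locally defined; matches the usual ABI value */

static unsigned long shm_align_mask = 0x3fff;	/* assumed: 16KB aliasing */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + shm_align_mask) & ~shm_align_mask) +
	       ((pgoff << PAGE_SHIFT) & shm_align_mask);
}

/* Mirrors the "if (addr)" branch: colour alignment is applied whenever the
 * mapping is file-backed or shared, otherwise page alignment is enough.
 */
static unsigned long align_hint(unsigned long addr, unsigned long pgoff,
				bool file_backed, int flags)
{
	bool do_colour_align = file_backed || (flags & MAP_SHARED);

	return do_colour_align ? colour_align(addr, pgoff) : PAGE_ALIGN(addr);
}

int main(void)
{
	printf("shared/file hint: %#lx\n", align_hint(0x10001234UL, 3, true, MAP_SHARED));
	printf("private anon:     %#lx\n", align_hint(0x10001234UL, 3, false, 0));
	return 0;
}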
@@ -104,7 +119,7 @@ full_search:
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (likely(!vma || addr + len <= vma->vm_start)) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
@@ -115,11 +130,10 @@ full_search:
 			mm->cached_hole_size = vma->vm_start - addr;
 
 		addr = vma->vm_end;
-		if (!(flags & MAP_PRIVATE))
-			addr = COLOUR_ALIGN(addr);
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
 	}
 }
-#endif
 
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
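The practical effect of the change can be observed from user space: two MAP_SHARED mappings of the same file offset should come back at addresses with the same colour, i.e. the same offset within the SHMLBA window. The program below is one way to check that; it uses only standard mmap(2) interfaces, the temporary file path is arbitrary, and error handling is kept minimal.

/* Demonstration sketch: map the same file offset twice with MAP_SHARED and
 * compare the cache colour (address modulo SHMLBA) of the two results.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/shm.h>		/* SHMLBA */

int main(void)
{
	char path[] = "/tmp/colour-testXXXXXX";
	int fd = mkstemp(path);
	long page = sysconf(_SC_PAGESIZE);

	if (fd < 0 || ftruncate(fd, page) < 0) {
		perror("setup");
		return EXIT_FAILURE;
	}

	void *a = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (a == MAP_FAILED || b == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* On a cache with aliasing, both colours should match. */
	printf("a %% SHMLBA = %#lx\n", (unsigned long)a & (SHMLBA - 1));
	printf("b %% SHMLBA = %#lx\n", (unsigned long)b & (SHMLBA - 1));

	munmap(a, page);
	munmap(b, page);
	close(fd);
	unlink(path);
	return EXIT_SUCCESS;
}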