Diffstat (limited to 'arch/mips/mm/mmap.c')
-rw-r--r--	arch/mips/mm/mmap.c	193
1 files changed, 170 insertions, 23 deletions
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index ae3c20a9556e..9ff5d0fac556 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/module.h>
+#include <linux/personality.h>
 #include <linux/random.h>
 #include <linux/sched.h>
 
@@ -17,21 +18,65 @@ unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 
 EXPORT_SYMBOL(shm_align_mask);
 
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+	unsigned long gap = rlimit(RLIMIT_STACK);
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
+
 #define COLOUR_ALIGN(addr,pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
 
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-	unsigned long len, unsigned long pgoff, unsigned long flags)
+enum mmap_allocation_direction {UP, DOWN};
+
+static unsigned long arch_get_unmapped_area_foo(struct file *filp,
+	unsigned long addr0, unsigned long len, unsigned long pgoff,
+	unsigned long flags, enum mmap_allocation_direction dir)
 {
-	struct vm_area_struct * vmm;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long addr = addr0;
 	int do_color_align;
 
-	if (len > TASK_SIZE)
+	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		/* Even MAP_FIXED mappings must reside within TASK_SIZE. */
+		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
 		if (TASK_SIZE - len < addr)
 			return -EINVAL;
 
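
The colouring helpers above exist because MIPS D-caches can be virtually indexed: a shared mapping must be placed so the low bits of its address match the colour of the file offset, or aliasing corrupts data. A minimal userspace sketch of the arithmetic, with an assumed 16 KiB alias range and 4 KiB pages standing in for the kernel's shm_align_mask and PAGE_SHIFT:

/*
 * Minimal userspace sketch of the two colouring helpers; the 16 KiB
 * alias range and 4 KiB page size are assumptions standing in for the
 * kernel's shm_align_mask and PAGE_SHIFT.
 */
#include <stdio.h>

#define ALIAS_MASK	(0x4000UL - 1)	/* assumed D-cache alias range - 1 */
#define PAGE_SHIFT	12		/* assumed 4 KiB pages */

/* Round addr up to the next alias boundary, then add the colour of
 * the file offset, as the COLOUR_ALIGN macro does. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + ALIAS_MASK) & ~ALIAS_MASK) +
	       ((pgoff << PAGE_SHIFT) & ALIAS_MASK);
}

/* The new downward variant: round down to an alias boundary and
 * apply the offset without ever going above addr. */
static unsigned long colour_align_down(unsigned long addr,
				       unsigned long pgoff)
{
	unsigned long base = addr & ~ALIAS_MASK;
	unsigned long off = (pgoff << PAGE_SHIFT) & ALIAS_MASK;

	return base + off <= addr ? base + off : base - off;
}

int main(void)
{
	unsigned long addr = 0x7f012345UL, pgoff = 3;

	printf("up:   %#lx\n", colour_align(addr, pgoff));	/* 0x7f017000 */
	printf("down: %#lx\n", colour_align_down(addr, pgoff));	/* 0x7f00d000 */
	return 0;
}

COLOUR_ALIGN always rounds up onto the offset's colour; COLOUR_ALIGN_DOWN stays at or below addr, taking base + off when that fits and base - off otherwise, which is what the downward search needs so a candidate address is never pushed above the hole being probed.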
@@ -48,34 +93,130 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	do_color_align = 0;
 	if (filp || (flags & MAP_SHARED))
 		do_color_align = 1;
+
+	/* requesting a specific address */
 	if (addr) {
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vmm = find_vma(current->mm, addr);
+
+		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vmm || addr + len <= vmm->vm_start))
+		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	addr = current->mm->mmap_base;
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
 
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
-			return addr;
-		addr = vmm->vm_end;
+	if (dir == UP) {
+		addr = mm->mmap_base;
+		if (do_color_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+			/* At this point:  (!vma || addr < vma->vm_end). */
+			if (TASK_SIZE - len < addr)
+				return -ENOMEM;
+			if (!vma || addr + len <= vma->vm_start)
+				return addr;
+			addr = vma->vm_end;
+			if (do_color_align)
+				addr = COLOUR_ALIGN(addr, pgoff);
+		}
+	} else {
+		/* check if free_area_cache is useful for us */
+		if (len <= mm->cached_hole_size) {
+			mm->cached_hole_size = 0;
+			mm->free_area_cache = mm->mmap_base;
+		}
+
+		/* either no address requested or can't fit in requested address hole */
+		addr = mm->free_area_cache;
+		if (do_color_align) {
+			unsigned long base =
+				COLOUR_ALIGN_DOWN(addr - len, pgoff);
+
+			addr = base + len;
+		}
+
+		/* make sure it can fit in the remaining address space */
+		if (likely(addr > len)) {
+			vma = find_vma(mm, addr - len);
+			if (!vma || addr <= vma->vm_start) {
+				/* remember the address as a hint for next time */
+				return mm->free_area_cache = addr-len;
+			}
+		}
+
+		if (unlikely(mm->mmap_base < len))
+			goto bottomup;
+
+		addr = mm->mmap_base-len;
 		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+		do {
+			/*
+			 * Lookup failure means no vma is above this address,
+			 * else if new region fits below vma->vm_start,
+			 * return with success:
+			 */
+			vma = find_vma(mm, addr);
+			if (likely(!vma || addr+len <= vma->vm_start)) {
+				/* remember the address as a hint for next time */
+				return mm->free_area_cache = addr;
+			}
+
+			/* remember the largest hole we saw so far */
+			if (addr + mm->cached_hole_size < vma->vm_start)
+				mm->cached_hole_size = vma->vm_start - addr;
+
+			/* try just below the current vma->vm_start */
+			addr = vma->vm_start-len;
+			if (do_color_align)
+				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+		} while (likely(len < vma->vm_start));
+
+bottomup:
+		/*
+		 * A failed mmap() very likely causes application failure,
+		 * so fall back to the bottom-up function here.  This scenario
+		 * can happen with large stack limits and large mmap()
+		 * allocations.
+		 */
+		mm->cached_hole_size = ~0UL;
+		mm->free_area_cache = TASK_UNMAPPED_BASE;
+		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+		/*
+		 * Restore the topdown base:
+		 */
+		mm->free_area_cache = mm->mmap_base;
+		mm->cached_hole_size = ~0UL;
+
+		return addr;
 	}
 }
 
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+	unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	return arch_get_unmapped_area_foo(filp,
+			addr0, len, pgoff, flags, UP);
+}
+
+/*
+ * There is no need to export this but sched.h declares the function as
+ * extern so making it static here results in an error.
+ */
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+	unsigned long addr0, unsigned long len, unsigned long pgoff,
+	unsigned long flags)
+{
+	return arch_get_unmapped_area_foo(filp,
+			addr0, len, pgoff, flags, DOWN);
+}
+
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	unsigned long random_factor = 0UL;
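
The DOWN branch above is the heart of the patch: start just under mm->mmap_base, and on each collision retry immediately below the colliding VMA, keeping free_area_cache as a starting hint and cached_hole_size as the largest gap seen so far. A condensed userspace model of just that downward scan (colouring and the cache hints are deliberately omitted; struct vma, model_find_vma() and the addresses are stand-ins for the kernel's mm state and find_vma()):

/*
 * Userspace model of the downward scan only; struct vma, the address
 * constants and model_find_vma() are stand-ins for the kernel's mm
 * state and find_vma(), and the colouring and free_area_cache logic
 * are left out.
 */
#include <stdio.h>

struct vma {
	unsigned long start, end;	/* [start, end), like vm_start/vm_end */
	struct vma *next;		/* sorted by address, like vm_next */
};

/* First mapping that ends above addr, mimicking find_vma(). */
static struct vma *model_find_vma(struct vma *list, unsigned long addr)
{
	while (list && addr >= list->end)
		list = list->next;
	return list;
}

/* Place len bytes as high as possible below base, walking down. */
static unsigned long topdown_alloc(struct vma *list, unsigned long base,
				   unsigned long len)
{
	unsigned long addr;
	struct vma *vma;

	if (base < len)
		return -1UL;	/* the real code jumps to bottomup here */
	addr = base - len;

	for (;;) {
		vma = model_find_vma(list, addr);

		/* no vma above addr, or the region fits below vma->start */
		if (!vma || addr + len <= vma->start)
			return addr;

		if (vma->start < len)
			return -1UL;	/* out of room: bottomup fallback */
		addr = vma->start - len;	/* retry just below this vma */
	}
}

int main(void)
{
	/* two mappings just under an assumed 0x77000000 base */
	struct vma hi = { 0x76ffc000UL, 0x77000000UL, NULL };
	struct vma lo = { 0x76ff0000UL, 0x76ff8000UL, &hi };

	/* lands in the 16 KiB hole between lo and hi: 0x76ff8000 */
	printf("%#lx\n", topdown_alloc(&lo, 0x77000000UL, 0x4000UL));
	return 0;
}

When the scan runs out of room the real code does not fail outright: it resets the hints, retries bottom-up via arch_get_unmapped_area(), and then restores the top-down base so one oversized request does not permanently lower the hint.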
@@ -89,9 +230,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		random_factor &= 0xffffffful;
 	}
 
-	mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
+	if (mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base(random_factor);
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
 }
 
 static inline unsigned long brk_rnd(void)
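
Taken together, arch_pick_mmap_layout() now mirrors the flexible layout used on other architectures: legacy mode (ADDR_COMPAT_LAYOUT personality, an unlimited stack rlimit, or the legacy_va_layout sysctl) keeps the old bottom-up behaviour from a randomised TASK_UNMAPPED_BASE, while the default places mmap_base just under a stack gap clamped between MIN_GAP and MAX_GAP. A standalone sketch of that placement, with an assumed 2 GiB task size and the stack limit passed in where the kernel reads rlimit(RLIMIT_STACK):

/*
 * Standalone sketch of the mmap_base() placement; TASK_SIZE_MODEL and
 * the stack_limit argument are assumed stand-ins for the kernel's
 * TASK_SIZE and rlimit(RLIMIT_STACK).
 */
#include <stdio.h>

#define PAGE_MASK_MODEL	(~0xfffUL)		/* assumed 4 KiB pages */
#define TASK_SIZE_MODEL	0x80000000UL		/* assumed 2 GiB user space */
#define MIN_GAP		(128 * 1024 * 1024UL)	/* as in the patch */
#define MAX_GAP		(TASK_SIZE_MODEL / 6 * 5)

static unsigned long model_mmap_base(unsigned long stack_limit,
				     unsigned long rnd)
{
	unsigned long gap = stack_limit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;	/* always leave the stack some headroom */
	else if (gap > MAX_GAP)
		gap = MAX_GAP;	/* never let the stack claim most of the space */

	/* round up, like the kernel's PAGE_ALIGN() */
	return (TASK_SIZE_MODEL - gap - rnd + ~PAGE_MASK_MODEL) &
		PAGE_MASK_MODEL;
}

int main(void)
{
	/* a typical 8 MiB stack rlimit and a small random offset;
	 * prints 0x77fee000 */
	printf("%#lx\n", model_mmap_base(8 * 1024 * 1024UL, 0x12000UL));
	return 0;
}

An unlimited stack forces legacy mode precisely because rlimit(RLIMIT_STACK) is then meaningless as a gap, leaving no sound place to put mmap_base below the stack.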