Diffstat (limited to 'arch/sh/mm/mmap.c')
-rw-r--r--  arch/sh/mm/mmap.c | 139
1 file changed, 24 insertions(+), 115 deletions(-)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index afeb710ec5c3..6777177807c2 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -30,25 +30,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
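Background for this hunk: sh CPUs have virtually indexed caches, so a shared mapping must sit at a virtual address whose low bits (its cache "colour") match the colour implied by the file offset. COLOUR_ALIGN rounds a candidate address up to the next correctly coloured address; the COLOUR_ALIGN_DOWN deleted here rounded downward for the top-down allocator. A minimal userspace sketch of that arithmetic follows; SHM_ALIGN_MASK and PAGE_SHIFT are assumed stand-ins for the kernel's shm_align_mask and PAGE_SHIFT, and the test values are arbitrary.

#include <assert.h>

#define PAGE_SHIFT	12
#define SHM_ALIGN_MASK	0x3fffUL	/* e.g. 16K alias window minus 1; assumed */

/* Round addr up to the next address carrying pgoff's colour. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;
	unsigned long off = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

	return base + off;
}

/* Downward variant, mirroring the helper removed above. */
static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = addr & ~SHM_ALIGN_MASK;
	unsigned long off = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

	if (base + off <= addr)
		return base + off;

	return base - off;
}

int main(void)
{
	/* 0x10001000 rounds up past the 0x10004000 window to colour 0x3000. */
	assert(colour_align(0x10001000UL, 3) == 0x10007000UL);
	/* 0x10007000 already carries colour 0x3000, so it comes back unchanged. */
	assert(colour_align_down(0x10007000UL, 3) == 0x10007000UL);
	return 0;
}

Dropping the downward variant is possible because the generic allocator handles both search directions from a single alignment specification, as the following hunks show.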
@@ -79,47 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
-	}
-
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
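The replacement expresses the same placement rule declaratively: info.align_mask keeps only the colour bits above page granularity (PAGE_MASK & shm_align_mask), info.align_offset carries the colour requested by pgoff, and vm_unmapped_area() is expected to return an address congruent to align_offset modulo the alignment. A sketch of that congruence check; colour_ok is a hypothetical helper and the mask values are assumptions, not kernel API:

#include <assert.h>
#include <stdbool.h>

/* Does addr carry the colour requested via align_offset? */
static bool colour_ok(unsigned long addr, unsigned long align_mask,
		      unsigned long align_offset)
{
	return (addr & align_mask) == (align_offset & align_mask);
}

int main(void)
{
	unsigned long align_mask = 0x3000UL;	/* PAGE_MASK & shm_align_mask, assumed */
	unsigned long align_offset = 3UL << 12;	/* pgoff << PAGE_SHIFT */

	assert(colour_ok(0x10003000UL, align_mask, align_offset));
	assert(!colour_ok(0x10002000UL, align_mask, align_offset));
	return 0;
}

Passing align_mask = 0 when no colouring is needed (the !do_colour_align case) makes every page-aligned address acceptable, which is why the ternary in the new code can collapse both cases into one call.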
@@ -131,6 +85,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -162,73 +117,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_colour_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
-
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_colour_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
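The fallback hinges on an error convention: vm_unmapped_area() returns either a page-aligned address or a negative errno cast to unsigned long, so a nonzero addr & ~PAGE_MASK can only mean failure (the VM_BUG_ON records that -ENOMEM is the only errno expected here), after which the function retries bottom-up over TASK_UNMAPPED_BASE..TASK_SIZE. A userspace sketch of that test; the PAGE_MASK value is an assumption:

#include <assert.h>
#include <errno.h>

#define PAGE_MASK (~0xfffUL)	/* 4K pages, assumed */

int main(void)
{
	unsigned long ok = 0x29900000UL;		/* page aligned */
	unsigned long err = (unsigned long)-ENOMEM;	/* 0xff...ff4: low bits set */

	assert((ok & ~PAGE_MASK) == 0);		/* a plausible mapping address */
	assert((err & ~PAGE_MASK) != 0);	/* unmistakably an error value */
	return 0;
}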
@@ -238,7 +147,7 @@ bottomup:
  * You really shouldn't be using read() or write() on /dev/mem. This
  * might go away in the future.
  */
-int valid_phys_addr_range(unsigned long addr, size_t count)
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
 {
 	if (addr < __MEMORY_START)
 		return 0;
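The last hunk widens valid_phys_addr_range()'s address parameter from unsigned long to phys_addr_t. On 32-bit kernels where physical addresses can be wider than a virtual address, phys_addr_t is 64-bit, and an unsigned long parameter would silently truncate the address before the range check. A small illustration of that hazard; the address value is hypothetical and uint32_t stands in for a 32-bit unsigned long:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t phys = 0x100010000ULL;		/* above 4 GiB, hypothetical */
	uint32_t truncated = (uint32_t)phys;	/* what unsigned long would keep */

	assert(truncated == 0x00010000u);	/* high bits silently lost */
	return 0;
}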