author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
commit    608ff1a210ab0e8b969399039bf8e18693605910 (patch)
tree      faea7bb1764461c73d0953089bd5439d91733a03 /arch/sh
parent    414a6750e59b0b687034764c464e9ddecac0f7a6 (diff)
parent    74d42d8fe146e870c52bde3b1c692f86cc8ff844 (diff)
Merge branch 'akpm' (Andrew's patchbomb)
Merge misc updates from Andrew Morton:
 "About half of most of MM.  Going very early this time due to
  uncertainty over the coreautounifiednumasched things.  I'll send the
  other half of most of MM tomorrow.  The rest of MM awaits a slab
  merge from Pekka."

* emailed patches from Andrew Morton: (71 commits)
  memory_hotplug: ensure every online node has NORMAL memory
  memory_hotplug: handle empty zone when online_movable/online_kernel
  mm, memory-hotplug: dynamic configure movable memory and portion memory
  drivers/base/node.c: cleanup node_state_attr[]
  bootmem: fix wrong call parameter for free_bootmem()
  avr32, kconfig: remove HAVE_ARCH_BOOTMEM
  mm: cma: remove watermark hacks
  mm: cma: skip watermarks check for already isolated blocks in split_free_page()
  mm, oom: fix race when specifying a thread as the oom origin
  mm, oom: change type of oom_score_adj to short
  mm: cleanup register_node()
  mm, mempolicy: remove duplicate code
  mm/vmscan.c: try_to_freeze() returns boolean
  mm: introduce putback_movable_pages()
  virtio_balloon: introduce migration primitives to balloon pages
  mm: introduce compaction and migration for ballooned pages
  mm: introduce a common interface for balloon pages mobility
  mm: redefine address_space.assoc_mapping
  mm: adjust address_space_operations.migratepage() return code
  arch/sparc/kernel/sys_sparc_64.c: s/COLOUR/COLOR/
  ...
Diffstat (limited to 'arch/sh')
 -rw-r--r--  arch/sh/mm/mmap.c  137
 1 file changed, 23 insertions(+), 114 deletions(-)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 80bf494ddbcb..6777177807c2 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -30,25 +30,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
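The COLOUR_ALIGN_DOWN() helper deleted above, like the COLOUR_ALIGN() kept at the top of the hunk, exists to place shared mappings on matching D-cache colours so aliases hit the same cache lines. The arithmetic is small enough to model in user space; the sketch below is illustrative only, assuming 4 KiB pages and a made-up 16 KiB alias window (on sh, shm_align_mask is derived from the actual cache geometry at boot):

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12UL
	#define SHM_ALIGN_MASK	(0x4000UL - 1)	/* assumed alias window */

	/* Round addr up to the next boundary sharing pgoff's cache colour. */
	static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
	{
		unsigned long base = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;
		unsigned long off = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

		return base + off;
	}

	int main(void)
	{
		unsigned long addr = colour_align(0x10001000UL, 3);

		/* Result is >= the request and congruent to pgoff's colour. */
		assert(addr >= 0x10001000UL);
		assert((addr & SHM_ALIGN_MASK) ==
		       ((3UL << PAGE_SHIFT) & SHM_ALIGN_MASK));
		printf("colour-aligned address: %#lx\n", addr);
		return 0;
	}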
@@ -79,47 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
-	}
-
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
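With the open-coded first-fit loop gone, the placement policy is fully described by the vm_unmapped_area_info fields. A toy user-space model of what vm_unmapped_area() does for info.flags == 0 is shown below: walk free gaps bottom-up, bump each candidate to the requested colour, take the first fit. The gap list and all names here are hypothetical; the real implementation in mm/mmap.c walks the VMA rbtree using cached per-subtree gap sizes, but it uses the same alignment bump:

	#include <stdio.h>

	struct gap { unsigned long start, end; };	/* [start, end) free */

	static unsigned long toy_unmapped_area(const struct gap *gaps, int n,
					       unsigned long length,
					       unsigned long align_mask,
					       unsigned long align_offset)
	{
		for (int i = 0; i < n; i++) {
			unsigned long addr = gaps[i].start;

			/* Bump the gap start to the required colour. */
			addr += (align_offset - addr) & align_mask;
			if (addr + length <= gaps[i].end)
				return addr;	/* first fit wins */
		}
		return -1UL;			/* stand-in for -ENOMEM */
	}

	int main(void)
	{
		const struct gap gaps[] = {
			{ 0x10000000, 0x10002000 },	/* too small once aligned */
			{ 0x20000000, 0x20010000 },
		};
		unsigned long addr = toy_unmapped_area(gaps, 2, 0x4000,
						       0x3000, 0x1000);

		printf("placed at %#lx\n", addr);	/* 0x20001000 */
		return 0;
	}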
@@ -131,6 +85,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -162,73 +117,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_colour_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
-
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_colour_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
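The bottom-up fallback above hinges on a calling convention of vm_unmapped_area(): it returns either a page-aligned address or a negative errno cast to unsigned long, so a non-zero result from addr & ~PAGE_MASK can only mean failure. A stand-alone check of that idiom, assuming 4 KiB pages:

	#include <assert.h>
	#include <errno.h>
	#include <stdio.h>

	#define PAGE_MASK	(~0xFFFUL)

	int main(void)
	{
		/* Successful results are page-aligned: low bits clear. */
		unsigned long good = 0x20001000UL;
		/* Errors are small negatives cast to unsigned long:
		 * -ENOMEM == 0xff...f4, never page-aligned. */
		unsigned long bad = (unsigned long)-ENOMEM;

		assert((good & ~PAGE_MASK) == 0);
		assert((bad & ~PAGE_MASK) != 0);
		printf("-ENOMEM as an address: %#lx\n", bad);
		return 0;
	}

This is why the new code can assert VM_BUG_ON(addr != -ENOMEM) inside that branch and then simply retry the same request bottom-up over the full TASK_UNMAPPED_BASE..TASK_SIZE range.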