author    Steve French <sfrench@us.ibm.com>    2005-06-22 21:26:47 -0400
committer Steve French <sfrench@us.ibm.com>    2005-06-22 21:26:47 -0400
commit    ea0daab4ae4a2f853f06c76961c0ed324fd0804c (patch)
tree      f6fbe2db5772695181b7a7257b05e43343bd8d75 /mm/mmap.c
parent    58aab753de605c14b9878a897e7349c3063afeff (diff)
parent    1bdf7a78c2b21fb94dfe7994dbe89310b18479d2 (diff)
Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--    mm/mmap.c    57
1 file changed, 44 insertions(+), 13 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index de54acd9942f..da3fa90a0aae 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1175,7 +1175,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	    (!vma || addr + len <= vma->vm_start))
 		return addr;
 	}
-	start_addr = addr = mm->free_area_cache;
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		start_addr = addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
+	}
 
 full_search:
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
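The mm/mmap.c side of this merge carries the free-area-cache rework visible in these hunks: mm->free_area_cache gains a companion mm->cached_hole_size, the size of the largest hole the last scan skipped. In the hunk above, the bottom-up allocator only resumes from the cached position when the request is larger than every hole already skipped; otherwise a fitting hole may exist below the cache, so the scan restarts from TASK_UNMAPPED_BASE. A minimal stand-alone sketch of that decision; the struct and the base value are stand-ins, not the kernel's definitions:

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x40000000UL	/* placeholder value */

struct mm_cache {
	unsigned long free_area_cache;	/* where the last search ended */
	unsigned long cached_hole_size;	/* largest hole skipped below it */
};

/*
 * Pick the start address for a bottom-up search of 'len' bytes. If
 * every hole skipped so far is smaller than 'len', the cache is still
 * a valid starting point; otherwise a fitting hole may sit below it,
 * so restart from the base and forget the stale hole size.
 */
static unsigned long search_start(struct mm_cache *mm, unsigned long len)
{
	if (len > mm->cached_hole_size)
		return mm->free_area_cache;
	mm->cached_hole_size = 0;
	return TASK_UNMAPPED_BASE;
}

int main(void)
{
	struct mm_cache mm = {
		.free_area_cache = 0x50000000UL,
		.cached_hole_size = 0x2000UL,	/* 8 KiB was the biggest hole */
	};

	/* 16 KiB request: no skipped hole can fit it, reuse the cache */
	printf("start = %#lx\n", search_start(&mm, 0x4000));	/* 0x50000000 */
	/* 4 KiB request: an 8 KiB hole below the cache might fit, rescan */
	printf("start = %#lx\n", search_start(&mm, 0x1000));	/* 0x40000000 */
	return 0;
}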
@@ -1186,7 +1191,9 @@ full_search:
 		 * some holes.
 		 */
 		if (start_addr != TASK_UNMAPPED_BASE) {
-			start_addr = addr = TASK_UNMAPPED_BASE;
+			addr = TASK_UNMAPPED_BASE;
+			start_addr = addr;
+			mm->cached_hole_size = 0;
 			goto full_search;
 		}
 		return -ENOMEM;
@@ -1198,19 +1205,22 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 	}
 }
 #endif
 
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 {
 	/*
 	 * Is this a new hole at the lowest possible address?
 	 */
-	if (area->vm_start >= TASK_UNMAPPED_BASE &&
-			area->vm_start < area->vm_mm->free_area_cache)
-		area->vm_mm->free_area_cache = area->vm_start;
+	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+		mm->free_area_cache = addr;
+		mm->cached_hole_size = ~0UL;
+	}
 }
 
 /*
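Two pieces of bookkeeping keep the cache honest, both visible in the hunk above: the search loop records the largest gap it walks past, and arch_unmap_area, which now takes the mm and the hole's address directly instead of a vm_area_struct, poisons cached_hole_size with ~0UL when a new low hole appears, since that hole's size is unknown. A sketch of the loop's gap tracking over a toy sorted VMA list, using simplified stand-in types:

#include <stdio.h>

struct vma {
	unsigned long vm_start, vm_end;
	struct vma *vm_next;
};

/*
 * Walk a sorted VMA list from 'addr' looking for a gap of at least
 * 'len' bytes, recording the largest gap passed over in *hole, the
 * same way the hunk above maintains mm->cached_hole_size.
 */
static unsigned long find_gap(struct vma *vma, unsigned long addr,
			      unsigned long len, unsigned long *hole)
{
	for (; vma; vma = vma->vm_next) {
		if (addr + len <= vma->vm_start)
			return addr;			/* this gap fits */
		if (addr + *hole < vma->vm_start)
			*hole = vma->vm_start - addr;	/* biggest gap yet */
		addr = vma->vm_end;			/* skip the VMA */
	}
	return addr;			/* free space after the last VMA */
}

int main(void)
{
	struct vma b = { 0x30000, 0x40000, NULL };
	struct vma a = { 0x10000, 0x20000, &b };
	unsigned long hole = 0;

	/* 0x20000-byte request skips two 0x10000-byte gaps, lands after 'b' */
	printf("addr = %#lx, largest skipped hole = %#lx\n",
	       find_gap(&a, 0x0, 0x20000, &hole), hole);
	return 0;
}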
@@ -1240,6 +1250,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
 	/* either no address requested or can't fit in requested address hole */
 	addr = mm->free_area_cache;
 
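The top-down test above is inverted relative to the bottom-up one: the cache is distrusted when the request is no larger than the biggest hole already seen, because such a hole could still satisfy it, so the search resets to mmap_base. A stand-alone sketch of just this reset, again with invented stand-in names:

#include <stdio.h>

struct mm_cache {
	unsigned long free_area_cache;	/* lowest address handed out so far */
	unsigned long cached_hole_size;	/* largest hole seen so far */
	unsigned long mmap_base;	/* top of the mmap area */
};

/* Return where a top-down search for 'len' bytes should begin. */
static unsigned long topdown_start(struct mm_cache *mm, unsigned long len)
{
	/* a hole at least 'len' bytes may have been skipped: rescan */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}
	return mm->free_area_cache;
}

int main(void)
{
	struct mm_cache mm = {
		.free_area_cache = 0x9f000000UL,
		.cached_hole_size = 0x10000UL,	/* 64 KiB hole was skipped */
		.mmap_base = 0xb0000000UL,
	};

	/* 1 MiB request: larger than any skipped hole, keep the cache */
	printf("start = %#lx\n", topdown_start(&mm, 0x100000)); /* 0x9f000000 */
	/* 4 KiB request: the skipped 64 KiB hole might fit it, restart */
	printf("start = %#lx\n", topdown_start(&mm, 0x1000));   /* 0xb0000000 */
	return 0;
}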
@@ -1251,6 +1267,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return (mm->free_area_cache = addr-len);
 	}
 
+	if (mm->mmap_base < len)
+		goto bottomup;
+
 	addr = mm->mmap_base-len;
 
 	do {
@@ -1264,38 +1283,45 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
 	} while (len < vma->vm_start);
 
+bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 	/*
 	 * Restore the topdown base:
 	 */
 	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
 
 	return addr;
 }
 #endif
 
-void arch_unmap_area_topdown(struct vm_area_struct *area)
+void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 {
 	/*
 	 * Is this a new hole at the highest possible address?
 	 */
-	if (area->vm_end > area->vm_mm->free_area_cache)
-		area->vm_mm->free_area_cache = area->vm_end;
+	if (addr > mm->free_area_cache)
+		mm->free_area_cache = addr;
 
 	/* dont allow allocations above current base */
-	if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base)
-		area->vm_mm->free_area_cache = area->vm_mm->mmap_base;
+	if (mm->free_area_cache > mm->mmap_base)
+		mm->free_area_cache = mm->mmap_base;
 }
 
 unsigned long
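Note the role of ~0UL throughout these hunks: it is a sentinel, not a measured size. Every request length satisfies len <= ~0UL, so writing it into cached_hole_size guarantees the next "is the cache useful" test fails and the search rescans from its base; the unmap hooks and the bottom-up fallback above both use it to invalidate the cache cheaply. A small self-contained check of that property, with invented names:

#include <assert.h>

struct mm_cache {
	unsigned long free_area_cache;
	unsigned long cached_hole_size;
	unsigned long mmap_base;
};

/*
 * Poison the cache the way the hunks above do after an unmap or a
 * bottom-up fallback: remember the new hole's address and set the hole
 * size so large that the next search is forced to restart.
 */
static void cache_invalidate(struct mm_cache *mm, unsigned long addr)
{
	mm->free_area_cache = addr;
	mm->cached_hole_size = ~0UL;	/* any len <= ~0UL holds */
}

int main(void)
{
	struct mm_cache mm = { .mmap_base = 0xb0000000UL };

	cache_invalidate(&mm, 0xa0000000UL);

	/* same test as the top-down hunk: cache distrusted, reset to base */
	if (0x1000UL <= mm.cached_hole_size) {
		mm.cached_hole_size = 0;
		mm.free_area_cache = mm.mmap_base;
	}
	assert(mm.free_area_cache == 0xb0000000UL);
	return 0;
}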
@@ -1595,7 +1621,6 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 	if (area->vm_flags & VM_LOCKED)
 		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
 	vm_stat_unaccount(area);
-	area->vm_mm->unmap_area(area);
 	remove_vm_struct(area);
 }
 
@@ -1649,6 +1674,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct vm_area_struct **insertion_point;
 	struct vm_area_struct *tail_vma = NULL;
+	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	do {
@@ -1659,6 +1685,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	} while (vma && vma->vm_start < end);
 	*insertion_point = vma;
 	tail_vma->vm_next = NULL;
+	if (mm->unmap_area == arch_unmap_area)
+		addr = prev ? prev->vm_end : mm->mmap_base;
+	else
+		addr = vma ? vma->vm_start : mm->mmap_base;
+	mm->unmap_area(mm, addr);
 	mm->mmap_cache = NULL;		/* Kill the cache. */
 }
 
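Because unmap_vma no longer notifies the allocator per VMA (the call deleted in the unmap_vma hunk above), detach_vmas_to_be_unmapped reports the new hole once for the whole detached range, choosing the edge the active policy cares about: the bottom-up hook wants the hole's low edge (the end of the preceding VMA), the top-down hook its high edge (the start of the following VMA). A sketch of that edge selection with simplified stand-in types:

#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

/*
 * Mirror of the selection above: 'prev' is the VMA just below the
 * detached range, 'next' the VMA just above it, either possibly NULL.
 */
static unsigned long new_hole_edge(const struct vma *prev,
				   const struct vma *next,
				   unsigned long mmap_base, int topdown)
{
	if (!topdown)	/* bottom-up cares where the hole begins */
		return prev ? prev->vm_end : mmap_base;
	/* top-down cares where the hole ends */
	return next ? next->vm_start : mmap_base;
}

int main(void)
{
	struct vma below = { 0x10000, 0x20000 };
	struct vma above = { 0x50000, 0x60000 };

	printf("bottom-up hint: %#lx\n",
	       new_hole_edge(&below, &above, 0x40000000UL, 0));	/* 0x20000 */
	printf("top-down hint:  %#lx\n",
	       new_hole_edge(&below, &above, 0x40000000UL, 1));	/* 0x50000 */
	return 0;
}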