path: root/mm/mmap.c
author	Ingo Molnar <mingo@kernel.org>	2012-04-14 07:18:27 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-04-14 07:19:04 -0400
commit	6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch)
tree	021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /mm/mmap.c
parent	682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff)
parent	a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff)
Merge branch 'perf/core' into perf/uprobes
Merge in latest upstream (and the latest perf development tree), to prepare for tooling changes, and also to pick up v3.4 MM changes that the uprobes code needs to take care of.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	90
1 file changed, 58 insertions(+), 32 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 7c112fbca405..b17a39f31a5e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -453,9 +453,8 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Helper for vma_adjust in the split_vma insert case:
- * insert vm structure into list and rbtree and anon_vma,
- * but it has already been inserted into prio_tree earlier.
+ * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
+ * mm's list and rbtree. It has already been inserted into the prio_tree.
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
@@ -954,6 +953,19 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 #endif /* CONFIG_PROC_FS */
 
 /*
+ * If a hint addr is less than mmap_min_addr change hint to be as
+ * low as possible but still greater than mmap_min_addr
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+	hint &= PAGE_MASK;
+	if (((void *)hint != NULL) &&
+	    (hint < mmap_min_addr))
+		return PAGE_ALIGN(mmap_min_addr);
+	return hint;
+}
+
+/*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
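The round_hint_to_min() helper added in this hunk clamps a non-NULL, too-low mmap address hint up to mmap_min_addr. A minimal userspace sketch of the same logic, assuming 4 KiB pages and a hypothetical mmap_min_addr of 0x10000 (both values are illustrative, not taken from the patch):

#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed 4 KiB pages */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long mmap_min_addr = 0x10000;	/* hypothetical vm.mmap_min_addr */

/* Same shape as the kernel helper: a NULL hint (0) is left alone so the
 * allocator picks an address; a low non-NULL hint is rounded up. */
static unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (hint != 0 && hint < mmap_min_addr)
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

int main(void)
{
	printf("%lx\n", round_hint_to_min(0x0));	/* 0: let the kernel choose */
	printf("%lx\n", round_hint_to_min(0x2345));	/* below min -> 0x10000 */
	printf("%lx\n", round_hint_to_min(0x50000));	/* above min -> 0x50000 */
	return 0;
}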
@@ -1117,9 +1129,9 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		 * A dummy user value is used because we are not locking
 		 * memory so no accounting is necessary
 		 */
-		len = ALIGN(len, huge_page_size(&default_hstate));
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-						&user, HUGETLB_ANONHUGE_INODE);
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+						VM_NORESERVE, &user,
+						HUGETLB_ANONHUGE_INODE);
 		if (IS_ERR(file))
 			return PTR_ERR(file);
 	}
@@ -1253,7 +1265,7 @@ munmap_back:
 	 */
 	if (accountable_mapping(file, vm_flags)) {
 		charged = len >> PAGE_SHIFT;
-		if (security_vm_enough_memory(charged))
+		if (security_vm_enough_memory_mm(mm, charged))
 			return -ENOMEM;
 		vm_flags |= VM_ACCOUNT;
 	}
@@ -1284,8 +1296,9 @@ munmap_back:
 	vma->vm_pgoff = pgoff;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
+	error = -EINVAL;	/* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
 	if (file) {
-		error = -EINVAL;
 		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
 			goto free_vma;
 		if (vm_flags & VM_DENYWRITE) {
@@ -1311,6 +1324,8 @@ munmap_back:
 		pgoff = vma->vm_pgoff;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
+		if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+			goto free_vma;
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
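The two hunks above move the VM_GROWSDOWN|VM_GROWSUP rejection so that it also covers shared anonymous mappings, which previously fell through to shmem_zero_setup(). A small userspace probe of that behaviour might look like the sketch below; the availability of MAP_GROWSDOWN and the exact errno depend on the architecture and kernel version, so treat it as illustrative only:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Shared anonymous mapping combined with a growable flag: with the
	 * change above, mmap() is expected to fail with EINVAL instead of
	 * creating the vma. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap failed: %s\n", strerror(errno));
	else
		printf("mmap unexpectedly succeeded at %p\n", p);
	return 0;
}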
@@ -1446,10 +1461,8 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 	/*
 	 * Is this a new hole at the lowest possible address?
 	 */
-	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
 		mm->free_area_cache = addr;
-		mm->cached_hole_size = ~0UL;
-	}
 }
 
 /*
@@ -1464,7 +1477,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0;
+	unsigned long addr = addr0, start_addr;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -1488,22 +1501,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		mm->free_area_cache = mm->mmap_base;
 	}
 
+try_again:
 	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-
-	/* make sure it can fit in the remaining address space */
-	if (addr > len) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-	}
+	start_addr = addr = mm->free_area_cache;
 
-	if (mm->mmap_base < len)
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
+	if (addr < len)
+		goto fail;
 
+	addr -= len;
 	do {
 		/*
 		 * Lookup failure means no vma is above this address,
@@ -1523,7 +1528,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = vma->vm_start-len;
 	} while (len < vma->vm_start);
 
-bottomup:
+fail:
+	/*
+	 * if hint left us with no space for the requested
+	 * mapping then try again:
+	 *
+	 * Note: this is different with the case of bottomup
+	 * which does the fully line-search, but we use find_vma
+	 * here that causes some holes skipped.
+	 */
+	if (start_addr != mm->mmap_base) {
+		mm->free_area_cache = mm->mmap_base;
+		mm->cached_hole_size = 0;
+		goto try_again;
+	}
+
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
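The try_again/fail labels above change the failure path of the top-down search: when the walk started from a cached hint (free_area_cache) rather than from mm->mmap_base, the cache is reset and the search is retried from the top before falling back to the bottom-up allocator. A self-contained toy model of that control flow, with the vma walk replaced by a stub (all names and addresses are illustrative, not taken from the patch):

#include <stdbool.h>
#include <stdio.h>

/*
 * search_from() stands in for the top-down vma walk, "cache" for
 * mm->free_area_cache and "top" for mm->mmap_base.
 */
static bool search_from(unsigned long start, unsigned long len,
			unsigned long *out)
{
	/* Pretend the only free hole sits directly below 0xb0000000. */
	const unsigned long top_hole = 0xb0000000UL;

	if (start >= top_hole && len <= 0x100000) {
		*out = top_hole - len;
		return true;
	}
	return false;
}

static unsigned long get_area_topdown(unsigned long *cache, unsigned long top,
				      unsigned long len)
{
	unsigned long start = *cache;
	unsigned long addr;

try_again:
	if (search_from(start, len, &addr)) {
		*cache = addr;		/* remember the hole for next time */
		return addr;
	}
	if (start != top) {		/* the cached hint was too low ...  */
		start = *cache = top;	/* ... so reset it and retry from   */
		goto try_again;		/* the very top of the range        */
	}
	return 0;			/* caller falls back to bottom-up   */
}

int main(void)
{
	unsigned long cache = 0x40000000UL;	/* stale, too-low hint */
	unsigned long addr;

	addr = get_area_topdown(&cache, 0xb0000000UL, 0x10000);
	printf("got %#lx, cache now %#lx\n", addr, cache);
	return 0;
}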
@@ -1628,7 +1647,6 @@ EXPORT_SYMBOL(find_vma);
 
 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
  */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1637,7 +1655,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
 	struct vm_area_struct *vma;
 
 	vma = find_vma(mm, addr);
-	*pprev = vma ? vma->vm_prev : NULL;
+	if (vma) {
+		*pprev = vma->vm_prev;
+	} else {
+		struct rb_node *rb_node = mm->mm_rb.rb_node;
+		*pprev = NULL;
+		while (rb_node) {
+			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+			rb_node = rb_node->rb_right;
+		}
+	}
 	return vma;
 }
 
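The new find_vma_prev() above handles the case where find_vma() returns NULL, i.e. addr lies above every vma: the previous vma is then the highest one, found by walking the rbtree to its rightmost node. A small sketch of that rightmost walk on a plain (unbalanced) search tree, with hypothetical userspace types standing in for rb_node and vm_area_struct:

#include <stdio.h>

/* Minimal stand-in for the rb_node/vm_area_struct pair, only to show the
 * "walk right until NULL" step used above; no balancing, illustrative only. */
struct node {
	unsigned long vm_start;
	struct node *left, *right;
};

/* Rightmost node of a tree ordered by vm_start == the vma with the highest
 * start address, i.e. the "previous" vma when addr lies above every vma. */
static struct node *rightmost(struct node *root)
{
	struct node *prev = NULL;

	while (root) {
		prev = root;
		root = root->right;
	}
	return prev;
}

int main(void)
{
	struct node a = { 0x1000, NULL, NULL };
	struct node c = { 0x3000, NULL, NULL };
	struct node b = { 0x2000, &a, &c };	/* root */

	printf("highest vma starts at %#lx\n", rightmost(&b)->vm_start);
	return 0;
}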
@@ -2192,7 +2219,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
-	if (security_vm_enough_memory(len >> PAGE_SHIFT))
+	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
 	/* Can we just expand an old private anonymous mapping? */
@@ -2236,7 +2263,6 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
-	unsigned long end;
 
 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);
@@ -2261,11 +2287,11 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
-	tlb_finish_mmu(&tlb, 0, end);
+	tlb_finish_mmu(&tlb, 0, -1);
 
 	/*
 	 * Walk the list again, actually closing and freeing it,