author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 12:04:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 12:04:48 -0400
commit		95211279c5ad00a317c98221d7e4365e02f20836 (patch)
tree		2ddc8625378d2915b8c96392f3cf6663b705ed55 /mm/mmap.c
parent		5375871d432ae9fc581014ac117b96aaee3cd0c7 (diff)
parent		12724850e8064f64b6223d26d78c0597c742c65a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge first batch of patches from Andrew Morton:
 "A few misc things and all the MM queue"

* emailed from Andrew Morton <akpm@linux-foundation.org>: (92 commits)
  memcg: avoid THP split in task migration
  thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE
  memcg: clean up existing move charge code
  mm/memcontrol.c: remove unnecessary 'break' in mem_cgroup_read()
  mm/memcontrol.c: remove redundant BUG_ON() in mem_cgroup_usage_unregister_event()
  mm/memcontrol.c: s/stealed/stolen/
  memcg: fix performance of mem_cgroup_begin_update_page_stat()
  memcg: remove PCG_FILE_MAPPED
  memcg: use new logic for page stat accounting
  memcg: remove PCG_MOVE_LOCK flag from page_cgroup
  memcg: simplify move_account() check
  memcg: remove EXPORT_SYMBOL(mem_cgroup_update_page_stat)
  memcg: kill dead prev_priority stubs
  memcg: remove PCG_CACHE page_cgroup flag
  memcg: let css_get_next() rely upon rcu_read_lock()
  cgroup: revert ss_id_lock to spinlock
  idr: make idr_get_next() good for rcu_read_lock()
  memcg: remove unnecessary thp check in page stat accounting
  memcg: remove redundant returns
  memcg: enum lru_list lru
  ...
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	51
1 file changed, 27 insertions(+), 24 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 6f3766b57803..a7bf6a31c9f6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -451,9 +451,8 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Helper for vma_adjust in the split_vma insert case:
- * insert vm structure into list and rbtree and anon_vma,
- * but it has already been inserted into prio_tree earlier.
+ * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
+ * mm's list and rbtree. It has already been inserted into the prio_tree.
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
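Background for the comment rewrite above: a vma is linked into the mm's address-ordered list, the mm's rbtree, and (for file mappings of this era) a prio_tree. A minimal sketch of the list half of such an insert, using invented toy types rather than the kernel's structures:

struct toy_vma {
	unsigned long vm_start, vm_end;
	struct toy_vma *vm_next;
};

/* Walk the address-ordered singly linked list until the next vma
 * starts above the new one, then splice the new vma in.  The real
 * __insert_vm_struct() also links the vma into the mm's rbtree. */
static void toy_insert_vma(struct toy_vma **head, struct toy_vma *vma)
{
	struct toy_vma **link = head;

	while (*link && (*link)->vm_start < vma->vm_start)
		link = &(*link)->vm_next;

	vma->vm_next = *link;
	*link = vma;
}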
@@ -1112,9 +1111,9 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		 * A dummy user value is used because we are not locking
 		 * memory so no accounting is necessary
 		 */
-		len = ALIGN(len, huge_page_size(&default_hstate));
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-						&user, HUGETLB_ANONHUGE_INODE);
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+						VM_NORESERVE, &user,
+						HUGETLB_ANONHUGE_INODE);
 		if (IS_ERR(file))
 			return PTR_ERR(file);
 	}
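The deleted line shows the old call site rounding len to the huge page size itself; after the change, addr is passed down so hugetlb_file_setup() can take over the sizing. For reference, the rounding in question is ordinary power-of-two alignment; a self-contained user-space sketch (ALIGN_UP and the 2 MiB page size are assumptions for illustration, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t hpage = 2UL << 20;	/* assume 2 MiB huge pages */
	size_t len = 3UL << 20;		/* request 3 MiB */

	printf("aligned len = %zu MiB\n", ALIGN_UP(len, hpage) >> 20);
	return 0;			/* prints: aligned len = 4 MiB */
}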
@@ -1439,10 +1438,8 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 	/*
 	 * Is this a new hole at the lowest possible address?
 	 */
-	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
 		mm->free_area_cache = addr;
-		mm->cached_hole_size = ~0UL;
-	}
 }
 
 /*
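The hunk above keeps updating the free_area_cache hint on unmap but no longer invalidates cached_hole_size. As a rough model of the hint itself (simplified types, not the kernel's): a freed range below the cached search start simply becomes the new starting point for the next lookup.

/* Simplified model of the free_area_cache hint. */
struct area_cache {
	unsigned long free_area_cache;	/* next search starts here */
	unsigned long unmapped_base;	/* lowest mmap address */
};

static void note_unmap(struct area_cache *c, unsigned long addr)
{
	/* A new hole at a lower address than the cached hint means
	 * the next search can begin further down. */
	if (addr >= c->unmapped_base && addr < c->free_area_cache)
		c->free_area_cache = addr;
}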
@@ -1457,7 +1454,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0;
+	unsigned long addr = addr0, start_addr;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -1481,22 +1478,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		mm->free_area_cache = mm->mmap_base;
 	}
 
+try_again:
 	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-
-	/* make sure it can fit in the remaining address space */
-	if (addr > len) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-	}
-
-	if (mm->mmap_base < len)
-		goto bottomup;
+	start_addr = addr = mm->free_area_cache;
 
-	addr = mm->mmap_base-len;
+	if (addr < len)
+		goto fail;
 
+	addr -= len;
 	do {
 		/*
 		 * Lookup failure means no vma is above this address,
@@ -1516,7 +1505,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = vma->vm_start-len;
 	} while (len < vma->vm_start);
 
-bottomup:
+fail:
+	/*
+	 * if hint left us with no space for the requested
+	 * mapping then try again:
+	 *
+	 * Note: this is different with the case of bottomup
+	 * which does the fully line-search, but we use find_vma
+	 * here that causes some holes skipped.
+	 */
+	if (start_addr != mm->mmap_base) {
+		mm->free_area_cache = mm->mmap_base;
+		mm->cached_hole_size = 0;
+		goto try_again;
+	}
+
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
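Taken together, the last three hunks make the top-down search start from the cached hint and, when that fails, retry once from mmap_base before falling back to the bottom-up path. A compilable toy model of that control flow; occupied() is a hypothetical stand-in for the find_vma() walk, and the page-sized stepping is a simplification of how the kernel skips below a blocking vma:

#include <stdbool.h>

#define PAGE_SZ	4096UL
#define NO_ADDR	(~0UL)

/* Hypothetical oracle: does [addr, addr + len) overlap a mapping? */
extern bool occupied(unsigned long addr, unsigned long len);

unsigned long topdown_search(unsigned long *cache, unsigned long mmap_base,
			     unsigned long len)
{
	unsigned long start_addr, addr;

try_again:
	start_addr = *cache;
	if (start_addr < len)
		goto fail;		/* hint leaves no room below it */

	addr = start_addr - len;
	for (;;) {
		if (!occupied(addr, len))
			return *cache = addr;	/* cache as the next hint */
		if (addr < PAGE_SZ)
			break;			/* reached the bottom */
		addr -= PAGE_SZ;	/* toy step; the kernel jumps straight
					 * to vma->vm_start - len */
	}
fail:
	/* The cached hint was too low: retry once from the very top,
	 * mirroring the new try_again/fail labels in the hunk above. */
	if (start_addr != mmap_base) {
		*cache = mmap_base;
		goto try_again;
	}
	return NO_ADDR;		/* caller falls back to the bottom-up search */
}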