author		Michel Lespinasse <walken@google.com>		2012-12-11 19:01:49 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:25 -0500
commit		db4fbfb9523c93583c339e66023506f651c1d54b
tree		8dfc250bb2249feaafe27c11cc4de5a654815833 /mm/mmap.c
parent		e4c6bfd2d79d063017ab19a18915f0bc759f32d9
mm: vm_unmapped_area() lookup function
Implement vm_unmapped_area() using the rb_subtree_gap and highest_vm_end
information to look up suitable virtual address space gaps.
struct vm_unmapped_area_info is used to define the desired allocation
request (see the sketch after the sign-off tags):
- lowest or highest possible address matching the remaining constraints
- desired gap length
- low/high address limits that the gap must fit into
- alignment mask and offset
Also update the generic arch_get_unmapped_area[_topdown] functions to make
use of vm_unmapped_area() instead of implementing a brute force search.
[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
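
The request descriptor and its dispatch wrapper are added to include/linux/mm.h,
which this mm/mmap.c-limited view does not show. As a sketch of what the same
patch defines there (see the full patch for the authoritative version), they
look roughly like:

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
        unsigned long flags;            /* 0 or VM_UNMAPPED_AREA_TOPDOWN */
        unsigned long length;           /* desired mapping length */
        unsigned long low_limit;        /* lowest acceptable gap address */
        unsigned long high_limit;       /* highest acceptable gap end */
        unsigned long align_mask;       /* address bits that must match align_offset */
        unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/* Dispatch to the bottom-up or top-down gap search */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
        if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
                return unmapped_area(info);
        else
                return unmapped_area_topdown(info);
}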
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	312
1 file changed, 222 insertions(+), 90 deletions(-)
@@ -1539,6 +1539,206 @@ unacct_error:
 	return error;
 }
 
+unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+{
+	/*
+	 * We implement the search by looking for an rbtree node that
+	 * immediately follows a suitable gap. That is,
+	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
+	 * - gap_end   = vma->vm_start        >= info->low_limit  + length;
+	 * - gap_end - gap_start >= length
+	 */
+
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+	/* Adjust search length to account for worst case alignment overhead */
+	length = info->length + info->align_mask;
+	if (length < info->length)
+		return -ENOMEM;
+
+	/* Adjust search limits by the desired length */
+	if (info->high_limit < length)
+		return -ENOMEM;
+	high_limit = info->high_limit - length;
+
+	if (info->low_limit > high_limit)
+		return -ENOMEM;
+	low_limit = info->low_limit + length;
+
+	/* Check if rbtree root looks promising */
+	if (RB_EMPTY_ROOT(&mm->mm_rb))
+		goto check_highest;
+	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+	if (vma->rb_subtree_gap < length)
+		goto check_highest;
+
+	while (true) {
+		/* Visit left subtree if it looks promising */
+		gap_end = vma->vm_start;
+		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
+			struct vm_area_struct *left =
+				rb_entry(vma->vm_rb.rb_left,
+					 struct vm_area_struct, vm_rb);
+			if (left->rb_subtree_gap >= length) {
+				vma = left;
+				continue;
+			}
+		}
+
+		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+check_current:
+		/* Check if current node has a suitable gap */
+		if (gap_start > high_limit)
+			return -ENOMEM;
+		if (gap_end >= low_limit && gap_end - gap_start >= length)
+			goto found;
+
+		/* Visit right subtree if it looks promising */
+		if (vma->vm_rb.rb_right) {
+			struct vm_area_struct *right =
+				rb_entry(vma->vm_rb.rb_right,
+					 struct vm_area_struct, vm_rb);
+			if (right->rb_subtree_gap >= length) {
+				vma = right;
+				continue;
+			}
+		}
+
+		/* Go back up the rbtree to find next candidate node */
+		while (true) {
+			struct rb_node *prev = &vma->vm_rb;
+			if (!rb_parent(prev))
+				goto check_highest;
+			vma = rb_entry(rb_parent(prev),
+				       struct vm_area_struct, vm_rb);
+			if (prev == vma->vm_rb.rb_left) {
+				gap_start = vma->vm_prev->vm_end;
+				gap_end = vma->vm_start;
+				goto check_current;
+			}
+		}
+	}
+
+check_highest:
+	/* Check highest gap, which does not precede any rbtree node */
+	gap_start = mm->highest_vm_end;
+	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
+	if (gap_start > high_limit)
+		return -ENOMEM;
+
+found:
+	/* We found a suitable gap. Clip it with the original low_limit. */
+	if (gap_start < info->low_limit)
+		gap_start = info->low_limit;
+
+	/* Adjust gap address to the desired alignment */
+	gap_start += (info->align_offset - gap_start) & info->align_mask;
+
+	VM_BUG_ON(gap_start + info->length > info->high_limit);
+	VM_BUG_ON(gap_start + info->length > gap_end);
+	return gap_start;
+}
+
+unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+	/* Adjust search length to account for worst case alignment overhead */
+	length = info->length + info->align_mask;
+	if (length < info->length)
+		return -ENOMEM;
+
+	/*
+	 * Adjust search limits by the desired length.
+	 * See implementation comment at top of unmapped_area().
+	 */
+	gap_end = info->high_limit;
+	if (gap_end < length)
+		return -ENOMEM;
+	high_limit = gap_end - length;
+
+	if (info->low_limit > high_limit)
+		return -ENOMEM;
+	low_limit = info->low_limit + length;
+
+	/* Check highest gap, which does not precede any rbtree node */
+	gap_start = mm->highest_vm_end;
+	if (gap_start <= high_limit)
+		goto found_highest;
+
+	/* Check if rbtree root looks promising */
+	if (RB_EMPTY_ROOT(&mm->mm_rb))
+		return -ENOMEM;
+	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+	if (vma->rb_subtree_gap < length)
+		return -ENOMEM;
+
+	while (true) {
+		/* Visit right subtree if it looks promising */
+		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+			struct vm_area_struct *right =
+				rb_entry(vma->vm_rb.rb_right,
+					 struct vm_area_struct, vm_rb);
+			if (right->rb_subtree_gap >= length) {
+				vma = right;
+				continue;
+			}
+		}
+
+check_current:
+		/* Check if current node has a suitable gap */
+		gap_end = vma->vm_start;
+		if (gap_end < low_limit)
+			return -ENOMEM;
+		if (gap_start <= high_limit && gap_end - gap_start >= length)
+			goto found;
+
+		/* Visit left subtree if it looks promising */
+		if (vma->vm_rb.rb_left) {
+			struct vm_area_struct *left =
+				rb_entry(vma->vm_rb.rb_left,
+					 struct vm_area_struct, vm_rb);
+			if (left->rb_subtree_gap >= length) {
+				vma = left;
+				continue;
+			}
+		}
+
+		/* Go back up the rbtree to find next candidate node */
+		while (true) {
+			struct rb_node *prev = &vma->vm_rb;
+			if (!rb_parent(prev))
+				return -ENOMEM;
+			vma = rb_entry(rb_parent(prev),
+				       struct vm_area_struct, vm_rb);
+			if (prev == vma->vm_rb.rb_right) {
+				gap_start = vma->vm_prev ?
+					vma->vm_prev->vm_end : 0;
+				goto check_current;
+			}
+		}
+	}
+
+found:
+	/* We found a suitable gap. Clip it with the original high_limit. */
+	if (gap_end > info->high_limit)
+		gap_end = info->high_limit;
+
+found_highest:
+	/* Compute highest gap address at the desired alignment */
+	gap_end -= info->length;
+	gap_end -= (gap_end - info->align_offset) & info->align_mask;
+
+	VM_BUG_ON(gap_end < info->low_limit);
+	VM_BUG_ON(gap_end < gap_start);
+	return gap_end;
+}
+
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -1557,7 +1757,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
@@ -1572,40 +1772,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
 
-full_search:
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				addr = TASK_UNMAPPED_BASE;
-				start_addr = addr;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = 0;
+	return vm_unmapped_area(&info);
 }
 #endif
 
@@ -1630,7 +1803,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0, start_addr;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -1648,53 +1822,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-try_again:
-	/* either no address requested or can't fit in requested address hole */
-	start_addr = addr = mm->free_area_cache;
-
-	if (addr < len)
-		goto fail;
-
-	addr -= len;
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 *
-	 * Note: this is different with the case of bottomup
-	 * which does the fully line-search, but we use find_vma
-	 * here that causes some holes skipped.
-	 */
-	if (start_addr != mm->mmap_base) {
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = 0;
-		goto try_again;
-	}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = 0;
+	addr = vm_unmapped_area(&info);
 
 	/*
 	 * A failed mmap() very likely causes application failure,
@@ -1702,14 +1835,13 @@ fail:
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
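
A note on the alignment arithmetic shared by both lookups: padding the search
length by align_mask guarantees that any gap found contains a properly aligned
sub-range of the requested size, and the final snap to alignment is a masked
subtraction rather than a division or loop. Below is a minimal, self-contained
userspace sketch of that arithmetic; the values are hypothetical, not kernel
code, and it assumes the mask bits are contiguous above the page bits and the
gap start is page aligned, as VMA boundaries always are:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical request: a 4MiB mapping that must be 2MiB aligned
	 * on a system with 4KiB pages (a hugetlb-style caller). */
	unsigned long align_mask   = 0x1ff000UL; /* bits 12..20 must match */
	unsigned long align_offset = 0;          /* plain 2MiB alignment */
	unsigned long length       = 0x400000UL;

	/* Worst-case padding: any free gap of this size contains an aligned
	 * sub-range of 'length' bytes, so the rbtree search can compare one
	 * value per node. The overflow guard mirrors the kernel's
	 * "if (length < info->length) return -ENOMEM;". */
	unsigned long search_length = length + align_mask;
	assert(search_length >= length);

	/* Say the search found a gap starting at this page-aligned address: */
	unsigned long gap_start = 0x7f1234567000UL;

	/* Snap upward to the next address whose masked bits equal those of
	 * align_offset; unsigned wraparound makes the subtraction safe. */
	gap_start += (align_offset - gap_start) & align_mask;

	assert((gap_start & align_mask) == (align_offset & align_mask));
	printf("aligned gap_start = %#lx\n", gap_start); /* 0x7f1234600000 */
	return 0;
}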