Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	126
1 file changed, 66 insertions, 60 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index fa11d91242e8..5ecc2cf3e1d7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -181,26 +181,36 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 }
 
 /*
- * Remove one vm structure and free it.
+ * Unlink a file-based vm structure from its prio_tree, to hide
+ * vma from rmap and vmtruncate before freeing its page tables.
  */
-static void remove_vm_struct(struct vm_area_struct *vma)
+void unlink_file_vma(struct vm_area_struct *vma)
 {
 	struct file *file = vma->vm_file;
 
-	might_sleep();
 	if (file) {
 		struct address_space *mapping = file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
 		__remove_shared_vm_struct(vma, file, mapping);
 		spin_unlock(&mapping->i_mmap_lock);
 	}
+}
+
+/*
+ * Close a vm structure and free it, returning the next.
+ */
+static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *next = vma->vm_next;
+
+	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
-	if (file)
-		fput(file);
-	anon_vma_unlink(vma);
+	if (vma->vm_file)
+		fput(vma->vm_file);
 	mpol_free(vma_policy(vma));
 	kmem_cache_free(vm_area_cachep, vma);
+	return next;
 }
 
 asmlinkage unsigned long sys_brk(unsigned long brk)
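
The old remove_vm_struct() did unlink, close, fput and free in one pass; the split above lets the unmap path hide a vma from rmap and vmtruncate early (under the i_mmap lock), deferring the sleeping close/free work. A minimal sketch of the resulting two-phase teardown, assuming a caller that holds the mm semaphore; the actual unlink_file_vma() call sites land outside this file (e.g. in the page-table freeing path), so the shape below is illustrative, not verbatim kernel code:

	struct vm_area_struct *vma;

	/* phase 1: still under mm locks; detach file vmas from the
	 * prio_tree so rmap and vmtruncate can no longer find them */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		unlink_file_vma(vma);

	/* phase 2: locks dropped, may sleep; remove_vma() returns the
	 * next vma so the list can be consumed as it is freed */
	for (vma = mm->mmap; vma; )
		vma = remove_vma(vma);
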
@@ -832,7 +842,7 @@ none:
 }
 
 #ifdef CONFIG_PROC_FS
-void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 		struct file *file, long pages)
 {
 	const unsigned long stack_flags
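
With the old unaccounting helper gone (see the removal of vm_stat_unaccount() further down), the double-underscore name loses its prefix and callers use vm_stat_account() directly. The !CONFIG_PROC_FS build presumably gets an empty stub in the header, along these lines (an assumption about the header counterpart, not part of this diff):

	#ifdef CONFIG_PROC_FS
	void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
	#else
	static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
	{
		/* no /proc accounting configured: nothing to do */
	}
	#endif
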
@@ -1070,6 +1080,17 @@ munmap_back:
 		error = file->f_op->mmap(file, vma);
 		if (error)
 			goto unmap_and_free_vma;
+		if ((vma->vm_flags & (VM_SHARED | VM_WRITE | VM_RESERVED))
+				== (VM_WRITE | VM_RESERVED)) {
+			printk(KERN_WARNING "program %s is using MAP_PRIVATE, "
+					"PROT_WRITE mmap of VM_RESERVED memory, which "
+					"is deprecated. Please report this to "
+					"linux-kernel@vger.kernel.org\n",current->comm);
+			if (vma->vm_ops && vma->vm_ops->close)
+				vma->vm_ops->close(vma);
+			error = -EACCES;
+			goto unmap_and_free_vma;
+		}
 	} else if (vm_flags & VM_SHARED) {
 		error = shmem_zero_setup(vma);
 		if (error)
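
The new check fires on exactly one flag combination: VM_WRITE and VM_RESERVED set with VM_SHARED clear, i.e. a private writable mapping of driver-reserved memory. Since the driver's ->mmap() has already succeeded, its ->close() must be invoked by hand before bailing out; the unmap_and_free_vma path presumably frees the vma without calling it. The mask-and-compare idiom, spelled out (illustrative only):

	unsigned long mask = VM_SHARED | VM_WRITE | VM_RESERVED;
	unsigned long deny = VM_WRITE | VM_RESERVED;	/* private + writable */

	/* (flags & mask) == deny  <=>  WRITE and RESERVED set, SHARED clear;
	 * shared or read-only mappings of VM_RESERVED memory still pass */
	if ((vma->vm_flags & mask) == deny)
		return -EACCES;
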
@@ -1110,7 +1131,7 @@ munmap_back:
 	}
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
-	__vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += len >> PAGE_SHIFT;
 		make_pages_present(addr, addr + len);
@@ -1475,15 +1496,19 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 	mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		mm->locked_vm += grow;
-	__vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
 	return 0;
 }
 
-#ifdef CONFIG_STACK_GROWSUP
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 /*
- * vma is the first one with address > vma->vm_end.  Have to extend vma.
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct * vma, unsigned long address)
+#ifdef CONFIG_STACK_GROWSUP
+static inline
+#endif
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	int error;
 
@@ -1521,6 +1546,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
 	anon_vma_unlock(vma);
 	return error;
 }
+#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_upwards(vma, address);
+}
 
 struct vm_area_struct *
 find_extend_vma(struct mm_struct *mm, unsigned long addr)
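
Widening the #if makes expand_upwards() available to IA64 even though IA64 stacks grow down: the Register Backing Store is a second, upward-growing region. Under CONFIG_STACK_GROWSUP the function stays effectively private (static inline) with expand_stack() as its wrapper; under CONFIG_IA64 alone it becomes a real symbol for the arch fault handler. A hedged sketch of the assumed IA64-side caller (the actual code lives under arch/ia64; the shape below is a guess):

	/* on a fault just above an upward-growing RBS vma ... */
	if (address >= vma->vm_end) {
		if (expand_upwards(vma, address))
			goto bad_area;	/* hypothetical fault-handler label */
	}
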
@@ -1603,36 +1635,24 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 }
 #endif
 
-/* Normal function to fix up a mapping
- * This function is the default for when an area has no specific
- * function.  This may be used as part of a more specific routine.
- *
- * By the time this function is called, the area struct has been
- * removed from the process mapping list.
- */
-static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
-{
-	size_t len = area->vm_end - area->vm_start;
-
-	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
-	if (area->vm_flags & VM_LOCKED)
-		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
-	vm_stat_unaccount(area);
-	remove_vm_struct(area);
-}
-
 /*
- * Update the VMA and inode share lists.
- *
- * Ok - we have the memory areas we should free on the 'free' list,
+ * Ok - we have the memory areas we should free on the vma list,
  * so release them, and do the vma updates.
+ *
+ * Called with the mm semaphore held.
  */
-static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	/* Update high watermark before we lower total_vm */
+	update_hiwater_vm(mm);
 	do {
-		struct vm_area_struct *next = vma->vm_next;
-		unmap_vma(mm, vma);
-		vma = next;
+		long nrpages = vma_pages(vma);
+
+		mm->total_vm -= nrpages;
+		if (vma->vm_flags & VM_LOCKED)
+			mm->locked_vm -= nrpages;
+		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+		vma = remove_vma(vma);
 	} while (vma);
 	validate_mm(mm);
 }
@@ -1651,14 +1671,13 @@ static void unmap_region(struct mm_struct *mm,
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
-	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
-	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+	update_hiwater_rss(mm);
+	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 				 next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
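
Two things change in unmap_region(): page_table_lock is no longer taken around the TLB gather (the locking has presumably moved to a finer grain elsewhere in this series), and the RSS high watermark is sampled just before unmap_vmas() lowers the counters, mirroring the "before we lower total_vm" comment in remove_vma_list() above. Neither update_hiwater_rss() nor update_hiwater_vm() is defined in this file; a sketch of the shape they presumably take, assuming hiwater_rss/hiwater_vm max-tracking fields on mm_struct:

	#define update_hiwater_rss(mm)	do {				\
		if ((mm)->hiwater_rss < get_mm_counter(mm, rss))	\
			(mm)->hiwater_rss = get_mm_counter(mm, rss);	\
	} while (0)

	#define update_hiwater_vm(mm)	do {				\
		if ((mm)->hiwater_vm < (mm)->total_vm)			\
			(mm)->hiwater_vm = (mm)->total_vm;		\
	} while (0)
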
@@ -1799,7 +1818,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	unmap_region(mm, vma, prev, start, end);
 
 	/* Fix up all other VM information */
-	unmap_vma_list(mm, vma);
+	remove_vma_list(mm, vma);
 
 	return 0;
 }
@@ -1933,34 +1952,21 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	lru_add_drain();
-
-	spin_lock(&mm->page_table_lock);
-
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
+	/* Don't update_hiwater_rss(mm) here, do_exit already did */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
-	mm->mmap = mm->mmap_cache = NULL;
-	mm->mm_rb = RB_ROOT;
-	set_mm_counter(mm, rss, 0);
-	mm->total_vm = 0;
-	mm->locked_vm = 0;
-
-	spin_unlock(&mm->page_table_lock);
-
 	/*
-	 * Walk the list again, actually closing and freeing it
-	 * without holding any MM locks.
+	 * Walk the list again, actually closing and freeing it,
+	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma) {
-		struct vm_area_struct *next = vma->vm_next;
-		remove_vm_struct(vma);
-		vma = next;
-	}
+	while (vma)
+		vma = remove_vma(vma);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
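
The bulk of the deletion in exit_mmap() is bookkeeping resets (mmap, mm_rb, rss, total_vm, locked_vm), presumably dropped because nothing looks at them once the mm is torn down, plus the page_table_lock pair. The final walk then collapses to one line because remove_vma() captures vm_next before freeing. The two loop forms are equivalent:

	while (vma) {				/* old: explicit next pointer */
		struct vm_area_struct *next = vma->vm_next;
		remove_vm_struct(vma);
		vma = next;
	}

	while (vma)				/* new: remove_vma() returns next */
		vma = remove_vma(vma);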