Diffstat (limited to 'mm/nommu.c')

 mm/nommu.c | 119 ++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 83 insertions(+), 36 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 6f9248f89bde..48a2ecfaf059 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -432,6 +432,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         /*
          * Ok, looks good - let it rip.
          */
+        flush_icache_range(mm->brk, brk);
         return mm->brk = brk;
 }
 
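Note: the new flush covers just the delta between the old and new break, so the instruction cache holds no stale lines for the newly exposed [mm->brk, brk) range before userspace reuses it. On architectures with coherent caches this hook typically compiles away entirely; a rough sketch of the generic fallback, in the style of asm-generic/cacheflush.h (illustrative only, not any particular architecture's definition):

/* coherent caches: nothing to invalidate (sketch only) */
#define flush_icache_range(start, end)  do { } while (0)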
@@ -551,11 +552,11 @@ static void free_page_series(unsigned long from, unsigned long to)
 static void __put_nommu_region(struct vm_region *region)
         __releases(nommu_region_sem)
 {
-        kenter("%p{%d}", region, atomic_read(&region->vm_usage));
+        kenter("%p{%d}", region, region->vm_usage);
 
         BUG_ON(!nommu_region_tree.rb_node);
 
-        if (atomic_dec_and_test(&region->vm_usage)) {
+        if (--region->vm_usage == 0) {
                 if (region->vm_top > region->vm_start)
                         delete_nommu_region(region);
                 up_write(&nommu_region_sem);
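Note: this hunk and the matching ones below demote vm_region::vm_usage from an atomic_t to a plain int. The count is only ever read or modified while nommu_region_sem is held, so the atomic operations bought nothing. A sketch of the same invariant on the get side, modelled on the companion __get_nommu_region() helper in this file (hedged reconstruction, not part of this diff):

static void __get_nommu_region(struct vm_region *region)
{
        /* caller holds nommu_region_sem for writing, so a plain
         * increment is safe - no atomic_inc() required */
        BUG_ON(!nommu_region_tree.rb_node);
        region->vm_usage++;
}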
@@ -1204,7 +1205,7 @@ unsigned long do_mmap_pgoff(struct file *file,
         if (!vma)
                 goto error_getting_vma;
 
-        atomic_set(&region->vm_usage, 1);
+        region->vm_usage = 1;
         region->vm_flags = vm_flags;
         region->vm_pgoff = pgoff;
 
@@ -1271,7 +1272,7 @@ unsigned long do_mmap_pgoff(struct file *file,
                         }
 
                         /* we've found a region we can share */
-                        atomic_inc(&pregion->vm_usage);
+                        pregion->vm_usage++;
                         vma->vm_region = pregion;
                         start = pregion->vm_start;
                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
@@ -1288,7 +1289,7 @@ unsigned long do_mmap_pgoff(struct file *file,
                                 vma->vm_region = NULL;
                                 vma->vm_start = 0;
                                 vma->vm_end = 0;
-                                atomic_dec(&pregion->vm_usage);
+                                pregion->vm_usage--;
                                 pregion = NULL;
                                 goto error_just_free;
                         }
@@ -1353,10 +1354,14 @@ unsigned long do_mmap_pgoff(struct file *file,
 share:
         add_vma_to_mm(current->mm, vma);
 
-        up_write(&nommu_region_sem);
+        /* we flush the region from the icache only when the first executable
+         * mapping of it is made */
+        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
+                flush_icache_range(region->vm_start, region->vm_end);
+                region->vm_icache_flushed = true;
+        }
 
-        if (prot & PROT_EXEC)
-                flush_icache_range(result, result + len);
+        up_write(&nommu_region_sem);
 
         kleave(" = %lx", result);
         return result;
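Note: the icache flush now happens once per region, when its first executable mapping is installed, rather than on every PROT_EXEC mmap(); under FDPIC, where many processes map the same read-only text region, this avoids flushing identical shared lines repeatedly. Moving up_write() below the flush also keeps the test-and-set of vm_icache_flushed serialized under nommu_region_sem. The flag itself is a one-bit field in struct vm_region; a hedged reconstruction of that structure as of this change (based on include/linux/mm_types.h of the era, field comments paraphrased):

struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        unsigned long   vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* file offset of vm_start, in pages */
        struct file     *vm_file;       /* the backing file or NULL */
        int             vm_usage;       /* region usage count (plain int now) */
        bool            vm_icache_flushed : 1; /* true once the icache has been
                                                * flushed for this region */
};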
@@ -1436,10 +1441,9 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 
         kenter("");
 
-        /* we're only permitted to split anonymous regions that have a single
-         * owner */
-        if (vma->vm_file ||
-            atomic_read(&vma->vm_region->vm_usage) != 1)
+        /* we're only permitted to split anonymous regions (these should have
+         * only a single usage on the region) */
+        if (vma->vm_file)
                 return -ENOMEM;
 
         if (mm->map_count >= sysctl_max_map_count)
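Note: the usage-count test can go because, on nommu, a region only gains extra users through the file-backed sharing path in do_mmap_pgoff() above; an anonymous region (vma->vm_file == NULL) should therefore always sit at vm_usage == 1, exactly as the rewritten comment says. A hedged sketch of the invariant being relied on (illustrative assertion, not in the patch):

        /* illustrative only: the invariant the new comment encodes */
        if (!vma->vm_file)
                BUG_ON(vma->vm_region->vm_usage != 1);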
@@ -1513,7 +1517,7 @@ static int shrink_vma(struct mm_struct *mm,
 
         /* cut the backing region down to size */
         region = vma->vm_region;
-        BUG_ON(atomic_read(&region->vm_usage) != 1);
+        BUG_ON(region->vm_usage != 1);
 
         down_write(&nommu_region_sem);
         delete_nommu_region(region);
@@ -1757,27 +1761,6 @@ void unmap_mapping_range(struct address_space *mapping,
 EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
- * ask for an unmapped area at which to create a mapping on a file
- */
-unsigned long get_unmapped_area(struct file *file, unsigned long addr,
-                                unsigned long len, unsigned long pgoff,
-                                unsigned long flags)
-{
-        unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
-                                  unsigned long, unsigned long);
-
-        get_area = current->mm->get_unmapped_area;
-        if (file && file->f_op && file->f_op->get_unmapped_area)
-                get_area = file->f_op->get_unmapped_area;
-
-        if (!get_area)
-                return -ENOSYS;
-
-        return get_area(file, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL(get_unmapped_area);
-
-/*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
  * succeed and -ENOMEM implies there is not.
@@ -1916,9 +1899,11 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 
                 /* only read or write mappings where it is permitted */
                 if (write && vma->vm_flags & VM_MAYWRITE)
-                        len -= copy_to_user((void *) addr, buf, len);
+                        copy_to_user_page(vma, NULL, addr,
+                                          (void *) addr, buf, len);
                 else if (!write && vma->vm_flags & VM_MAYREAD)
-                        len -= copy_from_user(buf, (void *) addr, len);
+                        copy_from_user_page(vma, NULL, addr,
+                                            buf, (void *) addr, len);
                 else
                         len = 0;
         } else {
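Note: copy_to_user_page()/copy_from_user_page() are the hooks ptrace-style accessors are expected to use; besides copying, they keep the caches coherent for the target VMA, which matters when poking breakpoints into executable mappings. The old `len -= copy_to_user(...)` bookkeeping also goes away: on nommu the kernel can reach these addresses directly, so the page variants simply copy. Their generic fallbacks look roughly like this sketch, in the style of asm-generic/cacheflush.h (illustrative; the unused page argument is why NULL is passed above):

#define copy_to_user_page(vma, page, vaddr, dst, src, len)     \
        do {                                                    \
                memcpy(dst, src, len);                          \
                flush_icache_user_range(vma, page, vaddr, len); \
        } while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)   \
        memcpy(dst, src, len)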
@@ -1929,3 +1914,65 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
         mmput(mm);
         return len;
 }
+
+/**
+ * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+ * @inode: The inode to check
+ * @size: The current filesize of the inode
+ * @newsize: The proposed filesize of the inode
+ *
+ * Check the shared mappings on an inode on behalf of a shrinking truncate to
+ * make sure that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't
+ * automatically grant mappings that are too large.
+ */
+int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+                                size_t newsize)
+{
+        struct vm_area_struct *vma;
+        struct prio_tree_iter iter;
+        struct vm_region *region;
+        pgoff_t low, high;
+        size_t r_size, r_top;
+
+        low = newsize >> PAGE_SHIFT;
+        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+        down_write(&nommu_region_sem);
+
+        /* search for VMAs that fall within the dead zone */
+        vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+                              low, high) {
+                /* found one - only interested if it's shared out of the page
+                 * cache */
+                if (vma->vm_flags & VM_SHARED) {
+                        up_write(&nommu_region_sem);
+                        return -ETXTBSY; /* not quite true, but near enough */
+                }
+        }
+
+        /* reduce any regions that overlap the dead zone - if in existence,
+         * these will be pointed to by VMAs that don't overlap the dead zone
+         *
+         * we don't check for any regions that start beyond the EOF as there
+         * shouldn't be any
+         */
+        vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+                              0, ULONG_MAX) {
+                if (!(vma->vm_flags & VM_SHARED))
+                        continue;
+
+                region = vma->vm_region;
+                r_size = region->vm_top - region->vm_start;
+                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
+
+                if (r_top > newsize) {
+                        region->vm_top -= r_top - newsize;
+                        if (region->vm_end > region->vm_top)
+                                region->vm_end = region->vm_top;
+                }
+        }
+
+        up_write(&nommu_region_sem);
+        return 0;
+}
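Note: nommu_shrink_inode_mappings() is intended to be called from a nommu filesystem's truncate path before the page cache is actually cut down, with ramfs as the expected caller. A hedged sketch of how such a caller might look (hypothetical name, error handling and the actual truncation elided; not the real ramfs code):

static int example_nommu_resize(struct inode *inode, loff_t newsize)
{
        loff_t size = i_size_read(inode);
        int ret;

        if (newsize < size) {
                /* refuse if a shared mmap still pins the dead zone,
                 * otherwise trim any oversized vm_regions first */
                ret = nommu_shrink_inode_mappings(inode, size, newsize);
                if (ret < 0)
                        return ret;
        }

        /* ...then shrink the page cache and update i_size as usual... */
        return 0;
}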