path: root/mm
author    Paul Mundt <lethal@linux-sh.org>  2010-01-18 06:47:37 -0500
committer Paul Mundt <lethal@linux-sh.org>  2010-01-18 06:47:37 -0500
commit    0c54de146ef4303ed3c5879b043894c8db637507 (patch)
tree      a4ff5bf27ffd2c4b71271b42014a0040490c7271 /mm
parent    8faba6121566248330e738d25a2c43d7500fb9f0 (diff)
parent    7dc9c484a71525794ca05cf7a47f283f1b54cd12 (diff)

Merge branch 'sh/stable-updates'
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c   11
-rw-r--r--  mm/nommu.c       102
-rw-r--r--  mm/page_alloc.c    4
-rw-r--r--  mm/truncate.c     30
-rw-r--r--  mm/util.c          2
-rw-r--r--  mm/vmscan.c        3

6 files changed, 95 insertions, 57 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 488b644e0e8e..954032b80bed 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2586,7 +2586,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 	if (free_all)
 		goto try_to_free;
 move_account:
-	while (mem->res.usage > 0) {
+	do {
 		ret = -EBUSY;
 		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
 			goto out;
@@ -2614,8 +2614,8 @@ move_account:
 		if (ret == -ENOMEM)
 			goto try_to_free;
 		cond_resched();
-	}
-	ret = 0;
+	/* "ret" should also be checked to ensure all lists are empty. */
+	} while (mem->res.usage > 0 || ret);
 out:
 	css_put(&mem->css);
 	return ret;
@@ -2648,10 +2648,7 @@ try_to_free:
 	}
 	lru_add_drain();
 	/* try move_account...there may be some *locked* pages. */
-	if (mem->res.usage)
-		goto move_account;
-	ret = 0;
-	goto out;
+	goto move_account;
 }
 
 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
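
The memcontrol.c hunks restructure force_empty's drain loop from a while into a
do-while whose exit condition also rechecks ret, so a pass that ends with a
transient error is retried rather than falling through to "ret = 0" while
charges may still sit on some list (as the new in-patch comment says). A
standalone userspace sketch of just that control-flow change; names are
illustrative stand-ins, not kernel code:

#include <stdio.h>

static int usage = 3;		/* stand-in for mem->res.usage */

/* stand-in for one move_account pass; may fail transiently */
static int drain_pass(void)
{
	if (usage == 0)
		return 0;
	usage--;
	return usage ? -1 : 0;	/* pretend the pass left work behind */
}

int main(void)
{
	int ret;

	/*
	 * As in the patch: keep looping while work remains OR the last
	 * pass reported an error; the old `while (usage > 0)` form
	 * could exit and report success with lists not yet empty.
	 */
	do {
		ret = drain_pass();
	} while (usage > 0 || ret);

	printf("usage=%d ret=%d\n", usage, ret);
	return 0;
}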
diff --git a/mm/nommu.c b/mm/nommu.c
index 17773862619b..48a2ecfaf059 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -552,11 +552,11 @@ static void free_page_series(unsigned long from, unsigned long to)
 static void __put_nommu_region(struct vm_region *region)
 	__releases(nommu_region_sem)
 {
-	kenter("%p{%d}", region, atomic_read(&region->vm_usage));
+	kenter("%p{%d}", region, region->vm_usage);
 
 	BUG_ON(!nommu_region_tree.rb_node);
 
-	if (atomic_dec_and_test(&region->vm_usage)) {
+	if (--region->vm_usage == 0) {
 		if (region->vm_top > region->vm_start)
 			delete_nommu_region(region);
 		up_write(&nommu_region_sem);
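
Throughout nommu.c this merge demotes vm_region->vm_usage from an atomic_t to
a plain int: here and in the do_mmap_pgoff(), split_vma() and shrink_vma()
hunks below, every access already runs under nommu_region_sem, so the atomic
operations bought nothing. A minimal userspace sketch of the same reasoning,
with a pthread mutex standing in for the region semaphore (illustrative code,
not the kernel's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	int usage;	/* plain int: was atomic before the change */
};

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;

/* every reader and writer of ->usage takes region_lock, so ordinary
 * ++/-- cannot race and atomics would be redundant */
static void put_region(struct region *r)
{
	pthread_mutex_lock(&region_lock);
	if (--r->usage == 0) {
		pthread_mutex_unlock(&region_lock);
		free(r);
		return;
	}
	pthread_mutex_unlock(&region_lock);
}

int main(void)
{
	struct region *r = malloc(sizeof(*r));

	r->usage = 2;
	put_region(r);	/* drops to 1 */
	put_region(r);	/* drops to 0 and frees */
	puts("done");
	return 0;
}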
@@ -1205,7 +1205,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	if (!vma)
 		goto error_getting_vma;
 
-	atomic_set(&region->vm_usage, 1);
+	region->vm_usage = 1;
 	region->vm_flags = vm_flags;
 	region->vm_pgoff = pgoff;
 
@@ -1272,7 +1272,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 			}
 
 			/* we've found a region we can share */
-			atomic_inc(&pregion->vm_usage);
+			pregion->vm_usage++;
 			vma->vm_region = pregion;
 			start = pregion->vm_start;
 			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
@@ -1289,7 +1289,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 				vma->vm_region = NULL;
 				vma->vm_start = 0;
 				vma->vm_end = 0;
-				atomic_dec(&pregion->vm_usage);
+				pregion->vm_usage--;
 				pregion = NULL;
 				goto error_just_free;
 			}
@@ -1441,10 +1441,9 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	kenter("");
 
-	/* we're only permitted to split anonymous regions that have a single
-	 * owner */
-	if (vma->vm_file ||
-	    atomic_read(&vma->vm_region->vm_usage) != 1)
+	/* we're only permitted to split anonymous regions (these should have
+	 * only a single usage on the region) */
+	if (vma->vm_file)
 		return -ENOMEM;
 
 	if (mm->map_count >= sysctl_max_map_count)
@@ -1518,7 +1517,7 @@ static int shrink_vma(struct mm_struct *mm,
 
 	/* cut the backing region down to size */
 	region = vma->vm_region;
-	BUG_ON(atomic_read(&region->vm_usage) != 1);
+	BUG_ON(region->vm_usage != 1);
 
 	down_write(&nommu_region_sem);
 	delete_nommu_region(region);
@@ -1762,27 +1761,6 @@ void unmap_mapping_range(struct address_space *mapping,
 EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
- * ask for an unmapped area at which to create a mapping on a file
- */
-unsigned long get_unmapped_area(struct file *file, unsigned long addr,
-				unsigned long len, unsigned long pgoff,
-				unsigned long flags)
-{
-	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
-				  unsigned long, unsigned long);
-
-	get_area = current->mm->get_unmapped_area;
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		get_area = file->f_op->get_unmapped_area;
-
-	if (!get_area)
-		return -ENOSYS;
-
-	return get_area(file, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL(get_unmapped_area);
-
-/*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
  * succeed and -ENOMEM implies there is not.
@@ -1936,3 +1914,65 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 	mmput(mm);
 	return len;
 }
+
+/**
+ * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+ * @inode: The inode to check
+ * @size: The current filesize of the inode
+ * @newsize: The proposed filesize of the inode
+ *
+ * Check the shared mappings on an inode on behalf of a shrinking truncate to
+ * make sure that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend beyond so that do_mmap_pgoff() doesn't
+ * automatically grant mappings that are too large.
+ */
+int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+				size_t newsize)
+{
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	struct vm_region *region;
+	pgoff_t low, high;
+	size_t r_size, r_top;
+
+	low = newsize >> PAGE_SHIFT;
+	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	down_write(&nommu_region_sem);
+
+	/* search for VMAs that fall within the dead zone */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      low, high) {
+		/* found one - only interested if it's shared out of the page
+		 * cache */
+		if (vma->vm_flags & VM_SHARED) {
+			up_write(&nommu_region_sem);
+			return -ETXTBSY; /* not quite true, but near enough */
+		}
+	}
+
+	/* reduce any regions that overlap the dead zone - if in existence,
+	 * these will be pointed to by VMAs that don't overlap the dead zone
+	 *
+	 * we don't check for any regions that start beyond the EOF as there
+	 * shouldn't be any
+	 */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      0, ULONG_MAX) {
+		if (!(vma->vm_flags & VM_SHARED))
+			continue;
+
+		region = vma->vm_region;
+		r_size = region->vm_top - region->vm_start;
+		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
+
+		if (r_top > newsize) {
+			region->vm_top -= r_top - newsize;
+			if (region->vm_end > region->vm_top)
+				region->vm_end = region->vm_top;
+		}
+	}
+
+	up_write(&nommu_region_sem);
+	return 0;
+}
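
The arithmetic at the heart of the new helper's second loop is the easy part
to get wrong: a region's top, expressed as a file offset (vm_pgoff in bytes
plus the mapped size), is pulled back by exactly its overhang past the new
file size. A standalone userspace sketch of that trim step, with illustrative
types only:

#include <stdio.h>

#define PAGE_SHIFT 12

struct region {
	unsigned long start, top, end;	/* memory addresses */
	unsigned long pgoff;		/* file offset, in pages */
};

/* trim one region against a shrunken file, as in the second loop of
 * nommu_shrink_inode_mappings() */
static void shrink_region(struct region *r, unsigned long newsize)
{
	unsigned long r_size = r->top - r->start;
	unsigned long r_top = (r->pgoff << PAGE_SHIFT) + r_size;

	if (r_top > newsize) {
		r->top -= r_top - newsize;	/* remove the overhang */
		if (r->end > r->top)
			r->end = r->top;
	}
}

int main(void)
{
	/* maps file pages 0..3 (16KiB) at addresses 0x10000..0x14000 */
	struct region r = { 0x10000, 0x14000, 0x14000, 0 };

	shrink_region(&r, 8192);	/* file shrinks to 8KiB */
	printf("top=%#lx end=%#lx\n", r.top, r.end);	/* both 0x12000 */
	return 0;
}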
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e9f5cc5fb59..d2a8889b4c58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1222,10 +1222,10 @@ again:
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
-		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
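
The first page_alloc.c hunk is an ordering fix: NR_FREE_PAGES used to be
debited before __rmqueue()'s result was checked, so a failed allocation still
skewed the free-page counter. Moving the update past the !page test means the
counter only changes once pages were actually taken. A generic sketch of the
rule with illustrative stand-in names:

#include <stdio.h>
#include <stddef.h>

static long nr_free = 8;		/* stand-in for NR_FREE_PAGES */

static void *rmqueue(int order)		/* stand-in for __rmqueue() */
{
	(void)order;
	return NULL;			/* simulate allocation failure */
}

static void *alloc_pages_sketch(int order)
{
	void *page = rmqueue(order);

	if (!page)
		return NULL;	/* fail WITHOUT touching the counter */
	nr_free -= 1L << order;	/* account only once success is known */
	return page;
}

int main(void)
{
	alloc_pages_sketch(3);
	printf("nr_free=%ld\n", nr_free);	/* still 8, not 0 */
	return 0;
}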
@@ -3998,7 +3998,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 	}
 
 	/* Merge backward if suitable */
-	if (start_pfn < early_node_map[i].end_pfn &&
+	if (start_pfn < early_node_map[i].start_pfn &&
 	    end_pfn >= early_node_map[i].start_pfn) {
 		early_node_map[i].start_pfn = start_pfn;
 		return;
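
The second hunk tightens the merge-backward test in add_active_range(): a new
PFN range may only be prepended when it really starts earlier than the
existing entry (start_pfn < start_pfn of the entry) and still reaches it. The
old test against end_pfn also matched ranges that begin inside the current
entry, in which case assigning start_pfn would move the entry's start forward
and shrink it. A small sketch of the corrected predicate (illustrative code):

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };

static bool merge_backward(struct range *cur,
			   unsigned long start, unsigned long end)
{
	if (start < cur->start && end >= cur->start) {
		cur->start = start;	/* safe: only ever extends */
		return true;
	}
	return false;
}

int main(void)
{
	struct range r = { .start = 100, .end = 200 };

	printf("%d\n", merge_backward(&r, 50, 120));	/* 1: extends */
	printf("%d\n", merge_backward(&r, 120, 150));	/* 0: starts inside */
	printf("%lu\n", r.start);			/* 50 */
	return 0;
}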
diff --git a/mm/truncate.c b/mm/truncate.c
index 342deee22684..e87e37244829 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -522,22 +522,20 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
  */
 void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
 {
-	if (new < old) {
-		struct address_space *mapping = inode->i_mapping;
-
-		/*
-		 * unmap_mapping_range is called twice, first simply for
-		 * efficiency so that truncate_inode_pages does fewer
-		 * single-page unmaps.  However after this first call, and
-		 * before truncate_inode_pages finishes, it is possible for
-		 * private pages to be COWed, which remain after
-		 * truncate_inode_pages finishes, hence the second
-		 * unmap_mapping_range call must be made for correctness.
-		 */
-		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-		truncate_inode_pages(mapping, new);
-		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-	}
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * unmap_mapping_range is called twice, first simply for
+	 * efficiency so that truncate_inode_pages does fewer
+	 * single-page unmaps.  However after this first call, and
+	 * before truncate_inode_pages finishes, it is possible for
+	 * private pages to be COWed, which remain after
+	 * truncate_inode_pages finishes, hence the second
+	 * unmap_mapping_range call must be made for correctness.
+	 */
+	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+	truncate_inode_pages(mapping, new);
+	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
 }
 EXPORT_SYMBOL(truncate_pagecache);
 
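
The truncate.c change drops the `new < old` guard so the unmap + truncate
sequence always runs; the guard apparently defeated callers that need the page
cache cleaned up past the new size even when i_size is not strictly shrinking
(for instance after a partially failed write instantiated pages beyond EOF).
A userspace sketch of why such a guard belongs in the caller, not in the
cleanup helper; everything here is an illustrative stand-in:

#include <stdio.h>
#include <string.h>

#define CACHE_SLOTS 8

static char cache[CACHE_SLOTS];		/* stand-in for the page cache */

/* stand-in for truncate_pagecache(): drop everything past newsize.
 * With the patch the work is no longer skipped when newsize >= old,
 * so debris beyond an unchanged size is still cleaned up. */
static void truncate_cache(size_t old, size_t newsize)
{
	(void)old;	/* the old-size guard is gone */
	if (newsize < CACHE_SLOTS)
		memset(cache + newsize, 0, CACHE_SLOTS - newsize);
}

int main(void)
{
	memset(cache, 'x', sizeof(cache));
	cache[6] = '!';		/* debris beyond size, e.g. a failed write */
	truncate_cache(6, 6);	/* old == new: previously a silent no-op */
	printf("slot6=%d\n", cache[6]);		/* now 0 */
	return 0;
}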
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f927..834db7be240f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -220,7 +220,7 @@ char *strndup_user(const char __user *s, long n)
 }
 EXPORT_SYMBOL(strndup_user);
 
-#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	mm->mmap_base = TASK_UNMAPPED_BASE;
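
The util.c guard now also requires CONFIG_MMU, so this generic fallback (which
references TASK_UNMAPPED_BASE, an MMU notion) is no longer compiled into NOMMU
kernels. A standalone sketch of the "generic unless overridden, and only when
the feature exists" preprocessor pattern; the macro names here are
hypothetical, not kernel symbols:

#include <stdio.h>

#define HAVE_FEATURE_MMU 1
/* #define HAVE_ARCH_PICK_LAYOUT 1  -- an arch override would define this */

#if defined(HAVE_FEATURE_MMU) && !defined(HAVE_ARCH_PICK_LAYOUT)
static void pick_layout(void)
{
	puts("generic layout");		/* fallback used */
}
#else
static void pick_layout(void)
{
	puts("arch-specific layout");	/* override or feature absent */
}
#endif

int main(void)
{
	pick_layout();
	return 0;
}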
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 885207a6b6b7..c26986c85ce0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1922,6 +1922,9 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
 		if (!populated_zone(zone))
 			continue;
 
+		if (zone_is_all_unreclaimable(zone))
+			continue;
+
 		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
 				       0, 0))
 			return 1;
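
The vmscan.c hunk makes sleeping_prematurely() skip zones already flagged
all-unreclaimable: such a zone can never be balanced to its high watermark, so
counting it would keep telling kswapd that it went to sleep too early. A
userspace sketch of the scan-skip pattern; types and names are illustrative,
not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct zone {
	bool populated;
	bool all_unreclaimable;
	long free_pages, high_wmark;
};

/* true if some zone that can still be reclaimed is below its high
 * watermark, i.e. kswapd should not be sleeping yet */
static bool sleeping_prematurely(struct zone *zones, int nr)
{
	for (int i = 0; i < nr; i++) {
		struct zone *z = &zones[i];

		if (!z->populated)
			continue;
		if (z->all_unreclaimable)	/* hopeless zone: don't */
			continue;		/* let it pin kswapd awake */
		if (z->free_pages < z->high_wmark)
			return true;
	}
	return false;
}

int main(void)
{
	struct zone zones[] = {
		{ .populated = true, .all_unreclaimable = true,
		  .free_pages = 0, .high_wmark = 100 },		/* ignored */
		{ .populated = true, .all_unreclaimable = false,
		  .free_pages = 200, .high_wmark = 100 },
	};

	printf("%d\n", sleeping_prematurely(zones, 2));		/* 0 */
	return 0;
}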