commit    ab386128f20c44c458a90039ab1bdc265ac474c9 (patch)
author    Tejun Heo <tj@kernel.org>  2010-02-02 00:38:15 -0500
committer Tejun Heo <tj@kernel.org>  2010-02-02 00:38:15 -0500
tree      2ad188744922b1bb951fd10ff50dc04c83acce22 /mm
parent    dbfc196a3cc1a2514ad0737a82f764de23bd65e6 (diff)
parent    ab658321f32770b903a4426e2a6fae0392757755 (diff)

    Merge branch 'master' into percpu
Diffstat (limited to 'mm')
 -rw-r--r--  mm/filemap.c     | 100
 -rw-r--r--  mm/hugetlb.c     |   2
 -rw-r--r--  mm/maccess.c     |  11
 -rw-r--r--  mm/memcontrol.c  |  11
 -rw-r--r--  mm/nommu.c       | 119
 -rw-r--r--  mm/page_alloc.c  |   9
 -rw-r--r--  mm/percpu.c      |   4
 -rw-r--r--  mm/truncate.c    |  30
 -rw-r--r--  mm/util.c        |   2
 -rw-r--r--  mm/vmalloc.c     |   4
 -rw-r--r--  mm/vmscan.c      |   3
 11 files changed, 192 insertions(+), 103 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 96ac6b0eb6cb..e3736923220e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1634,14 +1634,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
 static struct page *__read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
 {
 	struct page *page;
 	int err;
 repeat:
 	page = find_get_page(mapping, index);
 	if (!page) {
-		page = page_cache_alloc_cold(mapping);
+		page = __page_cache_alloc(gfp | __GFP_COLD);
 		if (!page)
 			return ERR_PTR(-ENOMEM);
 		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
@@ -1661,31 +1662,18 @@ repeat:
 	return page;
 }
 
-/**
- * read_cache_page_async - read into page cache, fill it if needed
- * @mapping:	the page's address_space
- * @index:	the page index
- * @filler:	function to perform the read
- * @data:	destination for read data
- *
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_async(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
+
 {
 	struct page *page;
 	int err;
 
 retry:
-	page = __read_cache_page(mapping, index, filler, data);
+	page = __read_cache_page(mapping, index, filler, data, gfp);
 	if (IS_ERR(page))
 		return page;
 	if (PageUptodate(page))
@@ -1710,8 +1698,67 @@ out:
 	mark_page_accessed(page);
 	return page;
 }
+
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @filler:	function to perform the read
+ * @data:	destination for read data
+ *
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_async(struct address_space *mapping,
+				pgoff_t index,
+				int (*filler)(void *,struct page*),
+				void *data)
+{
+	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+}
 EXPORT_SYMBOL(read_cache_page_async);
 
+static struct page *wait_on_page_read(struct page *page)
+{
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page)) {
+			page_cache_release(page);
+			page = ERR_PTR(-EIO);
+		}
+	}
+	return page;
+}
+
+/**
+ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @gfp:	the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+ * any new page allocations done using the specified allocation flags. Note
+ * that the Radix tree operations will still use GFP_KERNEL, so you can't
+ * expect to do this atomically or anything like that - but you can pass in
+ * other page requirements.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_gfp(struct address_space *mapping,
+				pgoff_t index,
+				gfp_t gfp)
+{
+	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+
+	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
+}
+EXPORT_SYMBOL(read_cache_page_gfp);
+
 /**
  * read_cache_page - read into page cache, fill it if needed
  * @mapping:	the page's address_space
@@ -1729,18 +1776,7 @@ struct page *read_cache_page(struct address_space *mapping,
 				int (*filler)(void *,struct page*),
 				void *data)
 {
-	struct page *page;
-
-	page = read_cache_page_async(mapping, index, filler, data);
-	if (IS_ERR(page))
-		goto out;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page)) {
-		page_cache_release(page);
-		page = ERR_PTR(-EIO);
-	}
- out:
-	return page;
+	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
 }
 EXPORT_SYMBOL(read_cache_page);
 
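The new read_cache_page_gfp() above behaves like read_mapping_page(mapping, index, NULL) but lets the caller choose the allocation flags for any newly allocated page cache page. A minimal usage sketch, assuming a filesystem that wants to read one of its own pages without recursing into filesystem reclaim (the inode and index variables are illustrative, not part of this diff):

	struct page *page;

	page = read_cache_page_gfp(inode->i_mapping, index,
				   GFP_NOFS | __GFP_HIGHMEM);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* page is uptodate here; drop the reference when finished */
	page_cache_release(page);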
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 65f38c218207..e91b81b63670 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -402,7 +402,7 @@ static void clear_huge_page(struct page *page,
 {
 	int i;
 
-	if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
 		clear_gigantic_page(page, addr, sz);
 		return;
 	}
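The clear_huge_page() fix above corrects a units mismatch: sz is a size in bytes while MAX_ORDER_NR_PAGES counts pages. A worked example, assuming 4 KiB pages and the default MAX_ORDER of 11 (so MAX_ORDER_NR_PAGES is 1024):

	/* sz = 2 MiB huge page = 2097152 bytes, PAGE_SIZE = 4096            */
	/* old test: 2097152 > 1024 -> true, wrongly falls back to           */
	/*           clear_gigantic_page()                                   */
	/* new test: 2097152 / 4096 = 512; 512 > 1024 -> false, so the       */
	/*           normal per-page clearing loop is used                   */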
diff --git a/mm/maccess.c b/mm/maccess.c
index 9073695ff25f..4e348dbaecd7 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -14,7 +14,11 @@
  * Safely read from address @src to the buffer at @dst. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long probe_kernel_read(void *dst, void *src, size_t size)
+
+long __weak probe_kernel_read(void *dst, void *src, size_t size)
+    __attribute__((alias("__probe_kernel_read")));
+
+long __probe_kernel_read(void *dst, void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
@@ -39,7 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read)
  * Safely write to address @dst from the buffer at @src. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
+long __weak probe_kernel_write(void *dst, void *src, size_t size)
+    __attribute__((alias("__probe_kernel_write")));
+
+long __probe_kernel_write(void *dst, void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
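The weak aliases above let an architecture provide its own probe_kernel_read()/probe_kernel_write() while the generic __probe_kernel_*() versions remain as the default. A caller-side sketch of the read helper (the addr variable is illustrative):

	unsigned long val;

	if (probe_kernel_read(&val, (void *)addr, sizeof(val)))
		pr_info("%lx is not safely readable\n", addr);
	else
		pr_info("%lx contains %lx\n", addr, val);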
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 488b644e0e8e..954032b80bed 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2586,7 +2586,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 	if (free_all)
 		goto try_to_free;
 move_account:
-	while (mem->res.usage > 0) {
+	do {
 		ret = -EBUSY;
 		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
 			goto out;
@@ -2614,8 +2614,8 @@ move_account:
 		if (ret == -ENOMEM)
 			goto try_to_free;
 		cond_resched();
-	}
-	ret = 0;
+	/* "ret" should also be checked to ensure all lists are empty. */
+	} while (mem->res.usage > 0 || ret);
 out:
 	css_put(&mem->css);
 	return ret;
@@ -2648,10 +2648,7 @@ try_to_free:
 	}
 	lru_add_drain();
 	/* try move_account...there may be some *locked* pages. */
-	if (mem->res.usage)
-		goto move_account;
-	ret = 0;
-	goto out;
+	goto move_account;
 }
 
 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
diff --git a/mm/nommu.c b/mm/nommu.c
index 6f9248f89bde..48a2ecfaf059 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -432,6 +432,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	/*
 	 * Ok, looks good - let it rip.
 	 */
+	flush_icache_range(mm->brk, brk);
 	return mm->brk = brk;
 }
 
@@ -551,11 +552,11 @@ static void free_page_series(unsigned long from, unsigned long to)
 static void __put_nommu_region(struct vm_region *region)
 	__releases(nommu_region_sem)
 {
-	kenter("%p{%d}", region, atomic_read(&region->vm_usage));
+	kenter("%p{%d}", region, region->vm_usage);
 
 	BUG_ON(!nommu_region_tree.rb_node);
 
-	if (atomic_dec_and_test(&region->vm_usage)) {
+	if (--region->vm_usage == 0) {
 		if (region->vm_top > region->vm_start)
 			delete_nommu_region(region);
 		up_write(&nommu_region_sem);
@@ -1204,7 +1205,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	if (!vma)
 		goto error_getting_vma;
 
-	atomic_set(&region->vm_usage, 1);
+	region->vm_usage = 1;
 	region->vm_flags = vm_flags;
 	region->vm_pgoff = pgoff;
 
@@ -1271,7 +1272,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 			}
 
 			/* we've found a region we can share */
-			atomic_inc(&pregion->vm_usage);
+			pregion->vm_usage++;
 			vma->vm_region = pregion;
 			start = pregion->vm_start;
 			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
@@ -1288,7 +1289,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 				vma->vm_region = NULL;
 				vma->vm_start = 0;
 				vma->vm_end = 0;
-				atomic_dec(&pregion->vm_usage);
+				pregion->vm_usage--;
 				pregion = NULL;
 				goto error_just_free;
 			}
@@ -1353,10 +1354,14 @@ unsigned long do_mmap_pgoff(struct file *file,
 share:
 	add_vma_to_mm(current->mm, vma);
 
-	up_write(&nommu_region_sem);
+	/* we flush the region from the icache only when the first executable
+	 * mapping of it is made */
+	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
+		flush_icache_range(region->vm_start, region->vm_end);
+		region->vm_icache_flushed = true;
+	}
 
-	if (prot & PROT_EXEC)
-		flush_icache_range(result, result + len);
+	up_write(&nommu_region_sem);
 
 	kleave(" = %lx", result);
 	return result;
@@ -1436,10 +1441,9 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	kenter("");
 
-	/* we're only permitted to split anonymous regions that have a single
-	 * owner */
-	if (vma->vm_file ||
-	    atomic_read(&vma->vm_region->vm_usage) != 1)
+	/* we're only permitted to split anonymous regions (these should have
+	 * only a single usage on the region) */
+	if (vma->vm_file)
 		return -ENOMEM;
 
 	if (mm->map_count >= sysctl_max_map_count)
@@ -1513,7 +1517,7 @@ static int shrink_vma(struct mm_struct *mm,
 
 	/* cut the backing region down to size */
 	region = vma->vm_region;
-	BUG_ON(atomic_read(&region->vm_usage) != 1);
+	BUG_ON(region->vm_usage != 1);
 
 	down_write(&nommu_region_sem);
 	delete_nommu_region(region);
@@ -1757,27 +1761,6 @@ void unmap_mapping_range(struct address_space *mapping,
 EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
- * ask for an unmapped area at which to create a mapping on a file
- */
-unsigned long get_unmapped_area(struct file *file, unsigned long addr,
-				unsigned long len, unsigned long pgoff,
-				unsigned long flags)
-{
-	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
-				  unsigned long, unsigned long);
-
-	get_area = current->mm->get_unmapped_area;
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		get_area = file->f_op->get_unmapped_area;
-
-	if (!get_area)
-		return -ENOSYS;
-
-	return get_area(file, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL(get_unmapped_area);
-
-/*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
  * succeed and -ENOMEM implies there is not.
@@ -1916,9 +1899,11 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 
 		/* only read or write mappings where it is permitted */
 		if (write && vma->vm_flags & VM_MAYWRITE)
-			len -= copy_to_user((void *) addr, buf, len);
+			copy_to_user_page(vma, NULL, addr,
+					 (void *) addr, buf, len);
 		else if (!write && vma->vm_flags & VM_MAYREAD)
-			len -= copy_from_user(buf, (void *) addr, len);
+			copy_from_user_page(vma, NULL, addr,
+					    buf, (void *) addr, len);
 		else
 			len = 0;
 	} else {
@@ -1929,3 +1914,65 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 	mmput(mm);
 	return len;
 }
+
+/**
+ * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+ * @inode: The inode to check
+ * @size: The current filesize of the inode
+ * @newsize: The proposed filesize of the inode
+ *
+ * Check the shared mappings on an inode on behalf of a shrinking truncate to
+ * make sure that that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
+ * automatically grant mappings that are too large.
+ */
+int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+				size_t newsize)
+{
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	struct vm_region *region;
+	pgoff_t low, high;
+	size_t r_size, r_top;
+
+	low = newsize >> PAGE_SHIFT;
+	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	down_write(&nommu_region_sem);
+
+	/* search for VMAs that fall within the dead zone */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      low, high) {
+		/* found one - only interested if it's shared out of the page
+		 * cache */
+		if (vma->vm_flags & VM_SHARED) {
+			up_write(&nommu_region_sem);
+			return -ETXTBSY; /* not quite true, but near enough */
+		}
+	}
+
+	/* reduce any regions that overlap the dead zone - if in existence,
+	 * these will be pointed to by VMAs that don't overlap the dead zone
+	 *
+	 * we don't check for any regions that start beyond the EOF as there
+	 * shouldn't be any
+	 */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      0, ULONG_MAX) {
+		if (!(vma->vm_flags & VM_SHARED))
+			continue;
+
+		region = vma->vm_region;
+		r_size = region->vm_top - region->vm_start;
+		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
+
+		if (r_top > newsize) {
+			region->vm_top -= r_top - newsize;
+			if (region->vm_end > region->vm_top)
+				region->vm_end = region->vm_top;
+		}
+	}
+
+	up_write(&nommu_region_sem);
+	return 0;
+}
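nommu_shrink_inode_mappings() added above is meant to run in a nommu filesystem's shrinking-truncate path, before the page cache is actually cut back. A hedged sketch of that ordering (error handling trimmed; the inode and newsize variables are illustrative):

	/* refuse the shrink if a shared mapping still covers the dead zone */
	ret = nommu_shrink_inode_mappings(inode, (size_t)inode->i_size,
					  (size_t)newsize);
	if (ret < 0)
		return ret;

	/* now it is safe to drop the tail of the page cache */
	truncate_pagecache(inode, inode->i_size, newsize);
	i_size_write(inode, newsize);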
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6849e870de54..9a7aaae07ab4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -556,8 +556,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			page = list_entry(list->prev, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
-			__free_one_page(page, zone, 0, migratetype);
-			trace_mm_page_pcpu_drain(page, 0, migratetype);
+			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+			__free_one_page(page, zone, 0, page_private(page));
+			trace_mm_page_pcpu_drain(page, 0, page_private(page));
 		} while (--count && --batch_free && !list_empty(list));
 	}
 	spin_unlock(&zone->lock);
@@ -1219,10 +1220,10 @@ again:
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
-		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
@@ -3937,7 +3938,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 		}
 
 		/* Merge backward if suitable */
-		if (start_pfn < early_node_map[i].end_pfn &&
+		if (start_pfn < early_node_map[i].start_pfn &&
 		    end_pfn >= early_node_map[i].start_pfn) {
 			early_node_map[i].start_pfn = start_pfn;
 			return;
diff --git a/mm/percpu.c b/mm/percpu.c
index 626e43c99498..b336638d20e7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1269,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
  */
 void free_percpu(void *ptr)
 {
-	void *addr = __pcpu_ptr_to_addr(ptr);
+	void *addr;
 	struct pcpu_chunk *chunk;
 	unsigned long flags;
 	int off;
@@ -1277,6 +1277,8 @@ void free_percpu(void *ptr)
 	if (!ptr)
 		return;
 
+	addr = __pcpu_ptr_to_addr(ptr);
+
 	spin_lock_irqsave(&pcpu_lock, flags);
 
 	chunk = pcpu_chunk_addr_search(addr);
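The reordering above preserves the rule that free_percpu(NULL) is a no-op: the pointer-to-address conversion now happens only after the NULL check instead of in the declaration. A caller-side sketch (the counters pointer is illustrative, written without the later __percpu annotation):

	int *counters = alloc_percpu(int);

	if (!counters)
		return -ENOMEM;
	/* ... use per_cpu_ptr(counters, cpu) ... */
	free_percpu(counters);	/* would also be safe if counters were NULL */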
diff --git a/mm/truncate.c b/mm/truncate.c
index 342deee22684..e87e37244829 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -522,22 +522,20 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
  */
 void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
 {
-	if (new < old) {
-		struct address_space *mapping = inode->i_mapping;
-
-		/*
-		 * unmap_mapping_range is called twice, first simply for
-		 * efficiency so that truncate_inode_pages does fewer
-		 * single-page unmaps. However after this first call, and
-		 * before truncate_inode_pages finishes, it is possible for
-		 * private pages to be COWed, which remain after
-		 * truncate_inode_pages finishes, hence the second
-		 * unmap_mapping_range call must be made for correctness.
-		 */
-		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-		truncate_inode_pages(mapping, new);
-		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-	}
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * unmap_mapping_range is called twice, first simply for
+	 * efficiency so that truncate_inode_pages does fewer
+	 * single-page unmaps. However after this first call, and
+	 * before truncate_inode_pages finishes, it is possible for
+	 * private pages to be COWed, which remain after
+	 * truncate_inode_pages finishes, hence the second
+	 * unmap_mapping_range call must be made for correctness.
+	 */
+	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+	truncate_inode_pages(mapping, new);
+	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
 }
 EXPORT_SYMBOL(truncate_pagecache);
 
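truncate_pagecache() above is what a filesystem's shrinking truncate calls once the new size is decided; the unmap/truncate/unmap sequence explained in the comment happens inside it. A minimal sketch (oldsize and newsize are illustrative):

	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	truncate_pagecache(inode, oldsize, newsize);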
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f927..834db7be240f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -220,7 +220,7 @@ char *strndup_user(const char __user *s, long n)
 }
 EXPORT_SYMBOL(strndup_user);
 
-#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	mm->mmap_base = TASK_UNMAPPED_BASE;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 37e69295f250..d55d905463eb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -555,10 +555,8 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	}
 	rcu_read_unlock();
 
-	if (nr) {
-		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
+	if (nr)
 		atomic_sub(nr, &vmap_lazy_nr);
-	}
 
 	if (nr || force_flush)
 		flush_tlb_kernel_range(*start, *end);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 885207a6b6b7..c26986c85ce0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1922,6 +1922,9 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
 		if (!populated_zone(zone))
 			continue;
 
+		if (zone_is_all_unreclaimable(zone))
+			continue;
+
 		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
 								0, 0))
 			return 1;