Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c  34
-rw-r--r--  mm/shmem.c            5
-rw-r--r--  mm/vmscan.c          29
3 files changed, 41 insertions(+), 27 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b289310e2c89..bbd396ac9546 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, NULL, 0, 0);
+		bdi_start_writeback(bdi, NULL, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -707,7 +707,7 @@ void laptop_mode_timer_fn(unsigned long data)
 	 */
 
 	if (bdi_has_dirty_io(&q->backing_dev_info))
-		bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages, 0);
+		bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
 }
 
 /*
@@ -835,7 +835,6 @@ int write_cache_pages(struct address_space *mapping,
 	pgoff_t done_index;
 	int cycled;
 	int range_whole = 0;
-	long nr_to_write = wbc->nr_to_write;
 
 	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
@@ -852,7 +851,22 @@ int write_cache_pages(struct address_space *mapping,
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
+
+		/*
+		 * If this is a data integrity sync, cap the writeback to the
+		 * current end of file. Any extension to the file that occurs
+		 * after this is a new write and we don't need to write those
+		 * pages out to fulfil our data integrity requirements. If we
+		 * try to write them out, we can get stuck in this scan until
+		 * the concurrent writer stops adding dirty pages and extending
+		 * EOF.
+		 */
+		if (wbc->sync_mode == WB_SYNC_ALL &&
+		    wbc->range_end == LLONG_MAX) {
+			end = i_size_read(mapping->host) >> PAGE_CACHE_SHIFT;
+		}
 	}
+
 retry:
 	done_index = index;
 	while (!done && (index <= end)) {
@@ -935,11 +949,10 @@ continue_unlock:
 					done = 1;
 					break;
 				}
 			}
 
-			if (nr_to_write > 0) {
-				nr_to_write--;
-				if (nr_to_write == 0 &&
+			if (wbc->nr_to_write > 0) {
+				if (--wbc->nr_to_write == 0 &&
 				    wbc->sync_mode == WB_SYNC_NONE) {
 					/*
 					 * We stop writing back only if we are
@@ -970,11 +983,8 @@ continue_unlock:
 		end = writeback_index - 1;
 		goto retry;
 	}
-	if (!wbc->no_nrwrite_index_update) {
-		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
-			mapping->writeback_index = done_index;
-		wbc->nr_to_write = nr_to_write;
-	}
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+		mapping->writeback_index = done_index;
 
 	return ret;
 }
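
[Note] The write_cache_pages() hunk above caps a data integrity sync at the file size observed when the scan starts: for WB_SYNC_ALL with range_end == LLONG_MAX, the terminal page index is derived from i_size rather than left unbounded, so a concurrent appender dirtying pages past the old EOF cannot trap the sync in an endless scan. A minimal userspace sketch of the index arithmetic, assuming 4 KiB pages in place of the kernel's PAGE_CACHE_SHIFT and i_size_read():

#include <stdio.h>

#define PAGE_SHIFT 12 /* assume 4 KiB pages for this sketch */

/* Last page index an integrity sync needs to visit for a file of
 * the given size; pages created past this index after the sync
 * begins are new writes and can be left for a later pass. */
static unsigned long integrity_sync_end(long long i_size)
{
	return (unsigned long)(i_size >> PAGE_SHIFT);
}

int main(void)
{
	long long i_size = (5LL << PAGE_SHIFT) + 123; /* 5 full pages + a partial 6th */

	printf("scan pages 0..%lu\n", integrity_sync_end(i_size)); /* 0..5 */
	return 0;
}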
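The same diff also moves the write budget from a local nr_to_write copy into struct writeback_control itself, so callers can observe how much of the budget a call consumed. A hedged sketch of the decrement-and-test pattern, using a hypothetical miniature wbc rather than the kernel's struct:

#include <stdio.h>

/* Hypothetical miniature of struct writeback_control: the write
 * budget lives in the control struct, not in a local copy, so the
 * caller sees how much was consumed. */
struct wbc {
	long nr_to_write;
	int sync_none;	/* stands in for sync_mode == WB_SYNC_NONE */
};

/* Returns nonzero when the loop should stop, mirroring the
 * "if (--wbc->nr_to_write == 0 && ...)" test in the diff. */
static int wrote_one_page(struct wbc *wbc)
{
	/* ...writepage() happens here in the real loop... */
	if (wbc->nr_to_write > 0) {
		if (--wbc->nr_to_write == 0 && wbc->sync_none)
			return 1; /* budget drained: stop non-integrity writeback */
	}
	return 0;
}

int main(void)
{
	struct wbc wbc = { .nr_to_write = 3, .sync_none = 1 };
	int pages = 0;

	for (;;) {
		int stop = wrote_one_page(&wbc);

		pages++;
		if (stop)
			break;
	}
	printf("wrote %d pages, %ld left in budget\n", pages, wbc.nr_to_write);
	return 0;
}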
diff --git a/mm/shmem.c b/mm/shmem.c
index 7e5030ae18ff..f65f84062db5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -764,10 +764,11 @@ done2:
 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
+	loff_t newsize = attr->ia_size;
 	int error;
 
-	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-		loff_t newsize = attr->ia_size;
+	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
+	    && newsize != inode->i_size) {
 		struct page *page = NULL;
 
 		if (newsize < inode->i_size) {
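
[Note] The shmem.c change hoists newsize out of the branch and adds a cheap equality test so a truncate to the current size skips the page-cache work entirely. A small sketch of the guard, with hypothetical stand-ins for the kernel's struct iattr and struct inode:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's struct iattr / struct inode. */
struct attr  { bool size_valid; long long ia_size; };
struct inode { long long i_size; };

/* True only when a size change has real work to do; a no-op truncate
 * (newsize == i_size) is filtered out up front, mirroring the
 * "&& newsize != inode->i_size" test in the diff. */
static bool size_change_needed(const struct inode *inode, const struct attr *attr)
{
	return attr->size_valid && attr->ia_size != inode->i_size;
}

int main(void)
{
	struct inode inode = { .i_size = 4096 };
	struct attr same   = { .size_valid = true, .ia_size = 4096 };
	struct attr grow   = { .size_valid = true, .ia_size = 8192 };

	printf("same size: %d, grow: %d\n",
	       size_change_needed(&inode, &same),  /* 0: skip truncate work */
	       size_change_needed(&inode, &grow)); /* 1: proceed */
	return 0;
}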
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 915dceb487c1..9c7e57cc63a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1724,13 +1724,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static int shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	struct zoneref *z;
 	struct zone *zone;
-	int progress = 0;
+	bool all_unreclaimable = true;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 					sc->nodemask) {
@@ -1757,9 +1757,9 @@ static int shrink_zones(int priority, struct zonelist *zonelist,
 		}
 
 		shrink_zone(priority, zone, sc);
-		progress = 1;
+		all_unreclaimable = false;
 	}
-	return progress;
+	return all_unreclaimable;
 }
 
 /*
@@ -1782,7 +1782,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	int priority;
-	unsigned long ret = 0;
+	bool all_unreclaimable;
 	unsigned long total_scanned = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
@@ -1813,7 +1813,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		ret = shrink_zones(priority, zonelist, sc);
+		all_unreclaimable = shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
@@ -1826,10 +1826,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			}
 		}
 		total_scanned += sc->nr_scanned;
-		if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
-			ret = sc->nr_reclaimed;
+		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
 			goto out;
-		}
 
 		/*
 		 * Try to write back as many pages as we just scanned. This
@@ -1849,9 +1847,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		    priority < DEF_PRIORITY - 2)
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
-	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (ret && scanning_global_lru(sc))
-		ret = sc->nr_reclaimed;
+
 out:
 	/*
 	 * Now that we've scanned all the zones at this priority level, note
@@ -1877,7 +1873,14 @@ out:
 	delayacct_freepages_end();
 	put_mems_allowed();
 
-	return ret;
+	if (sc->nr_reclaimed)
+		return sc->nr_reclaimed;
+
+	/* top priority shrink_zones still had more to do? don't OOM, then */
+	if (scanning_global_lru(sc) && !all_unreclaimable)
+		return 1;
+
+	return 0;
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
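
[Note] The net effect of the vmscan.c hunks is that do_try_to_free_pages() decides its return value in three steps: any reclaimed pages win; otherwise a global-LRU scan that still found reclaimable zones returns 1, holding off the OOM killer; only the all-unreclaimable case returns 0. A minimal sketch of that decision, assuming the same three inputs the function ends with:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the final return logic of do_try_to_free_pages() after the
 * patch: real progress wins; a global-LRU scan that still saw
 * reclaimable zones returns 1 so the caller does not declare OOM;
 * only the all-unreclaimable case returns 0. */
static unsigned long reclaim_result(unsigned long nr_reclaimed,
				    bool global_lru, bool all_unreclaimable)
{
	if (nr_reclaimed)
		return nr_reclaimed;

	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (global_lru && !all_unreclaimable)
		return 1;

	return 0;
}

int main(void)
{
	printf("%lu\n", reclaim_result(32, true, false)); /* 32: pages freed */
	printf("%lu\n", reclaim_result(0, true, false));  /* 1: hold off OOM */
	printf("%lu\n", reclaim_result(0, true, true));   /* 0: OOM justified */
	return 0;
}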