path: root/mm/page-writeback.c
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	25
1 file changed, 21 insertions, 4 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cb01f6ec5d0..31f698862420 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -927,7 +927,7 @@ retry:
 				break;
 			}
 
-			done_index = page->index + 1;
+			done_index = page->index;
 
 			lock_page(page);
 
@@ -977,6 +977,7 @@ continue_unlock:
 					 * not be suitable for data integrity
 					 * writeout).
 					 */
+					done_index = page->index + 1;
 					done = 1;
 					break;
 				}
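
The two hunks above make done_index track the page currently being processed rather than one past it; it is only advanced past the page on the error break, so a failed ->writepage no longer causes background writeout to skip the rest of the file. For context, done_index is what a later range_cyclic pass resumes from, roughly as in this simplified sketch of the tail of write_cache_pages() in kernels of this era (for illustration only, not part of this patch):

	/* remember where the next background/cyclic pass should restart */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;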
@@ -1039,11 +1040,17 @@ static int __writepage(struct page *page, struct writeback_control *wbc,
 int generic_writepages(struct address_space *mapping,
 		       struct writeback_control *wbc)
 {
+	struct blk_plug plug;
+	int ret;
+
 	/* deal with chardevs and other special file */
 	if (!mapping->a_ops->writepage)
 		return 0;
 
-	return write_cache_pages(mapping, wbc, __writepage, mapping);
+	blk_start_plug(&plug);
+	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
+	blk_finish_plug(&plug);
+	return ret;
 }
 
 EXPORT_SYMBOL(generic_writepages);
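
The plugging added here follows the usual blk_plug pattern (declared in <linux/blkdev.h>): bios submitted between blk_start_plug() and blk_finish_plug() are held on a per-task list and dispatched to the block layer in a batch when the plug is finished, which helps merge the many small writes issued by ->writepage. A minimal usage sketch; submit_some_io() is a placeholder name, not a real kernel function:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* start batching this task's bio submissions */
	submit_some_io();		/* placeholder for code that issues many small writes */
	blk_finish_plug(&plug);		/* flush the batched requests to the request queues */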
@@ -1211,6 +1218,17 @@ int set_page_dirty(struct page *page)
 
 	if (likely(mapping)) {
 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+		/*
+		 * readahead/lru_deactivate_page could remain
+		 * PG_readahead/PG_reclaim due to race with end_page_writeback
+		 * About readahead, if the page is written, the flags would be
+		 * reset. So no problem.
+		 * About lru_deactivate_page, if the page is redirty, the flag
+		 * will be reset. So no problem. but if the page is used by readahead
+		 * it will confuse readahead and make it restart the size rampup
+		 * process. But it's a trivial problem.
+		 */
+		ClearPageReclaim(page);
 #ifdef CONFIG_BLOCK
 		if (!spd)
 			spd = __set_page_dirty_buffers;
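
This hunk pairs with the clear_page_dirty_for_io() hunk below: PG_reclaim is now cleared when the page is redirtied rather than when it is picked for writeout. The race described in the comment is with end_page_writeback(), which rotates a PG_reclaim page to the tail of the inactive LRU; in kernels of this era it looks roughly like the simplified excerpt below (for context only, not part of this patch). PG_readahead is an alias of the same flag bit, which is why a leftover bit can also perturb the readahead window ramp-up.

	/* mm/filemap.c, simplified */
	void end_page_writeback(struct page *page)
	{
		/* a PG_reclaim page is moved to the inactive tail so it is
		 * reclaimed soon after its writeback completes */
		if (TestClearPageReclaim(page))
			rotate_reclaimable_page(page);

		if (!test_clear_page_writeback(page))
			BUG();

		smp_mb__after_clear_bit();
		wake_up_page(page, PG_writeback);
	}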
@@ -1239,7 +1257,7 @@ int set_page_dirty_lock(struct page *page)
 {
 	int ret;
 
-	lock_page_nosync(page);
+	lock_page(page);
 	ret = set_page_dirty(page);
 	unlock_page(page);
 	return ret;
@@ -1266,7 +1284,6 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	ClearPageReclaim(page);
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.