Diffstat (limited to 'mm/page-writeback.c')

 mm/page-writeback.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b493db7841dc..74dc57c74349 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -209,7 +209,7 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
 	struct file *filp, void __user *buffer, size_t *lenp,
 	loff_t *ppos)
 {
-	int old_bytes = vm_dirty_bytes;
+	unsigned long old_bytes = vm_dirty_bytes;
 	int ret;
 
 	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
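
A note on this first hunk: vm_dirty_bytes is an unsigned long and is parsed by proc_doulongvec_minmax(), so snapshotting it into an int can truncate on 64-bit kernels and make the later "did the value change?" comparison (below the visible context) unreliable. A minimal userspace sketch of that failure mode; this is an illustration only, not code from the patch:

#include <stdio.h>

int main(void)
{
	/* vm_dirty_bytes is an unsigned long; on LP64 that is 64 bits. */
	unsigned long vm_dirty_bytes = 8UL << 30;	/* 8 GiB */

	int old_int = vm_dirty_bytes;		/* old code: snapshot truncates */
	unsigned long old_ul = vm_dirty_bytes;	/* fixed code: exact copy */

	/* On common ABIs 8 GiB truncates to 0 in 32 bits, so comparing the
	 * int snapshot against the (unchanged) real value would wrongly
	 * report a change. */
	printf("int snapshot: %d, unsigned long snapshot: %lu\n",
	       old_int, old_ul);
	return 0;
}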
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-static inline void task_dirty_inc(struct task_struct *tsk)
+void task_dirty_inc(struct task_struct *tsk)
 {
 	prop_inc_single(&vm_dirties, &tsk->dirties);
 }
@@ -1051,13 +1051,25 @@ continue_unlock:
 				}
 			}
 
-			if (wbc->sync_mode == WB_SYNC_NONE) {
-				wbc->nr_to_write--;
-				if (wbc->nr_to_write <= 0) {
+			if (nr_to_write > 0) {
+				nr_to_write--;
+				if (nr_to_write == 0 &&
+				    wbc->sync_mode == WB_SYNC_NONE) {
+					/*
+					 * We stop writing back only if we are
+					 * not doing integrity sync. In case of
+					 * integrity sync we have to keep going
+					 * because someone may be concurrently
+					 * dirtying pages, and we might have
+					 * synced a lot of newly appeared dirty
+					 * pages, but have not synced all of the
+					 * old dirty pages.
+					 */
 					done = 1;
 					break;
 				}
 			}
+
 			if (wbc->nonblocking && bdi_write_congested(bdi)) {
 				wbc->encountered_congestion = 1;
 				done = 1;
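
This hunk is the core of the writeback change: the write budget (now a local nr_to_write, presumably initialized from wbc->nr_to_write earlier in the function, outside this hunk) is consumed for every page written, but exhausting it only terminates the scan for WB_SYNC_NONE; integrity sync (WB_SYNC_ALL) keeps going, as the new comment explains. A small compilable model of just that stop condition, using userspace stand-ins rather than the kernel's types:

#include <stdbool.h>
#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };	/* stand-ins */

/* Mirror of the new per-page bookkeeping: consume one unit of budget,
 * and stop only when the budget hits zero AND this is not an
 * integrity sync. */
static bool consume_and_check_stop(long *nr_to_write, enum sync_mode mode)
{
	if (*nr_to_write > 0) {
		(*nr_to_write)--;
		if (*nr_to_write == 0 && mode == WB_SYNC_NONE)
			return true;	/* background writeback: stop here */
	}
	return false;	/* integrity sync runs past the budget */
}

int main(void)
{
	long budget = 2;

	/* Background writeback stops once the budget is gone... */
	printf("%d\n", consume_and_check_stop(&budget, WB_SYNC_NONE));	/* 0 */
	printf("%d\n", consume_and_check_stop(&budget, WB_SYNC_NONE));	/* 1 */

	/* ...while WB_SYNC_ALL with the same budget never stops early. */
	budget = 2;
	printf("%d\n", consume_and_check_stop(&budget, WB_SYNC_ALL));	/* 0 */
	printf("%d\n", consume_and_check_stop(&budget, WB_SYNC_ALL));	/* 0 */
	return 0;
}

The `if (!cycled && !done)` change in the next hunk reads as the companion fix: once the scan has decided to stop, a range_cyclic walk should not wrap back around to the start of the file.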
@@ -1067,7 +1079,7 @@ continue_unlock:
 		pagevec_release(&pvec);
 		cond_resched();
 	}
-	if (!cycled) {
+	if (!cycled && !done) {
 		/*
 		 * range_cyclic:
 		 * We hit the last page and there is more work to be done: wrap
@@ -1218,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 			__inc_zone_page_state(page, NR_FILE_DIRTY);
 			__inc_bdi_stat(mapping->backing_dev_info,
 					BDI_RECLAIMABLE);
+			task_dirty_inc(current);
 			task_io_account_write(PAGE_CACHE_SIZE);
 		}
 		radix_tree_tag_set(&mapping->page_tree,
@@ -1250,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-static int __set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -1268,14 +1281,6 @@ static int __set_page_dirty(struct page *page)
 	}
 	return 0;
 }
-
-int set_page_dirty(struct page *page)
-{
-	int ret = __set_page_dirty(page);
-	if (ret)
-		task_dirty_inc(current);
-	return ret;
-}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*
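
Taken together, the remaining hunks relocate per-task dirty accounting. task_dirty_inc() loses static inline, presumably so dirtying paths outside this file can call it too (the diffstat is limited to mm/page-writeback.c, so any such callers would not appear here); __set_page_dirty_nobuffers() now charges the task at the point the page is actually dirtied; and the set_page_dirty() wrapper that used to do the charging is deleted, with __set_page_dirty() renamed to take its place. A compilable toy model of why the placement matters; every name below is a userspace stand-in, not the kernel API:

#include <stdio.h>

static int pages_charged;

static void task_dirty_inc(void)
{
	pages_charged++;
}

/* Stand-in for __set_page_dirty_nobuffers(): after the patch it
 * charges the current task itself. */
static int set_page_dirty_nobuffers(void)
{
	task_dirty_inc();
	return 1;
}

/* Stand-in for set_page_dirty(): now a plain dispatcher.  Before the
 * patch, the removed wrapper was the only place task_dirty_inc() ran,
 * so a caller that went straight to the helper above was never
 * accounted. */
static int set_page_dirty(void)
{
	return set_page_dirty_nobuffers();
}

int main(void)
{
	set_page_dirty();		/* charged, as before */
	set_page_dirty_nobuffers();	/* direct dirtying: now also charged */
	printf("pages charged: %d\n", pages_charged);	/* prints 2 */
	return 0;
}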
