author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /mm/page-writeback.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	67
1 file changed, 47 insertions(+), 20 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e3bccac1f025..31f698862420 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -404,25 +404,22 @@ unsigned long determine_dirtyable_memory(void)
  * - vm.dirty_background_ratio or vm.dirty_background_bytes
  * - vm.dirty_ratio or vm.dirty_bytes
  * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * runtime tasks.
+ * real-time tasks.
  */
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
 	unsigned long background;
 	unsigned long dirty;
-	unsigned long available_memory = determine_dirtyable_memory();
+	unsigned long uninitialized_var(available_memory);
 	struct task_struct *tsk;
 
+	if (!vm_dirty_bytes || !dirty_background_bytes)
+		available_memory = determine_dirtyable_memory();
+
 	if (vm_dirty_bytes)
 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
-	else {
-		int dirty_ratio;
-
-		dirty_ratio = vm_dirty_ratio;
-		if (dirty_ratio < 5)
-			dirty_ratio = 5;
-		dirty = (dirty_ratio * available_memory) / 100;
-	}
+	else
+		dirty = (vm_dirty_ratio * available_memory) / 100;
 
 	if (dirty_background_bytes)
 		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
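Two things change in global_dirty_limits(): determine_dirtyable_memory() is now called only when at least one limit is ratio-based (uninitialized_var() suppresses the resulting false may-be-used-uninitialized warning for the bytes-only case), and the old silent 5% floor on vm_dirty_ratio is dropped. A standalone illustration of the new arithmetic (userspace sketch with assumed values, not kernel code):

/* Userspace illustration of the dirty-limit arithmetic after this
 * hunk; the values below are assumptions for the example. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long vm_dirty_bytes = 0;         /* 0 => fall back to the ratio */
	unsigned long vm_dirty_ratio = 3;         /* percent; no longer floored to 5 */
	unsigned long available_memory = 1000000; /* dirtyable pages (assumed) */
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	/* The old code would have clamped the ratio to 5% (50000 pages);
	 * the new code honours 3% as written: 30000 pages. */
	printf("dirty limit: %lu pages\n", dirty);
	return 0;
}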
@@ -510,7 +507,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_reclaimable + nr_writeback <
+		if (nr_reclaimable + nr_writeback <=
 				(background_thresh + dirty_thresh) / 2)
 			break;
 
@@ -542,8 +539,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * the last resort safeguard.
 		 */
 		dirty_exceeded =
-			(bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh)
-			|| (nr_reclaimable + nr_writeback >= dirty_thresh);
+			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
+			|| (nr_reclaimable + nr_writeback > dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;
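This hunk and the previous one tighten the boundary semantics together: a throttled writer now stops once dirty plus writeback pages are at or below the midpoint of the two thresholds (<= instead of <), and dirty_exceeded is set only when a threshold is strictly exceeded (> instead of >=), so sitting exactly on a limit no longer counts as over it. A minimal boundary check (assumed numbers, not kernel code):

/* Boundary illustration with assumed numbers. */
#include <stdio.h>

int main(void)
{
	unsigned long dirty_thresh = 100, nr = 100;	/* exactly at the limit */

	printf("old: %d\n", nr >= dirty_thresh);	/* 1 -> keep throttling */
	printf("new: %d\n", nr >  dirty_thresh);	/* 0 -> not exceeded    */
	return 0;
}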
@@ -569,7 +566,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 			break;		/* We've done our duty */
 		}
 		trace_wbc_balance_dirty_wait(&wbc, bdi);
-		__set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_UNINTERRUPTIBLE);
 		io_schedule_timeout(pause);
 
 		/*
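The throttling pause now sleeps uninterruptibly. With TASK_INTERRUPTIBLE, a pending signal makes io_schedule_timeout() return immediately, so a signal-heavy task could spin through the loop without ever being paused; with TASK_UNINTERRUPTIBLE the pause is always served, and io_schedule_timeout() charges it as I/O wait. Annotated restatement (comments are editorial, not from the patch):

	__set_current_state(TASK_UNINTERRUPTIBLE);	/* a pending signal can no longer cut the pause short */
	io_schedule_timeout(pause);			/* the sleep is accounted as iowait */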
@@ -930,7 +927,7 @@ retry:
 				break;
 			}
 
-			done_index = page->index + 1;
+			done_index = page->index;
 
 			lock_page(page);
 
@@ -980,6 +977,7 @@ continue_unlock:
 					 * not be suitable for data integrity
 					 * writeout).
 					 */
+					done_index = page->index + 1;
 					done = 1;
 					break;
 				}
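These two hunks cooperate: done_index, which feeds mapping->writeback_index for range_cyclic scans, now records the page currently being processed and is only advanced past it on the failure path shown above. A schematic of the effect (illustrative page numbers, not the real control flow, which lives in write_cache_pages()):

/* Suppose the scan is interrupted while handling page 42.
 *
 *   before: done_index = 42 + 1;  the next cyclic sweep starts at 43
 *           and silently skips page 42 even if it was merely busy
 *   after:  done_index = 42;      the next sweep retries page 42;
 *           only a genuine writeout failure records 43 and moves on
 */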
@@ -1042,11 +1040,17 @@ static int __writepage(struct page *page, struct writeback_control *wbc,
 int generic_writepages(struct address_space *mapping,
 		       struct writeback_control *wbc)
 {
+	struct blk_plug plug;
+	int ret;
+
 	/* deal with chardevs and other special file */
 	if (!mapping->a_ops->writepage)
 		return 0;
 
-	return write_cache_pages(mapping, wbc, __writepage, mapping);
+	blk_start_plug(&plug);
+	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
+	blk_finish_plug(&plug);
+	return ret;
 }
 
 EXPORT_SYMBOL(generic_writepages);
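generic_writepages() now holds an on-stack block plug across the whole write_cache_pages() sweep: bios submitted by the per-page ->writepage calls are collected on a per-task plug list and dispatched as one batch by blk_finish_plug(), giving the block layer a chance to merge adjacent requests rather than dispatching once per page. The general shape of the pattern (kernel-internal API; submit_one_page() is a hypothetical stand-in for the per-page submission):

/* Sketch of block plugging; kernel context assumed,
 * submit_one_page() is hypothetical. */
struct blk_plug plug;
int i;

blk_start_plug(&plug);			/* start collecting this task's bios */
for (i = 0; i < nr_pages; i++)
	submit_one_page(i);		/* each call queues I/O behind the plug */
blk_finish_plug(&plug);			/* unplug: dispatch the merged batch */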
@@ -1109,7 +1113,7 @@ EXPORT_SYMBOL(write_one_page);
 int __set_page_dirty_no_writeback(struct page *page)
 {
 	if (!PageDirty(page))
-		SetPageDirty(page);
+		return !TestSetPageDirty(page);
 	return 0;
 }
 
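__set_page_dirty_no_writeback() now reports whether it actually dirtied the page, which is what set_page_dirty() callers expect, and does so race-free: TestSetPageDirty() atomically sets PG_dirty and returns the old bit, so exactly one of several concurrent callers observes the clean-to-dirty transition. A userspace analogy of the idiom (illustration only, using the GCC/Clang atomic builtin):

#include <stdbool.h>
#include <stdio.h>

static bool dirty;	/* stands in for PG_dirty */

/* Returns 1 only for the caller that flips the flag from clean to dirty. */
static int set_dirty(void)
{
	if (!dirty)
		return !__atomic_test_and_set(&dirty, __ATOMIC_SEQ_CST);
	return 0;
}

int main(void)
{
	printf("%d\n", set_dirty());	/* 1: we dirtied it */
	printf("%d\n", set_dirty());	/* 0: already dirty */
	return 0;
}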
@@ -1121,6 +1125,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
 	if (mapping_cap_account_dirty(mapping)) {
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
+		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 		task_dirty_inc(current);
 		task_io_account_write(PAGE_CACHE_SIZE);
@@ -1129,6 +1134,18 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 EXPORT_SYMBOL(account_page_dirtied);
 
 /*
+ * Helper function for set_page_writeback family.
+ * NOTE: Unlike account_page_dirtied this does not rely on being atomic
+ * wrt interrupts.
+ */
+void account_page_writeback(struct page *page)
+{
+	inc_zone_page_state(page, NR_WRITEBACK);
+	inc_zone_page_state(page, NR_WRITTEN);
+}
+EXPORT_SYMBOL(account_page_writeback);
+
+/*
  * For address_spaces which do not use buffers. Just tag the page as dirty in
  * its radix tree.
  *
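The new exported helper keeps the writeback counter updates (NR_WRITEBACK plus the NR_WRITTEN statistic added here) in one place so every set_page_writeback variant accounts identically; the final hunk below converts test_set_page_writeback() to use it. A hedged sketch of the intended call site (mirrors that hunk; kernel context assumed):

if (!TestSetPageWriteback(page))	/* this caller flipped the bit... */
	account_page_writeback(page);	/* ...so it does the accounting, once */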
@@ -1201,6 +1218,17 @@ int set_page_dirty(struct page *page)
 
 	if (likely(mapping)) {
 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+		/*
+		 * readahead/lru_deactivate_page could remain
+		 * PG_readahead/PG_reclaim due to race with end_page_writeback
+		 * About readahead, if the page is written, the flags would be
+		 * reset. So no problem.
+		 * About lru_deactivate_page, if the page is redirty, the flag
+		 * will be reset. So no problem. but if the page is used by readahead
+		 * it will confuse readahead and make it restart the size rampup
+		 * process. But it's a trivial problem.
+		 */
+		ClearPageReclaim(page);
 #ifdef CONFIG_BLOCK
 		if (!spd)
 			spd = __set_page_dirty_buffers;
@@ -1229,7 +1257,7 @@ int set_page_dirty_lock(struct page *page)
 {
 	int ret;
 
-	lock_page_nosync(page);
+	lock_page(page);
 	ret = set_page_dirty(page);
 	unlock_page(page);
 	return ret;
@@ -1256,7 +1284,6 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	ClearPageReclaim(page);
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
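This removal is the other half of the set_page_dirty() hunk above: PG_reclaim is now cleared when the page is redirtied rather than when it is later grabbed for writeout. A schematic of the race the relocated clear addresses (illustration only; the authoritative description is the comment in the earlier hunk):

/*   lru_deactivate_page()              writer
 *   ---------------------             -------------------------
 *   SetPageReclaim(page);
 *                                     set_page_dirty(page);
 *                                       ClearPageReclaim(page);   <-- new home
 *
 * If PG_reclaim survived a redirty, end_page_writeback() would treat
 * the page as reclaim-driven writeback and rotate it to the tail of
 * the inactive LRU even though it is being actively dirtied. */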
@@ -1366,7 +1393,7 @@ int test_set_page_writeback(struct page *page)
 		ret = TestSetPageWriteback(page);
 	}
 	if (!ret)
-		inc_zone_page_state(page, NR_WRITEBACK);
+		account_page_writeback(page);
 	return ret;
 
 }