Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--   mm/page-writeback.c   64
1 file changed, 36 insertions, 28 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 945559fb63d2..893d7677579e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -75,12 +75,12 @@ int vm_dirty_ratio = 40;
  * The interval between `kupdate'-style writebacks, in centiseconds
  * (hundredths of a second)
  */
-int dirty_writeback_centisecs = 5 * 100;
+int dirty_writeback_interval = 5 * HZ;
 
 /*
  * The longest number of centiseconds for which data is allowed to remain dirty
  */
-int dirty_expire_centisecs = 30 * 100;
+int dirty_expire_interval = 30 * HZ;
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
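
The defaults keep their old meaning: the writeback interval is still 5 seconds and the expiry threshold still 30 seconds, but both are now stored pre-converted to jiffies (5 * HZ, 30 * HZ) instead of centiseconds (5 * 100, 30 * 100). A minimal sketch of the conversion this rename implies; the helper name is mine, not part of the patch:

    /* Sketch only: the old centisecond values map onto the new jiffies
     * representation as cs * HZ / 100, e.g. 500 -> 5 * HZ, 3000 -> 30 * HZ. */
    static inline unsigned long centisecs_to_jiffies_sketch(unsigned long cs)
    {
            return cs * HZ / 100;
    }

This removes the "* HZ / 100" scaling from every user of the variables, as the later hunks in wb_kupdate() and the sysctl handler show.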
@@ -88,7 +88,8 @@ int dirty_expire_centisecs = 30 * 100;
 int block_dump;
 
 /*
- * Flag that puts the machine in "laptop mode".
+ * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
+ * a full sync is triggered after this time elapses without any disk activity.
  */
 int laptop_mode;
 
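
The laptop_mode flag keeps its boolean role (zero still means "laptop mode off"), but any non-zero value is now also the idle timeout in jiffies after which a full sync is forced. An illustration only, not taken from the patch:

    /* Illustration: storing 5 * HZ here means "sync 5 seconds after the
     * disk last went idle"; 0 still disables laptop mode entirely. */
    laptop_mode = 5 * HZ;

The laptop_io_completion() hunk further down arms the writeback timer with this value directly.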
@@ -255,8 +256,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 }
 
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
+ * @nr_pages: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied. The function will periodically check the system's
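
The kerneldoc now describes the renamed entry point and its extra @nr_pages argument, so a caller that dirties a whole run of pages can report the run in a single call rather than once per page. An illustrative call, not taken from the patch:

    /* Illustrative: report 16 freshly dirtied pages of 'mapping' at once. */
    balance_dirty_pages_ratelimited_nr(mapping, 16);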
@@ -267,10 +269,12 @@ static void balance_dirty_pages(struct address_space *mapping)
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(int, ratelimits) = 0;
-	long ratelimit;
+	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+	unsigned long ratelimit;
+	unsigned long *p;
 
 	ratelimit = ratelimit_pages;
 	if (dirty_exceeded)
@@ -280,15 +284,18 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * Check the rate limiting. Also, we do not want to throttle real-time
 	 * tasks in balance_dirty_pages(). Period.
 	 */
-	if (get_cpu_var(ratelimits)++ >= ratelimit) {
-		__get_cpu_var(ratelimits) = 0;
-		put_cpu_var(ratelimits);
+	preempt_disable();
+	p = &__get_cpu_var(ratelimits);
+	*p += nr_pages_dirtied;
+	if (unlikely(*p >= ratelimit)) {
+		*p = 0;
+		preempt_enable();
 		balance_dirty_pages(mapping);
 		return;
 	}
-	put_cpu_var(ratelimits);
+	preempt_enable();
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
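
Two things are worth noting here. The open-coded preempt_disable()/__get_cpu_var()/preempt_enable() sequence is equivalent to the old get_cpu_var()/put_cpu_var() pair, but spelling it out lets the function zero the counter and re-enable preemption before calling balance_dirty_pages(), which may sleep. Also, only mm/page-writeback.c is shown in this diff; existing callers of the old name presumably keep working through a thin wrapper elsewhere (typically a static inline in include/linux/writeback.h), along these lines, a sketch rather than a quote from the patch:

    /* Assumed compatibility wrapper, not visible in this file's diff: the
     * old single-page entry point just reports one page to the new one. */
    static inline void
    balance_dirty_pages_ratelimited(struct address_space *mapping)
    {
            balance_dirty_pages_ratelimited_nr(mapping, 1);
    }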
@@ -380,8 +387,8 @@ static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
  * just walks the superblock inode list, writing back any inodes which are
  * older than a specific point in time.
  *
- * Try to run once per dirty_writeback_centisecs. But if a writeback event
- * takes longer than a dirty_writeback_centisecs interval, then leave a
+ * Try to run once per dirty_writeback_interval. But if a writeback event
+ * takes longer than a dirty_writeback_interval interval, then leave a
  * one-second gap.
  *
  * older_than_this takes precedence over nr_to_write. So we'll only write back
@@ -406,9 +413,9 @@ static void wb_kupdate(unsigned long arg)
 	sync_supers();
 
 	get_writeback_state(&wbs);
-	oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100;
+	oldest_jif = jiffies - dirty_expire_interval;
 	start_jif = jiffies;
-	next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100;
+	next_jif = start_jif + dirty_writeback_interval;
 	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 	while (nr_to_write > 0) {
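
Because dirty_expire_interval and dirty_writeback_interval are already in jiffies, the "(... * HZ) / 100" scaling disappears, and with the default of 30 * HZ the cutoff oldest_jif ends up exactly 30 seconds in the past. A sketch of how such a cutoff is typically compared against an inode's dirty timestamp; the real check lives in fs/fs-writeback.c, and the helper below is only illustrative:

    /* Illustrative: an inode whose dirtied_when stamp is no newer than
     * oldest_jif has been dirty for at least dirty_expire_interval jiffies
     * and is old enough for kupdate-style writeback. */
    static int inode_expired_sketch(struct inode *inode, unsigned long oldest_jif)
    {
            return !time_after(inode->dirtied_when, oldest_jif);
    }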
@@ -425,7 +432,7 @@ static void wb_kupdate(unsigned long arg)
 	}
 	if (time_before(next_jif, jiffies + HZ))
 		next_jif = jiffies + HZ;
-	if (dirty_writeback_centisecs)
+	if (dirty_writeback_interval)
 		mod_timer(&wb_timer, next_jif);
 }
 
@@ -435,11 +442,11 @@ static void wb_kupdate(unsigned long arg)
 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
 {
-	proc_dointvec(table, write, file, buffer, length, ppos);
-	if (dirty_writeback_centisecs) {
+	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
+	if (dirty_writeback_interval) {
 		mod_timer(&wb_timer,
-			jiffies + (dirty_writeback_centisecs * HZ) / 100);
+			jiffies + dirty_writeback_interval);
 	} else {
 		del_timer(&wb_timer);
 	}
 	return 0;
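
Switching the handler from proc_dointvec() to proc_dointvec_userhz_jiffies() keeps the /proc/sys/vm/dirty_writeback_centisecs interface in USER_HZ ticks (USER_HZ is 100 on the common architectures, i.e. centiseconds) while storing jiffies in dirty_writeback_interval, so the timer can be re-armed with the variable as-is. A rough sketch of the conversion performed on a write, my arithmetic rather than the proc code itself:

    /* Rough sketch: writing "500" to the sysctl stores roughly
     * 500 * HZ / USER_HZ jiffies, i.e. 5 * HZ when USER_HZ == 100. */
    unsigned long user_val = 500;
    unsigned long stored   = user_val * HZ / USER_HZ;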
@@ -468,7 +475,7 @@ static void laptop_timer_fn(unsigned long unused)
  */
 void laptop_io_completion(void)
 {
-	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode * HZ);
+	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
 }
 
 /*
@@ -544,7 +551,7 @@ void __init page_writeback_init(void)
 		if (vm_dirty_ratio <= 0)
 			vm_dirty_ratio = 1;
 	}
-	mod_timer(&wb_timer, jiffies + (dirty_writeback_centisecs * HZ) / 100);
+	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
 	set_ratelimit();
 	register_cpu_notifier(&ratelimit_nb);
 }
@@ -621,8 +628,6 @@ EXPORT_SYMBOL(write_one_page);
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
-	int ret = 0;
-
 	if (!TestSetPageDirty(page)) {
 		struct address_space *mapping = page_mapping(page);
 		struct address_space *mapping2;
@@ -644,8 +649,9 @@ int __set_page_dirty_nobuffers(struct page *page)
 						I_DIRTY_PAGES);
 			}
 		}
+		return 1;
 	}
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
 
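
With the local 'ret' gone, __set_page_dirty_nobuffers() now returns 1 exactly when this call flipped the page from clean to dirty and 0 when the page was already dirty, giving callers a usable "newly dirtied" signal. A hypothetical caller sketch (the function and variable names are mine, not from the patch) that feeds such a count into the new batched throttle:

    /* Hypothetical sketch: dirty a batch of pages, count only real
     * clean -> dirty transitions, and report the batch for throttling. */
    static void dirty_batch_sketch(struct address_space *mapping,
                                   struct page **pages, int nr)
    {
            unsigned long newly_dirtied = 0;
            int i;

            for (i = 0; i < nr; i++)
                    if (__set_page_dirty_nobuffers(pages[i]))
                            newly_dirtied++;
            balance_dirty_pages_ratelimited_nr(mapping, newly_dirtied);
    }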
@@ -675,8 +681,10 @@ int fastcall set_page_dirty(struct page *page)
 			return (*spd)(page);
 		return __set_page_dirty_buffers(page);
 	}
-	if (!PageDirty(page))
-		SetPageDirty(page);
+	if (!PageDirty(page)) {
+		if (!TestSetPageDirty(page))
+			return 1;
+	}
 	return 0;
 }
 EXPORT_SYMBOL(set_page_dirty);
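
For pages without a mapping, the old code did a plain SetPageDirty() and always fell through to return 0; using TestSetPageDirty() instead makes the return value report whether this call performed the clean-to-dirty transition, matching the __set_page_dirty_nobuffers() change above. A hypothetical use of the return value (the counter is illustrative, not from the patch):

    /* Hypothetical: only count the page if this call actually dirtied it. */
    if (set_page_dirty(page))
            nr_pages_dirtied++;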
