Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  | 244
1 files changed, 65 insertions, 179 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 81627ebcd313..a3b14090b1fb 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,15 +36,6 @@
 #include <linux/pagevec.h>
 
 /*
- * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation. We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES	1024
-
-/*
  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  * will look to see if it needs to force writeback or throttling.
  */
@@ -53,18 +44,21 @@ static long ratelimit_pages = 32;
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
  * large amounts of I/O are submitted.
  */
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
 {
-	return ratelimit_pages + ratelimit_pages / 2;
+	if (dirtied < ratelimit_pages)
+		dirtied = ratelimit_pages;
+
+	return dirtied + dirtied / 2;
 }
 
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
  */
 int dirty_background_ratio = 10;
 
@@ -117,8 +111,6 @@ EXPORT_SYMBOL(laptop_mode);
 /* End of sysctl-exported parameters */
 
 
-static void background_writeout(unsigned long _min_pages);
-
 /*
  * Scale the writeback cache size proportional to the relative writeout speeds.
  *
@@ -166,37 +158,37 @@ static void update_completion_period(void)
 }
 
 int dirty_background_ratio_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	int ret;
 
-	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write)
 		dirty_background_bytes = 0;
 	return ret;
 }
 
 int dirty_background_bytes_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	int ret;
 
-	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write)
 		dirty_background_ratio = 0;
 	return ret;
 }
 
 int dirty_ratio_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	int old_ratio = vm_dirty_ratio;
 	int ret;
 
-	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 		update_completion_period();
 		vm_dirty_bytes = 0;
@@ -206,13 +198,13 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 
 
 int dirty_bytes_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	unsigned long old_bytes = vm_dirty_bytes;
 	int ret;
 
-	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
 		update_completion_period();
 		vm_dirty_ratio = 0;
@@ -320,15 +312,13 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
 /*
  *
  */
-static DEFINE_SPINLOCK(bdi_lock);
 static unsigned int bdi_min_ratio;
 
 int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 {
 	int ret = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&bdi_lock, flags);
+	spin_lock_bh(&bdi_lock);
 	if (min_ratio > bdi->max_ratio) {
 		ret = -EINVAL;
 	} else {
@@ -340,27 +330,26 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 			ret = -EINVAL;
 		}
 	}
-	spin_unlock_irqrestore(&bdi_lock, flags);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
 
 int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 {
-	unsigned long flags;
 	int ret = 0;
 
 	if (max_ratio > 100)
 		return -EINVAL;
 
-	spin_lock_irqsave(&bdi_lock, flags);
+	spin_lock_bh(&bdi_lock);
 	if (bdi->min_ratio > max_ratio) {
 		ret = -EINVAL;
 	} else {
 		bdi->max_ratio = max_ratio;
 		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 	}
-	spin_unlock_irqrestore(&bdi_lock, flags);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
@@ -394,7 +383,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		struct zone *z =
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+		x += zone_page_state(z, NR_FREE_PAGES) +
+			zone_reclaimable_pages(z);
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -418,7 +408,7 @@ unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
@@ -487,10 +477,11 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data. It looks at the number of dirty pages in the machine and will force
  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+					unsigned long write_chunk)
 {
 	long nr_reclaimable, bdi_nr_reclaimable;
 	long nr_writeback, bdi_nr_writeback;
@@ -498,7 +489,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
-	unsigned long write_chunk = sync_writeback_pages();
+	unsigned long pause = 1;
 
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 
@@ -546,7 +537,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 		 * up.
 		 */
 		if (bdi_nr_reclaimable > bdi_thresh) {
-			writeback_inodes(&wbc);
+			writeback_inodes_wbc(&wbc);
 			pages_written += write_chunk - wbc.nr_to_write;
 			get_dirty_limits(&background_thresh, &dirty_thresh,
 				       &bdi_thresh, bdi);
@@ -575,7 +566,15 @@ static void balance_dirty_pages(struct address_space *mapping)
 		if (pages_written >= write_chunk)
 			break;		/* We've done our duty */
 
-		congestion_wait(BLK_RW_ASYNC, HZ/10);
+		schedule_timeout_interruptible(pause);
+
+		/*
+		 * Increase the delay for each loop, up to our previous
+		 * default of taking a 100ms nap.
+		 */
+		pause <<= 1;
+		if (pause > HZ / 10)
+			pause = HZ / 10;
 	}
 
 	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
@@ -583,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 		bdi->dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
-		return;		/* pdflush is already working this queue */
+		return;
 
 	/*
 	 * In laptop mode, we wait until hitting the higher threshold before
@@ -594,10 +593,10 @@ static void balance_dirty_pages(struct address_space *mapping)
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
-					  + global_page_state(NR_UNSTABLE_NFS)
-					  > background_thresh)))
-		pdflush_operation(background_writeout, 0);
+	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
+			       + global_page_state(NR_UNSTABLE_NFS))
+			       > background_thresh)))
+		bdi_start_writeback(bdi, NULL, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -610,6 +609,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 	}
 }
 
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -627,7 +628,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
 	unsigned long ratelimit;
 	unsigned long *p;
 
@@ -640,12 +640,13 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	 * tasks in balance_dirty_pages(). Period.
 	 */
 	preempt_disable();
-	p = &__get_cpu_var(ratelimits);
+	p = &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
+		ratelimit = sync_writeback_pages(*p);
 		*p = 0;
 		preempt_enable();
-		balance_dirty_pages(mapping);
+		balance_dirty_pages(mapping, ratelimit);
 		return;
 	}
 	preempt_enable();
@@ -681,153 +682,35 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 	}
 }
 
-/*
- * writeback at least _min_pages, and keep writing until the amount of dirty
- * memory is less than the background threshold, or until we're all clean.
- */
-static void background_writeout(unsigned long _min_pages)
-{
-	long min_pages = _min_pages;
-	struct writeback_control wbc = {
-		.bdi		= NULL,
-		.sync_mode	= WB_SYNC_NONE,
-		.older_than_this = NULL,
-		.nr_to_write	= 0,
-		.nonblocking	= 1,
-		.range_cyclic	= 1,
-	};
-
-	for ( ; ; ) {
-		unsigned long background_thresh;
-		unsigned long dirty_thresh;
-
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
-		if (global_page_state(NR_FILE_DIRTY) +
-			global_page_state(NR_UNSTABLE_NFS) < background_thresh
-				&& min_pages <= 0)
-			break;
-		wbc.more_io = 0;
-		wbc.encountered_congestion = 0;
-		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
-		wbc.pages_skipped = 0;
-		writeback_inodes(&wbc);
-		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
-		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
-			/* Wrote less than expected */
-			if (wbc.encountered_congestion || wbc.more_io)
-				congestion_wait(BLK_RW_ASYNC, HZ/10);
-			else
-				break;
-		}
-	}
-}
-
-/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world. Returns 0 if a pdflush thread was dispatched. Returns
- * -1 if all pdflush threads were busy.
- */
-int wakeup_pdflush(long nr_pages)
-{
-	if (nr_pages == 0)
-		nr_pages = global_page_state(NR_FILE_DIRTY) +
-				global_page_state(NR_UNSTABLE_NFS);
-	return pdflush_operation(background_writeout, nr_pages);
-}
-
-static void wb_timer_fn(unsigned long unused);
 static void laptop_timer_fn(unsigned long unused);
 
-static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
 static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
 
 /*
- * Periodic writeback of "old" data.
- *
- * Define "old": the first time one of an inode's pages is dirtied, we mark the
- * dirtying-time in the inode's address_space. So this periodic writeback code
- * just walks the superblock inode list, writing back any inodes which are
- * older than a specific point in time.
- *
- * Try to run once per dirty_writeback_interval. But if a writeback event
- * takes longer than a dirty_writeback_interval interval, then leave a
- * one-second gap.
- *
- * older_than_this takes precedence over nr_to_write. So we'll only write back
- * all dirty pages if they are all attached to "old" mappings.
- */
-static void wb_kupdate(unsigned long arg)
-{
-	unsigned long oldest_jif;
-	unsigned long start_jif;
-	unsigned long next_jif;
-	long nr_to_write;
-	struct writeback_control wbc = {
-		.bdi		= NULL,
-		.sync_mode	= WB_SYNC_NONE,
-		.older_than_this = &oldest_jif,
-		.nr_to_write	= 0,
-		.nonblocking	= 1,
-		.for_kupdate	= 1,
-		.range_cyclic	= 1,
-	};
-
-	sync_supers();
-
-	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
-	start_jif = jiffies;
-	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
-	nr_to_write = global_page_state(NR_FILE_DIRTY) +
-			global_page_state(NR_UNSTABLE_NFS) +
-			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
-	while (nr_to_write > 0) {
-		wbc.more_io = 0;
-		wbc.encountered_congestion = 0;
-		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
-		writeback_inodes(&wbc);
-		if (wbc.nr_to_write > 0) {
-			if (wbc.encountered_congestion || wbc.more_io)
-				congestion_wait(BLK_RW_ASYNC, HZ/10);
-			else
-				break;	/* All the old data is written */
-		}
-		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
-	}
-	if (time_before(next_jif, jiffies + HZ))
-		next_jif = jiffies + HZ;
-	if (dirty_writeback_interval)
-		mod_timer(&wb_timer, next_jif);
-}
-
-/*
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
-	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+	void __user *buffer, size_t *length, loff_t *ppos)
 {
-	proc_dointvec(table, write, file, buffer, length, ppos);
-	if (dirty_writeback_interval)
-		mod_timer(&wb_timer, jiffies +
-			msecs_to_jiffies(dirty_writeback_interval * 10));
-	else
-		del_timer(&wb_timer);
+	proc_dointvec(table, write, buffer, length, ppos);
 	return 0;
 }
 
-static void wb_timer_fn(unsigned long unused)
+static void do_laptop_sync(struct work_struct *work)
 {
-	if (pdflush_operation(wb_kupdate, 0) < 0)
-		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
-}
-
-static void laptop_flush(unsigned long unused)
-{
-	sys_sync();
+	wakeup_flusher_threads(0);
+	kfree(work);
 }
 
 static void laptop_timer_fn(unsigned long unused)
 {
-	pdflush_operation(laptop_flush, 0);
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_laptop_sync);
+		schedule_work(work);
+	}
 }
 
 /*
@@ -910,8 +793,6 @@ void __init page_writeback_init(void)
 {
 	int shift;
 
-	mod_timer(&wb_timer,
-		  jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
 	writeback_set_ratelimit();
 	register_cpu_notifier(&ratelimit_nb);
 
@@ -1145,12 +1026,10 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 
 	if (wbc->nr_to_write <= 0)
 		return 0;
-	wbc->for_writepages = 1;
 	if (mapping->a_ops->writepages)
 		ret = mapping->a_ops->writepages(mapping, wbc);
 	else
 		ret = generic_writepages(mapping, wbc);
-	wbc->for_writepages = 0;
 	return ret;
 }
 
@@ -1274,6 +1153,13 @@ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
 /*
+ * Dirty a page.
+ *
+ * For pages with a mapping this should be done under the page lock
+ * for the benefit of asynchronous memory errors who prefer a consistent
+ * dirty state. This rule can be broken in some special cases,
+ * but should be better not to.
+ *
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
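
For readers tracing the behavioural change, here is a small standalone C sketch (userspace code, not the kernel implementation) of the two calculations this patch introduces: the write_chunk returned by the new sync_writeback_pages(dirtied) and the exponential back-off pause used in balance_dirty_pages(). The HZ value and the ratelimit_pages initialiser are assumptions chosen for illustration only.

/* sketch.c -- illustrative only; mirrors the arithmetic in the patch above */
#include <stdio.h>

#define HZ 1000				/* assumed tick rate for the example */
static long ratelimit_pages = 32;	/* assumed; tuned at runtime in the kernel */

/* mirrors the new sync_writeback_pages(): write ~1.5x what was dirtied */
static long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < (unsigned long)ratelimit_pages)
		dirtied = ratelimit_pages;
	return dirtied + dirtied / 2;
}

int main(void)
{
	unsigned long pause = 1;
	int loop;

	/* small dirtiers are clamped to ratelimit_pages, large ones scale up */
	printf("write_chunk(8)    = %ld\n", sync_writeback_pages(8));
	printf("write_chunk(1024) = %ld\n", sync_writeback_pages(1024));

	/* the pause doubles each loop, capped at HZ/10 (the old 100ms nap) */
	for (loop = 0; loop < 10; loop++) {
		printf("loop %d: sleep %lu jiffies\n", loop, pause);
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}
	return 0;
}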