about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--fs/buffer.c10
-rw-r--r--fs/fs-writeback.c5
-rw-r--r--mm/page-writeback.c8
-rw-r--r--mm/shmem.c5
-rw-r--r--mm/vmscan.c8
5 files changed, 17 insertions, 19 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 90a98865b0cc..fc22b4504087 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -274,7 +274,7 @@ void invalidate_bdev(struct block_device *bdev)
274} 274}
275 275
276/* 276/*
277 * Kick pdflush then try to free up some ZONE_NORMAL memory. 277 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
278 */ 278 */
279static void free_more_memory(void) 279static void free_more_memory(void)
280{ 280{
@@ -1699,9 +1699,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1699 /* 1699 /*
1700 * If it's a fully non-blocking write attempt and we cannot 1700 * If it's a fully non-blocking write attempt and we cannot
1701 * lock the buffer then redirty the page. Note that this can 1701 * lock the buffer then redirty the page. Note that this can
1702 * potentially cause a busy-wait loop from pdflush and kswapd 1702 * potentially cause a busy-wait loop from writeback threads
1703 * activity, but those code paths have their own higher-level 1703 * and kswapd activity, but those code paths have their own
1704 * throttling. 1704 * higher-level throttling.
1705 */ 1705 */
1706 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 1706 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1707 lock_buffer(bh); 1707 lock_buffer(bh);
@@ -3191,7 +3191,7 @@ void block_sync_page(struct page *page)
3191 * still running obsolete flush daemons, so we terminate them here. 3191 * still running obsolete flush daemons, so we terminate them here.
3192 * 3192 *
3193 * Use of bdflush() is deprecated and will be removed in a future kernel. 3193 * Use of bdflush() is deprecated and will be removed in a future kernel.
3194 * The `pdflush' kernel threads fully replace bdflush daemons and this call. 3194 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3195 */ 3195 */
3196SYSCALL_DEFINE2(bdflush, int, func, long, data) 3196SYSCALL_DEFINE2(bdflush, int, func, long, data)
3197{ 3197{
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 15e375bf93e6..15944f754e15 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -320,7 +320,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
320 * For inodes being constantly redirtied, dirtied_when can get stuck. 320 * For inodes being constantly redirtied, dirtied_when can get stuck.
321 * It _appears_ to be in the future, but is actually in distant past. 321 * It _appears_ to be in the future, but is actually in distant past.
322 * This test is necessary to prevent such wrapped-around relative times 322 * This test is necessary to prevent such wrapped-around relative times
323 * from permanently stopping the whole pdflush writeback. 323 * from permanently stopping the whole bdi writeback.
324 */ 324 */
325 ret = ret && time_before_eq(inode->dirtied_when, jiffies); 325 ret = ret && time_before_eq(inode->dirtied_when, jiffies);
326#endif 326#endif
@@ -1085,9 +1085,6 @@ EXPORT_SYMBOL(__mark_inode_dirty);
1085 * If older_than_this is non-NULL, then only write out inodes which 1085 * If older_than_this is non-NULL, then only write out inodes which
1086 * had their first dirtying at a time earlier than *older_than_this. 1086 * had their first dirtying at a time earlier than *older_than_this.
1087 * 1087 *
1088 * If we're a pdlfush thread, then implement pdflush collision avoidance
1089 * against the entire list.
1090 *
1091 * If `bdi' is non-zero then we're being asked to writeback a specific queue. 1088 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
1092 * This function assumes that the blockdev superblock's inodes are backed by 1089 * This function assumes that the blockdev superblock's inodes are backed by
1093 * a variety of queues, so all inodes are searched. For other superblocks, 1090 * a variety of queues, so all inodes are searched. For other superblocks,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3c78fc316202..8bef063125b1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -58,7 +58,7 @@ static inline long sync_writeback_pages(unsigned long dirtied)
58/* The following parameters are exported via /proc/sys/vm */ 58/* The following parameters are exported via /proc/sys/vm */
59 59
60/* 60/*
61 * Start background writeback (via pdflush) at this percentage 61 * Start background writeback (via writeback threads) at this percentage
62 */ 62 */
63int dirty_background_ratio = 10; 63int dirty_background_ratio = 10;
64 64
@@ -477,8 +477,8 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
477 * balance_dirty_pages() must be called by processes which are generating dirty 477 * balance_dirty_pages() must be called by processes which are generating dirty
478 * data. It looks at the number of dirty pages in the machine and will force 478 * data. It looks at the number of dirty pages in the machine and will force
479 * the caller to perform writeback if the system is over `vm_dirty_ratio'. 479 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
480 * If we're over `background_thresh' then pdflush is woken to perform some 480 * If we're over `background_thresh' then the writeback threads are woken to
481 * writeout. 481 * perform some writeout.
482 */ 482 */
483static void balance_dirty_pages(struct address_space *mapping, 483static void balance_dirty_pages(struct address_space *mapping,
484 unsigned long write_chunk) 484 unsigned long write_chunk)
@@ -582,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping,
582 bdi->dirty_exceeded = 0; 582 bdi->dirty_exceeded = 0;
583 583
584 if (writeback_in_progress(bdi)) 584 if (writeback_in_progress(bdi))
585 return; /* pdflush is already working this queue */ 585 return;
586 586
587 /* 587 /*
588 * In laptop mode, we wait until hitting the higher threshold before 588 * In laptop mode, we wait until hitting the higher threshold before
diff --git a/mm/shmem.c b/mm/shmem.c
index b206a7a32e2a..aa9481166aae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1046,8 +1046,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1046 * sync from ever calling shmem_writepage; but a stacking filesystem 1046 * sync from ever calling shmem_writepage; but a stacking filesystem
1047 * may use the ->writepage of its underlying filesystem, in which case 1047 * may use the ->writepage of its underlying filesystem, in which case
1048 * tmpfs should write out to swap only in response to memory pressure, 1048 * tmpfs should write out to swap only in response to memory pressure,
1049 * and not for pdflush or sync. However, in those cases, we do still 1049 * and not for the writeback threads or sync. However, in those cases,
1050 * want to check if there's a redundant swappage to be discarded. 1050 * we do still want to check if there's a redundant swappage to be
1051 * discarded.
1051 */ 1052 */
1052 if (wbc->for_reclaim) 1053 if (wbc->for_reclaim)
1053 swap = get_swap_page(); 1054 swap = get_swap_page();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 613e89f471d9..359c3c57ef85 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1709,10 +1709,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
1709 * 1709 *
1710 * If the caller is !__GFP_FS then the probability of a failure is reasonably 1710 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1711 * high - the zone may be full of dirty or under-writeback pages, which this 1711 * high - the zone may be full of dirty or under-writeback pages, which this
1712 * caller can't do much about. We kick pdflush and take explicit naps in the 1712 * caller can't do much about. We kick the writeback threads and take explicit
1713 * hope that some of these pages can be written. But if the allocating task 1713 * naps in the hope that some of these pages can be written. But if the
1714 * holds filesystem locks which prevent writeout this might not work, and the 1714 * allocating task holds filesystem locks which prevent writeout this might not
1715 * allocation attempt will fail. 1715 * work, and the allocation attempt will fail.
1716 * 1716 *
1717 * returns: 0, if no pages reclaimed 1717 * returns: 0, if no pages reclaimed
1718 * else, the number of pages reclaimed 1718 * else, the number of pages reclaimed