 arch/i386/lib/usercopy.c    |  3
 block/ll_rw_blk.c           | 71
 drivers/md/dm-crypt.c       |  3
 fs/fat/file.c               |  3
 fs/nfs/write.c              |  4
 fs/reiserfs/journal.c       |  3
 fs/xfs/linux-2.6/kmem.c     |  5
 fs/xfs/linux-2.6/xfs_buf.c  |  3
 include/linux/backing-dev.h |  7
 include/linux/blkdev.h      | 24
 include/linux/writeback.h   |  1
 mm/Makefile                 |  3
 mm/backing-dev.c            | 69
 mm/page-writeback.c         | 17
 mm/page_alloc.c             |  5
 mm/shmem.c                  |  3
 mm/vmscan.c                 |  6
 17 files changed, 126 insertions(+), 104 deletions(-)
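Taken together, the patch separates backing-device congestion from request-queue congestion: the congestion wait queues and their helpers move out of block/ll_rw_blk.c into the new mm/backing-dev.c, blk_congestion_wait()/blk_congestion_end() are renamed to congestion_wait()/congestion_end(), the queue-level blk_set_queue_congested()/blk_clear_queue_congested() become inline wrappers over the new set_bdi_congested()/clear_bdi_congested() helpers, and all callers are converted one-for-one. Two illustrative, non-authoritative sketches of the resulting API are included after the include/linux/backing-dev.h and mm/backing-dev.c hunks below.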
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 258df6b4d7d7..d22cfc9d656c 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -9,6 +9,7 @@
 #include <linux/highmem.h>
 #include <linux/blkdev.h>
 #include <linux/module.h>
+#include <linux/backing-dev.h>
 #include <asm/uaccess.h>
 #include <asm/mmx.h>
 
@@ -741,7 +742,7 @@ survive:
 
 	if (retval == -ENOMEM && is_init(current)) {
 		up_read(&current->mm->mmap_sem);
-		blk_congestion_wait(WRITE, HZ/50);
+		congestion_wait(WRITE, HZ/50);
 		goto survive;
 	}
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 132a858ce2c5..136066583c68 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -56,11 +56,6 @@ static kmem_cache_t *requestq_cachep;
  */
 static kmem_cache_t *iocontext_cachep;
 
-static wait_queue_head_t congestion_wqh[2] = {
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-	};
-
 /*
  * Controlling structure to kblockd
  */
@@ -112,37 +107,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
 	q->nr_congestion_off = nr;
 }
 
-/*
- * A queue has just exited congestion.  Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-void blk_clear_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	clear_bit(bit, &q->backing_dev_info.state);
-	smp_mb__after_clear_bit();
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-EXPORT_SYMBOL(blk_clear_queue_congested);
-
-/*
- * A queue has just entered congestion.  Flag that in the queue's VM-visible
- * state flags and increment the global counter of congested queues.
- */
-void blk_set_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	set_bit(bit, &q->backing_dev_info.state);
-}
-EXPORT_SYMBOL(blk_set_queue_congested);
-
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @bdev: device
@@ -2755,41 +2719,6 @@ void blk_end_sync_rq(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_end_sync_rq);
 
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-	long ret;
-	DEFINE_WAIT(wait);
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-	ret = io_schedule_timeout(timeout);
-	finish_wait(wqh, &wait);
-	return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
-/**
- * blk_congestion_end - wake up sleepers on a congestion queue
- * @rw: READ or WRITE
- */
-void blk_congestion_end(int rw)
-{
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
 /*
  * Has to be called with the request spinlock acquired
  */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 655d816760e5..a625576fdeeb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
+#include <linux/backing-dev.h>
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -602,7 +603,7 @@ static void process_write(struct crypt_io *io)
 
 		/* out of memory -> run queues */
 		if (remaining)
-			blk_congestion_wait(bio_data_dir(clone), HZ/100);
+			congestion_wait(bio_data_dir(clone), HZ/100);
 	}
 }
 
diff --git a/fs/fat/file.c b/fs/fat/file.c
index f4b8f8b3fbdd..8337451e7897 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -13,6 +13,7 @@
 #include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 
 int fat_generic_ioctl(struct inode *inode, struct file *filp,
@@ -118,7 +119,7 @@ static int fat_file_release(struct inode *inode, struct file *filp)
 	if ((filp->f_mode & FMODE_WRITE) &&
 	     MSDOS_SB(inode->i_sb)->options.flush) {
 		fat_flush_inodes(inode->i_sb, inode, NULL);
-		blk_congestion_wait(WRITE, HZ/10);
+		congestion_wait(WRITE, HZ/10);
 	}
 	return 0;
 }
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f6675d2c386c..ca92ac36fe9d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,8 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_mount.h>
 #include <linux/nfs_page.h>
+#include <linux/backing-dev.h>
+
 #include <asm/uaccess.h>
 #include <linux/smp_lock.h>
 
@@ -395,7 +397,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 out:
 	clear_bit(BDI_write_congested, &bdi->state);
 	wake_up_all(&nfs_write_congestion);
-	writeback_congestion_end();
+	congestion_end(WRITE);
 	return err;
 }
 
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ad8cbc49883a..85ce23268302 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -53,6 +53,7 @@
 #include <linux/workqueue.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/backing-dev.h>
 
 /* gets a struct reiserfs_journal_list * from a list head */
 #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
@@ -970,7 +971,7 @@ int reiserfs_async_progress_wait(struct super_block *s)
 	DEFINE_WAIT(wait);
 	struct reiserfs_journal *j = SB_JOURNAL(s);
 	if (atomic_read(&j->j_async_throttle))
-		blk_congestion_wait(WRITE, HZ / 10);
+		congestion_wait(WRITE, HZ / 10);
 	return 0;
 }
 
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index d59737589815..004baf600611 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -21,6 +21,7 @@
 #include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/blkdev.h>
+#include <linux/backing-dev.h>
 #include "time.h"
 #include "kmem.h"
 
@@ -53,7 +54,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 		printk(KERN_ERR "XFS: possible memory allocation "
 					"deadlock in %s (mode:0x%x)\n",
 					__FUNCTION__, lflags);
-		blk_congestion_wait(WRITE, HZ/50);
+		congestion_wait(WRITE, HZ/50);
 	} while (1);
 }
 
@@ -131,7 +132,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
 		printk(KERN_ERR "XFS: possible memory allocation "
 					"deadlock in %s (mode:0x%x)\n",
 					__FUNCTION__, lflags);
-		blk_congestion_wait(WRITE, HZ/50);
+		congestion_wait(WRITE, HZ/50);
 	} while (1);
 }
 
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 9bbadafdcb00..db5f5a3608ca 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -30,6 +30,7 @@
 #include <linux/hash.h>
 #include <linux/kthread.h>
 #include <linux/migrate.h>
+#include <linux/backing-dev.h>
 #include "xfs_linux.h"
 
 STATIC kmem_zone_t *xfs_buf_zone;
@@ -395,7 +396,7 @@ _xfs_buf_lookup_pages(
 
 		XFS_STATS_INC(xb_page_retries);
 		xfsbufd_wakeup(0, gfp_mask);
-		blk_congestion_wait(WRITE, HZ/50);
+		congestion_wait(WRITE, HZ/50);
 		goto retry;
 	}
 
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index f7a1390d67f5..7011d6255593 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -10,6 +10,8 @@
 
 #include <asm/atomic.h>
 
+struct page;
+
 /*
  * Bits in backing_dev_info.state
  */
@@ -88,6 +90,11 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 			(1 << BDI_write_congested));
 }
 
+void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
+void set_bdi_congested(struct backing_dev_info *bdi, int rw);
+long congestion_wait(int rw, long timeout);
+void congestion_end(int rw);
+
 #define bdi_cap_writeback_dirty(bdi) \
 	(!((bdi)->capabilities & BDI_CAP_NO_WRITEBACK))
 
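As an aside, here is a minimal sketch of how a non-request-based writer might drive this new bdi-level interface. Everything named my_* and both watermarks are invented for illustration; only the set_bdi_congested()/clear_bdi_congested() calls come from this patch:

	/* Hypothetical sketch only: my_bdi, the counter and the watermarks
	 * are invented; the two bdi calls are the new API from this patch. */
	static struct backing_dev_info my_bdi;		/* assumed driver-owned bdi */
	static atomic_t my_writes_in_flight = ATOMIC_INIT(0);
	#define MY_HI_WATER	128			/* arbitrary thresholds */
	#define MY_LO_WATER	64

	static void my_write_submitted(void)
	{
		if (atomic_inc_return(&my_writes_in_flight) > MY_HI_WATER)
			set_bdi_congested(&my_bdi, WRITE);	/* tell the VM to back off */
	}

	static void my_write_completed(void)
	{
		/* clear_bdi_congested() also wakes congestion_wait() sleepers */
		if (atomic_dec_return(&my_writes_in_flight) < MY_LO_WATER)
			clear_bdi_congested(&my_bdi, WRITE);
	}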
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9575e3a5ff2a..7bfcde2d5578 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -651,8 +651,26 @@ extern void blk_recount_segments(request_queue_t *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, struct scsi_ioctl_command __user *);
-extern void blk_clear_queue_congested(request_queue_t *q, int rw);
-extern void blk_set_queue_congested(request_queue_t *q, int rw);
+
+/*
+ * A queue has just exited congestion.  Note this in the global counter of
+ * congested queues, and wake up anyone who was waiting for requests to be
+ * put back.
+ */
+static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+{
+	clear_bdi_congested(&q->backing_dev_info, rw);
+}
+
+/*
+ * A queue has just entered congestion.  Flag that in the queue's VM-visible
+ * state flags and increment the global counter of congested queues.
+ */
+static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+{
+	set_bdi_congested(&q->backing_dev_info, rw);
+}
+
 extern void blk_start_queue(request_queue_t *q);
 extern void blk_stop_queue(request_queue_t *q);
 extern void blk_sync_queue(struct request_queue *q);
@@ -767,10 +785,8 @@ extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
 extern void blk_queue_free_tags(request_queue_t *);
 extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
-extern long blk_congestion_wait(int rw, long timeout);
 extern struct blk_queue_tag *blk_init_tags(int);
 extern void blk_free_tags(struct blk_queue_tag *);
-extern void blk_congestion_end(int rw);
 
 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 						     int tag)
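The design choice here is worth noting: the queue-level names survive as static inlines, so block-layer callers compile unchanged, while the congestion state itself now lives in the backing_dev_info that every request queue already embeds (q->backing_dev_info). That is what lets non-request-based writers such as the NFS client above participate in the same throttling through the bdi helpers directly.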
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a341c8032866..fc35e6bdfb93 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -85,7 +85,6 @@ int wakeup_pdflush(long nr_pages);
 void laptop_io_completion(void);
 void laptop_sync_completion(void);
 void throttle_vm_writeout(void);
-void writeback_congestion_end(void);
 
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;
diff --git a/mm/Makefile b/mm/Makefile
index 12b3a4eee88d..f3c077eb0b8e 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -10,7 +10,8 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
 obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			   page_alloc.o page-writeback.o pdflush.o \
 			   readahead.o swap.o truncate.o vmscan.o \
-			   prio_tree.o util.o mmzone.o vmstat.o $(mmu-y)
+			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
+			   $(mmu-y)
 
 ifeq ($(CONFIG_MMU)$(CONFIG_BLOCK),yy)
 obj-y			+= bounce.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
new file mode 100644
index 000000000000..f50a2811f9dc
--- /dev/null
+++ b/mm/backing-dev.c
@@ -0,0 +1,69 @@
+
+#include <linux/wait.h>
+#include <linux/backing-dev.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+static wait_queue_head_t congestion_wqh[2] = {
+		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
+		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
+	};
+
+
+void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	enum bdi_state bit;
+	wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	clear_bit(bit, &bdi->state);
+	smp_mb__after_clear_bit();
+	if (waitqueue_active(wqh))
+		wake_up(wqh);
+}
+EXPORT_SYMBOL(clear_bdi_congested);
+
+void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	enum bdi_state bit;
+
+	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	set_bit(bit, &bdi->state);
+}
+EXPORT_SYMBOL(set_bdi_congested);
+
+/**
+ * congestion_wait - wait for a backing_dev to become uncongested
+ * @rw: READ or WRITE
+ * @timeout: timeout in jiffies
+ *
+ * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
+ * write congestion.  If no backing_devs are congested then just wait for the
+ * next write to be completed.
+ */
+long congestion_wait(int rw, long timeout)
+{
+	long ret;
+	DEFINE_WAIT(wait);
+	wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+	ret = io_schedule_timeout(timeout);
+	finish_wait(wqh, &wait);
+	return ret;
+}
+EXPORT_SYMBOL(congestion_wait);
+
+/**
+ * congestion_end - wake up sleepers on a congested backing_dev_info
+ * @rw: READ or WRITE
+ */
+void congestion_end(int rw)
+{
+	wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+	if (waitqueue_active(wqh))
+		wake_up(wqh);
+}
+EXPORT_SYMBOL(congestion_end);
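The caller-side pattern that all the conversions in this patch follow is a bounded nap in a retry loop. Below is a minimal, self-contained sketch of that pattern; my_alloc_retry() is an invented name, and the HZ/50 nap simply mirrors the XFS and page-allocator call sites, only congestion_wait() itself is from this patch:

	#include <linux/slab.h>
	#include <linux/backing-dev.h>

	/* Hypothetical helper, not from this patch: retry an allocation,
	 * napping for up to HZ/50 jiffies (~20ms at HZ=1000) of write
	 * congestion between attempts so writeback can free memory. */
	static void *my_alloc_retry(size_t size, gfp_t flags)
	{
		void *p;

		while (!(p = kmalloc(size, flags))) {
			if (!(flags & __GFP_WAIT))
				return NULL;	/* atomic caller: cannot nap */
			congestion_wait(WRITE, HZ/50);
		}
		return p;
	}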
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a0f339057449..8d9b19f239c3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -222,7 +222,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
 		}
-		blk_congestion_wait(WRITE, HZ/10);
+		congestion_wait(WRITE, HZ/10);
 	}
 
 	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
@@ -314,7 +314,7 @@ void throttle_vm_writeout(void)
 		if (global_page_state(NR_UNSTABLE_NFS) +
 			global_page_state(NR_WRITEBACK) <= dirty_thresh)
 				break;
-		blk_congestion_wait(WRITE, HZ/10);
+		congestion_wait(WRITE, HZ/10);
 	}
 }
 
@@ -351,7 +351,7 @@ static void background_writeout(unsigned long _min_pages)
 		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
 			/* Wrote less than expected */
-			blk_congestion_wait(WRITE, HZ/10);
+			congestion_wait(WRITE, HZ/10);
 			if (!wbc.encountered_congestion)
 				break;
 		}
@@ -422,7 +422,7 @@ static void wb_kupdate(unsigned long arg)
 		writeback_inodes(&wbc);
 		if (wbc.nr_to_write > 0) {
 			if (wbc.encountered_congestion)
-				blk_congestion_wait(WRITE, HZ/10);
+				congestion_wait(WRITE, HZ/10);
 			else
 				break;	/* All the old data is written */
 		}
@@ -956,15 +956,6 @@ int test_set_page_writeback(struct page *page)
 EXPORT_SYMBOL(test_set_page_writeback);
 
 /*
- * Wakes up tasks that are being throttled due to writeback congestion
- */
-void writeback_congestion_end(void)
-{
-	blk_congestion_end(WRITE);
-}
-EXPORT_SYMBOL(writeback_congestion_end);
-
-/*
  * Return true if any of the pages in the mapping are marked with the
  * passed tag.
  */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40db96a655d0..afee38f04d84 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1050,7 +1051,7 @@ nofail_alloc:
 			if (page)
 				goto got_pg;
 			if (gfp_mask & __GFP_NOFAIL) {
-				blk_congestion_wait(WRITE, HZ/50);
+				congestion_wait(WRITE, HZ/50);
 				goto nofail_alloc;
 			}
 		}
@@ -1113,7 +1114,7 @@ rebalance:
 			do_retry = 1;
 	}
 	if (do_retry) {
-		blk_congestion_wait(WRITE, HZ/50);
+		congestion_wait(WRITE, HZ/50);
 		goto rebalance;
 	}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index b378f66cf2f9..4959535fc14c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -48,6 +48,7 @@
 #include <linux/ctype.h>
 #include <linux/migrate.h>
 #include <linux/highmem.h>
+#include <linux/backing-dev.h>
 
 #include <asm/uaccess.h>
 #include <asm/div64.h>
@@ -1131,7 +1132,7 @@ repeat:
 			page_cache_release(swappage);
 			if (error == -ENOMEM) {
 				/* let kswapd refresh zone for GFP_ATOMICs */
-				blk_congestion_wait(WRITE, HZ/50);
+				congestion_wait(WRITE, HZ/50);
 			}
 			goto repeat;
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index af73c14f9d88..f05527bf792b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1059,7 +1059,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
 		/* Take a nap, wait for some writeback to complete */
 		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
-			blk_congestion_wait(WRITE, HZ/10);
+			congestion_wait(WRITE, HZ/10);
 	}
 	/* top priority shrink_caches still had more to do? don't OOM, then */
 	if (!sc.all_unreclaimable)
@@ -1214,7 +1214,7 @@ scan:
 		 * another pass across the zones.
 		 */
 		if (total_scanned && priority < DEF_PRIORITY - 2)
-			blk_congestion_wait(WRITE, HZ/10);
+			congestion_wait(WRITE, HZ/10);
 
 		/*
 		 * We do this so kswapd doesn't build up large priorities for
@@ -1458,7 +1458,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 			goto out;
 
 		if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-			blk_congestion_wait(WRITE, HZ / 10);
+			congestion_wait(WRITE, HZ / 10);
 	}
 
 	lru_pages = 0;