author	Linus Torvalds <torvalds@linux-foundation.org>	2010-06-29 13:42:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-06-29 13:42:52 -0400
commit	984bc9601f64fd341b8573021d7c999f1f1499a9 (patch)
tree	06c616a6285b00d1e577902face3e8a986370b34
parent	57439f878afafefad8836ebf5c49da2a0a746105 (diff)
parent	1b99973f1c82707e46e8cb9416865a1e955e8f8c (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Don't count_vm_events for discard bio in submit_bio.
  cfq: fix recursive call in cfq_blkiocg_update_completion_stats()
  cfq-iosched: Fixed boot warning with BLK_CGROUP=y and CFQ_GROUP_IOSCHED=n
  cfq: Don't allow queue merges for queues that have no process references
  block: fix DISCARD_BARRIER requests
  cciss: set SCSI max cmd len to 16, as default is wrong
  cpqarray: fix two more wrong section type
  cpqarray: fix wrong __init type on pci probe function
  drbd: Fixed a race between disk-attach and unexpected state changes
  writeback: fix pin_sb_for_writeback
  writeback: add missing requeue_io in writeback_inodes_wb
  writeback: simplify and split bdi_start_writeback
  writeback: simplify wakeup_flusher_threads
  writeback: fix writeback_inodes_wb from writeback_inodes_sb
  writeback: enforce s_umount locking in writeback_inodes_sb
  writeback: queue work on stack in writeback_inodes_sb
  writeback: fix writeback completion notifications
-rw-r--r--	block/blk-core.c	9
-rw-r--r--	block/cfq-iosched.c	67
-rw-r--r--	block/cfq.h	115
-rw-r--r--	drivers/block/cciss_scsi.c	1
-rw-r--r--	drivers/block/cpqarray.c	6
-rw-r--r--	drivers/block/drbd/drbd_main.c	2
-rw-r--r--	drivers/block/drbd/drbd_nl.c	6
-rw-r--r--	fs/fs-writeback.c	254
-rw-r--r--	fs/ubifs/budget.c	2
-rw-r--r--	include/linux/backing-dev.h	4
-rw-r--r--	include/linux/drbd.h	2
-rw-r--r--	mm/page-writeback.c	5
12 files changed, 280 insertions(+), 193 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f84cce42fc58..f0640d7f800f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1149,13 +1149,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
 		req->cmd_flags |= REQ_DISCARD;
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-			req->cmd_flags |= REQ_SOFTBARRIER;
-	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 		req->cmd_flags |= REQ_HARDBARRIER;
-
 	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
 	if (bio_rw_flagged(bio, BIO_RW_META))
@@ -1586,7 +1583,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio)) {
+	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5ff4f4850e71..7982b830db58 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -14,7 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
-#include "blk-cgroup.h"
+#include "cfq.h"
 
 /*
  * tunables
@@ -879,7 +879,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
-	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -939,8 +939,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
-	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
-	blkiocg_set_start_empty_time(&cfqg->blkg);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -995,7 +995,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 
 	/* Add group onto cgroup list */
 	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
@@ -1079,7 +1079,7 @@ static void cfq_release_cfq_groups(struct cfq_data *cfqd)
 	 * it from cgroup list, then it will take care of destroying
 	 * cfqg also.
 	 */
-	if (!blkiocg_del_blkio_group(&cfqg->blkg))
+	if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
 		cfq_destroy_cfqg(cfqd, cfqg);
 	}
 }
@@ -1421,10 +1421,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 }
@@ -1482,8 +1482,8 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1518,8 +1518,8 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
-					cfq_bio_sync(bio));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+					bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
 static void
@@ -1539,8 +1539,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
-					rq_is_sync(next));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(next), rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1571,7 +1571,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1580,7 +1580,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1911,7 +1911,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1931,7 +1931,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-	blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
 
@@ -1986,6 +1986,15 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	int process_refs, new_process_refs;
 	struct cfq_queue *__cfqq;
 
+	/*
+	 * If there are no process references on the new_cfqq, then it is
+	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
+	 * chain may have dropped their last reference (not just their
+	 * last process reference).
+	 */
+	if (!cfqq_process_refs(new_cfqq))
+		return;
+
 	/* Avoid a circular list and skip interim queue merges */
 	while ((__cfqq = new_cfqq->new_cfqq)) {
 		if (__cfqq == cfqq)
@@ -1994,17 +2003,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	}
 
 	process_refs = cfqq_process_refs(cfqq);
+	new_process_refs = cfqq_process_refs(new_cfqq);
 	/*
 	 * If the process for the cfqq has gone away, there is no
 	 * sense in merging the queues.
 	 */
-	if (process_refs == 0)
+	if (process_refs == 0 || new_process_refs == 0)
 		return;
 
 	/*
 	 * Merge in the direction of the lesser amount of work.
 	 */
-	new_process_refs = cfqq_process_refs(new_cfqq);
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
 		atomic_add(process_refs, &new_cfqq->ref);
@@ -3248,7 +3257,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfq_clear_cfqq_wait_request(cfqq);
 			__blk_run_queue(cfqd->queue);
 		} else {
-			blkiocg_update_idle_time_stats(
+			cfq_blkiocg_update_idle_time_stats(
 						&cfqq->cfqg->blkg);
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
@@ -3276,7 +3285,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -3364,9 +3373,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
-	blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
-			rq_io_start_time_ns(rq), rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
+			rq_data_dir(rq), rq_is_sync(rq));
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3730,7 +3739,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
 	cfq_put_async_queues(cfqd);
 	cfq_release_cfq_groups(cfqd);
-	blkiocg_del_blkio_group(&cfqd->root_group.blkg);
+	cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
 
 	spin_unlock_irq(q->queue_lock);
 
@@ -3798,8 +3807,8 @@ static void *cfq_init_queue(struct request_queue *q)
 	 */
 	atomic_set(&cfqg->ref, 1);
 	rcu_read_lock();
-	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
-					0);
+	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
+					(void *)cfqd, 0);
 	rcu_read_unlock();
 #endif
 	/*
diff --git a/block/cfq.h b/block/cfq.h
new file mode 100644
index 000000000000..93448e5a2e41
--- /dev/null
+++ b/block/cfq.h
@@ -0,0 +1,115 @@
+#ifndef _CFQ_H
+#define _CFQ_H
+#include "blk-cgroup.h"
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+	struct blkio_group *curr_blkg, bool direction, bool sync)
+{
+	blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+			unsigned long dequeue)
+{
+	blkiocg_update_dequeue_stats(blkg, dequeue);
+}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+			unsigned long time)
+{
+	blkiocg_update_timeslice_used(blkg, time);
+}
+
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+	blkiocg_set_start_empty_time(blkg);
+}
+
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+				bool direction, bool sync)
+{
+	blkiocg_update_io_remove_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+			bool direction, bool sync)
+{
+	blkiocg_update_io_merged_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_idle_time_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_avg_queue_size_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_set_idle_time_stats(blkg);
+}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+			uint64_t bytes, bool direction, bool sync)
+{
+	blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
+				direction, sync);
+}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+			struct blkio_group *blkg, void *key, dev_t dev) {
+	blkiocg_add_blkio_group(blkcg, blkg, key, dev);
+}
+
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return blkiocg_del_blkio_group(blkg);
+}
+
+#else /* CFQ_GROUP_IOSCHED */
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+	struct blkio_group *curr_blkg, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+			unsigned long dequeue) {}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+			unsigned long time) {}
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+				bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+			bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+}
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+			uint64_t bytes, bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+			struct blkio_group *blkg, void *key, dev_t dev) {}
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return 0;
+}
+
+#endif /* CFQ_GROUP_IOSCHED */
+#endif
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 3381505c8a6c..72dae92f3cab 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -861,6 +861,7 @@ cciss_scsi_detect(int ctlr)
 	sh->n_io_port = 0;	// I don't think we use these two...
 	sh->this_id = SELF_SCSI_ID;
 	sh->sg_tablesize = hba[ctlr]->maxsgentries;
+	sh->max_cmd_len = MAX_COMMAND_SIZE;
 
 	((struct cciss_scsi_adapter_data_t *)
 		hba[ctlr]->scsi_ctlr)->scsi_host = sh;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 91d11631cec9..abb4ec6690fc 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -386,7 +386,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
 }
 
 /* pdev is NULL for eisa */
-static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 {
 	struct request_queue *q;
 	int j;
@@ -503,7 +503,7 @@ Enomem4:
 	return -1;
 }
 
-static int __init cpqarray_init_one( struct pci_dev *pdev,
+static int __devinit cpqarray_init_one( struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
 	int i;
@@ -740,7 +740,7 @@ __setup("smart2=", cpqarray_setup);
 /*
  * Find an EISA controller's signature. Set up an hba if we find it.
  */
-static int __init cpqarray_eisa_detect(void)
+static int __devinit cpqarray_eisa_detect(void)
 {
 	int i=0, j;
 	__u32 board_id;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6b077f93acc6..7258c95e895e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1236,8 +1236,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	/* Last part of the attaching process ... */
 	if (ns.conn >= C_CONNECTED &&
 	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
-		kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
-		mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
 		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
 		drbd_send_uuids(mdev);
 		drbd_send_state(mdev);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 632e3245d1bb..2151f18b21de 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1114,6 +1114,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		mdev->new_state_tmp.i = ns.i;
 		ns.i = os.i;
 		ns.disk = D_NEGOTIATING;
+
+		/* We expect to receive up-to-date UUIDs soon.
+		   To avoid a race in receive_state, free p_uuid while
+		   holding req_lock. I.e. atomic with the state change */
+		kfree(mdev->p_uuid);
+		mdev->p_uuid = NULL;
 	}
 
 	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1d1088f48bc2..0609607d3955 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -63,24 +63,16 @@ struct bdi_work {
 };
 
 enum {
-	WS_USED_B = 0,
-	WS_ONSTACK_B,
+	WS_INPROGRESS = 0,
+	WS_ONSTACK,
 };
 
-#define WS_USED (1 << WS_USED_B)
-#define WS_ONSTACK (1 << WS_ONSTACK_B)
-
-static inline bool bdi_work_on_stack(struct bdi_work *work)
-{
-	return test_bit(WS_ONSTACK_B, &work->state);
-}
-
 static inline void bdi_work_init(struct bdi_work *work,
 				 struct wb_writeback_args *args)
 {
 	INIT_RCU_HEAD(&work->rcu_head);
 	work->args = *args;
-	work->state = WS_USED;
+	__set_bit(WS_INPROGRESS, &work->state);
 }
 
 /**
@@ -95,43 +87,16 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 	return !list_empty(&bdi->work_list);
 }
 
-static void bdi_work_clear(struct bdi_work *work)
-{
-	clear_bit(WS_USED_B, &work->state);
-	smp_mb__after_clear_bit();
-	/*
-	 * work can have disappeared at this point. bit waitq functions
-	 * should be able to tolerate this, provided bdi_sched_wait does
-	 * not dereference it's pointer argument.
-	 */
-	wake_up_bit(&work->state, WS_USED_B);
-}
-
 static void bdi_work_free(struct rcu_head *head)
 {
 	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
 
-	if (!bdi_work_on_stack(work))
-		kfree(work);
-	else
-		bdi_work_clear(work);
-}
-
-static void wb_work_complete(struct bdi_work *work)
-{
-	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
-	int onstack = bdi_work_on_stack(work);
+	clear_bit(WS_INPROGRESS, &work->state);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&work->state, WS_INPROGRESS);
 
-	/*
-	 * For allocated work, we can clear the done/seen bit right here.
-	 * For on-stack work, we need to postpone both the clear and free
-	 * to after the RCU grace period, since the stack could be invalidated
-	 * as soon as bdi_work_clear() has done the wakeup.
-	 */
-	if (!onstack)
-		bdi_work_clear(work);
-	if (sync_mode == WB_SYNC_NONE || onstack)
-		call_rcu(&work->rcu_head, bdi_work_free);
+	if (!test_bit(WS_ONSTACK, &work->state))
+		kfree(work);
 }
 
 static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
@@ -147,7 +112,7 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 		list_del_rcu(&work->list);
 		spin_unlock(&bdi->wb_lock);
 
-		wb_work_complete(work);
+		call_rcu(&work->rcu_head, bdi_work_free);
 	}
 }
 
@@ -185,9 +150,9 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
  * Used for on-stack allocated work items. The caller needs to wait until
  * the wb threads have acked the work before it's safe to continue.
  */
-static void bdi_wait_on_work_clear(struct bdi_work *work)
+static void bdi_wait_on_work_done(struct bdi_work *work)
 {
-	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
+	wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
 		    TASK_UNINTERRUPTIBLE);
 }
 
@@ -213,37 +178,28 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 }
 
 /**
- * bdi_sync_writeback - start and wait for writeback
- * @bdi: the backing device to write from
+ * bdi_queue_work_onstack - start and wait for writeback
  * @sb: write inodes from this super_block
  *
  * Description:
- * This does WB_SYNC_ALL data integrity writeback and waits for the
- * IO to complete. Callers must hold the sb s_umount semaphore for
+ * This function initiates writeback and waits for the operation to
+ * complete. Callers must hold the sb s_umount semaphore for
  * reading, to avoid having the super disappear before we are done.
  */
-static void bdi_sync_writeback(struct backing_dev_info *bdi,
-			       struct super_block *sb)
+static void bdi_queue_work_onstack(struct wb_writeback_args *args)
 {
-	struct wb_writeback_args args = {
-		.sb		= sb,
-		.sync_mode	= WB_SYNC_ALL,
-		.nr_pages	= LONG_MAX,
-		.range_cyclic	= 0,
-	};
 	struct bdi_work work;
 
-	bdi_work_init(&work, &args);
-	work.state |= WS_ONSTACK;
+	bdi_work_init(&work, args);
+	__set_bit(WS_ONSTACK, &work.state);
 
-	bdi_queue_work(bdi, &work);
-	bdi_wait_on_work_clear(&work);
+	bdi_queue_work(args->sb->s_bdi, &work);
+	bdi_wait_on_work_done(&work);
 }
 
 /**
  * bdi_start_writeback - start writeback
  * @bdi: the backing device to write from
- * @sb: write inodes from this super_block
  * @nr_pages: the number of pages to write
  *
  * Description:
@@ -252,25 +208,34 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi,
  * completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
-			 long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 {
 	struct wb_writeback_args args = {
-		.sb		= sb,
 		.sync_mode	= WB_SYNC_NONE,
 		.nr_pages	= nr_pages,
 		.range_cyclic	= 1,
 	};
 
-	/*
-	 * We treat @nr_pages=0 as the special case to do background writeback,
-	 * ie. to sync pages until the background dirty threshold is reached.
-	 */
-	if (!nr_pages) {
-		args.nr_pages = LONG_MAX;
-		args.for_background = 1;
-	}
+	bdi_alloc_queue_work(bdi, &args);
+}
 
+/**
+ * bdi_start_background_writeback - start background writeback
+ * @bdi: the backing device to write from
+ *
+ * Description:
+ *   This does WB_SYNC_NONE background writeback. The IO is only
+ *   started when this function returns, we make no guarentees on
+ *   completion. Caller need not hold sb s_umount semaphore.
+ */
+void bdi_start_background_writeback(struct backing_dev_info *bdi)
+{
+	struct wb_writeback_args args = {
+		.sync_mode	= WB_SYNC_NONE,
+		.nr_pages	= LONG_MAX,
+		.for_background = 1,
+		.range_cyclic	= 1,
+	};
 	bdi_alloc_queue_work(bdi, &args);
 }
 
@@ -561,48 +526,30 @@ select_queue:
 	return ret;
 }
 
-static void unpin_sb_for_writeback(struct super_block *sb)
-{
-	up_read(&sb->s_umount);
-	put_super(sb);
-}
-
-enum sb_pin_state {
-	SB_PINNED,
-	SB_NOT_PINNED,
-	SB_PIN_FAILED
-};
-
 /*
- * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
+ * For background writeback the caller does not have the sb pinned
  * before calling writeback. So make sure that we do pin it, so it doesn't
  * go away while we are writing inodes from it.
  */
-static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
-					      struct super_block *sb)
+static bool pin_sb_for_writeback(struct super_block *sb)
 {
-	/*
-	 * Caller must already hold the ref for this
-	 */
-	if (wbc->sync_mode == WB_SYNC_ALL) {
-		WARN_ON(!rwsem_is_locked(&sb->s_umount));
-		return SB_NOT_PINNED;
-	}
 	spin_lock(&sb_lock);
+	if (list_empty(&sb->s_instances)) {
+		spin_unlock(&sb_lock);
+		return false;
+	}
+
 	sb->s_count++;
+	spin_unlock(&sb_lock);
+
 	if (down_read_trylock(&sb->s_umount)) {
-		if (sb->s_root) {
-			spin_unlock(&sb_lock);
-			return SB_PINNED;
-		}
-		/*
-		 * umounted, drop rwsem again and fall through to failure
-		 */
+		if (sb->s_root)
+			return true;
 		up_read(&sb->s_umount);
 	}
-	sb->s_count--;
-	spin_unlock(&sb_lock);
-	return SB_PIN_FAILED;
+
+	put_super(sb);
+	return false;
 }
 
 /*
@@ -681,24 +628,31 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
 		struct inode *inode = list_entry(wb->b_io.prev,
 						 struct inode, i_list);
 		struct super_block *sb = inode->i_sb;
-		enum sb_pin_state state;
 
-		if (wbc->sb && sb != wbc->sb) {
-			/* super block given and doesn't
-			   match, skip this inode */
-			redirty_tail(inode);
-			continue;
-		}
-		state = pin_sb_for_writeback(wbc, sb);
+		if (wbc->sb) {
+			/*
+			 * We are requested to write out inodes for a specific
+			 * superblock. This means we already have s_umount
+			 * taken by the caller which also waits for us to
+			 * complete the writeout.
+			 */
+			if (sb != wbc->sb) {
+				redirty_tail(inode);
+				continue;
+			}
 
-		if (state == SB_PIN_FAILED) {
-			requeue_io(inode);
-			continue;
+			WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+			ret = writeback_sb_inodes(sb, wb, wbc);
+		} else {
+			if (!pin_sb_for_writeback(sb)) {
+				requeue_io(inode);
+				continue;
+			}
+			ret = writeback_sb_inodes(sb, wb, wbc);
+			drop_super(sb);
 		}
-		ret = writeback_sb_inodes(sb, wb, wbc);
 
-		if (state == SB_PINNED)
-			unpin_sb_for_writeback(sb);
 		if (ret)
 			break;
 	}
@@ -911,7 +865,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 		 * If this isn't a data integrity operation, just notify
 		 * that we have seen this work and we are now starting it.
 		 */
-		if (args.sync_mode == WB_SYNC_NONE)
+		if (!test_bit(WS_ONSTACK, &work->state))
 			wb_clear_pending(wb, work);
 
 		wrote += wb_writeback(wb, &args);
@@ -920,7 +874,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 		 * This is a data integrity writeback, so only do the
 		 * notification when we have completed the work.
 		 */
-		if (args.sync_mode == WB_SYNC_ALL)
+		if (test_bit(WS_ONSTACK, &work->state))
 			wb_clear_pending(wb, work);
 	}
 
@@ -978,42 +932,32 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 }
 
 /*
- * Schedule writeback for all backing devices. This does WB_SYNC_NONE
- * writeback, for integrity writeback see bdi_sync_writeback().
+ * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
+ * the whole world.
  */
-static void bdi_writeback_all(struct super_block *sb, long nr_pages)
+void wakeup_flusher_threads(long nr_pages)
 {
+	struct backing_dev_info *bdi;
 	struct wb_writeback_args args = {
-		.sb		= sb,
-		.nr_pages	= nr_pages,
 		.sync_mode	= WB_SYNC_NONE,
 	};
-	struct backing_dev_info *bdi;
 
-	rcu_read_lock();
+	if (nr_pages) {
+		args.nr_pages = nr_pages;
+	} else {
+		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
+				global_page_state(NR_UNSTABLE_NFS);
+	}
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-
 		bdi_alloc_queue_work(bdi, &args);
 	}
-
 	rcu_read_unlock();
 }
 
-/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world.
- */
-void wakeup_flusher_threads(long nr_pages)
-{
-	if (nr_pages == 0)
-		nr_pages = global_page_state(NR_FILE_DIRTY) +
-				global_page_state(NR_UNSTABLE_NFS);
-	bdi_writeback_all(NULL, nr_pages);
-}
-
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1218,12 +1162,17 @@ void writeback_inodes_sb(struct super_block *sb)
 {
 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	long nr_to_write;
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_NONE,
+	};
+
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-	nr_to_write = nr_dirty + nr_unstable +
+	args.nr_pages = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
+	bdi_queue_work_onstack(&args);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1237,7 +1186,9 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 int writeback_inodes_sb_if_idle(struct super_block *sb)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
+		down_read(&sb->s_umount);
 		writeback_inodes_sb(sb);
+		up_read(&sb->s_umount);
 		return 1;
 	} else
 		return 0;
@@ -1253,7 +1204,16 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  */
 void sync_inodes_sb(struct super_block *sb)
 {
-	bdi_sync_writeback(sb->s_bdi, sb);
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_ALL,
+		.nr_pages	= LONG_MAX,
+		.range_cyclic	= 0,
+	};
+
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+	bdi_queue_work_onstack(&args);
 	wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 076ca50e9933..c8ff0d1ae5d3 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -62,7 +62,9 @@
  */
 static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
+	down_read(&c->vfs_sb->s_umount);
 	writeback_inodes_sb(c->vfs_sb);
+	up_read(&c->vfs_sb->s_umount);
 }
 
 /**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index aee5f6ce166e..9ae2889096b6 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -105,8 +105,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
-void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
-				long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_background_writeback(struct backing_dev_info *bdi);
 int bdi_writeback_task(struct bdi_writeback *wb);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_arm_supers_timer(void);
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 30da4ae48972..b8d2516668aa 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.8rc2"
+#define REL_VERSION "8.3.8"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 94
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bbd396ac9546..54f28bd493d3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, NULL, 0);
+		bdi_start_background_writeback(bdi);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -705,9 +705,8 @@ void laptop_mode_timer_fn(unsigned long data)
 	 * We want to write everything out, not just down to the dirty
 	 * threshold
 	 */
-
 	if (bdi_has_dirty_io(&q->backing_dev_info))
-		bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
+		bdi_start_writeback(&q->backing_dev_info, nr_pages);
 }
 
 /*