author     Linus Torvalds <torvalds@linux-foundation.org>   2010-08-10 18:22:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-08-10 18:22:42 -0400
commit     2f9e825d3e0e2b407ae8f082de5c00afcf7378fb (patch)
tree       f8b3ee40674ce4acd5508a0a0bf52a30904caf6c /fs
parent     7ae0dea900b027cd90e8a3e14deca9a19e17638b (diff)
parent     de75d60d5ea235e6e09f4962ab22541ce0fe176a (diff)
Merge branch 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block: (149 commits)
block: make sure that REQ_* types are seen even with CONFIG_BLOCK=n
xen-blkfront: fix missing out label
blkdev: fix blkdev_issue_zeroout return value
block: update request stacking methods to support discards
block: fix missing export of blk_types.h
writeback: fix bad _bh spinlock nesting
drbd: revert "delay probes", feature is being re-implemented differently
drbd: Initialize all members of sync_conf to their defaults [Bugz 315]
drbd: Disable delay probes for the upcomming release
writeback: cleanup bdi_register
writeback: add new tracepoints
writeback: remove unnecessary init_timer call
writeback: optimize periodic bdi thread wakeups
writeback: prevent unnecessary bdi threads wakeups
writeback: move bdi threads exiting logic to the forker thread
writeback: restructure bdi forker loop a little
writeback: move last_active to bdi
writeback: do not remove bdi from bdi_list
writeback: simplify bdi code a little
writeback: do not lose wake-ups in bdi threads
...
Fixed up pretty trivial conflicts in drivers/block/virtio_blk.c and
drivers/scsi/scsi_error.c as per Jens.
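
Most of the fs/ churn below comes from the block layer folding the old per-bio BIO_RW_* bit numbers and the request-level REQ_* flags into one shared set in blk_types.h, so filesystems now test pre-shifted masks such as REQ_WRITE and REQ_SYNC instead of open-coding shifts. A minimal userspace sketch of the two styles; the flag values here are illustrative, not the real kernel ones:

#include <stdio.h>

/* Old style: BIO_RW_* were bit *numbers*, shifted at every use site. */
#define BIO_RW		0
#define BIO_RW_SYNCIO	1

/* New style: REQ_* are pre-shifted bit *masks* shared with requests. */
#define REQ_WRITE	(1 << 0)
#define REQ_SYNC	(1 << 1)

int main(void)
{
	unsigned long bi_rw = 0;

	bi_rw |= (1 << BIO_RW);		/* old: caller must remember the shift */
	bi_rw |= REQ_SYNC;		/* new: the mask is ready to use */

	if (bi_rw & REQ_WRITE)
		printf("write%s\n", (bi_rw & REQ_SYNC) ? ", sync" : "");
	return 0;
}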
Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c               5
-rw-r--r--  fs/block_dev.c        10
-rw-r--r--  fs/btrfs/disk-io.c     8
-rw-r--r--  fs/btrfs/inode.c       6
-rw-r--r--  fs/btrfs/volumes.c    18
-rw-r--r--  fs/coda/psdev.c       12
-rw-r--r--  fs/coda/upcall.c      12
-rw-r--r--  fs/exofs/ios.c         2
-rw-r--r--  fs/fs-writeback.c    161
-rw-r--r--  fs/gfs2/log.c          4
-rw-r--r--  fs/gfs2/meta_io.c      8
-rw-r--r--  fs/gfs2/ops_fstype.c   2
-rw-r--r--  fs/nilfs2/segbuf.c     2
-rw-r--r--  fs/splice.c           14
14 files changed, 147 insertions, 117 deletions
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -843,7 +843,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	if (!bio)
 		goto out_bmd;
 
-	bio->bi_rw |= (!write_to_vm << BIO_RW);
+	if (!write_to_vm)
+		bio->bi_rw |= REQ_WRITE;
 
 	ret = 0;
 
@@ -1024,7 +1025,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 	 * set data direction, and check if mapped pages need bouncing
 	 */
 	if (!write_to_vm)
-		bio->bi_rw |= (1 << BIO_RW);
+		bio->bi_rw |= REQ_WRITE;
 
 	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
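
The first fs/bio.c hunk is more than a rename: the old `bio->bi_rw |= (!write_to_vm << BIO_RW)` only worked because BIO_RW happened to be bit 0, so shifting a boolean by it was harmless. Once the flag is a pre-shifted mask, that idiom silently sets the wrong bit, hence the explicit `if`. A hedged userspace demonstration (the mask value is made up):

#include <assert.h>

#define REQ_WRITE (1 << 3)	/* pretend WRITE is no longer bit 0 */

int main(void)
{
	int write_to_vm = 0;		/* i.e. this is a write to the device */
	unsigned long old_rw = 0, new_rw = 0;

	old_rw |= (!write_to_vm << REQ_WRITE);	/* shifts by 8: sets bit 8, not bit 3 */
	if (!write_to_vm)
		new_rw |= REQ_WRITE;		/* sets the intended bit */

	assert(new_rw & REQ_WRITE);
	assert(!(old_rw & REQ_WRITE));	/* the old idiom misses the flag */
	return 0;
}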
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 451afbd543b5..66411463b734 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1346,13 +1346,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 		return ret;
 	}
 
-	lock_kernel();
 restart:
 
 	ret = -ENXIO;
 	disk = get_gendisk(bdev->bd_dev, &partno);
 	if (!disk)
-		goto out_unlock_kernel;
+		goto out;
 
 	mutex_lock_nested(&bdev->bd_mutex, for_part);
 	if (!bdev->bd_openers) {
@@ -1432,7 +1431,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 	if (for_part)
 		bdev->bd_part_count++;
 	mutex_unlock(&bdev->bd_mutex);
-	unlock_kernel();
 	return 0;
 
 out_clear:
@@ -1445,9 +1443,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 	bdev->bd_contains = NULL;
 out_unlock_bdev:
 	mutex_unlock(&bdev->bd_mutex);
-out_unlock_kernel:
-	unlock_kernel();
-
+out:
 	if (disk)
 		module_put(disk->fops->owner);
 	put_disk(disk);
@@ -1516,7 +1512,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 	struct block_device *victim = NULL;
 
 	mutex_lock_nested(&bdev->bd_mutex, for_part);
-	lock_kernel();
 	if (for_part)
 		bdev->bd_part_count--;
 
@@ -1541,7 +1536,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 		victim = bdev->bd_contains;
 		bdev->bd_contains = NULL;
 	}
-	unlock_kernel();
 	mutex_unlock(&bdev->bd_mutex);
 	bdput(bdev);
 	if (victim)
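
The fs/block_dev.c hunks are part of the big-kernel-lock push-down: bd_mutex already serializes open and close, so lock_kernel()/unlock_kernel() can go, and with them the extra out_unlock_kernel unwind label. A sketch of the resulting error-unwind shape, modelled in userspace with a pthread mutex standing in for bd_mutex (names here are hypothetical):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *get_gendisk_stub(void) { return NULL; }	/* force ENXIO */

static int blkdev_get_like(void)
{
	void *disk;
	int ret = -ENXIO;

	disk = get_gendisk_stub();
	if (!disk)
		goto out;		/* was: goto out_unlock_kernel */

	pthread_mutex_lock(&bd_mutex);
	/* ... open the device, bump bd_openers ... */
	ret = 0;
	pthread_mutex_unlock(&bd_mutex);
out:
	return ret;
}

int main(void) { return blkdev_get_like() == -ENXIO ? 0 : 1; }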
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 34f7c375567e..64f10082f048 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -480,7 +480,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
 	end_io_wq->work.func = end_workqueue_fn;
 	end_io_wq->work.flags = 0;
 
-	if (bio->bi_rw & (1 << BIO_RW)) {
+	if (bio->bi_rw & REQ_WRITE) {
 		if (end_io_wq->metadata)
 			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
 					   &end_io_wq->work);
@@ -604,7 +604,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 	atomic_inc(&fs_info->nr_async_submits);
 
-	if (rw & (1 << BIO_RW_SYNCIO))
+	if (rw & REQ_SYNC)
 		btrfs_set_work_high_prio(&async->work);
 
 	btrfs_queue_worker(&fs_info->workers, &async->work);
@@ -668,7 +668,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 				  bio, 1);
 	BUG_ON(ret);
 
-	if (!(rw & (1 << BIO_RW))) {
+	if (!(rw & REQ_WRITE)) {
 		/*
 		 * called for a read, do the setup so that checksum validation
 		 * can happen in the async kernel threads
@@ -1427,7 +1427,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 	 * ram and up to date before trying to verify things. For
 	 * blocksize <= pagesize, it is basically a noop
 	 */
-	if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+	if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
 	    !bio_ready_for_csum(bio)) {
 		btrfs_queue_worker(&fs_info->endio_meta_workers,
 				   &end_io_wq->work);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8976c3343a96..c03864406af3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1429,7 +1429,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
 	BUG_ON(ret);
 
-	if (!(rw & (1 << BIO_RW))) {
+	if (!(rw & REQ_WRITE)) {
 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
 			return btrfs_submit_compressed_read(inode, bio,
 						    mirror_num, bio_flags);
@@ -1841,7 +1841,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
 	bio->bi_size = 0;
 
 	bio_add_page(bio, page, failrec->len, start - page_offset(page));
-	if (failed_bio->bi_rw & (1 << BIO_RW))
+	if (failed_bio->bi_rw & REQ_WRITE)
 		rw = WRITE;
 	else
 		rw = READ;
@@ -5647,7 +5647,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
 	struct bio_vec *bvec = bio->bi_io_vec;
 	u64 start;
 	int skip_sum;
-	int write = rw & (1 << BIO_RW);
+	int write = rw & REQ_WRITE;
 	int ret = 0;
 
 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d6e3af8be95b..dd318ff280b2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -258,7 +258,7 @@ loop_lock:
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
+		if (cur->bi_rw & REQ_SYNC)
 			num_sync_run++;
 
 		submit_bio(cur->bi_rw, cur);
@@ -2651,7 +2651,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	int max_errors = 0;
 	struct btrfs_multi_bio *multi = NULL;
 
-	if (multi_ret && !(rw & (1 << BIO_RW)))
+	if (multi_ret && !(rw & REQ_WRITE))
 		stripes_allocated = 1;
 again:
 	if (multi_ret) {
@@ -2687,7 +2687,7 @@ again:
 		mirror_num = 0;
 
 	/* if our multi bio struct is too small, back off and try again */
-	if (rw & (1 << BIO_RW)) {
+	if (rw & REQ_WRITE) {
 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
 				 BTRFS_BLOCK_GROUP_DUP)) {
 			stripes_required = map->num_stripes;
@@ -2697,7 +2697,7 @@ again:
 			max_errors = 1;
 		}
 	}
-	if (multi_ret && (rw & (1 << BIO_RW)) &&
+	if (multi_ret && (rw & REQ_WRITE) &&
 	    stripes_allocated < stripes_required) {
 		stripes_allocated = map->num_stripes;
 		free_extent_map(em);
@@ -2733,7 +2733,7 @@ again:
 	num_stripes = 1;
 	stripe_index = 0;
 	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (unplug_page || (rw & (1 << BIO_RW)))
+		if (unplug_page || (rw & REQ_WRITE))
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -2744,7 +2744,7 @@ again:
 		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-		if (rw & (1 << BIO_RW))
+		if (rw & REQ_WRITE)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -2755,7 +2755,7 @@ again:
 		stripe_index = do_div(stripe_nr, factor);
 		stripe_index *= map->sub_stripes;
 
-		if (unplug_page || (rw & (1 << BIO_RW)))
+		if (unplug_page || (rw & REQ_WRITE))
 			num_stripes = map->sub_stripes;
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
@@ -2945,7 +2945,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
 	struct btrfs_pending_bios *pending_bios;
 
 	/* don't bother with additional async steps for reads, right now */
-	if (!(rw & (1 << BIO_RW))) {
+	if (!(rw & REQ_WRITE)) {
 		bio_get(bio);
 		submit_bio(rw, bio);
 		bio_put(bio);
@@ -2964,7 +2964,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
 	bio->bi_rw |= rw;
 
 	spin_lock(&device->io_lock);
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
+	if (bio->bi_rw & REQ_SYNC)
 		pending_bios = &device->pending_sync_bios;
 	else
 		pending_bios = &device->pending_bios;
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 66b9cf79c5ba..de89645777c7 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -177,7 +177,7 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
 			nbytes = req->uc_outSize; /* don't have more space! */
 		}
 		if (copy_from_user(req->uc_data, buf, nbytes)) {
-			req->uc_flags |= REQ_ABORT;
+			req->uc_flags |= CODA_REQ_ABORT;
 			wake_up(&req->uc_sleep);
 			retval = -EFAULT;
 			goto out;
@@ -254,8 +254,8 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
 		retval = -EFAULT;
 
 	/* If request was not a signal, enqueue and don't free */
-	if (!(req->uc_flags & REQ_ASYNC)) {
-		req->uc_flags |= REQ_READ;
+	if (!(req->uc_flags & CODA_REQ_ASYNC)) {
+		req->uc_flags |= CODA_REQ_READ;
 		list_add_tail(&(req->uc_chain), &vcp->vc_processing);
 		goto out;
 	}
@@ -315,19 +315,19 @@ static int coda_psdev_release(struct inode * inode, struct file * file)
 		list_del(&req->uc_chain);
 
 		/* Async requests need to be freed here */
-		if (req->uc_flags & REQ_ASYNC) {
+		if (req->uc_flags & CODA_REQ_ASYNC) {
 			CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
 			kfree(req);
 			continue;
 		}
-		req->uc_flags |= REQ_ABORT;
+		req->uc_flags |= CODA_REQ_ABORT;
 		wake_up(&req->uc_sleep);
 	}
 
 	list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) {
 		list_del(&req->uc_chain);
 
-		req->uc_flags |= REQ_ABORT;
+		req->uc_flags |= CODA_REQ_ABORT;
 		wake_up(&req->uc_sleep);
 	}
 
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index f09c5ed76f6c..b8893ab6f9e6 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -604,7 +604,7 @@ static void coda_unblock_signals(sigset_t *old)
 	(((r)->uc_opcode != CODA_CLOSE && \
 	  (r)->uc_opcode != CODA_STORE && \
 	  (r)->uc_opcode != CODA_RELEASE) || \
-	 (r)->uc_flags & REQ_READ))
+	 (r)->uc_flags & CODA_REQ_READ))
 
 static inline void coda_waitfor_upcall(struct upc_req *req)
 {
@@ -624,7 +624,7 @@ static inline void coda_waitfor_upcall(struct upc_req *req)
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
 		/* got a reply */
-		if (req->uc_flags & (REQ_WRITE | REQ_ABORT))
+		if (req->uc_flags & (CODA_REQ_WRITE | CODA_REQ_ABORT))
 			break;
 
 		if (blocked && time_after(jiffies, timeout) &&
@@ -708,7 +708,7 @@ static int coda_upcall(struct venus_comm *vcp,
 	coda_waitfor_upcall(req);
 
 	/* Op went through, interrupt or not... */
-	if (req->uc_flags & REQ_WRITE) {
+	if (req->uc_flags & CODA_REQ_WRITE) {
 		out = (union outputArgs *)req->uc_data;
 		/* here we map positive Venus errors to kernel errors */
 		error = -out->oh.result;
@@ -717,13 +717,13 @@ static int coda_upcall(struct venus_comm *vcp,
 	}
 
 	error = -EINTR;
-	if ((req->uc_flags & REQ_ABORT) || !signal_pending(current)) {
+	if ((req->uc_flags & CODA_REQ_ABORT) || !signal_pending(current)) {
 		printk(KERN_WARNING "coda: Unexpected interruption.\n");
 		goto exit;
 	}
 
 	/* Interrupted before venus read it. */
-	if (!(req->uc_flags & REQ_READ))
+	if (!(req->uc_flags & CODA_REQ_READ))
 		goto exit;
 
 	/* Venus saw the upcall, make sure we can send interrupt signal */
@@ -747,7 +747,7 @@ static int coda_upcall(struct venus_comm *vcp,
 	sig_inputArgs->ih.opcode = CODA_SIGNAL;
 	sig_inputArgs->ih.unique = req->uc_unique;
 
-	sig_req->uc_flags = REQ_ASYNC;
+	sig_req->uc_flags = CODA_REQ_ASYNC;
 	sig_req->uc_opcode = sig_inputArgs->ih.opcode;
 	sig_req->uc_unique = sig_inputArgs->ih.unique;
 	sig_req->uc_inSize = sizeof(struct coda_in_hdr);
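
The coda changes are purely mechanical: its private upcall flags were also called REQ_*, which now collides with the block layer's global REQ_* names once blk_types.h is visible everywhere, so they gain a CODA_ prefix. A compile-time illustration of why the rename is needed (all values invented):

/* block layer (global, new in this merge) */
#define REQ_WRITE	0x1

/* coda's old private flag of the same name would now either clash or
 * silently shadow the block-layer value:
 *
 *	#define REQ_WRITE 0x4	-- redefinition warning/error
 *
 * so the flags get an unambiguous prefix instead: */
#define CODA_REQ_ASYNC	0x1
#define CODA_REQ_READ	0x2
#define CODA_REQ_WRITE	0x4
#define CODA_REQ_ABORT	0x8

int main(void) { return (CODA_REQ_WRITE & REQ_WRITE) ? 1 : 0; }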
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 4337cad7777b..e2732203fa93 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -599,7 +599,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
 		} else {
 			bio = master_dev->bio;
 			/* FIXME: bio_set_dir() */
-			bio->bi_rw |= (1 << BIO_RW);
+			bio->bi_rw |= REQ_WRITE;
 		}
 
 		osd_req_write(or, &ios->obj, per_dev->offset, bio,
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b7c7586caea1..2f76c4a081a2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -26,15 +26,9 @@
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
+#include <linux/tracepoint.h>
 #include "internal.h"
 
-#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
-
-/*
- * We don't actually have pdflush, but this one is exported though /proc...
- */
-int nr_pdflush_threads;
-
 /*
  * Passed into wb_writeback(), essentially a subset of writeback_control
  */
@@ -50,6 +44,21 @@ struct wb_writeback_work {
 	struct completion *done;	/* set if the caller waits */
 };
 
+/*
+ * Include the creation of the trace points after defining the
+ * wb_writeback_work structure so that the definition remains local to this
+ * file.
+ */
+#define CREATE_TRACE_POINTS
+#include <trace/events/writeback.h>
+
+#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
+
+/*
+ * We don't actually have pdflush, but this one is exported though /proc...
+ */
+int nr_pdflush_threads;
+
 /**
  * writeback_in_progress - determine whether there is writeback in progress
  * @bdi: the device's backing_dev_info structure.
@@ -65,22 +74,21 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 static void bdi_queue_work(struct backing_dev_info *bdi,
 		struct wb_writeback_work *work)
 {
-	spin_lock(&bdi->wb_lock);
-	list_add_tail(&work->list, &bdi->work_list);
-	spin_unlock(&bdi->wb_lock);
+	trace_writeback_queue(bdi, work);
 
-	/*
-	 * If the default thread isn't there, make sure we add it. When
-	 * it gets created and wakes up, we'll run this work.
-	 */
-	if (unlikely(list_empty_careful(&bdi->wb_list)))
+	spin_lock_bh(&bdi->wb_lock);
+	list_add_tail(&work->list, &bdi->work_list);
+	if (bdi->wb.task) {
+		wake_up_process(bdi->wb.task);
+	} else {
+		/*
+		 * The bdi thread isn't there, wake up the forker thread which
+		 * will create and run it.
+		 */
+		trace_writeback_nothread(bdi, work);
 		wake_up_process(default_backing_dev_info.wb.task);
-	else {
-		struct bdi_writeback *wb = &bdi->wb;
-
-		if (wb->task)
-			wake_up_process(wb->task);
 	}
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
@@ -95,8 +103,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	 */
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work) {
-		if (bdi->wb.task)
+		if (bdi->wb.task) {
+			trace_writeback_nowork(bdi);
 			wake_up_process(bdi->wb.task);
+		}
 		return;
 	}
 
@@ -643,10 +653,14 @@ static long wb_writeback(struct bdi_writeback *wb,
 		wbc.more_io = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		wbc.pages_skipped = 0;
+
+		trace_wbc_writeback_start(&wbc, wb->bdi);
 		if (work->sb)
 			__writeback_inodes_sb(work->sb, wb, &wbc);
 		else
 			writeback_inodes_wb(wb, &wbc);
+		trace_wbc_writeback_written(&wbc, wb->bdi);
+
 		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 
@@ -674,6 +688,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		if (!list_empty(&wb->b_more_io))  {
 			inode = list_entry(wb->b_more_io.prev,
 						struct inode, i_list);
+			trace_wbc_writeback_wait(&wbc, wb->bdi);
 			inode_wait_for_writeback(inode);
 		}
 		spin_unlock(&inode_lock);
@@ -686,17 +701,17 @@ static long wb_writeback(struct bdi_writeback *wb,
  * Return the next wb_writeback_work struct that hasn't been processed yet.
  */
 static struct wb_writeback_work *
-get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb)
+get_next_work_item(struct backing_dev_info *bdi)
 {
 	struct wb_writeback_work *work = NULL;
 
-	spin_lock(&bdi->wb_lock);
+	spin_lock_bh(&bdi->wb_lock);
 	if (!list_empty(&bdi->work_list)) {
 		work = list_entry(bdi->work_list.next,
 				  struct wb_writeback_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock(&bdi->wb_lock);
+	spin_unlock_bh(&bdi->wb_lock);
 	return work;
 }
 
@@ -744,7 +759,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 	struct wb_writeback_work *work;
 	long wrote = 0;
 
-	while ((work = get_next_work_item(bdi, wb)) != NULL) {
+	while ((work = get_next_work_item(bdi)) != NULL) {
 		/*
 		 * Override sync mode, in case we must wait for completion
 		 * because this thread is exiting now.
@@ -752,6 +767,8 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 		if (force_wait)
 			work->sync_mode = WB_SYNC_ALL;
 
+		trace_writeback_exec(bdi, work);
+
 		wrote += wb_writeback(wb, work);
 
 		/*
@@ -776,47 +793,66 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
  * Handle writeback of dirty data for the device backed by this bdi. Also
  * wakes up periodically and does kupdated style flushing.
  */
-int bdi_writeback_task(struct bdi_writeback *wb)
+int bdi_writeback_thread(void *data)
 {
-	unsigned long last_active = jiffies;
-	unsigned long wait_jiffies = -1UL;
+	struct bdi_writeback *wb = data;
+	struct backing_dev_info *bdi = wb->bdi;
 	long pages_written;
 
+	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+	set_freezable();
+	wb->last_active = jiffies;
+
+	/*
+	 * Our parent may run at a different priority, just set us to normal
+	 */
+	set_user_nice(current, 0);
+
+	trace_writeback_thread_start(bdi);
+
 	while (!kthread_should_stop()) {
+		/*
+		 * Remove own delayed wake-up timer, since we are already awake
+		 * and we'll take care of the preriodic write-back.
+		 */
+		del_timer(&wb->wakeup_timer);
+
 		pages_written = wb_do_writeback(wb, 0);
 
+		trace_writeback_pages_written(pages_written);
+
 		if (pages_written)
-			last_active = jiffies;
-		else if (wait_jiffies != -1UL) {
-			unsigned long max_idle;
+			wb->last_active = jiffies;
 
-			/*
-			 * Longest period of inactivity that we tolerate. If we
-			 * see dirty data again later, the task will get
-			 * recreated automatically.
-			 */
-			max_idle = max(5UL * 60 * HZ, wait_jiffies);
-			if (time_after(jiffies, max_idle + last_active))
-				break;
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (!list_empty(&bdi->work_list)) {
+			__set_current_state(TASK_RUNNING);
+			continue;
 		}
 
-		if (dirty_writeback_interval) {
-			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
-			schedule_timeout_interruptible(wait_jiffies);
-		} else {
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (list_empty_careful(&wb->bdi->work_list) &&
-			    !kthread_should_stop())
-				schedule();
-			__set_current_state(TASK_RUNNING);
+		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
+			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
+		else {
+			/*
+			 * We have nothing to do, so can go sleep without any
+			 * timeout and save power. When a work is queued or
+			 * something is made dirty - we will be woken up.
+			 */
+			schedule();
 		}
 
 		try_to_freeze();
 	}
 
+	/* Flush any work that raced with us exiting */
+	if (!list_empty(&bdi->work_list))
+		wb_do_writeback(wb, 1);
+
+	trace_writeback_thread_stop(bdi);
 	return 0;
 }
 
+
 /*
  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
  * the whole world.
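
The rewritten main loop above sets TASK_INTERRUPTIBLE *before* re-checking bdi->work_list, so a wake_up_process() racing with the check leaves the thread runnable instead of letting it sleep through new work. A rough userspace analogue of the same check-then-sleep discipline, with a condition variable standing in for the scheduler (an assumed, simplified model, not kernel code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool have_work, should_stop;

static void *writeback_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!should_stop) {
		if (have_work) {	/* cf. !list_empty(&bdi->work_list) */
			have_work = false;
			continue;	/* cf. __set_current_state(TASK_RUNNING) */
		}
		pthread_cond_wait(&wake, &lock);	/* cf. schedule() */
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writeback_thread, NULL);
	pthread_mutex_lock(&lock);
	have_work = true;		/* cf. bdi_queue_work() */
	should_stop = true;		/* cf. kthread_stop() */
	pthread_cond_signal(&wake);	/* cf. wake_up_process() */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}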
@@ -891,6 +927,8 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 void __mark_inode_dirty(struct inode *inode, int flags)
 {
 	struct super_block *sb = inode->i_sb;
+	struct backing_dev_info *bdi = NULL;
+	bool wakeup_bdi = false;
 
 	/*
 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -944,22 +982,31 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 * reposition it (that would break b_dirty time-ordering).
 		 */
 		if (!was_dirty) {
-			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-			struct backing_dev_info *bdi = wb->bdi;
-
-			if (bdi_cap_writeback_dirty(bdi) &&
-			    !test_bit(BDI_registered, &bdi->state)) {
-				WARN_ON(1);
-				printk(KERN_ERR "bdi-%s not registered\n",
-								bdi->name);
+			bdi = inode_to_bdi(inode);
+
+			if (bdi_cap_writeback_dirty(bdi)) {
+				WARN(!test_bit(BDI_registered, &bdi->state),
+				     "bdi-%s not registered\n", bdi->name);
+
+				/*
+				 * If this is the first dirty inode for this
+				 * bdi, we have to wake-up the corresponding
+				 * bdi thread to make sure background
+				 * write-back happens later.
+				 */
+				if (!wb_has_dirty_io(&bdi->wb))
+					wakeup_bdi = true;
 			}
 
 			inode->dirtied_when = jiffies;
-			list_move(&inode->i_list, &wb->b_dirty);
+			list_move(&inode->i_list, &bdi->wb.b_dirty);
 		}
 	}
 out:
 	spin_unlock(&inode_lock);
+
+	if (wakeup_bdi)
+		bdi_wakeup_thread_delayed(bdi);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
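
Note the shape of that last hunk: __mark_inode_dirty() now only records, under inode_lock, whether this is the bdi's first dirty inode, and calls bdi_wakeup_thread_delayed() after the spinlock is dropped, since the wake-up path may itself take sleeping locks. The same pattern in an assumed userspace model:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
static bool bdi_has_dirty_io;

static void bdi_wakeup_thread_delayed(void)
{
	/* may sleep / take other locks, so must run outside inode_lock */
	printf("waking bdi thread\n");
}

static void mark_inode_dirty(void)
{
	bool wakeup_bdi = false;

	pthread_mutex_lock(&inode_lock);
	if (!bdi_has_dirty_io) {	/* first dirty inode for this bdi */
		bdi_has_dirty_io = true;
		wakeup_bdi = true;
	}
	pthread_mutex_unlock(&inode_lock);

	if (wakeup_bdi)
		bdi_wakeup_thread_delayed();
}

int main(void) { mark_inode_dirty(); return 0; }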
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 6a857e24f947..cde1248a6225 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -595,7 +595,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
 		if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
 			goto skip_barrier;
 		get_bh(bh);
-		submit_bh(WRITE_SYNC | (1 << BIO_RW_BARRIER) | (1 << BIO_RW_META), bh);
+		submit_bh(WRITE_BARRIER | REQ_META, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
@@ -605,7 +605,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
 		lock_buffer(bh);
 skip_barrier:
 		get_bh(bh);
-		submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh);
+		submit_bh(WRITE_SYNC | REQ_META, bh);
 		wait_on_buffer(bh);
 	}
 	if (!buffer_uptodate(bh))
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 18176d0b75d7..f3b071f921aa 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -36,8 +36,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
-	int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
-				WRITE_SYNC_PLUG : WRITE));
+	int write_op = REQ_META |
+		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!page_has_buffers(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 	}
 	bh->b_end_io = end_buffer_read_sync;
 	get_bh(bh);
-	submit_bh(READ_SYNC | (1 << BIO_RW_META), bh);
+	submit_bh(READ_SYNC | REQ_META, bh);
 	if (!(flags & DIO_WAIT))
 		return 0;
 
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh);
+		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
 
 	dblock++;
 	extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 4f44bdeb2f03..4d4b1e8ac64c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -274,7 +274,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
 
 	bio->bi_end_io = end_bio_io_page;
 	bio->bi_private = page;
-	submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
+	submit_bio(READ_SYNC | REQ_META, bio);
 	wait_on_page_locked(page);
 	bio_put(bio);
 	if (!PageUptodate(page)) {
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 2e6a2723b8fa..4588fb9e93df 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -508,7 +508,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
 			 * Last BIO is always sent through the following
 			 * submission.
 			 */
-			rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+			rw |= REQ_SYNC | REQ_UNPLUG;
 			res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
 		}
 
diff --git a/fs/splice.c b/fs/splice.c
index efdbfece9932..8f1dfaecc8f0 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -399,17 +399,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 		 * If the page isn't uptodate, we may need to start io on it
 		 */
 		if (!PageUptodate(page)) {
-			/*
-			 * If in nonblock mode then dont block on waiting
-			 * for an in-flight io page
-			 */
-			if (flags & SPLICE_F_NONBLOCK) {
-				if (!trylock_page(page)) {
-					error = -EAGAIN;
-					break;
-				}
-			} else
-				lock_page(page);
+			lock_page(page);
 
 			/*
 			 * Page was truncated, or invalidated by the
@@ -597,7 +587,6 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 	struct page *pages[PIPE_DEF_BUFFERS];
 	struct partial_page partial[PIPE_DEF_BUFFERS];
 	struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
-	pgoff_t index;
 	ssize_t res;
 	size_t this_len;
 	int error;
@@ -621,7 +610,6 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 		goto shrink_ret;
 	}
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 