-rw-r--r--   block/blk-cgroup.c                 | 4 ++--
-rw-r--r--   block/cfq-iosched.c                | 2 +-
-rw-r--r--   drivers/block/drbd/drbd_main.c     | 2 +-
-rw-r--r--   drivers/block/drbd/drbd_receiver.c | 2 +-
-rw-r--r--   fs/bio-integrity.c                 | 3 ++-
-rw-r--r--   fs/bio.c                           | 7 ++++++-
6 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1fa2654db0a6..e7dbbaf5fb3e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -147,16 +147,16 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
                 return -EINVAL;
 
         blkcg = cgroup_to_blkio_cgroup(cgroup);
+        spin_lock(&blkio_list_lock);
         spin_lock_irq(&blkcg->lock);
         blkcg->weight = (unsigned int)val;
         hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-                spin_lock(&blkio_list_lock);
                 list_for_each_entry(blkiop, &blkio_list, list)
                         blkiop->ops.blkio_update_group_weight_fn(blkg,
                                         blkcg->weight);
-                spin_unlock(&blkio_list_lock);
         }
         spin_unlock_irq(&blkcg->lock);
+        spin_unlock(&blkio_list_lock);
         return 0;
 }
 
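Note on the blk-cgroup change: the weight-update path used to take blkio_list_lock repeatedly inside blkcg->lock, once per group in the loop; the fix takes it once, ahead of blkcg->lock, so there is a single consistent lock order (blkio_list_lock outer, blkcg->lock inner), released in reverse. A minimal userspace sketch of that discipline, using pthreads rather than kernel spinlocks and illustrative names throughout:

    #include <pthread.h>

    static pthread_mutex_t policy_list_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t group_lock       = PTHREAD_MUTEX_INITIALIZER; /* inner */
    static unsigned int group_weight;

    static void update_weight(unsigned int val)
    {
            pthread_mutex_lock(&policy_list_lock);  /* outer lock, taken once */
            pthread_mutex_lock(&group_lock);        /* inner lock */
            group_weight = val;
            /* ... walk the policy list, pushing the new weight to each group ... */
            pthread_mutex_unlock(&group_lock);        /* release inner first */
            pthread_mutex_unlock(&policy_list_lock);  /* then outer */
    }

    int main(void)
    {
            update_weight(500);
            return 0;
    }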
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee130f14d1fc..17b768d0d42f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1803,7 +1803,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
          * Otherwise, we do only if they are the last ones
          * in their service tree.
          */
-        return service_tree->count == 1;
+        return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
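Note on the cfq-iosched change: idling holds the disk back in anticipation of the next request from the same queue, which only pays off when a task is synchronously blocked on its I/O; an async queue has nobody waiting, so the wait is lost throughput. The guard now requires both conditions. A hedged restatement of the predicate, with illustrative names rather than the kernel's:

    #include <stdbool.h>

    /* Idle only if this queue is the last one in its service tree AND it
     * is synchronous; otherwise hand the disk to someone else at once. */
    static bool should_idle(unsigned int service_tree_count, bool queue_is_sync)
    {
            return service_tree_count == 1 && queue_is_sync;
    }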
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e898ad9eb1c3..ab871e00ffc5 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2973,7 +2973,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
                 goto out_no_q;
         mdev->rq_queue = q;
         q->queuedata = mdev;
-        blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 
         disk = alloc_disk(1);
         if (!disk)
@@ -2997,6 +2996,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
         q->backing_dev_info.congested_data = mdev;
 
         blk_queue_make_request(q, drbd_make_request_26);
+        blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
         blk_queue_merge_bvec(q, drbd_merge_bvec);
         q->queue_lock = &mdev->req_lock; /* needed since we use */
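Note on the drbd_main change: in this kernel, blk_queue_make_request() (re)initializes the queue limits to their defaults, so a segment-size limit configured before that call is silently clobbered; moving blk_queue_max_segment_size() after it lets the limit stick. Condensed (a sketch of the ordering rule, reusing the calls from the hunk itself, not a standalone program):

    /* Wrong: the limit set first is reset to the default by
     * blk_queue_make_request():
     *
     *         blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
     *         blk_queue_make_request(q, drbd_make_request_26);
     *
     * Right: configure limits only after the make_request setup: */
    blk_queue_make_request(q, drbd_make_request_26);
    blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);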
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index f22a5283128a..d065c646b35a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1224,7 +1224,7 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
         epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
         if (!epoch) {
                 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-                issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
+                issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
                 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
                 if (issue_flush) {
                         rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
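Note on the drbd_receiver change: this branch only runs when kmalloc() has returned NULL, so `&epoch->flags` computed a bit address inside a NULL struct and test_and_set_bit() dereferenced it: a guaranteed NULL-pointer dereference on the very path meant to survive allocation failure. The fix flags the already-live mdev->current_epoch instead, consistent with the drbd_flush_after_epoch() call just below. A userspace sketch of the bug class, names illustrative:

    #include <stdlib.h>

    struct epoch { unsigned long flags; };

    /* Stand-in for mdev->current_epoch: an object known to exist. */
    static struct epoch current_epoch;

    static void handle_barrier(void)
    {
            struct epoch *e = malloc(sizeof(*e));
            if (!e) {
                    /* BUG would be:  e->flags |= 1;   e is NULL right here. */
                    current_epoch.flags |= 1;  /* fix: fall back to the live object */
                    return;
            }
            /* ... normal path: initialize and queue the new epoch ... */
            free(e);
    }

    int main(void)
    {
            handle_barrier();
            return 0;
    }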
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 49a34e7f7306..a16f29e888cd 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr)
 
 static inline int use_bip_pool(unsigned int idx)
 {
-        if (idx == BIOVEC_NR_POOLS)
+        if (idx == BIOVEC_MAX_IDX)
                 return 1;
 
         return 0;
@@ -95,6 +95,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
 
         /* Use mempool if lower order alloc failed or max vecs were requested */
         if (bip == NULL) {
+                idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
                 bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
 
                 if (unlikely(bip == NULL)) {
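Note on the bio-integrity changes: the payload records which slab pool it came from in `idx`, and the free path asks use_bip_pool(idx) whether to hand the object back to the mempool or to a slab. The two hunks make that tag consistent end to end: test against BIOVEC_MAX_IDX (the value the free path keys on), and stamp idx = BIOVEC_MAX_IDX when falling back to the mempool so the object is returned to the right place. A condensed sketch of that alloc/free symmetry; the helper names are illustrative, not the kernel's:

    #define POOL_SENTINEL 8  /* stands in for BIOVEC_MAX_IDX */

    /* Hypothetical backends; only the tagging protocol matters here. */
    extern void *slab_alloc(unsigned int idx);
    extern void  slab_free(void *p, unsigned int idx);
    extern void *fallback_pool_alloc(void);
    extern void  fallback_pool_free(void *p);

    void *payload_alloc(unsigned int *idx)
    {
            void *p = slab_alloc(*idx);
            if (!p) {
                    *idx = POOL_SENTINEL;  /* tag: came from the fallback pool */
                    p = fallback_pool_alloc();
            }
            return p;
    }

    void payload_free(void *p, unsigned int idx)
    {
            if (idx == POOL_SENTINEL)  /* same sentinel the allocator wrote */
                    fallback_pool_free(p);
            else
                    slab_free(p, idx);
    }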
diff --git a/fs/bio.c b/fs/bio.c
@@ -542,13 +542,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
                 if (page == prev->bv_page &&
                     offset == prev->bv_offset + prev->bv_len) {
+                        unsigned int prev_bv_len = prev->bv_len;
                         prev->bv_len += len;
 
                         if (q->merge_bvec_fn) {
                                 struct bvec_merge_data bvm = {
+                                        /* prev_bvec is already charged in
+                                           bi_size, discharge it in order to
+                                           simulate merging updated prev_bvec
+                                           as new bvec. */
                                        .bi_bdev = bio->bi_bdev,
                                        .bi_sector = bio->bi_sector,
-                                       .bi_size = bio->bi_size,
+                                       .bi_size = bio->bi_size - prev_bv_len,
                                        .bi_rw = bio->bi_rw,
                                 };
 
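Note on the fs/bio.c change: when the new page is contiguous with the last biovec, __bio_add_page() grows prev->bv_len and then asks merge_bvec_fn whether the result fits, but bio->bi_size already counts the old prev bytes, so passing it unchanged makes the driver double-count those bytes and reject merges that actually fit. Subtracting the saved prev_bv_len presents the grown bvec as if it were being added fresh. Worked numbers, purely illustrative:

    /* Suppose bio->bi_size = 8192, the last bvec prev was 4096 bytes, and
     * the new page is contiguous with it, len = 4096:
     *
     *         prev_bv_len  = 4096   (saved before the merge)
     *         prev->bv_len = 8192   (after prev->bv_len += len)
     *
     * merge_bvec_fn answers: "with a bio of bvm.bi_size bytes, may this
     * bvec be appended?"  The grown prev plays the appended bvec here, so
     * its old bytes must be discharged from the running total:
     *
     *         bvm.bi_size = bio->bi_size - prev_bv_len = 8192 - 4096 = 4096
     *
     * The driver then evaluates 4096 + 8192 = 12288 bytes, the true size
     * of the bio after the merge, instead of 8192 + 8192 = 16384. */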