diff options
 block/blk-map.c     |  2 +-
 block/blk-tag.c     | 13 ++-----------
 block/cfq-iosched.c | 12 ++++++++++++
 3 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 164cd0059706..623e1cd4cffe 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (rq_data_dir(rq) == WRITE)
+	if (!reading)
 		bio->bi_rw |= REQ_WRITE;
 
 	if (do_copy)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e74d6d13838f..4af6f5cc1167 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -282,18 +282,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
-	int tag = rq->tag;
+	unsigned tag = rq->tag; /* negative tags invalid */
 
-	BUG_ON(tag == -1);
-
-	if (unlikely(tag >= bqt->max_depth)) {
-		/*
-		 * This can happen after tag depth has been reduced.
-		 * But tag shouldn't be larger than real_max_depth.
-		 */
-		WARN_ON(tag >= bqt->real_max_depth);
-		return;
-	}
+	BUG_ON(tag >= bqt->real_max_depth);
 
 	list_del_init(&rq->queuelist);
 	rq->cmd_flags &= ~REQ_QUEUED;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4c12869fcf77..3548705b04e4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1655,6 +1655,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 		    struct request *next)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
 	/*
 	 * reposition in fifo if next is older than rq
 	 */
@@ -1669,6 +1671,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	cfq_remove_request(next);
 	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(next), rq_is_sync(next));
+
+	cfqq = RQ_CFQQ(next);
+	/*
+	 * all requests of this queue are merged to other queues, delete it
+	 * from the service tree. If it's the active_queue,
+	 * cfq_dispatch_requests() will choose to expire it or do idle
+	 */
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+	    cfqq != cfqd->active_queue)
+		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
