Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3bc5579d6f54..f0640d7f800f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -467,6 +467,9 @@ static int blk_init_free_list(struct request_queue *q)
 {
         struct request_list *rl = &q->rq;
 
+        if (unlikely(rl->rq_pool))
+                return 0;
+
         rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
         rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
         rl->elvpriv = 0;
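The new rq_pool check makes blk_init_free_list() idempotent: a second call on an already-initialized queue now reports success instead of re-creating the request mempool and leaking the old one. A minimal userspace sketch of the same guard pattern, with invented names (struct foo, init_foo):

#include <stdlib.h>

struct foo {
        void *pool;                     /* stands in for rl->rq_pool */
};

static int init_foo(struct foo *f)
{
        if (f->pool)                    /* already set up: succeed, don't leak */
                return 0;

        f->pool = malloc(64);
        return f->pool ? 0 : -1;
}

int main(void)
{
        struct foo f = { 0 };

        if (init_foo(&f) == 0)
                init_foo(&f);           /* second call is a harmless no-op */
        free(f.pool);
        return 0;
}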
@@ -570,9 +573,17 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-        struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+        struct request_queue *uninit_q, *q;
+
+        uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+        if (!uninit_q)
+                return NULL;
+
+        q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+        if (!q)
+                blk_cleanup_queue(uninit_q);
 
-        return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+        return q;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
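Together with the two hunks below, this splits queue setup into a bare allocation step (blk_alloc_queue_node()) and a separate initialization step (blk_init_allocated_queue_node()), and the initialization step no longer frees the queue on failure; that is now the caller's job, as the blk_cleanup_queue() call above shows. A hypothetical driver sketch of the resulting calling pattern (my_create_queue, my_request_fn, and my_queue_lock are invented for illustration):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_queue_lock);

static void my_request_fn(struct request_queue *q)
{
        /* dequeue and service requests here */
}

static struct request_queue *my_create_queue(int node)
{
        struct request_queue *q;

        /* Step 1: bare allocation; no request_list or elevator yet. */
        q = blk_alloc_queue_node(GFP_KERNEL, node);
        if (!q)
                return NULL;

        /*
         * Step 2 can happen much later, once the driver knows it wants
         * request-based I/O on this queue.
         */
        if (!blk_init_allocated_queue_node(q, my_request_fn,
                                           &my_queue_lock, node)) {
                /* Init failed: the queue is still the caller's to release. */
                blk_cleanup_queue(q);
                return NULL;
        }

        return q;
}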
@@ -592,10 +603,8 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
                 return NULL;
 
         q->node = node_id;
-        if (blk_init_free_list(q)) {
-                kmem_cache_free(blk_requestq_cachep, q);
+        if (blk_init_free_list(q))
                 return NULL;
-        }
 
         q->request_fn = rfn;
         q->prep_rq_fn = NULL;
@@ -618,7 +627,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
                 return q;
         }
 
-        blk_put_queue(q);
         return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue_node);
@@ -1141,13 +1149,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
         else
                 req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-        if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+        if (bio_rw_flagged(bio, BIO_RW_DISCARD))
                 req->cmd_flags |= REQ_DISCARD;
-                if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-                        req->cmd_flags |= REQ_SOFTBARRIER;
-        } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
+        if (bio_rw_flagged(bio, BIO_RW_BARRIER))
                 req->cmd_flags |= REQ_HARDBARRIER;
-
         if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
                 req->cmd_flags |= REQ_RW_SYNC;
         if (bio_rw_flagged(bio, BIO_RW_META))
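The mapping is now flat: a discard bio sets REQ_DISCARD, and a barrier bio sets REQ_HARDBARRIER whether or not it is also a discard; the barrier-discard special case that used REQ_SOFTBARRIER is gone, along with the unlikely() hints. A standalone sketch of the new mapping (the flag bits below are placeholders for illustration, not the kernel's values):

#include <stdio.h>

#define BIO_RW_DISCARD  (1u << 0)       /* placeholder bits, not the kernel's */
#define BIO_RW_BARRIER  (1u << 1)
#define REQ_DISCARD     (1u << 0)
#define REQ_HARDBARRIER (1u << 1)

static unsigned int map_bio_flags(unsigned int bi_rw)
{
        unsigned int cmd_flags = 0;

        if (bi_rw & BIO_RW_DISCARD)
                cmd_flags |= REQ_DISCARD;
        if (bi_rw & BIO_RW_BARRIER)     /* no REQ_SOFTBARRIER special case */
                cmd_flags |= REQ_HARDBARRIER;
        return cmd_flags;
}

int main(void)
{
        /* A barrier discard now gets a hard barrier, like any barrier. */
        printf("%#x\n", map_bio_flags(BIO_RW_DISCARD | BIO_RW_BARRIER));
        return 0;
}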
@@ -1578,7 +1583,7 @@ void submit_bio(int rw, struct bio *bio)
          * If it's a regular read/write or a barrier with data attached,
          * go through the normal accounting stuff before submission.
          */
-        if (bio_has_data(bio)) {
+        if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
                 if (rw & WRITE) {
                         count_vm_events(PGPGOUT, count);
                 } else {
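With this change submit_bio() stops counting discards toward the PGPGIN/PGPGOUT VM events, since a discard bio can pass the bio_has_data() test without moving real data. A standalone sketch of the new accounting condition (the BIO_RW_DISCARD bit index below is a placeholder):

#include <stdbool.h>
#include <stdio.h>

#define BIO_RW_DISCARD 7                /* placeholder bit index */

/* Mirrors the new test: account only bios that carry data and are
 * not discards. */
static bool should_account(bool has_data, int rw)
{
        return has_data && !(rw & (1 << BIO_RW_DISCARD));
}

int main(void)
{
        printf("plain write accounted: %d\n", should_account(true, 0));
        printf("discard accounted:     %d\n",
               should_account(true, 1 << BIO_RW_DISCARD));
        return 0;
}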