author     David S. Miller <davem@davemloft.net>  2014-03-25 20:29:20 -0400
committer  David S. Miller <davem@davemloft.net>  2014-03-25 20:29:20 -0400
commit     04f58c88542b6b351efb4eea01134eb672e22e6e (patch)
tree       47bb617212f8c8951f35730e324bdc43487a01ca /block
parent     0fc31966035d7a540c011b6c967ce8eae1db121b (diff)
parent     632b06aa2842b12c6d6a510ec080fb6ebdb38ea5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	Documentation/devicetree/bindings/net/micrel-ks8851.txt
	net/core/netpoll.c

The net/core/netpoll.c conflict is a bug fix in 'net' happening to code
which is completely removed in 'net-next'.

In micrel-ks8851.txt we simply have overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c    25
-rw-r--r--  block/blk-flush.c   11
2 files changed, 19 insertions, 17 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 853f92749202..bfe16d5af9f9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,20 +693,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 		return NULL;
 
-	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
-	if (!uninit_q->flush_rq)
-		goto out_cleanup_queue;
-
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
-		goto out_free_flush_rq;
-	return q;
+		blk_cleanup_queue(uninit_q);
 
-out_free_flush_rq:
-	kfree(uninit_q->flush_rq);
-out_cleanup_queue:
-	blk_cleanup_queue(uninit_q);
-	return NULL;
+	return q;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
@@ -717,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (!q)
 		return NULL;
 
-	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!q->flush_rq)
 		return NULL;
 
+	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+		goto fail;
+
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
 	q->unprep_rq_fn = NULL;
@@ -742,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	/* init elevator */
 	if (elevator_init(q, NULL)) {
 		mutex_unlock(&q->sysfs_lock);
-		return NULL;
+		goto fail;
 	}
 
 	mutex_unlock(&q->sysfs_lock);
 
 	return q;
+
+fail:
+	kfree(q->flush_rq);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
 
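Note: taken together, the two blk-core.c hunks move the flush_rq allocation out of blk_init_queue_node() and into blk_init_allocated_queue(), where the new 'fail:' label frees it on any later error. For orientation, a rough sketch of the resulting blk_init_queue_node() is shown below; the declarations and the blk_alloc_queue_node() call sit above the hunk and are reconstructed from context, so treat this as an approximation rather than the exact file contents.

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	/* lines above the hunk, approximated from context */
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	/* flush_rq is now allocated (and freed on error) inside
	 * blk_init_allocated_queue() */
	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
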
diff --git a/block/blk-flush.c b/block/blk-flush.c
index f598f794c3c6..43e6b4755e9a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -140,14 +140,17 @@ static void mq_flush_run(struct work_struct *work)
 	blk_mq_insert_request(rq, false, true, false);
 }
 
-static bool blk_flush_queue_rq(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 {
 	if (rq->q->mq_ops) {
 		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
 		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
 		return false;
 	} else {
-		list_add_tail(&rq->queuelist, &rq->q->queue_head);
+		if (add_front)
+			list_add(&rq->queuelist, &rq->q->queue_head);
+		else
+			list_add_tail(&rq->queuelist, &rq->q->queue_head);
 		return true;
 	}
 }
@@ -193,7 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		queued = blk_flush_queue_rq(rq);
+		queued = blk_flush_queue_rq(rq, true);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -326,7 +329,7 @@ static bool blk_kick_flush(struct request_queue *q)
 	q->flush_rq->rq_disk = first_rq->rq_disk;
 	q->flush_rq->end_io = flush_end_io;
 
-	return blk_flush_queue_rq(q->flush_rq);
+	return blk_flush_queue_rq(q->flush_rq, false);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
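
Note: the new add_front flag lets the legacy (non-mq) path distinguish its two callers in the hunks above: blk_flush_complete_seq() requeues a data request at the head of the queue (add_front = true), while blk_kick_flush() queues the flush request itself at the tail (add_front = false). A condensed sketch of the non-mq branch, paraphrased from the hunks rather than copied verbatim, with a hypothetical _sketch suffix to mark it as illustrative:

/* Legacy request_fn path only (rq->q->mq_ops == NULL); paraphrased sketch. */
static bool blk_flush_queue_rq_sketch(struct request *rq, bool add_front)
{
	if (add_front)
		/* REQ_FSEQ_DATA requeue: put the data request at the front */
		list_add(&rq->queuelist, &rq->q->queue_head);
	else
		/* blk_kick_flush(): put the flush request at the back */
		list_add_tail(&rq->queuelist, &rq->q->queue_head);

	/* true: request was queued directly, not deferred to kblockd */
	return true;
}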