Diffstat (limited to 'block/blk-core.c')
 -rw-r--r--  block/blk-core.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 97e9e5405b83..79e41a76d96a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -346,9 +346,10 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /*
- * Note: If a driver supplied the queue lock, it should not zap that lock
- * unexpectedly as some queue cleanup components like elevator_exit() and
- * blk_throtl_exit() need queue lock.
+ * Note: If a driver supplied the queue lock, it is disconnected
+ * by this function. The actual state of the lock doesn't matter
+ * here as the request_queue isn't accessible after this point
+ * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
@@ -365,10 +366,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
-	if (q->elevator)
-		elevator_exit(q->elevator);
-
-	blk_throtl_exit(q);
+	if (q->queue_lock != &q->__queue_lock)
+		q->queue_lock = &q->__queue_lock;
 
 	blk_put_queue(q);
 }
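
The replacement code switches a driver-supplied queue_lock back to the queue's internal __queue_lock once QUEUE_FLAG_DEAD is set, so q->queue_lock can no longer dangle into driver memory after the driver tears down. For context, a minimal hypothetical driver fragment showing where such an external lock comes from (the mydev_* names are illustrative, not part of this patch):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void mydev_request_fn(struct request_queue *q);	/* body elided */
static spinlock_t mydev_lock;	/* lives in driver-owned memory */

static struct request_queue *mydev_create_queue(void)
{
	spin_lock_init(&mydev_lock);
	/* The queue uses the driver's lock from here on; after
	 * blk_cleanup_queue() it falls back to q->__queue_lock. */
	return blk_init_queue(mydev_request_fn, &mydev_lock);
}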
@@ -1165,7 +1164,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
  * true if merge was successful, otherwise false.
  */
 static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-			       struct bio *bio)
+			       struct bio *bio, unsigned int *request_count)
 {
 	struct blk_plug *plug;
 	struct request *rq;
@@ -1174,10 +1173,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
 	plug = tsk->plug;
 	if (!plug)
 		goto out;
+	*request_count = 0;
 
 	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
 		int el_ret;
 
+		(*request_count)++;
+
 		if (rq->q != q)
 			continue;
 
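
The merge scan already visits every plugged request, so it now reports the list length through *request_count as a side effect instead of the plug maintaining a separate counter. Note that *request_count is only written when a plug exists; that is why the caller (blk_queue_bio(), below) initializes its local request_count to 0 before the call. Condensed sketch of the idiom, simplified from the hunk above:

	*request_count = 0;
	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
		(*request_count)++;	/* count every plugged request */
		if (rq->q != q)
			continue;	/* only same-queue requests can merge */
		/* ... attempt back/front merge of bio into rq ... */
	}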
@@ -1217,6 +1219,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	struct blk_plug *plug;
 	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
+	unsigned int request_count = 0;
 
 	/*
 	 * low level driver can indicate that it wants pages above a
@@ -1235,7 +1238,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Check if we can merge with the plugged list before grabbing
 	 * any locks.
 	 */
-	if (attempt_plug_merge(current, q, bio))
+	if (attempt_plug_merge(current, q, bio, &request_count))
 		return;
 
 	spin_lock_irq(q->queue_lock);
@@ -1300,11 +1303,10 @@ get_rq:
 			if (__rq->q != q)
 				plug->should_sort = 1;
 		}
+		if (request_count >= BLK_MAX_REQUEST_COUNT)
+			blk_flush_plug_list(plug, false);
 		list_add_tail(&req->queuelist, &plug->list);
-		plug->count++;
 		drive_stat_acct(req, 1);
-		if (plug->count >= BLK_MAX_REQUEST_COUNT)
-			blk_flush_plug_list(plug, false);
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
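
Moving the threshold check ahead of the insertion also changes the ordering subtly: once BLK_MAX_REQUEST_COUNT requests have accumulated, the backlog is flushed first and the new request starts a fresh batch instead of being swept out along with it. Condensed before/after comparison of the plugged path:

	/* before: add first, then flush, sweeping the new request out too */
	list_add_tail(&req->queuelist, &plug->list);
	plug->count++;
	if (plug->count >= BLK_MAX_REQUEST_COUNT)
		blk_flush_plug_list(plug, false);

	/* after: flush the backlog first; the new request stays plugged
	 * and the per-plug counter becomes unnecessary */
	if (request_count >= BLK_MAX_REQUEST_COUNT)
		blk_flush_plug_list(plug, false);
	list_add_tail(&req->queuelist, &plug->list);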
@@ -1675,6 +1677,7 @@ EXPORT_SYMBOL_GPL(blk_rq_check_limits);
 int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
 	unsigned long flags;
+	int where = ELEVATOR_INSERT_BACK;
 
 	if (blk_rq_check_limits(q, rq))
 		return -EIO;
@@ -1691,7 +1694,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	add_acct_request(q, rq, ELEVATOR_INSERT_BACK);
+	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+		where = ELEVATOR_INSERT_FLUSH;
+
+	add_acct_request(q, rq, where);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	return 0;
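
blk_insert_cloned_request() is the resubmission path used by request-based stacking drivers such as dm-multipath. Diverting REQ_FLUSH/REQ_FUA clones to ELEVATOR_INSERT_FLUSH sends them through the bottom queue's flush machinery, which knows how to honor that queue's own flush capabilities, instead of appending them straight to the dispatch queue. A hypothetical caller fragment (the stacker_* names are illustrative only):

static void stacker_resubmit(struct request_queue *bottom_q,
			     struct request *clone)
{
	/* flush/FUA clones now take the ELEVATOR_INSERT_FLUSH path
	 * inside blk_insert_cloned_request() */
	if (blk_insert_cloned_request(bottom_q, clone))
		pr_warn("stacker: failed to insert cloned request\n");
}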
@@ -2248,7 +2254,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+bool __blk_end_bidi_request(struct request *rq, int error,
 				   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
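
Dropping static exposes __blk_end_bidi_request() to the rest of the block core, presumably via a declaration in block/blk.h that this diff does not show. The likely consumer is the flush machinery, which can then complete a request requiring none of the flush sequence steps without a round trip through the dispatch queue; roughly (assumed usage, not part of this diff):

	/* in blk_insert_flush(), if no flush steps apply to this queue */
	if (!policy) {
		__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}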
@@ -2617,7 +2623,6 @@ void blk_start_plug(struct blk_plug *plug)
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
-	plug->count = 0;
 
 	/*
 	 * If this is a nested plug, don't actually assign it. It will be
@@ -2701,7 +2706,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;
 
 	list_splice_init(&plug->list, &list);
-	plug->count = 0;
 
 	if (plug->should_sort) {
 		list_sort(NULL, &list, plug_rq_cmp);
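
With the counter gone from blk_start_plug() and blk_flush_plug_list(), the plug carries only its request list, callback list, and should_sort flag; the submitter-facing API is unchanged. Minimal usage sketch, where submit_one_bio() stands in for the caller's own submission helper:

	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr_bios; i++)
		submit_one_bio(bios[i]);	/* requests accumulate in the plug */
	blk_finish_plug(&plug);		/* flushes the plugged list */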