Diffstat (limited to 'block')

 block/Kconfig      |  2 +-
 block/blk-cgroup.c | 10 ++++++++++
 block/blk-core.c   |  3 ++-
 block/blk-exec.c   |  8 +++++++-
 4 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 09acf1b39905..a7e40a7c8214 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -89,7 +89,7 @@ config BLK_DEV_INTEGRITY
 
 config BLK_DEV_THROTTLING
 	bool "Block layer bio throttling support"
-	depends on BLK_CGROUP=y && EXPERIMENTAL
+	depends on BLK_CGROUP=y
 	default n
 	---help---
 	Block layer bio throttling support. It can be used to limit
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index cafcd7431189..d0b770391ad4 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -285,6 +285,13 @@ static void blkg_destroy_all(struct request_queue *q)
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
+
+	/*
+	 * root blkg is destroyed. Just clear the pointer since
+	 * root_rl does not take reference on root blkg.
+	 */
+	q->root_blkg = NULL;
+	q->root_rl.blkg = NULL;
 }
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
@@ -326,6 +333,9 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 	 */
 	if (rl == &q->root_rl) {
 		ent = &q->blkg_list;
+		/* There are no more block groups, hence no request lists */
+		if (list_empty(ent))
+			return NULL;
 	} else {
 		blkg = container_of(rl, struct blkcg_gq, rl);
 		ent = &blkg->q_node;
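
The two blk-cgroup.c hunks above work as a pair: blkg_destroy_all() now clears q->root_blkg and q->root_rl.blkg because root_rl holds no reference on the root blkg, which means __blk_queue_next_rl() can find the blkg list empty and must stop rather than walk off it. A minimal stand-alone sketch of that iterator guard, using a hypothetical node type instead of the kernel's list_head machinery:

	#include <stddef.h>

	/* Hypothetical circular doubly-linked list node (stands in for list_head). */
	struct node {
		struct node *next, *prev;
	};

	/*
	 * Return the entry after 'cur' in the circular list headed by 'head'.
	 * The empty-list check mirrors the one added in the patch.
	 */
	static struct node *next_entry(struct node *head, struct node *cur)
	{
		if (head->next == head)
			return NULL;	/* empty list: nothing to visit */
		if (cur->next == head)
			return NULL;	/* wrapped around: iteration done */
		return cur->next;
	}
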
diff --git a/block/blk-core.c b/block/blk-core.c
index a33870b1847b..3c95c4d6e31a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2868,7 +2868,8 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	struct request *rqa = container_of(a, struct request, queuelist);
 	struct request *rqb = container_of(b, struct request, queuelist);
 
-	return !(rqa->q <= rqb->q);
+	return !(rqa->q < rqb->q ||
+		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
 }
 
 /*
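
The blk-core.c hunk fixes the plug-list comparator: the old test !(rqa->q <= rqb->q) ordered requests by queue pointer alone, so requests queued to the same device were flushed in arrival order rather than sorted by start sector. The replacement sorts by queue pointer first and, within a queue, by starting sector (blk_rq_pos() returns the request's start sector). Here is the same two-key comparator in isolation, with simplified stand-in types rather than the kernel's:

	/* Simplified stand-ins for the kernel structures. */
	struct queue;
	struct request {
		struct queue *q;	/* primary key: owning queue */
		unsigned long long pos;	/* secondary key: start sector */
	};

	/*
	 * Mirrors the patched comparison: returns 0 when rqa sorts before
	 * rqb (strictly lower queue pointer, or same queue and lower start
	 * sector), nonzero otherwise.
	 */
	static int plug_cmp(const struct request *rqa, const struct request *rqb)
	{
		return !(rqa->q < rqb->q ||
			 (rqa->q == rqb->q && rqa->pos < rqb->pos));
	}
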
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8b6dc5bd4dd0..f71eac35c1b9 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   rq_end_io_fn *done)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
+	/*
+	 * need to check this before __blk_run_queue(), because rq can
+	 * be freed before that returns.
+	 */
+	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
-	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+	if (is_pm_resume)
 		q->request_fn(q);
 	spin_unlock_irq(q->queue_lock);
 }
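
The blk-exec.c hunks are a use-after-free fix: the request can complete and be freed before __blk_run_queue() returns, so rq->cmd_type is copied into the local is_pm_resume beforehand and only the local is tested afterwards. The same pattern in miniature, with hypothetical names:

	#include <stdbool.h>

	struct job {
		bool needs_followup;
	};

	void run_job(struct job *j);	/* may free 'j' if it completes synchronously */
	void do_followup(void);

	void submit(struct job *j)
	{
		/*
		 * Copy the flag first: once run_job() returns, 'j' may
		 * already have been freed and must not be dereferenced again.
		 */
		bool followup = j->needs_followup;

		run_job(j);
		if (followup)		/* reads the local copy, not *j */
			do_followup();
	}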