aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--block/blk-throttle.c17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index af53f37c1b13..bc2936b80add 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -87,7 +87,7 @@ struct throtl_data
 	unsigned int nr_queued[2];

 	/*
-	 * number of total undestroyed groups (excluding root group)
+	 * number of total undestroyed groups
 	 */
 	unsigned int nr_undestroyed_grps;

@@ -940,7 +940,17 @@ int blk_throtl_init(struct request_queue *q)
 	/* Practically unlimited BW */
 	tg->bps[0] = tg->bps[1] = -1;
 	tg->iops[0] = tg->iops[1] = -1;
-	atomic_set(&tg->ref, 1);
+
+	/*
+	 * Set root group reference to 2. One reference will be dropped when
+	 * all groups on tg_list are being deleted during queue exit. Other
+	 * reference will remain there as we don't want to delete this group
+	 * as it is statically allocated and gets destroyed when throtl_data
+	 * goes away.
+	 */
+	atomic_set(&tg->ref, 2);
+	hlist_add_head(&tg->tg_node, &td->tg_list);
+	td->nr_undestroyed_grps++;

 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

@@ -966,10 +976,9 @@ void blk_throtl_exit(struct request_queue *q)

 	spin_lock_irq(q->queue_lock);
 	throtl_release_tgs(td);
-	blkiocg_del_blkio_group(&td->root_tg.blkg);

 	/* If there are other groups */
-	if (td->nr_undestroyed_grps >= 1)
+	if (td->nr_undestroyed_grps > 0)
 		wait = true;

 	spin_unlock_irq(q->queue_lock);