Diffstat (limited to 'block')
-rw-r--r--  block/blk-throttle.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++-----------------------
1 file changed, 45 insertions(+), 23 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 420eaa150d11..a8d23f0cf357 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -44,6 +44,7 @@ struct throtl_service_queue {
 	struct rb_node		*first_pending;	/* first node in the tree */
 	unsigned int		nr_pending;	/* # queued in the tree */
 	unsigned long		first_pending_disptime; /* disptime of the first tg */
+	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
 };
 
 enum tg_state_flags {
@@ -121,7 +122,7 @@ struct throtl_data
 	unsigned int nr_undestroyed_grps;
 
 	/* Work for dispatching throttled bios */
-	struct delayed_work dispatch_work;
+	struct work_struct dispatch_work;
 };
 
 /* list and work item to allocate percpu group stats */
@@ -131,6 +132,8 @@ static LIST_HEAD(tg_stats_alloc_list);
 static void tg_stats_alloc_fn(struct work_struct *);
 static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
 
+static void throtl_pending_timer_fn(unsigned long arg);
+
 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 {
 	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
@@ -255,6 +258,13 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq,
 	bio_list_init(&sq->bio_lists[1]);
 	sq->pending_tree = RB_ROOT;
 	sq->parent_sq = parent_sq;
+	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
+		    (unsigned long)sq);
+}
+
+static void throtl_service_queue_exit(struct throtl_service_queue *sq)
+{
+	del_timer_sync(&sq->pending_timer);
 }
 
 static void throtl_pd_init(struct blkcg_gq *blkg)
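Note: setup_timer() binds throtl_pending_timer_fn() to the service queue it covers, and del_timer_sync() in the new exit path guarantees the handler is not running once teardown proceeds. Below is a minimal sketch of the same arm/tear-down lifecycle as a self-contained hypothetical module (the demo_* names are illustrative, not part of the patch), against this era's timer API where the callback takes an unsigned long cookie:

/* Hypothetical demo module: same timer lifecycle as
 * throtl_service_queue_init()/_exit() above. */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timer_fn(unsigned long data)
{
	/* softirq context: keep the handler short */
	pr_info("demo timer fired, cookie=%lu\n", data);
}

static int __init demo_init(void)
{
	setup_timer(&demo_timer, demo_timer_fn, 0UL);
	mod_timer(&demo_timer, jiffies + HZ);	/* fire in ~1 second */
	return 0;
}

static void __exit demo_exit(void)
{
	/* waits until a concurrently running handler returns */
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");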
@@ -293,6 +303,8 @@ static void throtl_pd_exit(struct blkcg_gq *blkg)
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 
 	free_percpu(tg->stats_cpu);
+
+	throtl_service_queue_exit(&tg->service_queue);
 }
 
 static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
@@ -447,19 +459,17 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 }
 
 /* Call with queue lock held */
-static void throtl_schedule_delayed_work(struct throtl_data *td,
-					 unsigned long delay)
+static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
+					  unsigned long expires)
 {
-	struct delayed_work *dwork = &td->dispatch_work;
-	struct throtl_service_queue *sq = &td->service_queue;
-
-	mod_delayed_work(kthrotld_workqueue, dwork, delay);
-	throtl_log(sq, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
+	mod_timer(&sq->pending_timer, expires);
+	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
+		   expires - jiffies, jiffies);
 }
 
-static void throtl_schedule_next_dispatch(struct throtl_data *td)
+static void throtl_schedule_next_dispatch(struct throtl_service_queue *sq)
 {
-	struct throtl_service_queue *sq = &td->service_queue;
+	struct throtl_data *td = sq_to_td(sq);
 
 	/* any pending children left? */
 	if (!sq->nr_pending)
@@ -467,10 +477,14 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
 
 	update_min_dispatch_time(sq);
 
-	if (time_before_eq(sq->first_pending_disptime, jiffies))
-		throtl_schedule_delayed_work(td, 0);
-	else
-		throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
+	/* is the next dispatch time in the future? */
+	if (time_after(sq->first_pending_disptime, jiffies)) {
+		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
+		return;
+	}
+
+	/* kick immediate execution */
+	queue_work(kthrotld_workqueue, &td->dispatch_work);
 }
 
 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
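The reworked throtl_schedule_next_dispatch() above distinguishes two cases: a first_pending_disptime still in the future arms the per-queue pending_timer, while an already-due one queues the dispatch work directly instead of round-tripping through a zero-delay timer as the old code did. A sketch of that decision pattern follows; run_when_due() and its parameters are hypothetical, and time_after() is used because it remains correct across jiffies wraparound:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

/* Hypothetical helper mirroring throtl_schedule_next_dispatch(). */
static void run_when_due(unsigned long disptime, struct timer_list *timer,
			 struct workqueue_struct *wq, struct work_struct *work)
{
	/* time_after() is wraparound-safe, unlike "disptime > jiffies" */
	if (time_after(disptime, jiffies)) {
		mod_timer(timer, disptime);	/* defer until due */
		return;
	}
	queue_work(wq, work);			/* already due: run now */
}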
@@ -901,11 +915,19 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
 	return nr_disp;
 }
 
+static void throtl_pending_timer_fn(unsigned long arg)
+{
+	struct throtl_service_queue *sq = (void *)arg;
+	struct throtl_data *td = sq_to_td(sq);
+
+	queue_work(kthrotld_workqueue, &td->dispatch_work);
+}
+
 /* work function to dispatch throttled bios */
 void blk_throtl_dispatch_work_fn(struct work_struct *work)
 {
-	struct throtl_data *td = container_of(to_delayed_work(work),
-					      struct throtl_data, dispatch_work);
+	struct throtl_data *td = container_of(work, struct throtl_data,
+					      dispatch_work);
 	struct throtl_service_queue *sq = &td->service_queue;
 	struct request_queue *q = td->queue;
 	unsigned int nr_disp = 0;
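throtl_pending_timer_fn() deliberately does nothing but queue the work item: timer callbacks run in softirq context, so the heavier dispatch pass is punted to the workqueue, where it runs in process context and can take q->queue_lock. A compact sketch of this timer-to-work bounce (demo_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct timer_list timer;
	struct work_struct work;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	/* process context: the expensive part goes here */
	(void)ctx;
}

static void demo_timer_fn(unsigned long arg)
{
	struct demo_ctx *ctx = (void *)arg;

	schedule_work(&ctx->work);	/* softirq: just punt */
}

static void demo_ctx_init(struct demo_ctx *ctx)
{
	INIT_WORK(&ctx->work, demo_work_fn);
	setup_timer(&ctx->timer, demo_timer_fn, (unsigned long)ctx);
}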
@@ -932,7 +954,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 		throtl_log(sq, "bios disp=%u", nr_disp);
 	}
 
-	throtl_schedule_next_dispatch(td);
+	throtl_schedule_next_dispatch(sq);
 
 	spin_unlock_irq(q->queue_lock);
 
@@ -1020,7 +1042,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 	struct blkg_conf_ctx ctx;
 	struct throtl_grp *tg;
-	struct throtl_data *td;
+	struct throtl_service_queue *sq;
 	int ret;
 
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1028,7 +1050,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 		return ret;
 
 	tg = blkg_to_tg(ctx.blkg);
-	td = ctx.blkg->q->td;
+	sq = &tg->service_queue;
 
 	if (!ctx.v)
 		ctx.v = -1;
@@ -1056,7 +1078,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 
 	if (tg->flags & THROTL_TG_PENDING) {
 		tg_update_disptime(tg);
-		throtl_schedule_next_dispatch(td);
+		throtl_schedule_next_dispatch(sq->parent_sq);
 	}
 
 	blkg_conf_finish(&ctx);
@@ -1121,7 +1143,7 @@ static void throtl_shutdown_wq(struct request_queue *q)
 {
 	struct throtl_data *td = q->td;
 
-	cancel_delayed_work_sync(&td->dispatch_work);
+	cancel_work_sync(&td->dispatch_work);
 }
 
 static struct blkcg_policy blkcg_policy_throtl = {
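With dispatch_work now a plain work_struct, cancel_work_sync() is the matching shutdown call. One general caveat with this structure: an armed timer can re-queue the work after it has been cancelled, so a full teardown should stop the timer first (here the per-queue timers are stopped separately in throtl_service_queue_exit()). Continuing the hypothetical demo_ctx sketch above:

static void demo_ctx_exit(struct demo_ctx *ctx)
{
	del_timer_sync(&ctx->timer);	/* no further queue_work() from the timer */
	cancel_work_sync(&ctx->work);	/* wait out any in-flight work item */
}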
@@ -1210,7 +1232,7 @@ queue_bio:
 	/* update @tg's dispatch time if @tg was empty before @bio */
 	if (tg->flags & THROTL_TG_WAS_EMPTY) {
 		tg_update_disptime(tg);
-		throtl_schedule_next_dispatch(td);
+		throtl_schedule_next_dispatch(tg->service_queue.parent_sq);
 	}
 
 out_unlock:
@@ -1273,7 +1295,7 @@ int blk_throtl_init(struct request_queue *q)
 	if (!td)
 		return -ENOMEM;
 
-	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
 	throtl_service_queue_init(&td->service_queue, NULL);
 
 	q->td = td;