author		Jens Axboe <axboe@fb.com>	2016-11-10 23:50:51 -0500
committer	Jens Axboe <axboe@fb.com>	2016-11-11 18:18:24 -0500
commit		8054b89f8fca75d514965ee627a15b47020d2053 (patch)
tree		eb5848111e22d9cd2cb81aacb3cc3a3fa7489cee	/block/blk-wbt.c
parent		d8a0cbfd73cb7281120d1b49f90afeef26ad48a2 (diff)
blk-wbt: remove stat ops
Again a leftover from when the throttling code was generic. Now that we
just have the block user, get rid of the stat ops and indirections.

Signed-off-by: Jens Axboe <axboe@fb.com>
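For context, here is a minimal sketch of the indirection this commit removes, reconstructed from the call sites visible in the diff below. The struct layout is an assumption inferred from the removed wbt_init() validation check, not the verbatim blk-wbt.h definition.

/*
 * Sketch only: the ops-table indirection being removed, reconstructed
 * from the call sites in this diff. Exact types in blk-wbt.h may differ.
 */
struct wb_stat_ops {
	void (*get)(void *data, struct blk_rq_stat *stat);
	bool (*is_current)(struct blk_rq_stat *stat);
	void (*clear)(void *data);
};

/* Before: every stat access bounced through the caller-supplied table */
rwb->stat_ops->get(rwb->ops_data, stat);

/* After: blk-wbt calls the block-layer stat helpers directly */
blk_queue_stat_get(rwb->queue, stat);

With only the block layer left as a user, the function pointers and the ops_data cookie add nothing, so the direct calls replace them throughout.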
Diffstat (limited to 'block/blk-wbt.c')
 block/blk-wbt.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 4ab9cebc8003..f6ec7e587fa6 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -308,7 +308,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	 * waited or still has writes in flights, consider us doing
 	 * just writes as well.
 	 */
-	if ((stat[1].nr_samples && rwb->stat_ops->is_current(stat)) ||
+	if ((stat[1].nr_samples && blk_stat_is_current(stat)) ||
 	    wb_recent_wait(rwb) || wbt_inflight(rwb))
 		return LAT_UNKNOWN_WRITES;
 	return LAT_UNKNOWN;
@@ -333,7 +333,7 @@ static int latency_exceeded(struct rq_wb *rwb)
 {
 	struct blk_rq_stat stat[2];
 
-	rwb->stat_ops->get(rwb->ops_data, stat);
+	blk_queue_stat_get(rwb->queue, stat);
 	return __latency_exceeded(rwb, stat);
 }
 
@@ -355,7 +355,7 @@ static void scale_up(struct rq_wb *rwb)
 
 	rwb->scale_step--;
 	rwb->unknown_cnt = 0;
-	rwb->stat_ops->clear(rwb->ops_data);
+	blk_stat_clear(rwb->queue);
 
 	rwb->scaled_max = calc_wb_limits(rwb);
 
@@ -385,7 +385,7 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 
 	rwb->scaled_max = false;
 	rwb->unknown_cnt = 0;
-	rwb->stat_ops->clear(rwb->ops_data);
+	blk_stat_clear(rwb->queue);
 	calc_wb_limits(rwb);
 	rwb_trace_step(rwb, "step down");
 }
@@ -675,7 +675,7 @@ void wbt_disable(struct rq_wb *rwb)
 }
 EXPORT_SYMBOL_GPL(wbt_disable);
 
-int wbt_init(struct request_queue *q, struct wb_stat_ops *ops)
+int wbt_init(struct request_queue *q)
 {
 	struct rq_wb *rwb;
 	int i;
@@ -688,9 +688,6 @@ int wbt_init(struct request_queue *q, struct wb_stat_ops *ops)
 	BUILD_BUG_ON(RWB_WINDOW_NSEC > BLK_STAT_NSEC);
 	BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);
 
-	if (!ops->get || !ops->is_current || !ops->clear)
-		return -EINVAL;
-
 	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
 	if (!rwb)
 		return -ENOMEM;
@@ -706,8 +703,6 @@ int wbt_init(struct request_queue *q, struct wb_stat_ops *ops)
 	rwb->last_comp = rwb->last_issue = jiffies;
 	rwb->queue = q;
 	rwb->win_nsec = RWB_WINDOW_NSEC;
-	rwb->stat_ops = ops;
-	rwb->ops_data = q;
 	wbt_update_limits(rwb);
 
 	/*