author	Divyesh Shah <dpshah@google.com>	2010-04-14 05:22:38 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2010-04-14 05:22:38 -0400
commit	28baf44299e0480d66ebb3093de5d51deff04e9f (patch)
tree	2cac2546027638c269441f4035a67abd3d267ca4 /block
parent	4facdaec1ce186e731e6baa04f074804849e9a49 (diff)
blkio: Fix compile errors
Fixes compile errors in blk-cgroup code for the empty_time stat and a
merge fix in CFQ. The first error was seen when CONFIG_DEBUG_CFQ_IOSCHED
is not set.

Signed-off-by: Divyesh Shah <dpshah@google.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
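As a rough illustration of the first failure mode: a function defined inside a
debug-only #ifdef block vanishes from non-debug builds, so any unconditional
caller (or EXPORT_SYMBOL) breaks the compile or link when the option is unset.
The patch presumably fixes this by moving blkiocg_set_start_empty_time() out of
such a guarded region, as the two blk-cgroup.c hunks below show. A minimal
user-space sketch of the bug, not kernel code (the function name here is
hypothetical):

	#include <stdio.h>

	#ifdef CONFIG_DEBUG_CFQ_IOSCHED
	/* Anything defined here disappears when the option is unset. */
	void set_start_empty_time(void)
	{
		puts("start_empty_time set");
	}
	#endif

	int main(void)
	{
		/*
		 * Unconditional caller: with CONFIG_DEBUG_CFQ_IOSCHED unset
		 * this is an implicit declaration at compile time and an
		 * undefined reference at link time.
		 */
		set_start_empty_time();
		return 0;
	}

Building with -DCONFIG_DEBUG_CFQ_IOSCHED succeeds; building without it fails,
mirroring the reported compile error.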
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	54
-rw-r--r--	block/cfq-iosched.c	2
2 files changed, 28 insertions, 28 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index aa97cd455cef..80c1261a7d38 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -219,6 +219,33 @@ void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
 
+void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
+{
+	unsigned long flags;
+	struct blkio_group_stats *stats;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+
+	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
+			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
+		spin_unlock_irqrestore(&blkg->stats_lock, flags);
+		return;
+	}
+
+	/*
+	 * If ignore is set, we do not panic on the empty flag being set
+	 * already. This is to avoid cases where there are superfluous timeslice
+	 * complete events (for eg., forced_dispatch in CFQ) when no IOs are
+	 * served which could result in triggering the empty check incorrectly.
+	 */
+	BUG_ON(!ignore && blkio_blkg_empty(stats));
+	stats->start_empty_time = sched_clock();
+	blkio_mark_blkg_empty(stats);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
+
 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 					unsigned long dequeue)
 {
@@ -268,33 +295,6 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 
-void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
-{
-	unsigned long flags;
-	struct blkio_group_stats *stats;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &blkg->stats;
-
-	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
-			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
-		spin_unlock_irqrestore(&blkg->stats_lock, flags);
-		return;
-	}
-
-	/*
-	 * If ignore is set, we do not panic on the empty flag being set
-	 * already. This is to avoid cases where there are superfluous timeslice
-	 * complete events (for eg., forced_dispatch in CFQ) when no IOs are
-	 * served which could result in triggering the empty check incorrectly.
-	 */
-	BUG_ON(!ignore && blkio_blkg_empty(stats));
-	stats->start_empty_time = sched_clock();
-	blkio_mark_blkg_empty(stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 					uint64_t bytes, bool direction, bool sync)
 {
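For contrast, the post-patch shape under the same assumption: once the helper
is defined outside the debug-only region, both debug and non-debug builds
link. Again a user-space sketch with a hypothetical name, not the kernel
source:

	#include <stdio.h>

	/*
	 * Defined unconditionally, like blkiocg_set_start_empty_time()
	 * after this patch relocates it.
	 */
	void set_start_empty_time(void)
	{
		puts("start_empty_time set");
	}

	#ifdef CONFIG_DEBUG_CFQ_IOSCHED
	/* Genuinely debug-only helpers can stay guarded here. */
	#endif

	int main(void)
	{
		set_start_empty_time();	/* links whether or not the option is set */
		return 0;
	}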
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e0df2bdcf21..01771098355d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2231,7 +2231,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	int dispatched = 0;
 
 	/* Expire the timeslice of the current active queue first */
-	cfq_slice_expired(cfqd, 0);
+	cfq_slice_expired(cfqd, 0, true);
 	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
 		__cfq_set_active_queue(cfqd, cfqq);
 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
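The CFQ hunk is the merge fix: cfq_slice_expired() now takes a third argument,
and forced dispatch passes true. Per the comment in blkiocg_set_start_empty_time()
above, that flag appears to end up as ignore, tolerating the superfluous
timeslice-complete event instead of tripping BUG_ON. A small user-space model
of that semantics, with assert() standing in for BUG_ON and hypothetical names:

	#include <assert.h>
	#include <stdbool.h>

	static bool blkg_empty;	/* stands in for blkio_blkg_empty() state */

	static void set_start_empty_time(bool ignore)
	{
		/* Models BUG_ON(!ignore && blkio_blkg_empty(stats)). */
		assert(ignore || !blkg_empty);
		blkg_empty = true;	/* stands in for blkio_mark_blkg_empty() */
	}

	int main(void)
	{
		set_start_empty_time(false);	/* first empty event: fine */
		set_start_empty_time(true);	/* forced dispatch: duplicate tolerated */
		return 0;
	}

With ignore=false on the second call, the assertion would fire, which is why
the forced-dispatch path needs the new argument.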