Diffstat (limited to 'block')
 -rw-r--r--  block/blk-core.c    |   9
 -rw-r--r--  block/cfq-iosched.c |  67
 -rw-r--r--  block/cfq.h         | 115
 3 files changed, 156 insertions(+), 35 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f84cce42fc58..f0640d7f800f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1149,13 +1149,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
 		req->cmd_flags |= REQ_DISCARD;
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-			req->cmd_flags |= REQ_SOFTBARRIER;
-	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 		req->cmd_flags |= REQ_HARDBARRIER;
-
 	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
 	if (bio_rw_flagged(bio, BIO_RW_META))
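The rewrite above makes the discard and barrier flags orthogonal. As a rough sketch of the mapping (a summary, not part of the patch):

	/* old: discard + barrier -> REQ_DISCARD | REQ_SOFTBARRIER
	 *      discard alone     -> REQ_DISCARD
	 *      barrier alone     -> REQ_HARDBARRIER
	 * new: discard           -> REQ_DISCARD
	 *      barrier           -> REQ_HARDBARRIER, even when combined with
	 *                           a discard
	 */

A barrier discard is therefore promoted from a soft barrier to a hard barrier.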
@@ -1586,7 +1583,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio)) {
+	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
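The submit_bio() hunk keeps discard requests out of the PGPGOUT/PGPGIN vm-event accounting: a discard bio can have a payload attached, so bio_has_data() alone is no longer a sufficient test for real page traffic. A minimal sketch of the resulting guard, where the read branch (PGPGIN) is assumed from the surrounding code rather than shown in the hunk:

	/* count only genuine data transfers, never discards */
	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
		if (rw & WRITE)
			count_vm_events(PGPGOUT, count);	/* pages written out */
		else
			count_vm_events(PGPGIN, count);		/* assumed read counterpart */
	}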
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5ff4f4850e71..7982b830db58 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -14,7 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
-#include "blk-cgroup.h"
+#include "cfq.h"
 
 /*
  * tunables
@@ -879,7 +879,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
-	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -939,8 +939,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
-	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
-	blkiocg_set_start_empty_time(&cfqg->blkg);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -995,7 +995,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 
 	/* Add group onto cgroup list */
 	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
@@ -1079,7 +1079,7 @@ static void cfq_release_cfq_groups(struct cfq_data *cfqd)
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!blkiocg_del_blkio_group(&cfqg->blkg))
+		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
 			cfq_destroy_cfqg(cfqd, cfqg);
 	}
 }
@@ -1421,10 +1421,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 }
@@ -1482,8 +1482,8 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1518,8 +1518,8 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
-					cfq_bio_sync(bio));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+					bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
 static void
@@ -1539,8 +1539,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
-					rq_is_sync(next));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(next), rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1571,7 +1571,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1580,7 +1580,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1911,7 +1911,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1931,7 +1931,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-	blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
 
@@ -1986,6 +1986,15 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	int process_refs, new_process_refs;
 	struct cfq_queue *__cfqq;
 
+	/*
+	 * If there are no process references on the new_cfqq, then it is
+	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
+	 * chain may have dropped their last reference (not just their
+	 * last process reference).
+	 */
+	if (!cfqq_process_refs(new_cfqq))
+		return;
+
 	/* Avoid a circular list and skip interim queue merges */
 	while ((__cfqq = new_cfqq->new_cfqq)) {
 		if (__cfqq == cfqq)
@@ -1994,17 +2003,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	}
 
 	process_refs = cfqq_process_refs(cfqq);
+	new_process_refs = cfqq_process_refs(new_cfqq);
 	/*
 	 * If the process for the cfqq has gone away, there is no
 	 * sense in merging the queues.
 	 */
-	if (process_refs == 0)
+	if (process_refs == 0 || new_process_refs == 0)
 		return;
 
 	/*
 	 * Merge in the direction of the lesser amount of work.
 	 */
-	new_process_refs = cfqq_process_refs(new_cfqq);
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
 		atomic_add(process_refs, &new_cfqq->ref);
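Taken together, the two cfq_setup_merge() hunks ensure both queues hold live process references before any merge bookkeeping happens. A condensed sketch of the resulting flow (elided lines marked):

	if (!cfqq_process_refs(new_cfqq))
		return;	/* chain may hold queues whose last ref is gone */

	/* ... walk new_cfqq->new_cfqq, avoiding circular lists ... */

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	if (process_refs == 0 || new_process_refs == 0)
		return;	/* no live process on either side, nothing to merge */

Computing new_process_refs ahead of the early return means a dead new_cfqq can no longer be installed as a merge target.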
@@ -3248,7 +3257,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfq_clear_cfqq_wait_request(cfqq);
 			__blk_run_queue(cfqd->queue);
 		} else {
-			blkiocg_update_idle_time_stats(
+			cfq_blkiocg_update_idle_time_stats(
 					&cfqq->cfqg->blkg);
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
@@ -3276,7 +3285,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -3364,9 +3373,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
-	blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
-			rq_io_start_time_ns(rq), rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
+			rq_data_dir(rq), rq_is_sync(rq));
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3730,7 +3739,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
 	cfq_put_async_queues(cfqd);
 	cfq_release_cfq_groups(cfqd);
-	blkiocg_del_blkio_group(&cfqd->root_group.blkg);
+	cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
 
 	spin_unlock_irq(q->queue_lock);
 
@@ -3798,8 +3807,8 @@ static void *cfq_init_queue(struct request_queue *q)
 	 */
 	atomic_set(&cfqg->ref, 1);
 	rcu_read_lock();
-	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
-					0);
+	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
+					(void *)cfqd, 0);
 	rcu_read_unlock();
 #endif
 	/*
diff --git a/block/cfq.h b/block/cfq.h
new file mode 100644
index 000000000000..93448e5a2e41
--- /dev/null
+++ b/block/cfq.h
@@ -0,0 +1,115 @@
+#ifndef _CFQ_H
+#define _CFQ_H
+#include "blk-cgroup.h"
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+		struct blkio_group *curr_blkg, bool direction, bool sync)
+{
+	blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+		unsigned long dequeue)
+{
+	blkiocg_update_dequeue_stats(blkg, dequeue);
+}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+		unsigned long time)
+{
+	blkiocg_update_timeslice_used(blkg, time);
+}
+
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+	blkiocg_set_start_empty_time(blkg);
+}
+
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+		bool direction, bool sync)
+{
+	blkiocg_update_io_remove_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+		bool direction, bool sync)
+{
+	blkiocg_update_io_merged_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_idle_time_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_avg_queue_size_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_set_idle_time_stats(blkg);
+}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+		uint64_t bytes, bool direction, bool sync)
+{
+	blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
+				direction, sync);
+}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+		struct blkio_group *blkg, void *key, dev_t dev) {
+	blkiocg_add_blkio_group(blkcg, blkg, key, dev);
+}
+
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return blkiocg_del_blkio_group(blkg);
+}
+
+#else /* CFQ_GROUP_IOSCHED */
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+		struct blkio_group *curr_blkg, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+		unsigned long dequeue) {}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+		unsigned long time) {}
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+		bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+		bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+}
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+		uint64_t bytes, bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+		struct blkio_group *blkg, void *key, dev_t dev) {}
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return 0;
+}
+
+#endif /* CFQ_GROUP_IOSCHED */
+#endif
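The new header lets cfq-iosched.c call the blkio-cgroup statistics helpers unconditionally: with CONFIG_CFQ_GROUP_IOSCHED set, each cfq_blkiocg_* wrapper forwards to its blkiocg_* counterpart; without it, the wrappers collapse to empty static inlines that the compiler discards, keeping #ifdefs out of the call sites. A self-contained sketch of the same pattern, using illustrative names (CONFIG_FEATURE_X, feature_update(), backend_update()) that are not part of this patch:

	#ifdef CONFIG_FEATURE_X
	static inline void feature_update(int v)
	{
		backend_update(v);	/* real backend, built only when enabled */
	}
	#else
	static inline void feature_update(int v) {}	/* optimized away */
	#endif

Call sites then simply write feature_update(x); no configuration check is needed at the point of use.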