author     Vivek Goyal <vgoyal@redhat.com>        2009-12-03 12:59:46 -0500
committer  Jens Axboe <jens.axboe@oracle.com>     2009-12-03 13:28:52 -0500
commit     25fb5169d4c9d4255107abbb7c08ab712434efc8 (patch)
tree       8939bd6f73f9888d954672dfaac1cc25e313cb2f
parent     dae739ebc4c590630039533a5bbd05865966094f (diff)
blkio: Dynamic cfq group creation based on cgroup tasks belongs to
o Determine the cgroup the IO-submitting task belongs to and create the
cfq group if it does not already exist.
o Also link the cfqq with its associated cfq group (a caller-side sketch
follows the diffstat below).
o Currently all async IO is mapped to the root group.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--   block/cfq-iosched.c   111
1 file changed, 100 insertions(+), 11 deletions(-)
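For orientation, here is a minimal caller-side sketch of how the helpers touched by this patch are expected to fit together in the queue setup path. It is not part of the patch itself: the function name sketch_setup_queue and the allocation/oom_cfqq fallback details are assumptions modelled on the surrounding cfq-iosched.c code at this point in the series.

/*
 * Illustrative sketch only -- not part of this patch.  Shows how the queue
 * setup path is expected to use cfq_get_cfqg() and cfq_link_cfqq_cfqg();
 * allocation details are assumptions based on cfq-iosched.c.
 */
static struct cfq_queue *
sketch_setup_queue(struct cfq_data *cfqd, bool is_sync, gfp_t gfp_mask)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;

	/* Find (or, with create=1, allocate) the group for current's cgroup */
	cfqg = cfq_get_cfqg(cfqd, 1);
	if (!cfqg)
		cfqg = &cfqd->root_group;	/* defensive; create=1 already falls back to root */

	cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask | __GFP_ZERO,
				     cfqd->queue->node);
	if (!cfqq)
		return &cfqd->oom_cfqq;	/* assumed OOM fallback */

	cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);

	/* Sync queues join the task's group; async queues are forced to root */
	cfq_link_cfqq_cfqg(cfqq, cfqg);

	return cfqq;
}

The substantive change is that cfq_get_cfqg() now resolves the blkio cgroup of current under rcu_read_lock() and allocates a per-cgroup cfq_group on demand, instead of unconditionally returning &cfqd->root_group as the removed stubs did; async queues are still mapped onto the root group by cfq_link_cfqq_cfqg().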
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 55d2a21f7f06..a877eeee80af 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -189,6 +189,10 @@ struct cfq_group {
 	unsigned long saved_workload_slice;
 	enum wl_type_t saved_workload;
 	enum wl_prio_t saved_serving_prio;
+	struct blkio_group blkg;
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	struct hlist_node cfqd_node;
+#endif
 };
 
 /*
@@ -274,8 +278,13 @@ struct cfq_data {
 	struct cfq_queue oom_cfqq;
 
 	unsigned long last_end_sync_rq;
+
+	/* List of cfq groups being managed on this device*/
+	struct hlist_head cfqg_list;
 };
 
+static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
+
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
 					    enum wl_prio_t prio,
 					    enum wl_type_t type,
@@ -881,6 +890,89 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 	cfqg->saved_workload_slice = 0;
 }
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
+{
+	if (blkg)
+		return container_of(blkg, struct cfq_group, blkg);
+	return NULL;
+}
+
+static struct cfq_group *
+cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+	struct cfq_group *cfqg = NULL;
+	void *key = cfqd;
+	int i, j;
+	struct cfq_rb_root *st;
+
+	/* Do we need to take this reference */
+	if (!css_tryget(&blkcg->css))
+		return NULL;;
+
+	cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+	if (cfqg || !create)
+		goto done;
+
+	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
+	if (!cfqg)
+		goto done;
+
+	cfqg->weight = blkcg->weight;
+	for_each_cfqg_st(cfqg, i, j, st)
+		*st = CFQ_RB_ROOT;
+	RB_CLEAR_NODE(&cfqg->rb_node);
+
+	/* Add group onto cgroup list */
+	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd);
+
+	/* Add group on cfqd list */
+	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+
+done:
+	css_put(&blkcg->css);
+	return cfqg;
+}
+
+/*
+ * Search for the cfq group current task belongs to. If create = 1, then also
+ * create the cfq group if it does not exist. request_queue lock must be held.
+ */
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
+{
+	struct cgroup *cgroup;
+	struct cfq_group *cfqg = NULL;
+
+	rcu_read_lock();
+	cgroup = task_cgroup(current, blkio_subsys_id);
+	cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
+	if (!cfqg && create)
+		cfqg = &cfqd->root_group;
+	rcu_read_unlock();
+	return cfqg;
+}
+
+static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
+{
+	/* Currently, all async queues are mapped to root group */
+	if (!cfq_cfqq_sync(cfqq))
+		cfqg = &cfqq->cfqd->root_group;
+
+	cfqq->cfqg = cfqg;
+}
+#else /* GROUP_IOSCHED */
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
+{
+	return &cfqd->root_group;
+}
+static inline void
+cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
+	cfqq->cfqg = cfqg;
+}
+
+#endif /* GROUP_IOSCHED */
+
 /*
  * The cfqd->service_trees holds all pending cfq_queue's that have
  * requests waiting to be processed. It is sorted in the order that
@@ -1372,7 +1464,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 
 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
 {
-	struct cfq_group *cfqg = &cfqd->root_group;
+	struct cfq_group *cfqg;
 	struct cfq_queue *cfqq;
 	int i, j;
 	struct cfq_rb_root *st;
@@ -1380,6 +1472,10 @@ static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
 	if (!cfqd->rq_queued)
 		return NULL;
 
+	cfqg = cfq_get_next_cfqg(cfqd);
+	if (!cfqg)
+		return NULL;
+
 	for_each_cfqg_st(cfqg, i, j, st)
 		if ((cfqq = cfq_rb_first(st)) != NULL)
 			return cfqq;
@@ -2390,16 +2486,6 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqq->pid = pid;
 }
 
-static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
-{
-	cfqq->cfqg = cfqg;
-}
-
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
-{
-	return &cfqd->root_group;
-}
-
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
@@ -3314,6 +3400,9 @@ static void *cfq_init_queue(struct request_queue *q)
 	/* Give preference to root group over other groups */
 	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd);
+#endif
 	/*
 	 * Not strictly needed (since RB_ROOT just clears the node and we
 	 * zeroed cfqd on alloc), but better be safe in case someone decides