Diffstat (limited to 'block/cfq-iosched.c')
 -rw-r--r--  block/cfq-iosched.c  163
 1 file changed, 85 insertions(+), 78 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ea83a4f0c27d..7785169f3c8f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,9 +54,9 @@ static const int cfq_hist_divisor = 4;
 #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
 
 #define RQ_CIC(rq)		\
-	((struct cfq_io_context *) (rq)->elevator_private)
-#define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private2)
-#define RQ_CFQG(rq)	(struct cfq_group *) ((rq)->elevator_private3)
+	((struct cfq_io_context *) (rq)->elevator_private[0])
+#define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private[1])
+#define RQ_CFQG(rq)	(struct cfq_group *) ((rq)->elevator_private[2])
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
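
Note on the hunk above: the three per-request scheduler cookies (cic, cfqq, cfqg) move from three named request fields into one three-slot array. A rough, self-contained illustration of the accessor pattern — the struct below is a stand-in for demonstration, not the kernel's struct request:

    #include <stdio.h>

    /* Stand-ins for the real kernel types; names are illustrative only. */
    struct cfq_io_context { int id; };
    struct cfq_queue      { int id; };
    struct cfq_group      { int id; };

    /* Sketch of the relevant part of struct request after this change:
     * one array replaces elevator_private, elevator_private2 and
     * elevator_private3. */
    struct request {
            void *elevator_private[3];
    };

    #define RQ_CIC(rq)   ((struct cfq_io_context *)(rq)->elevator_private[0])
    #define RQ_CFQQ(rq)  ((struct cfq_queue *)(rq)->elevator_private[1])
    #define RQ_CFQG(rq)  ((struct cfq_group *)(rq)->elevator_private[2])

    int main(void)
    {
            struct cfq_io_context cic = { 1 };
            struct cfq_queue cfqq = { 2 };
            struct cfq_group cfqg = { 3 };
            struct request rq = { { &cic, &cfqq, &cfqg } };

            printf("cic=%d cfqq=%d cfqg=%d\n",
                   RQ_CIC(&rq)->id, RQ_CFQQ(&rq)->id, RQ_CFQG(&rq)->id);
            return 0;
    }

The array form lets cfq_set_request() and cfq_put_request(), later in this diff, fill and clear the slots uniformly.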
@@ -146,7 +146,6 @@ struct cfq_queue {
 	struct cfq_rb_root *service_tree;
 	struct cfq_queue *new_cfqq;
 	struct cfq_group *cfqg;
-	struct cfq_group *orig_cfqg;
 	/* Number of sectors dispatched from queue in single dispatch round */
 	unsigned long nr_sectors;
 };
@@ -179,6 +178,8 @@ struct cfq_group {
 	/* group service_tree key */
 	u64 vdisktime;
 	unsigned int weight;
+	unsigned int new_weight;
+	bool needs_update;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -238,6 +239,7 @@ struct cfq_data {
 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
 	unsigned int busy_queues;
+	unsigned int busy_sync_queues;
 
 	int rq_in_driver;
 	int rq_in_flight[2];
@@ -285,7 +287,6 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
-	unsigned int cfq_group_isolation;
 
 	unsigned int cic_index;
 	struct list_head cic_list;
@@ -501,13 +502,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 	}
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -558,15 +552,13 @@ static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 
 static void update_min_vdisktime(struct cfq_rb_root *st)
 {
-	u64 vdisktime = st->min_vdisktime;
 	struct cfq_group *cfqg;
 
 	if (st->left) {
 		cfqg = rb_entry_cfqg(st->left);
-		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
+		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
+						  cfqg->vdisktime);
 	}
-
-	st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
 }
 
 /*
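
Note on update_min_vdisktime() above: min_vdisktime may only move forward, and the comparison has to survive u64 wraparound. A compilable userspace sketch of that wrap-safe advance (the helper body mirrors the max_vdisktime() defined earlier in this file, modulo the stdint types):

    #include <stdio.h>
    #include <stdint.h>

    /* Wrap-safe "advance to the later value" on an unsigned 64-bit clock;
     * the signed subtraction handles wraparound, mirroring the CFS
     * vruntime trick. */
    static uint64_t max_vdisktime(uint64_t min_vdisktime, uint64_t vdisktime)
    {
            int64_t delta = (int64_t)(vdisktime - min_vdisktime);
            if (delta > 0)
                    min_vdisktime = vdisktime;
            return min_vdisktime;
    }

    int main(void)
    {
            uint64_t clock = UINT64_MAX - 5;        /* about to wrap */
            /* after wrap, 10 is still "later than" UINT64_MAX - 5 */
            clock = max_vdisktime(clock, 10);
            printf("%llu\n", (unsigned long long)clock);    /* prints 10 */
            return 0;
    }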
@@ -863,7 +855,27 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 }
 
 static void
-cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+cfq_update_group_weight(struct cfq_group *cfqg)
+{
+	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+	if (cfqg->needs_update) {
+		cfqg->weight = cfqg->new_weight;
+		cfqg->needs_update = false;
+	}
+}
+
+static void
+cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
+	cfq_update_group_weight(cfqg);
+	__cfq_group_service_tree_add(st, cfqg);
+	st->total_weight += cfqg->weight;
+}
+
+static void
+cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 	struct cfq_group *__cfqg;
@@ -884,13 +896,19 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
 	} else
 		cfqg->vdisktime = st->min_vdisktime;
+	cfq_group_service_tree_add(st, cfqg);
+}
 
-	__cfq_group_service_tree_add(st, cfqg);
-	st->total_weight += cfqg->weight;
+static void
+cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	st->total_weight -= cfqg->weight;
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
+		cfq_rb_erase(&cfqg->rb_node, st);
 }
 
 static void
-cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
+cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
@@ -902,14 +920,13 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		return;
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-	st->total_weight -= cfqg->weight;
-	if (!RB_EMPTY_NODE(&cfqg->rb_node))
-		cfq_rb_erase(&cfqg->rb_node, st);
+	cfq_group_service_tree_del(st, cfqg);
 	cfqg->saved_workload_slice = 0;
 	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
-static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+						unsigned int *unaccounted_time)
 {
 	unsigned int slice_used;
 
@@ -928,8 +945,13 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 				1);
 	} else {
 		slice_used = jiffies - cfqq->slice_start;
-		if (slice_used > cfqq->allocated_slice)
+		if (slice_used > cfqq->allocated_slice) {
+			*unaccounted_time = slice_used - cfqq->allocated_slice;
 			slice_used = cfqq->allocated_slice;
+		}
+		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+			*unaccounted_time += cfqq->slice_start -
+					cfqq->dispatch_start;
 	}
 
 	return slice_used;
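
Note on the hunk above: cfq_cfqq_slice_usage() now also reports, through the out-parameter, time the queue consumed but is not charged for — overrun beyond allocated_slice, plus any wait between dispatch_start and slice_start. A worked example with made-up jiffies values (the struct and harness are illustrative only):

    #include <stdio.h>

    /* Toy model of the slice-bookkeeping fields used above. */
    struct cfqq_sketch {
            unsigned long dispatch_start;   /* first request dispatched */
            unsigned long slice_start;      /* slice actually began */
            unsigned long allocated_slice;  /* slice length it was granted */
    };

    static unsigned int slice_usage(struct cfqq_sketch *q, unsigned long now,
                                    unsigned int *unaccounted_time)
    {
            unsigned int slice_used = now - q->slice_start;

            *unaccounted_time = 0;
            if (slice_used > q->allocated_slice) {
                    *unaccounted_time = slice_used - q->allocated_slice;
                    slice_used = q->allocated_slice;  /* charge is capped */
            }
            if (q->slice_start > q->dispatch_start)   /* waited for slice */
                    *unaccounted_time += q->slice_start - q->dispatch_start;
            return slice_used;
    }

    int main(void)
    {
            /* Dispatched at t=100, slice granted at t=120 for 50, now t=190. */
            struct cfqq_sketch q = { 100, 120, 50 };
            unsigned int unaccounted;
            unsigned int used = slice_usage(&q, 190, &unaccounted);

            /* used = 50 (capped); unaccounted = 20 overrun + 20 startup = 40 */
            printf("used=%u unaccounted=%u\n", used, unaccounted);
            return 0;
    }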
@@ -939,12 +961,12 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 			struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl, charge;
+	unsigned int used_sl, charge, unaccounted_sl = 0;
 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 			- cfqg->service_tree_idle.count;
 
 	BUG_ON(nr_sync < 0);
-	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
 
 	if (iops_mode(cfqd))
 		charge = cfqq->slice_dispatch;
@@ -952,9 +974,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 		charge = cfqq->allocated_slice;
 
 	/* Can't update vdisktime while group is on service tree */
-	cfq_rb_erase(&cfqg->rb_node, st);
+	cfq_group_service_tree_del(st, cfqg);
 	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
-	__cfq_group_service_tree_add(st, cfqg);
+	/* If a new weight was requested, update now, off tree */
+	cfq_group_service_tree_add(st, cfqg);
 
 	/* This group is being expired. Save the context */
 	if (time_after(cfqd->workload_expires, jiffies)) {
@@ -970,7 +993,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
 			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
 			iops_mode(cfqd), cfqq->nr_sectors);
-	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
+					  unaccounted_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
@@ -985,7 +1009,9 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
 void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
 					unsigned int weight)
 {
-	cfqg_of_blkg(blkg)->weight = weight;
+	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+	cfqg->new_weight = weight;
+	cfqg->needs_update = true;
 }
 
 static struct cfq_group *
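
Note: together with cfq_update_group_weight() earlier in this diff, this callback forms a two-phase weight update — the cgroup write only records the request, and the new weight takes effect the next time the group is inserted into the service tree, i.e. while it is guaranteed to be off the tree, so st->total_weight cannot drift out of sync with the weights actually inserted. A minimal model of that handshake (types and harness are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdbool.h>

    struct group_sketch {
            unsigned int weight;
            unsigned int new_weight;
            bool needs_update;
    };

    /* Producer: may run at any time (cgroup weight write). It never
     * touches the live weight, so tree totals stay consistent. */
    static void request_weight(struct group_sketch *g, unsigned int w)
    {
            g->new_weight = w;
            g->needs_update = true;
    }

    /* Consumer: runs only while the group is off the service tree. */
    static void tree_add(struct group_sketch *g, unsigned int *total_weight)
    {
            if (g->needs_update) {          /* cfq_update_group_weight() */
                    g->weight = g->new_weight;
                    g->needs_update = false;
            }
            *total_weight += g->weight;     /* matches what was inserted */
    }

    int main(void)
    {
            unsigned int total = 0;
            struct group_sketch g = { .weight = 500 };

            request_weight(&g, 1000);       /* deferred: g.weight still 500 */
            tree_add(&g, &total);           /* applied here: total = 1000 */
            printf("weight=%u total=%u\n", g.weight, total);
            return 0;
    }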
@@ -1187,32 +1213,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	int new_cfqq = 1;
 	int group_changed = 0;
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-	if (!cfqd->cfq_group_isolation
-	    && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
-	    && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
-		/* Move this cfq to root group */
-		cfq_log_cfqq(cfqd, cfqq, "moving to root group");
-		if (!RB_EMPTY_NODE(&cfqq->rb_node))
-			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
-		cfqq->orig_cfqg = cfqq->cfqg;
-		cfqq->cfqg = &cfqd->root_group;
-		cfqd->root_group.ref++;
-		group_changed = 1;
-	} else if (!cfqd->cfq_group_isolation
-		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
-		/* cfqq is sequential now needs to go to its original group */
-		BUG_ON(cfqq->cfqg != &cfqd->root_group);
-		if (!RB_EMPTY_NODE(&cfqq->rb_node))
-			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
-		cfq_put_cfqg(cfqq->cfqg);
-		cfqq->cfqg = cfqq->orig_cfqg;
-		cfqq->orig_cfqg = NULL;
-		group_changed = 1;
-		cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
-	}
-#endif
-
 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
 						cfqq_type(cfqq));
 	if (cfq_class_idle(cfqq)) {
@@ -1284,7 +1284,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	service_tree->count++;
 	if ((add_front || !new_cfqq) && !group_changed)
 		return;
-	cfq_group_service_tree_add(cfqd, cfqq->cfqg);
+	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
 }
 
 static struct cfq_queue *
@@ -1372,6 +1372,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->busy_sync_queues++;
 
 	cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -1395,9 +1397,11 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		cfqq->p_root = NULL;
 	}
 
-	cfq_group_service_tree_del(cfqd, cfqq->cfqg);
+	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->busy_sync_queues--;
 }
 
 /*
@@ -2405,6 +2409,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Does this cfqq already have too much IO in flight?
 	 */
 	if (cfqq->dispatched >= max_dispatch) {
+		bool promote_sync = false;
 		/*
 		 * idle queue must always only have a single IO in flight
 		 */
@@ -2412,15 +2417,26 @@
 			return false;
 
 		/*
+		 * If there is only one sync queue
+		 * we can ignore async queue here and give the sync
+		 * queue no dispatch limit. The reason is a sync queue can
+		 * preempt async queue, limiting the sync queue doesn't make
+		 * sense. This is useful for aiostress test.
+		 */
+		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
+			promote_sync = true;
+
+		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
-		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+				!promote_sync)
 			return false;
 
 		/*
 		 * Sole queue user, no limit
 		 */
-		if (cfqd->busy_queues == 1)
+		if (cfqd->busy_queues == 1 || promote_sync)
 			max_dispatch = -1;
 		else
 			/*
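
Note on the hunk above: with exactly one busy sync queue, that queue now escapes both the slice_used_soon() throttle and the max_dispatch cap, on the reasoning that it could preempt the async queues anyway. A distilled, standalone rendering of just that decision (simplified to a bool; the real cfq_may_dispatch() otherwise scales max_dispatch rather than refusing outright):

    #include <stdio.h>
    #include <stdbool.h>

    /* Distilled from the hunk above: decide whether a queue that already
     * has max_dispatch requests in flight may dispatch more. */
    static bool may_exceed_quantum(bool is_sync, int busy_queues,
                                   int busy_sync_queues, bool slice_used_soon)
    {
            /* Sole sync queue: async peers can be preempted anyway, so a
             * dispatch cap on the sync queue serves no purpose. */
            bool promote_sync = is_sync && busy_sync_queues == 1;

            if (busy_queues > 1 && slice_used_soon && !promote_sync)
                    return false;   /* other queues deserve the slice */
            return busy_queues == 1 || promote_sync;        /* no limit */
    }

    int main(void)
    {
            /* One sync reader vs. three async writers: sync is not capped. */
            printf("%d\n", may_exceed_quantum(true, 4, 1, true));   /* 1 */
            /* Two sync queues: normal throttling applies. */
            printf("%d\n", may_exceed_quantum(true, 4, 2, true));   /* 0 */
            return 0;
    }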
@@ -2542,7 +2558,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 static void cfq_put_queue(struct cfq_queue *cfqq)
 {
 	struct cfq_data *cfqd = cfqq->cfqd;
-	struct cfq_group *cfqg, *orig_cfqg;
+	struct cfq_group *cfqg;
 
 	BUG_ON(cfqq->ref <= 0);
 
@@ -2554,7 +2570,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	BUG_ON(rb_first(&cfqq->sort_list));
 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
 	cfqg = cfqq->cfqg;
-	orig_cfqg = cfqq->orig_cfqg;
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
@@ -2564,8 +2579,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	kmem_cache_free(cfq_pool, cfqq);
 	cfq_put_cfqg(cfqg);
-	if (orig_cfqg)
-		cfq_put_cfqg(orig_cfqg);
 }
 
 /*
@@ -3613,12 +3626,12 @@ static void cfq_put_request(struct request *rq)
 
 	put_io_context(RQ_CIC(rq)->ioc);
 
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
+	rq->elevator_private[0] = NULL;
+	rq->elevator_private[1] = NULL;
 
 	/* Put down rq reference on cfqg */
 	cfq_put_cfqg(RQ_CFQG(rq));
-	rq->elevator_private3 = NULL;
+	rq->elevator_private[2] = NULL;
 
 	cfq_put_queue(cfqq);
 }
@@ -3705,13 +3718,12 @@ new_queue:
 	}
 
 	cfqq->allocated[rw]++;
-	cfqq->ref++;
-	rq->elevator_private = cic;
-	rq->elevator_private2 = cfqq;
-	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
 
+	cfqq->ref++;
+	rq->elevator_private[0] = cic;
+	rq->elevator_private[1] = cfqq;
+	rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
 	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	return 0;
 
 queue_fail:
@@ -3953,7 +3965,6 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_group_idle = cfq_group_idle;
 	cfqd->cfq_latency = 1;
-	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
 	/*
 	 * we optimistically start assuming sync ops weren't delayed in last
@@ -4029,7 +4040,6 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
-SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -4063,7 +4073,6 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
-STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -4081,7 +4090,6 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_idle),
 	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
-	CFQ_ATTR(group_isolation),
 	__ATTR_NULL
 };
 
@@ -4096,7 +4104,6 @@ static struct elevator_type iosched_cfq = {
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
 		.elevator_deactivate_req_fn =	cfq_deactivate_request,
-		.elevator_queue_empty_fn =	cfq_queue_empty,
 		.elevator_completed_req_fn =	cfq_completed_request,
 		.elevator_former_req_fn =	elv_rb_former_request,
 		.elevator_latter_req_fn =	elv_rb_latter_request,