author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-22 20:00:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-22 20:00:32 -0400
commit		e9dd2b6837e26fe202708cce5ea4bb4ee3e3482e (patch)
tree		f42fd892495bfc4cbb740d06b016d267c9c42d00 /block/cfq-iosched.c
parent		4f3a29dadaf999a273f1e7fe2476595d0283eef3 (diff)
parent		b4627321e18582dcbdeb45d77df29d3177107c65 (diff)
Merge branch 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block: (39 commits)
cfq-iosched: Fix a gcc 4.5 warning and put some comments
block: Turn bvec_k{un,}map_irq() into static inline functions
block: fix accounting bug on cross partition merges
block: Make the integrity mapped property a bio flag
block: Fix double free in blk_integrity_unregister
block: Ensure physical block size is unsigned int
blkio-throttle: Fix possible multiplication overflow in iops calculations
blkio-throttle: limit max iops value to UINT_MAX
blkio-throttle: There is no need to convert jiffies to milli seconds
blkio-throttle: Fix link failure failure on i386
blkio: Recalculate the throttled bio dispatch time upon throttle limit change
blkio: Add root group to td->tg_list
blkio: deletion of a cgroup was causes oops
blkio: Do not export throttle files if CONFIG_BLK_DEV_THROTTLING=n
block: set the bounce_pfn to the actual DMA limit rather than to max memory
block: revert bad fix for memory hotplug causing bounces
Fix compile error in blk-exec.c for !CONFIG_DETECT_HUNG_TASK
block: set the bounce_pfn to the actual DMA limit rather than to max memory
block: Prevent hang_check firing during long I/O
cfq: improve fsync performance for small files
...
Fix up trivial conflicts due to __rcu sparse annotation in include/linux/genhd.h
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9eba291eb6f..4cd59b0d7c1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -160,6 +160,7 @@ enum wl_prio_t {
 	BE_WORKLOAD = 0,
 	RT_WORKLOAD = 1,
 	IDLE_WORKLOAD = 2,
+	CFQ_PRIO_NR,
 };
 
 /*
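The first hunk adds a trailing CFQ_PRIO_NR enumerator, the usual C idiom for letting the compiler count the priority classes so that arrays indexed by class need no hard-coded size. A minimal standalone sketch of the pattern (illustration only, reusing the names from the hunk; not kernel code):

#include <stdio.h>

enum wl_prio_t { BE_WORKLOAD = 0, RT_WORKLOAD = 1, IDLE_WORKLOAD = 2, CFQ_PRIO_NR };

/* one slot per priority class; the array resizes itself at compile time
 * if another class is ever added before CFQ_PRIO_NR */
static unsigned int busy_queues_avg[CFQ_PRIO_NR];

int main(void)
{
	busy_queues_avg[RT_WORKLOAD] = 4;
	printf("%d priority classes, RT avg %u\n",
	       CFQ_PRIO_NR, busy_queues_avg[RT_WORKLOAD]);
	return 0;
}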
@@ -184,10 +185,19 @@ struct cfq_group {
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
 
-	/* Per group busy queus average. Useful for workload slice calc. */
-	unsigned int busy_queues_avg[2];
 	/*
-	 * rr lists of queues with requests, onle rr for each priority class.
+	 * Per group busy queus average. Useful for workload slice calc. We
+	 * create the array for each prio class but at run time it is used
+	 * only for RT and BE class and slot for IDLE class remains unused.
+	 * This is primarily done to avoid confusion and a gcc warning.
+	 */
+	unsigned int busy_queues_avg[CFQ_PRIO_NR];
+	/*
+	 * rr lists of queues with requests. We maintain service trees for
+	 * RT and BE classes. These trees are subdivided in subclasses
+	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
+	 * class there is no subclassification and all the cfq queues go on
+	 * a single tree service_tree_idle.
 	 * Counts are embedded in the cfq_rb_root
 	 */
 	struct cfq_rb_root service_trees[2][3];
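The rewritten comments describe the per-group layout: busy_queues_avg keeps one slot per priority class (only RT and BE are used at run time), and queues sit on service_trees[prio][workload_type], except the IDLE class, which uses the single service_tree_idle. A hedged sketch of roughly how the lookup helper in this file reads (paraphrased from the cfq-iosched.c of this era, not part of the diff):

/* pick the rb-tree a queue belongs to from its prio class and workload
 * type; the IDLE class has no per-type subdivision */
static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
					    enum wl_prio_t prio,
					    enum wl_type_t type)
{
	if (prio == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[prio][type];
}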
@@ -221,7 +231,6 @@ struct cfq_data {
 	enum wl_type_t serving_type;
 	unsigned long workload_expires;
 	struct cfq_group *serving_group;
-	bool noidle_tree_requires_idle;
 
 	/*
 	 * Each priority tree is sorted by next_request position. These
@@ -977,8 +986,8 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
 	return NULL;
 }
 
-void
-cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
+void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
+					unsigned int weight)
 {
 	cfqg_of_blkg(blkg)->weight = weight;
 }
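cfq_update_blkio_group_weight() gains a void *key argument that the body does not use; since the function is wired into blkio_policy_cfq as .blkio_update_group_weight_fn (last hunk), the callback type on the blk-cgroup side presumably changed to the same shape. Assumed prototype, for orientation only:

/* assumed callback type in blk-cgroup.h that the function above is
 * assigned to via .blkio_update_group_weight_fn; 'key' is presumably the
 * per-device key the group was registered under and is unused by CFQ here */
typedef void (blkio_update_group_weight_fn)(void *key,
		struct blkio_group *blkg, unsigned int weight);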
@@ -2180,7 +2189,6 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	slice = max_t(unsigned, slice, CFQ_MIN_TT);
 	cfq_log(cfqd, "workload slice:%d", slice);
 	cfqd->workload_expires = jiffies + slice;
-	cfqd->noidle_tree_requires_idle = false;
 }
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
@@ -3177,7 +3185,9 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
 		cfq_mark_cfqq_deep(cfqq);
 
-	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
+	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
+		enable_idle = 0;
+	else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
 	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
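This hunk front-loads the no-idle decision: a queue whose pending request carries REQ_NOIDLE (sync writes such as fsync traffic) has its idle window disabled when the window is updated, rather than at completion time as the next hunk removes; this is likely the change behind the 'cfq: improve fsync performance for small files' entry in the merge list above. Restated as a hypothetical predicate (the helper name is ours, not the kernel's):

/* hypothetical helper restating the new rule from the hunk above:
 * never idle on a queue whose pending request is flagged REQ_NOIDLE */
static bool cfqq_may_idle(struct cfq_queue *cfqq)
{
	return !(cfqq->next_rq &&
		 (cfqq->next_rq->cmd_flags & REQ_NOIDLE));
}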
@@ -3494,17 +3504,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 			cfq_slice_expired(cfqd, 1);
 		else if (sync && cfqq_empty &&
 			 !cfq_close_cooperator(cfqd, cfqq)) {
-			cfqd->noidle_tree_requires_idle |=
-				!(rq->cmd_flags & REQ_NOIDLE);
-			/*
-			 * Idling is enabled for SYNC_WORKLOAD.
-			 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-			 * only if we processed at least one !REQ_NOIDLE request
-			 */
-			if (cfqd->serving_type == SYNC_WORKLOAD
-			    || cfqd->noidle_tree_requires_idle
-			    || cfqq->cfqg->nr_cfqq == 1)
-				cfq_arm_slice_timer(cfqd);
+			cfq_arm_slice_timer(cfqd);
 		}
 	}
 
@@ -4090,6 +4090,7 @@ static struct blkio_policy_type blkio_policy_cfq = {
 		.blkio_unlink_group_fn = cfq_unlink_blkio_group,
 		.blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
 	},
+	.plid = BLKIO_POLICY_PROP,
 };
 #else
 static struct blkio_policy_type blkio_policy_cfq;
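The added .plid tags CFQ's descriptor as the proportional-weight policy; with blkio-throttle arriving in the same merge, blk-cgroup distinguishes registered policies by this id. A hedged, abbreviated sketch of where the descriptor ends up (elv_register, blkio_policy_register and iosched_cfq are recalled from the tree, not shown in this diff; the real cfq_init() does more setup):

/* sketch: CFQ hands its policy descriptor to blk-cgroup at module init;
 * blk-cgroup tells registered policies apart by .plid, BLKIO_POLICY_PROP
 * being CFQ's proportional-weight policy as opposed to the throttling one */
static int __init cfq_init(void)
{
	elv_register(&iosched_cfq);
	blkio_policy_register(&blkio_policy_cfq);
	return 0;
}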