author	Paolo Valente <paolo.valente@linaro.org>	2017-04-19 10:48:24 -0400
committer	Jens Axboe <axboe@fb.com>	2017-04-19 10:48:24 -0400
commit	ea25da48086d3bbebf3a2eeff387ea00ed96f5c4 (patch)
tree	1e7858910a647ae1a174ad019304bc3ffc2b5926 /block/bfq-iosched.c
parent	6fa3e8d34204d532268ddb4dc5d2a904197c972d (diff)
block, bfq: split bfq-iosched.c into multiple source files
The BFQ I/O scheduler features an optimal fair-queuing
(proportional-share) scheduling algorithm, enriched with several
mechanisms to boost throughput and reduce latency for interactive and
real-time applications. This makes BFQ a large and complex piece of
code. This commit addresses this issue by splitting BFQ into three
main, independent components, and by moving each component into a
separate source file:
1. Main algorithm: handles the interaction with the kernel and decides
which requests to dispatch; it relies on the two components below to
achieve its goals.
2. Scheduling engine (Hierarchical B-WF2Q+ scheduling algorithm):
computes the schedule, using weights and budgets provided by the above
component.
3. cgroups support: handles group operations (creation, destruction,
move, ...).
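For orientation, a sketch (illustrative only, not part of the patch) of
how the resulting pieces fit together; in the upstream tree the
companion files introduced by this series are bfq-iosched.h, bfq-wf2q.c
and bfq-cgroup.c, while the diff below shows only the bfq-iosched.c
side:

    /*
     * Post-split layout (sketch). Shared types and prototypes move
     * into a common header, and helpers that were file-local (static)
     * in the monolithic bfq-iosched.c become externally visible, so
     * that the scheduling engine and the cgroups code can call them;
     * this is what the -static/+ hunks below show.
     */

    /* bfq-iosched.h: shared declarations used by all components */
    struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
    void bfq_schedule_dispatch(struct bfq_data *bfqd);

    /* bfq-iosched.c: main algorithm (kernel interface, dispatch) */
    /* bfq-wf2q.c:    scheduling engine (hierarchical B-WF2Q+)    */
    /* bfq-cgroup.c:  cgroups support (group create/destroy/move) */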
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--	block/bfq-iosched.c	3663
1 file changed, 39 insertions(+), 3624 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 30bb8f905733..6d14f18c0d45 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -102,646 +102,18 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 #include "blk-mq-sched.h"
-#include <linux/blktrace_api.h>
-#include <linux/hrtimer.h>
-#include <linux/blk-cgroup.h>
-
-#define BFQ_IOPRIO_CLASSES	3
-#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)
-
-#define BFQ_MIN_WEIGHT			1
-#define BFQ_MAX_WEIGHT			1000
-#define BFQ_WEIGHT_CONVERSION_COEFF	10
-
-#define BFQ_DEFAULT_QUEUE_IOPRIO	4
-
-#define BFQ_WEIGHT_LEGACY_DFL	100
-#define BFQ_DEFAULT_GRP_IOPRIO	0
-#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE
-
-/*
- * Soft real-time applications are extremely more latency sensitive
- * than interactive ones. Over-raise the weight of the former to
- * privilege them against the latter.
- */
-#define BFQ_SOFTRT_WEIGHT_FACTOR	100
-
-struct bfq_entity;
-
-/**
- * struct bfq_service_tree - per ioprio_class service tree.
- *
- * Each service tree represents a B-WF2Q+ scheduler on its own. Each
- * ioprio_class has its own independent scheduler, and so its own
- * bfq_service_tree. All the fields are protected by the queue lock
- * of the containing bfqd.
- */
-struct bfq_service_tree {
-	/* tree for active entities (i.e., those backlogged) */
-	struct rb_root active;
-	/* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
-	struct rb_root idle;
-
-	/* idle entity with minimum F_i */
-	struct bfq_entity *first_idle;
-	/* idle entity with maximum F_i */
-	struct bfq_entity *last_idle;
-
-	/* scheduler virtual time */
-	u64 vtime;
-	/* scheduler weight sum; active and idle entities contribute to it */
-	unsigned long wsum;
-};
-
-/**
- * struct bfq_sched_data - multi-class scheduler.
- *
- * bfq_sched_data is the basic scheduler queue. It supports three
- * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup. @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
- *
- * The supported ioprio_classes are the same as in CFQ, in descending
- * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
- * Requests from higher priority queues are served before all the
- * requests from lower priority queues; among requests of the same
- * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
- */
-struct bfq_sched_data {
-	/* entity in service */
-	struct bfq_entity *in_service_entity;
-	/* head-of-line entity (see comments above) */
-	struct bfq_entity *next_in_service;
-	/* array of service trees, one per ioprio_class */
-	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
-	/* last time CLASS_IDLE was served */
-	unsigned long bfq_class_idle_last_service;
-
-};
-
-/**
- * struct bfq_weight_counter - counter of the number of all active entities
- *                             with a given weight.
- */
-struct bfq_weight_counter {
-	unsigned int weight; /* weight of the entities this counter refers to */
-	unsigned int num_active; /* nr of active entities with this weight */
-	/*
-	 * Weights tree member (see bfq_data's @queue_weights_tree and
-	 * @group_weights_tree)
-	 */
-	struct rb_node weights_node;
-};
-
-/**
- * struct bfq_entity - schedulable entity.
- *
- * A bfq_entity is used to represent either a bfq_queue (leaf node in the
- * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
- * entity belongs to the sched_data of the parent group in the cgroup
- * hierarchy. Non-leaf entities have also their own sched_data, stored
- * in @my_sched_data.
- *
- * Each entity stores independently its priority values; this would
- * allow different weights on different devices, but this
- * functionality is not exported to userspace by now. Priorities and
- * weights are updated lazily, first storing the new values into the
- * new_* fields, then setting the @prio_changed flag. As soon as
- * there is a transition in the entity state that allows the priority
- * update to take place the effective and the requested priority
- * values are synchronized.
- *
- * Unless cgroups are used, the weight value is calculated from the
- * ioprio to export the same interface as CFQ. When dealing with
- * ``well-behaved'' queues (i.e., queues that do not spend too much
- * time to consume their budget and have true sequential behavior, and
- * when there are no external factors breaking anticipation) the
- * relative weights at each level of the cgroups hierarchy should be
- * guaranteed. All the fields are protected by the queue lock of the
- * containing bfqd.
- */
-struct bfq_entity {
-	/* service_tree member */
-	struct rb_node rb_node;
-	/* pointer to the weight counter associated with this entity */
-	struct bfq_weight_counter *weight_counter;
-
-	/*
-	 * Flag, true if the entity is on a tree (either the active or
-	 * the idle one of its service_tree) or is in service.
-	 */
-	bool on_st;
-
-	/* B-WF2Q+ start and finish timestamps [sectors/weight] */
-	u64 start, finish;
-
-	/* tree the entity is enqueued into; %NULL if not on a tree */
-	struct rb_root *tree;
-
-	/*
-	 * minimum start time of the (active) subtree rooted at this
-	 * entity; used for O(log N) lookups into active trees
-	 */
-	u64 min_start;
-
-	/* amount of service received during the last service slot */
-	int service;
-
-	/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
-	int budget;
-
-	/* weight of the queue */
-	int weight;
-	/* next weight if a change is in progress */
-	int new_weight;
-
-	/* original weight, used to implement weight boosting */
-	int orig_weight;
-
-	/* parent entity, for hierarchical scheduling */
-	struct bfq_entity *parent;
-
-	/*
-	 * For non-leaf nodes in the hierarchy, the associated
-	 * scheduler queue, %NULL on leaf nodes.
-	 */
-	struct bfq_sched_data *my_sched_data;
-	/* the scheduler queue this entity belongs to */
-	struct bfq_sched_data *sched_data;
-
-	/* flag, set to request a weight, ioprio or ioprio_class change */
-	int prio_changed;
-};
-
-struct bfq_group;
-
-/**
- * struct bfq_ttime - per process thinktime stats.
- */
-struct bfq_ttime {
-	/* completion time of the last request */
-	u64 last_end_request;
-
-	/* total process thinktime */
-	u64 ttime_total;
-	/* number of thinktime samples */
-	unsigned long ttime_samples;
-	/* average process thinktime */
-	u64 ttime_mean;
-};
-
-/**
- * struct bfq_queue - leaf schedulable entity.
- *
- * A bfq_queue is a leaf request queue; it can be associated with an
- * io_context or more, if it is async or shared between cooperating
- * processes. @cgroup holds a reference to the cgroup, to be sure that it
- * does not disappear while a bfqq still references it (mostly to avoid
- * races between request issuing and task migration followed by cgroup
- * destruction).
- * All the fields are protected by the queue lock of the containing bfqd.
- */
-struct bfq_queue {
-	/* reference counter */
-	int ref;
-	/* parent bfq_data */
-	struct bfq_data *bfqd;
-
-	/* current ioprio and ioprio class */
-	unsigned short ioprio, ioprio_class;
-	/* next ioprio and ioprio class if a change is in progress */
-	unsigned short new_ioprio, new_ioprio_class;
-
-	/*
-	 * Shared bfq_queue if queue is cooperating with one or more
-	 * other queues.
-	 */
-	struct bfq_queue *new_bfqq;
-	/* request-position tree member (see bfq_group's @rq_pos_tree) */
-	struct rb_node pos_node;
-	/* request-position tree root (see bfq_group's @rq_pos_tree) */
-	struct rb_root *pos_root;
-
-	/* sorted list of pending requests */
-	struct rb_root sort_list;
-	/* if fifo isn't expired, next request to serve */
-	struct request *next_rq;
-	/* number of sync and async requests queued */
-	int queued[2];
-	/* number of requests currently allocated */
-	int allocated;
-	/* number of pending metadata requests */
-	int meta_pending;
-	/* fifo list of requests in sort_list */
-	struct list_head fifo;
-
-	/* entity representing this queue in the scheduler */
-	struct bfq_entity entity;
-
-	/* maximum budget allowed from the feedback mechanism */
-	int max_budget;
-	/* budget expiration (in jiffies) */
-	unsigned long budget_timeout;
-
-	/* number of requests on the dispatch list or inside driver */
-	int dispatched;
-
-	/* status flags */
-	unsigned long flags;
-
-	/* node for active/idle bfqq list inside parent bfqd */
-	struct list_head bfqq_list;
-
-	/* associated @bfq_ttime struct */
-	struct bfq_ttime ttime;
-
-	/* bit vector: a 1 for each seeky requests in history */
-	u32 seek_history;
-
-	/* node for the device's burst list */
-	struct hlist_node burst_list_node;
-
-	/* position of the last request enqueued */
-	sector_t last_request_pos;
-
-	/* Number of consecutive pairs of request completion and
-	 * arrival, such that the queue becomes idle after the
-	 * completion, but the next request arrives within an idle
-	 * time slice; used only if the queue's IO_bound flag has been
-	 * cleared.
-	 */
-	unsigned int requests_within_timer;
-
-	/* pid of the process owning the queue, used for logging purposes */
-	pid_t pid;
-
-	/*
-	 * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
-	 * if the queue is shared.
-	 */
-	struct bfq_io_cq *bic;
-
-	/* current maximum weight-raising time for this queue */
-	unsigned long wr_cur_max_time;
-	/*
-	 * Minimum time instant such that, only if a new request is
-	 * enqueued after this time instant in an idle @bfq_queue with
-	 * no outstanding requests, then the task associated with the
-	 * queue it is deemed as soft real-time (see the comments on
-	 * the function bfq_bfqq_softrt_next_start())
-	 */
-	unsigned long soft_rt_next_start;
-	/*
-	 * Start time of the current weight-raising period if
-	 * the @bfq-queue is being weight-raised, otherwise
-	 * finish time of the last weight-raising period.
-	 */
-	unsigned long last_wr_start_finish;
-	/* factor by which the weight of this queue is multiplied */
-	unsigned int wr_coeff;
-	/*
-	 * Time of the last transition of the @bfq_queue from idle to
-	 * backlogged.
-	 */
-	unsigned long last_idle_bklogged;
-	/*
-	 * Cumulative service received from the @bfq_queue since the
-	 * last transition from idle to backlogged.
-	 */
-	unsigned long service_from_backlogged;
-
-	/*
-	 * Value of wr start time when switching to soft rt
-	 */
-	unsigned long wr_start_at_switch_to_srt;
-
-	unsigned long split_time; /* time of last split */
-};
-
-/**
- * struct bfq_io_cq - per (request_queue, io_context) structure.
- */
-struct bfq_io_cq {
-	/* associated io_cq structure */
-	struct io_cq icq; /* must be the first member */
-	/* array of two process queues, the sync and the async */
-	struct bfq_queue *bfqq[2];
-	/* per (request_queue, blkcg) ioprio */
-	int ioprio;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-	uint64_t blkcg_serial_nr; /* the current blkcg serial */
-#endif
-	/*
-	 * Snapshot of the idle window before merging; taken to
-	 * remember this value while the queue is merged, so as to be
-	 * able to restore it in case of split.
-	 */
-	bool saved_idle_window;
-	/*
-	 * Same purpose as the previous two fields for the I/O bound
-	 * classification of a queue.
-	 */
-	bool saved_IO_bound;
-
-	/*
-	 * Same purpose as the previous fields for the value of the
-	 * field keeping the queue's belonging to a large burst
-	 */
-	bool saved_in_large_burst;
-	/*
-	 * True if the queue belonged to a burst list before its merge
-	 * with another cooperating queue.
-	 */
-	bool was_in_burst_list;
-
-	/*
-	 * Similar to previous fields: save wr information.
-	 */
-	unsigned long saved_wr_coeff;
-	unsigned long saved_last_wr_start_finish;
-	unsigned long saved_wr_start_at_switch_to_srt;
-	unsigned int saved_wr_cur_max_time;
-	struct bfq_ttime saved_ttime;
-};
-
-enum bfq_device_speed {
-	BFQ_BFQD_FAST,
-	BFQ_BFQD_SLOW,
-};
-
-/**
- * struct bfq_data - per-device data structure.
- *
- * All the fields are protected by @lock.
- */
-struct bfq_data {
-	/* device request queue */
-	struct request_queue *queue;
-	/* dispatch queue */
-	struct list_head dispatch;
-
-	/* root bfq_group for the device */
-	struct bfq_group *root_group;
-
-	/*
-	 * rbtree of weight counters of @bfq_queues, sorted by
-	 * weight. Used to keep track of whether all @bfq_queues have
-	 * the same weight. The tree contains one counter for each
-	 * distinct weight associated to some active and not
-	 * weight-raised @bfq_queue (see the comments to the functions
-	 * bfq_weights_tree_[add|remove] for further details).
-	 */
-	struct rb_root queue_weights_tree;
-	/*
-	 * rbtree of non-queue @bfq_entity weight counters, sorted by
-	 * weight. Used to keep track of whether all @bfq_groups have
-	 * the same weight. The tree contains one counter for each
-	 * distinct weight associated to some active @bfq_group (see
-	 * the comments to the functions bfq_weights_tree_[add|remove]
-	 * for further details).
-	 */
-	struct rb_root group_weights_tree;
-
-	/*
-	 * Number of bfq_queues containing requests (including the
-	 * queue in service, even if it is idling).
-	 */
-	int busy_queues;
-	/* number of weight-raised busy @bfq_queues */
-	int wr_busy_queues;
-	/* number of queued requests */
-	int queued;
-	/* number of requests dispatched and waiting for completion */
-	int rq_in_driver;
-
-	/*
-	 * Maximum number of requests in driver in the last
-	 * @hw_tag_samples completed requests.
-	 */
-	int max_rq_in_driver;
-	/* number of samples used to calculate hw_tag */
-	int hw_tag_samples;
-	/* flag set to one if the driver is showing a queueing behavior */
-	int hw_tag;
-
-	/* number of budgets assigned */
-	int budgets_assigned;
-
-	/*
-	 * Timer set when idling (waiting) for the next request from
-	 * the queue in service.
-	 */
-	struct hrtimer idle_slice_timer;
-
-	/* bfq_queue in service */
-	struct bfq_queue *in_service_queue;
-
-	/* on-disk position of the last served request */
-	sector_t last_position;
-
-	/* time of last request completion (ns) */
-	u64 last_completion;
-
-	/* time of first rq dispatch in current observation interval (ns) */
-	u64 first_dispatch;
-	/* time of last rq dispatch in current observation interval (ns) */
-	u64 last_dispatch;
-
-	/* beginning of the last budget */
-	ktime_t last_budget_start;
-	/* beginning of the last idle slice */
-	ktime_t last_idling_start;
-
-	/* number of samples in current observation interval */
-	int peak_rate_samples;
-	/* num of samples of seq dispatches in current observation interval */
-	u32 sequential_samples;
-	/* total num of sectors transferred in current observation interval */
-	u64 tot_sectors_dispatched;
-	/* max rq size seen during current observation interval (sectors) */
-	u32 last_rq_max_size;
-	/* time elapsed from first dispatch in current observ. interval (us) */
-	u64 delta_from_first;
-	/*
-	 * Current estimate of the device peak rate, measured in
-	 * [BFQ_RATE_SHIFT * sectors/usec]. The left-shift by
-	 * BFQ_RATE_SHIFT is performed to increase precision in
-	 * fixed-point calculations.
-	 */
-	u32 peak_rate;
-
-	/* maximum budget allotted to a bfq_queue before rescheduling */
-	int bfq_max_budget;
-
-	/* list of all the bfq_queues active on the device */
-	struct list_head active_list;
-	/* list of all the bfq_queues idle on the device */
-	struct list_head idle_list;
-
-	/*
-	 * Timeout for async/sync requests; when it fires, requests
-	 * are served in fifo order.
-	 */
-	u64 bfq_fifo_expire[2];
-	/* weight of backward seeks wrt forward ones */
-	unsigned int bfq_back_penalty;
-	/* maximum allowed backward seek */
-	unsigned int bfq_back_max;
-	/* maximum idling time */
-	u32 bfq_slice_idle;
-
-	/* user-configured max budget value (0 for auto-tuning) */
-	int bfq_user_max_budget;
-	/*
-	 * Timeout for bfq_queues to consume their budget; used to
-	 * prevent seeky queues from imposing long latencies to
-	 * sequential or quasi-sequential ones (this also implies that
-	 * seeky queues cannot receive guarantees in the service
-	 * domain; after a timeout they are charged for the time they
-	 * have been in service, to preserve fairness among them, but
-	 * without service-domain guarantees).
-	 */
-	unsigned int bfq_timeout;
-
-	/*
-	 * Number of consecutive requests that must be issued within
-	 * the idle time slice to set again idling to a queue which
-	 * was marked as non-I/O-bound (see the definition of the
-	 * IO_bound flag for further details).
-	 */
-	unsigned int bfq_requests_within_timer;
-
-	/*
-	 * Force device idling whenever needed to provide accurate
-	 * service guarantees, without caring about throughput
-	 * issues. CAVEAT: this may even increase latencies, in case
-	 * of useless idling for processes that did stop doing I/O.
-	 */
-	bool strict_guarantees;
-
-	/*
-	 * Last time at which a queue entered the current burst of
-	 * queues being activated shortly after each other; for more
-	 * details about this and the following parameters related to
-	 * a burst of activations, see the comments on the function
-	 * bfq_handle_burst.
-	 */
-	unsigned long last_ins_in_burst;
-	/*
-	 * Reference time interval used to decide whether a queue has
-	 * been activated shortly after @last_ins_in_burst.
-	 */
-	unsigned long bfq_burst_interval;
-	/* number of queues in the current burst of queue activations */
-	int burst_size;
-
-	/* common parent entity for the queues in the burst */
-	struct bfq_entity *burst_parent_entity;
-	/* Maximum burst size above which the current queue-activation
-	 * burst is deemed as 'large'.
-	 */
-	unsigned long bfq_large_burst_thresh;
-	/* true if a large queue-activation burst is in progress */
-	bool large_burst;
-	/*
-	 * Head of the burst list (as for the above fields, more
-	 * details in the comments on the function bfq_handle_burst).
-	 */
-	struct hlist_head burst_list;
-
-	/* if set to true, low-latency heuristics are enabled */
-	bool low_latency;
-	/*
-	 * Maximum factor by which the weight of a weight-raised queue
-	 * is multiplied.
-	 */
-	unsigned int bfq_wr_coeff;
-	/* maximum duration of a weight-raising period (jiffies) */
-	unsigned int bfq_wr_max_time;
-
-	/* Maximum weight-raising duration for soft real-time processes */
-	unsigned int bfq_wr_rt_max_time;
-	/*
-	 * Minimum idle period after which weight-raising may be
-	 * reactivated for a queue (in jiffies).
-	 */
-	unsigned int bfq_wr_min_idle_time;
-	/*
-	 * Minimum period between request arrivals after which
-	 * weight-raising may be reactivated for an already busy async
-	 * queue (in jiffies).
-	 */
-	unsigned long bfq_wr_min_inter_arr_async;
-
-	/* Max service-rate for a soft real-time queue, in sectors/sec */
-	unsigned int bfq_wr_max_softrt_rate;
-	/*
-	 * Cached value of the product R*T, used for computing the
-	 * maximum duration of weight raising automatically.
-	 */
-	u64 RT_prod;
-	/* device-speed class for the low-latency heuristic */
-	enum bfq_device_speed device_speed;
-
-	/* fallback dummy bfqq for extreme OOM conditions */
-	struct bfq_queue oom_bfqq;
-
-	spinlock_t lock;
-
-	/*
-	 * bic associated with the task issuing current bio for
-	 * merging. This and the next field are used as a support to
-	 * be able to perform the bic lookup, needed by bio-merge
-	 * functions, before the scheduler lock is taken, and thus
-	 * avoid taking the request-queue lock while the scheduler
-	 * lock is being held.
-	 */
-	struct bfq_io_cq *bio_bic;
-	/* bfqq associated with the task issuing current bio for merging */
-	struct bfq_queue *bio_bfqq;
-};
-
-enum bfqq_state_flags {
-	BFQQF_just_created = 0,	/* queue just allocated */
-	BFQQF_busy,		/* has requests or is in service */
-	BFQQF_wait_request,	/* waiting for a request */
-	BFQQF_non_blocking_wait_rq, /*
-				     * waiting for a request
-				     * without idling the device
-				     */
-	BFQQF_fifo_expire,	/* FIFO checked in this slice */
-	BFQQF_idle_window,	/* slice idling enabled */
-	BFQQF_sync,		/* synchronous queue */
-	BFQQF_IO_bound,		/*
-				 * bfqq has timed-out at least once
-				 * having consumed at most 2/10 of
-				 * its budget
-				 */
-	BFQQF_in_large_burst,	/*
-				 * bfqq activated in a large burst,
-				 * see comments to bfq_handle_burst.
-				 */
-	BFQQF_softrt_update,	/*
-				 * may need softrt-next-start
-				 * update
-				 */
-	BFQQF_coop,		/* bfqq is shared */
-	BFQQF_split_coop	/* shared bfqq will be split */
-};
+#include "bfq-iosched.h"
 
 #define BFQ_BFQQ_FNS(name) \
-static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
+void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
 { \
 	__set_bit(BFQQF_##name, &(bfqq)->flags); \
 } \
-static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
+void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
 { \
 	__clear_bit(BFQQF_##name, &(bfqq)->flags); \
 } \
-static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
+int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
 { \
 	return test_bit(BFQQF_##name, &(bfqq)->flags); \
 }
@@ -758,230 +130,7 @@ BFQ_BFQQ_FNS(in_large_burst);
 BFQ_BFQQ_FNS(coop);
 BFQ_BFQQ_FNS(split_coop);
 BFQ_BFQQ_FNS(softrt_update);
-#undef BFQ_BFQQ_FNS
-
-/* Logging facilities. */
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
-static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
-
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
-	char __pbuf[128]; \
-	\
-	blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
-	blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
-			  bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
-			  __pbuf, ##args); \
-} while (0)
-
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
-	char __pbuf[128]; \
-	\
-	blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
-	blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
-} while (0)
-
-#else /* CONFIG_BFQ_GROUP_IOSCHED */
-
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
-	blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
-			  bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
-			  ##args)
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
-
-#endif /* CONFIG_BFQ_GROUP_IOSCHED */
-
-#define bfq_log(bfqd, fmt, args...) \
-	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
-
-/* Expiration reasons. */
-enum bfqq_expiration {
-	BFQQE_TOO_IDLE = 0,	/*
-				 * queue has been idling for
-				 * too long
-				 */
-	BFQQE_BUDGET_TIMEOUT,	/* budget took too long to be used */
-	BFQQE_BUDGET_EXHAUSTED,	/* budget consumed */
-	BFQQE_NO_MORE_REQUESTS,	/* the queue has no more requests */
-	BFQQE_PREEMPTED		/* preemption in progress */
-};
-
-struct bfqg_stats {
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-	/* number of ios merged */
-	struct blkg_rwstat merged;
-	/* total time spent on device in ns, may not be accurate w/ queueing */
-	struct blkg_rwstat service_time;
-	/* total time spent waiting in scheduler queue in ns */
-	struct blkg_rwstat wait_time;
-	/* number of IOs queued up */
-	struct blkg_rwstat queued;
-	/* total disk time and nr sectors dispatched by this group */
-	struct blkg_stat time;
-	/* sum of number of ios queued across all samples */
-	struct blkg_stat avg_queue_size_sum;
-	/* count of samples taken for average */
-	struct blkg_stat avg_queue_size_samples;
-	/* how many times this group has been removed from service tree */
-	struct blkg_stat dequeue;
-	/* total time spent waiting for it to be assigned a timeslice. */
-	struct blkg_stat group_wait_time;
-	/* time spent idling for this blkcg_gq */
-	struct blkg_stat idle_time;
-	/* total time with empty current active q with other requests queued */
-	struct blkg_stat empty_time;
-	/* fields after this shouldn't be cleared on stat reset */
-	uint64_t start_group_wait_time;
-	uint64_t start_idle_time;
-	uint64_t start_empty_time;
-	uint16_t flags;
-#endif /* CONFIG_BFQ_GROUP_IOSCHED */
-};
-
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-
-/*
- * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
- *
- * @ps: @blkcg_policy_storage that this structure inherits
- * @weight: weight of the bfq_group
- */
-struct bfq_group_data {
-	/* must be the first member */
-	struct blkcg_policy_data pd;
-
-	unsigned int weight;
-};
-
-/**
- * struct bfq_group - per (device, cgroup) data structure.
- * @entity: schedulable entity to insert into the parent group sched_data.
- * @sched_data: own sched_data, to contain child entities (they may be
- *              both bfq_queues and bfq_groups).
- * @bfqd: the bfq_data for the device this group acts upon.
- * @async_bfqq: array of async queues for all the tasks belonging to
- *              the group, one queue per ioprio value per ioprio_class,
- *              except for the idle class that has only one queue.
- * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
- * @my_entity: pointer to @entity, %NULL for the toplevel group; used
- *             to avoid too many special cases during group creation/
- *             migration.
- * @stats: stats for this bfqg.
- * @active_entities: number of active entities belonging to the group;
- *                   unused for the root group. Used to know whether there
- *                   are groups with more than one active @bfq_entity
- *                   (see the comments to the function
- *                   bfq_bfqq_may_idle()).
- * @rq_pos_tree: rbtree sorted by next_request position, used when
- *               determining if two or more queues have interleaving
- *               requests (see bfq_find_close_cooperator()).
- *
- * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
- * there is a set of bfq_groups, each one collecting the lower-level
- * entities belonging to the group that are acting on the same device.
- *
- * Locking works as follows:
- * o @bfqd is protected by the queue lock, RCU is used to access it
- *   from the readers.
- * o All the other fields are protected by the @bfqd queue lock.
- */
-struct bfq_group {
-	/* must be the first member */
-	struct blkg_policy_data pd;
-
-	struct bfq_entity entity;
-	struct bfq_sched_data sched_data;
-
-	void *bfqd;
-
-	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
-	struct bfq_queue *async_idle_bfqq;
-
-	struct bfq_entity *my_entity;
-
-	int active_entities;
-
-	struct rb_root rq_pos_tree;
-
-	struct bfqg_stats stats;
-};
-
-#else
-struct bfq_group {
-	struct bfq_sched_data sched_data;
-
-	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
-	struct bfq_queue *async_idle_bfqq;
-
-	struct rb_root rq_pos_tree;
-};
-#endif
-
-static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
-
-static unsigned int bfq_class_idx(struct bfq_entity *entity)
-{
-	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-	return bfqq ? bfqq->ioprio_class - 1 :
-		BFQ_DEFAULT_GRP_CLASS - 1;
-}
-
-static struct bfq_service_tree *
-bfq_entity_service_tree(struct bfq_entity *entity)
-{
-	struct bfq_sched_data *sched_data = entity->sched_data;
-	unsigned int idx = bfq_class_idx(entity);
-
-	return sched_data->service_tree + idx;
-}
-
-static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
-{
-	return bic->bfqq[is_sync];
-}
-
-static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
-			 bool is_sync)
-{
-	bic->bfqq[is_sync] = bfqq;
-}
-
-static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
-{
-	return bic->icq.q->elevator->elevator_data;
-}
-
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-
-static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
-{
-	struct bfq_entity *group_entity = bfqq->entity.parent;
-
-	if (!group_entity)
-		group_entity = &bfqq->bfqd->root_group->entity;
-
-	return container_of(group_entity, struct bfq_group, entity);
-}
-
-#else
-
-static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
-{
-	return bfqq->bfqd->root_group;
-}
-
-#endif
-
-static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
-static void bfq_put_queue(struct bfq_queue *bfqq);
-static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
-				       struct bio *bio, bool is_sync,
-				       struct bfq_io_cq *bic);
-static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
-				    struct bfq_group *bfqg);
-static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
-static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+#undef BFQ_BFQQ_FNS \
 
 /* Expiration time of sync (0) and async (1) requests, in ns. */
 static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
@@ -1009,7 +158,7 @@ static const int bfq_default_max_budget = 16 * 1024;
 static const int bfq_async_charge_factor = 10;
 
 /* Default timeout values, in jiffies, approximating CFQ defaults. */
-static const int bfq_timeout = HZ / 8;
+const int bfq_timeout = HZ / 8;
 
 static struct kmem_cache *bfq_pool;
 
@@ -1085,12 +234,24 @@ static int T_slow[2];
 static int T_fast[2];
 static int device_speed_thresh[2];
 
-#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree) \
-				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
-
 #define RQ_BIC(rq)	((struct bfq_io_cq *) (rq)->elv.priv[0])
 #define RQ_BFQQ(rq)	((rq)->elv.priv[1])
 
+struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
+{
+	return bic->bfqq[is_sync];
+}
+
+void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
+{
+	bic->bfqq[is_sync] = bfqq;
+}
+
+struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
+{
+	return bic->icq.q->elevator->elevator_data;
+}
+
 /**
  * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
  * @icq: the iocontext queue.
@@ -1129,7 +290,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
  * Scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing.
  */
-static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+void bfq_schedule_dispatch(struct bfq_data *bfqd)
 {
 	if (bfqd->queued != 0) {
 		bfq_log(bfqd, "schedule dispatch");
@@ -1137,2731 +298,6 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd) | |||
1137 | } | 298 | } |
1138 | } | 299 | } |
1139 | 300 | ||
1140 | /** | ||
1141 | * bfq_gt - compare two timestamps. | ||
1142 | * @a: first ts. | ||
1143 | * @b: second ts. | ||
1144 | * | ||
1145 | * Return @a > @b, dealing with wrapping correctly. | ||
1146 | */ | ||
1147 | static int bfq_gt(u64 a, u64 b) | ||
1148 | { | ||
1149 | return (s64)(a - b) > 0; | ||
1150 | } | ||
1151 | |||
1152 | static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree) | ||
1153 | { | ||
1154 | struct rb_node *node = tree->rb_node; | ||
1155 | |||
1156 | return rb_entry(node, struct bfq_entity, rb_node); | ||
1157 | } | ||
1158 | |||
1159 | static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd); | ||
1160 | |||
1161 | static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); | ||
1162 | |||
1163 | /** | ||
1164 | * bfq_update_next_in_service - update sd->next_in_service | ||
1165 | * @sd: sched_data for which to perform the update. | ||
1166 | * @new_entity: if not NULL, pointer to the entity whose activation, | ||
1167 | * requeueing or repositionig triggered the invocation of | ||
1168 | * this function. | ||
1169 | * | ||
1170 | * This function is called to update sd->next_in_service, which, in | ||
1171 | * its turn, may change as a consequence of the insertion or | ||
1172 | * extraction of an entity into/from one of the active trees of | ||
1173 | * sd. These insertions/extractions occur as a consequence of | ||
1174 | * activations/deactivations of entities, with some activations being | ||
1175 | * 'true' activations, and other activations being requeueings (i.e., | ||
1176 | * implementing the second, requeueing phase of the mechanism used to | ||
1177 | * reposition an entity in its active tree; see comments on | ||
1178 | * __bfq_activate_entity and __bfq_requeue_entity for details). In | ||
1179 | * both the last two activation sub-cases, new_entity points to the | ||
1180 | * just activated or requeued entity. | ||
1181 | * | ||
1182 | * Returns true if sd->next_in_service changes in such a way that | ||
1183 | * entity->parent may become the next_in_service for its parent | ||
1184 | * entity. | ||
1185 | */ | ||
1186 | static bool bfq_update_next_in_service(struct bfq_sched_data *sd, | ||
1187 | struct bfq_entity *new_entity) | ||
1188 | { | ||
1189 | struct bfq_entity *next_in_service = sd->next_in_service; | ||
1190 | bool parent_sched_may_change = false; | ||
1191 | |||
1192 | /* | ||
1193 | * If this update is triggered by the activation, requeueing | ||
1194 | * or repositiong of an entity that does not coincide with | ||
1195 | * sd->next_in_service, then a full lookup in the active tree | ||
1196 | * can be avoided. In fact, it is enough to check whether the | ||
1197 | * just-modified entity has a higher priority than | ||
1198 | * sd->next_in_service, or, even if it has the same priority | ||
1199 | * as sd->next_in_service, is eligible and has a lower virtual | ||
1200 | * finish time than sd->next_in_service. If this compound | ||
1201 | * condition holds, then the new entity becomes the new | ||
1202 | * next_in_service. Otherwise no change is needed. | ||
1203 | */ | ||
1204 | if (new_entity && new_entity != sd->next_in_service) { | ||
1205 | /* | ||
1206 | * Flag used to decide whether to replace | ||
1207 | * sd->next_in_service with new_entity. Tentatively | ||
1208 | * set to true, and left as true if | ||
1209 | * sd->next_in_service is NULL. | ||
1210 | */ | ||
1211 | bool replace_next = true; | ||
1212 | |||
1213 | /* | ||
1214 | * If there is already a next_in_service candidate | ||
1215 | * entity, then compare class priorities or timestamps | ||
1216 | * to decide whether to replace sd->service_tree with | ||
1217 | * new_entity. | ||
1218 | */ | ||
1219 | if (next_in_service) { | ||
1220 | unsigned int new_entity_class_idx = | ||
1221 | bfq_class_idx(new_entity); | ||
1222 | struct bfq_service_tree *st = | ||
1223 | sd->service_tree + new_entity_class_idx; | ||
1224 | |||
1225 | /* | ||
1226 | * For efficiency, evaluate the most likely | ||
1227 | * sub-condition first. | ||
1228 | */ | ||
1229 | replace_next = | ||
1230 | (new_entity_class_idx == | ||
1231 | bfq_class_idx(next_in_service) | ||
1232 | && | ||
1233 | !bfq_gt(new_entity->start, st->vtime) | ||
1234 | && | ||
1235 | bfq_gt(next_in_service->finish, | ||
1236 | new_entity->finish)) | ||
1237 | || | ||
1238 | new_entity_class_idx < | ||
1239 | bfq_class_idx(next_in_service); | ||
1240 | } | ||
1241 | |||
1242 | if (replace_next) | ||
1243 | next_in_service = new_entity; | ||
1244 | } else /* invoked because of a deactivation: lookup needed */ | ||
1245 | next_in_service = bfq_lookup_next_entity(sd); | ||
1246 | |||
1247 | if (next_in_service) { | ||
1248 | parent_sched_may_change = !sd->next_in_service || | ||
1249 | bfq_update_parent_budget(next_in_service); | ||
1250 | } | ||
1251 | |||
1252 | sd->next_in_service = next_in_service; | ||
1253 | |||
1254 | if (!next_in_service) | ||
1255 | return parent_sched_may_change; | ||
1256 | |||
1257 | return parent_sched_may_change; | ||
1258 | } | ||
1259 | |||
1260 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1261 | /* both next loops stop at one of the child entities of the root group */ | ||
1262 | #define for_each_entity(entity) \ | ||
1263 | for (; entity ; entity = entity->parent) | ||
1264 | |||
1265 | /* | ||
1266 | * For each iteration, compute parent in advance, so as to be safe if | ||
1267 | * entity is deallocated during the iteration. Such a deallocation may | ||
1268 | * happen as a consequence of a bfq_put_queue that frees the bfq_queue | ||
1269 | * containing entity. | ||
1270 | */ | ||
1271 | #define for_each_entity_safe(entity, parent) \ | ||
1272 | for (; entity && ({ parent = entity->parent; 1; }); entity = parent) | ||
1273 | |||
1274 | /* | ||
1275 | * Returns true if this budget changes may let next_in_service->parent | ||
1276 | * become the next_in_service entity for its parent entity. | ||
1277 | */ | ||
1278 | static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) | ||
1279 | { | ||
1280 | struct bfq_entity *bfqg_entity; | ||
1281 | struct bfq_group *bfqg; | ||
1282 | struct bfq_sched_data *group_sd; | ||
1283 | bool ret = false; | ||
1284 | |||
1285 | group_sd = next_in_service->sched_data; | ||
1286 | |||
1287 | bfqg = container_of(group_sd, struct bfq_group, sched_data); | ||
1288 | /* | ||
1289 | * bfq_group's my_entity field is not NULL only if the group | ||
1290 | * is not the root group. We must not touch the root entity | ||
1291 | * as it must never become an in-service entity. | ||
1292 | */ | ||
1293 | bfqg_entity = bfqg->my_entity; | ||
1294 | if (bfqg_entity) { | ||
1295 | if (bfqg_entity->budget > next_in_service->budget) | ||
1296 | ret = true; | ||
1297 | bfqg_entity->budget = next_in_service->budget; | ||
1298 | } | ||
1299 | |||
1300 | return ret; | ||
1301 | } | ||
1302 | |||
1303 | /* | ||
1304 | * This function tells whether entity stops being a candidate for next | ||
1305 | * service, according to the following logic. | ||
1306 | * | ||
1307 | * This function is invoked for an entity that is about to be set in | ||
1308 | * service. If such an entity is a queue, then the entity is no longer | ||
1309 | * a candidate for next service (i.e, a candidate entity to serve | ||
1310 | * after the in-service entity is expired). The function then returns | ||
1311 | * true. | ||
1312 | * | ||
1313 | * In contrast, the entity could stil be a candidate for next service | ||
1314 | * if it is not a queue, and has more than one child. In fact, even if | ||
1315 | * one of its children is about to be set in service, other children | ||
1316 | * may still be the next to serve. As a consequence, a non-queue | ||
1317 | * entity is not a candidate for next-service only if it has only one | ||
1318 | * child. And only if this condition holds, then the function returns | ||
1319 | * true for a non-queue entity. | ||
1320 | */ | ||
1321 | static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) | ||
1322 | { | ||
1323 | struct bfq_group *bfqg; | ||
1324 | |||
1325 | if (bfq_entity_to_bfqq(entity)) | ||
1326 | return true; | ||
1327 | |||
1328 | bfqg = container_of(entity, struct bfq_group, entity); | ||
1329 | |||
1330 | if (bfqg->active_entities == 1) | ||
1331 | return true; | ||
1332 | |||
1333 | return false; | ||
1334 | } | ||
1335 | |||
1336 | #else /* CONFIG_BFQ_GROUP_IOSCHED */ | ||
1337 | /* | ||
1338 | * Next two macros are fake loops when cgroups support is not | ||
1339 | * enabled. I fact, in such a case, there is only one level to go up | ||
1340 | * (to reach the root group). | ||
1341 | */ | ||
1342 | #define for_each_entity(entity) \ | ||
1343 | for (; entity ; entity = NULL) | ||
1344 | |||
1345 | #define for_each_entity_safe(entity, parent) \ | ||
1346 | for (parent = NULL; entity ; entity = parent) | ||
1347 | |||
1348 | static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) | ||
1349 | { | ||
1350 | return false; | ||
1351 | } | ||
1352 | |||
1353 | static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) | ||
1354 | { | ||
1355 | return true; | ||
1356 | } | ||
1357 | |||
1358 | #endif /* CONFIG_BFQ_GROUP_IOSCHED */ | ||
1359 | |||
1360 | /* | ||
1361 | * Shift for timestamp calculations. This actually limits the maximum | ||
1362 | * service allowed in one timestamp delta (small shift values increase it), | ||
1363 | * the maximum total weight that can be used for the queues in the system | ||
1364 | * (big shift values increase it), and the period of virtual time | ||
1365 | * wraparounds. | ||
1366 | */ | ||
1367 | #define WFQ_SERVICE_SHIFT 22 | ||
1368 | |||
1369 | static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) | ||
1370 | { | ||
1371 | struct bfq_queue *bfqq = NULL; | ||
1372 | |||
1373 | if (!entity->my_sched_data) | ||
1374 | bfqq = container_of(entity, struct bfq_queue, entity); | ||
1375 | |||
1376 | return bfqq; | ||
1377 | } | ||
1378 | |||
1379 | |||
1380 | /** | ||
1381 | * bfq_delta - map service into the virtual time domain. | ||
1382 | * @service: amount of service. | ||
1383 | * @weight: scale factor (weight of an entity or weight sum). | ||
1384 | */ | ||
1385 | static u64 bfq_delta(unsigned long service, unsigned long weight) | ||
1386 | { | ||
1387 | u64 d = (u64)service << WFQ_SERVICE_SHIFT; | ||
1388 | |||
1389 | do_div(d, weight); | ||
1390 | return d; | ||
1391 | } | ||
1392 | |||
1393 | /** | ||
1394 | * bfq_calc_finish - assign the finish time to an entity. | ||
1395 | * @entity: the entity to act upon. | ||
1396 | * @service: the service to be charged to the entity. | ||
1397 | */ | ||
1398 | static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) | ||
1399 | { | ||
1400 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1401 | |||
1402 | entity->finish = entity->start + | ||
1403 | bfq_delta(service, entity->weight); | ||
1404 | |||
1405 | if (bfqq) { | ||
1406 | bfq_log_bfqq(bfqq->bfqd, bfqq, | ||
1407 | "calc_finish: serv %lu, w %d", | ||
1408 | service, entity->weight); | ||
1409 | bfq_log_bfqq(bfqq->bfqd, bfqq, | ||
1410 | "calc_finish: start %llu, finish %llu, delta %llu", | ||
1411 | entity->start, entity->finish, | ||
1412 | bfq_delta(service, entity->weight)); | ||
1413 | } | ||
1414 | } | ||
1415 | |||
1416 | /** | ||
1417 | * bfq_entity_of - get an entity from a node. | ||
1418 | * @node: the node field of the entity. | ||
1419 | * | ||
1420 | * Convert a node pointer to the relative entity. This is used only | ||
1421 | * to simplify the logic of some functions and not as the generic | ||
1422 | * conversion mechanism because, e.g., in the tree walking functions, | ||
1423 | * the check for a %NULL value would be redundant. | ||
1424 | */ | ||
1425 | static struct bfq_entity *bfq_entity_of(struct rb_node *node) | ||
1426 | { | ||
1427 | struct bfq_entity *entity = NULL; | ||
1428 | |||
1429 | if (node) | ||
1430 | entity = rb_entry(node, struct bfq_entity, rb_node); | ||
1431 | |||
1432 | return entity; | ||
1433 | } | ||
1434 | |||
1435 | /** | ||
1436 | * bfq_extract - remove an entity from a tree. | ||
1437 | * @root: the tree root. | ||
1438 | * @entity: the entity to remove. | ||
1439 | */ | ||
1440 | static void bfq_extract(struct rb_root *root, struct bfq_entity *entity) | ||
1441 | { | ||
1442 | entity->tree = NULL; | ||
1443 | rb_erase(&entity->rb_node, root); | ||
1444 | } | ||
1445 | |||
1446 | /** | ||
1447 | * bfq_idle_extract - extract an entity from the idle tree. | ||
1448 | * @st: the service tree of the owning @entity. | ||
1449 | * @entity: the entity being removed. | ||
1450 | */ | ||
1451 | static void bfq_idle_extract(struct bfq_service_tree *st, | ||
1452 | struct bfq_entity *entity) | ||
1453 | { | ||
1454 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1455 | struct rb_node *next; | ||
1456 | |||
1457 | if (entity == st->first_idle) { | ||
1458 | next = rb_next(&entity->rb_node); | ||
1459 | st->first_idle = bfq_entity_of(next); | ||
1460 | } | ||
1461 | |||
1462 | if (entity == st->last_idle) { | ||
1463 | next = rb_prev(&entity->rb_node); | ||
1464 | st->last_idle = bfq_entity_of(next); | ||
1465 | } | ||
1466 | |||
1467 | bfq_extract(&st->idle, entity); | ||
1468 | |||
1469 | if (bfqq) | ||
1470 | list_del(&bfqq->bfqq_list); | ||
1471 | } | ||
1472 | |||
1473 | /** | ||
1474 | * bfq_insert - generic tree insertion. | ||
1475 | * @root: tree root. | ||
1476 | * @entity: entity to insert. | ||
1477 | * | ||
1478 | * This is used for the idle and the active tree, since they are both | ||
1479 | * ordered by finish time. | ||
1480 | */ | ||
1481 | static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) | ||
1482 | { | ||
1483 | struct bfq_entity *entry; | ||
1484 | struct rb_node **node = &root->rb_node; | ||
1485 | struct rb_node *parent = NULL; | ||
1486 | |||
1487 | while (*node) { | ||
1488 | parent = *node; | ||
1489 | entry = rb_entry(parent, struct bfq_entity, rb_node); | ||
1490 | |||
1491 | if (bfq_gt(entry->finish, entity->finish)) | ||
1492 | node = &parent->rb_left; | ||
1493 | else | ||
1494 | node = &parent->rb_right; | ||
1495 | } | ||
1496 | |||
1497 | rb_link_node(&entity->rb_node, parent, node); | ||
1498 | rb_insert_color(&entity->rb_node, root); | ||
1499 | |||
1500 | entity->tree = root; | ||
1501 | } | ||
1502 | |||
1503 | /** | ||
1504 | * bfq_update_min - update the min_start field of a entity. | ||
1505 | * @entity: the entity to update. | ||
1506 | * @node: one of its children. | ||
1507 | * | ||
1508 | * This function is called when @entity may store an invalid value for | ||
1509 | * min_start due to updates to the active tree. The function assumes | ||
1510 | * that the subtree rooted at @node (which may be its left or its right | ||
1511 | * child) has a valid min_start value. | ||
1512 | */ | ||
1513 | static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) | ||
1514 | { | ||
1515 | struct bfq_entity *child; | ||
1516 | |||
1517 | if (node) { | ||
1518 | child = rb_entry(node, struct bfq_entity, rb_node); | ||
1519 | if (bfq_gt(entity->min_start, child->min_start)) | ||
1520 | entity->min_start = child->min_start; | ||
1521 | } | ||
1522 | } | ||
1523 | |||
1524 | /** | ||
1525 | * bfq_update_active_node - recalculate min_start. | ||
1526 | * @node: the node to update. | ||
1527 | * | ||
1528 | * @node may have changed position or one of its children may have moved, | ||
1529 | * this function updates its min_start value. The left and right subtrees | ||
1530 | * are assumed to hold a correct min_start value. | ||
1531 | */ | ||
1532 | static void bfq_update_active_node(struct rb_node *node) | ||
1533 | { | ||
1534 | struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); | ||
1535 | |||
1536 | entity->min_start = entity->start; | ||
1537 | bfq_update_min(entity, node->rb_right); | ||
1538 | bfq_update_min(entity, node->rb_left); | ||
1539 | } | ||
1540 | |||
1541 | /** | ||
1542 | * bfq_update_active_tree - update min_start for the whole active tree. | ||
1543 | * @node: the starting node. | ||
1544 | * | ||
1545 | * @node must be the deepest modified node after an update. This function | ||
1546 | * updates its min_start using the values held by its children, assuming | ||
1547 | * that they did not change, and then updates all the nodes that may have | ||
1548 | * changed in the path to the root. The only nodes that may have changed | ||
1549 | * are the ones in the path or their siblings. | ||
1550 | */ | ||
1551 | static void bfq_update_active_tree(struct rb_node *node) | ||
1552 | { | ||
1553 | struct rb_node *parent; | ||
1554 | |||
1555 | up: | ||
1556 | bfq_update_active_node(node); | ||
1557 | |||
1558 | parent = rb_parent(node); | ||
1559 | if (!parent) | ||
1560 | return; | ||
1561 | |||
1562 | if (node == parent->rb_left && parent->rb_right) | ||
1563 | bfq_update_active_node(parent->rb_right); | ||
1564 | else if (parent->rb_left) | ||
1565 | bfq_update_active_node(parent->rb_left); | ||
1566 | |||
1567 | node = parent; | ||
1568 | goto up; | ||
1569 | } | ||
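The shape of this bottom-up repair is easier to see without the rb-tree machinery. Below is a minimal user-space sketch of the same walk, assuming explicit parent pointers and invented toy_* names: fix min_start at the deepest changed node, refresh the sibling at each level, and continue to the root.

#include <stdio.h>

/* Toy entity augmented with min_start = minimum start time in its subtree. */
struct toy_entity {
	unsigned long long start, min_start;
	struct toy_entity *left, *right, *parent;
};

static void toy_update_node(struct toy_entity *e)
{
	e->min_start = e->start;
	if (e->left && e->left->min_start < e->min_start)
		e->min_start = e->left->min_start;
	if (e->right && e->right->min_start < e->min_start)
		e->min_start = e->right->min_start;
}

/* Mirror of bfq_update_active_tree(): repair the deepest changed node,
 * refresh its sibling, then repeat at the parent, up to the root. */
static void toy_update_tree(struct toy_entity *e)
{
	for (; e; e = e->parent) {
		toy_update_node(e);
		if (e->parent) {
			if (e == e->parent->left && e->parent->right)
				toy_update_node(e->parent->right);
			else if (e->parent->left)
				toy_update_node(e->parent->left);
		}
	}
}

int main(void)
{
	struct toy_entity leaf = { .start = 7 };
	struct toy_entity root = { .start = 42, .left = &leaf };

	leaf.parent = &root;
	toy_update_tree(&leaf);
	printf("root.min_start = %llu\n", root.min_start);	/* prints 7 */
	return 0;
}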
1570 | |||
1571 | static void bfq_weights_tree_add(struct bfq_data *bfqd, | ||
1572 | struct bfq_entity *entity, | ||
1573 | struct rb_root *root); | ||
1574 | |||
1575 | static void bfq_weights_tree_remove(struct bfq_data *bfqd, | ||
1576 | struct bfq_entity *entity, | ||
1577 | struct rb_root *root); | ||
1578 | |||
1579 | |||
1580 | /** | ||
1581 | * bfq_active_insert - insert an entity in the active tree of its | ||
1582 | * group/device. | ||
1583 | * @st: the service tree of the entity. | ||
1584 | * @entity: the entity being inserted. | ||
1585 | * | ||
1586 | * The active tree is ordered by finish time, but an extra key is kept | ||
1587 | * in each node, containing the minimum value for the start times of | ||
1588 | * its children (and the node itself), so it's possible to search for | ||
1589 | * the eligible node with the lowest finish time in logarithmic time. | ||
1590 | */ | ||
1591 | static void bfq_active_insert(struct bfq_service_tree *st, | ||
1592 | struct bfq_entity *entity) | ||
1593 | { | ||
1594 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1595 | struct rb_node *node = &entity->rb_node; | ||
1596 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1597 | struct bfq_sched_data *sd = NULL; | ||
1598 | struct bfq_group *bfqg = NULL; | ||
1599 | struct bfq_data *bfqd = NULL; | ||
1600 | #endif | ||
1601 | |||
1602 | bfq_insert(&st->active, entity); | ||
1603 | |||
1604 | if (node->rb_left) | ||
1605 | node = node->rb_left; | ||
1606 | else if (node->rb_right) | ||
1607 | node = node->rb_right; | ||
1608 | |||
1609 | bfq_update_active_tree(node); | ||
1610 | |||
1611 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1612 | sd = entity->sched_data; | ||
1613 | bfqg = container_of(sd, struct bfq_group, sched_data); | ||
1614 | bfqd = (struct bfq_data *)bfqg->bfqd; | ||
1615 | #endif | ||
1616 | if (bfqq) | ||
1617 | list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); | ||
1618 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1619 | else /* bfq_group */ | ||
1620 | bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree); | ||
1621 | |||
1622 | if (bfqg != bfqd->root_group) | ||
1623 | bfqg->active_entities++; | ||
1624 | #endif | ||
1625 | } | ||
1626 | |||
1627 | /** | ||
1628 | * bfq_ioprio_to_weight - calc a weight from an ioprio. | ||
1629 | * @ioprio: the ioprio value to convert. | ||
1630 | */ | ||
1631 | static unsigned short bfq_ioprio_to_weight(int ioprio) | ||
1632 | { | ||
1633 | return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF; | ||
1634 | } | ||
1635 | |||
1636 | /** | ||
1637 | * bfq_weight_to_ioprio - calc an ioprio from a weight. | ||
1638 | * @weight: the weight value to convert. | ||
1639 | * | ||
1640 | * To preserve as much as possible the old only-ioprio user interface, | ||
1641 | * 0 is used as an escape ioprio value for weights (numerically) equal to or | ||
1642 | * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF. | ||
1643 | */ | ||
1644 | static unsigned short bfq_weight_to_ioprio(int weight) | ||
1645 | { | ||
1646 | return max_t(int, 0, | ||
1647 | IOPRIO_BE_NR - weight / BFQ_WEIGHT_CONVERSION_COEFF); | ||
1648 | } | ||
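With the conversion coefficient of 10 used above, and assuming IOPRIO_BE_NR is 8 as in the kernel headers, ioprios 0..7 map to weights 80..10 and back. A small user-space sketch of the round trip; the toy_* helpers merely restate the two functions above:

#include <stdio.h>

#define IOPRIO_BE_NR			8	/* kernel header value */
#define BFQ_WEIGHT_CONVERSION_COEFF	10

static unsigned short toy_ioprio_to_weight(int ioprio)
{
	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}

static unsigned short toy_weight_to_ioprio(int weight)
{
	int ioprio = IOPRIO_BE_NR - weight / BFQ_WEIGHT_CONVERSION_COEFF;

	return ioprio < 0 ? 0 : ioprio;	/* 0 escapes weights >= 80 */
}

int main(void)
{
	for (int prio = 0; prio < IOPRIO_BE_NR; prio++) {
		unsigned short w = toy_ioprio_to_weight(prio);

		/* ioprio 0..7 <-> weight 80..10, and the round trip holds */
		printf("ioprio %d <-> weight %u (back: %u)\n",
		       prio, w, toy_weight_to_ioprio(w));
	}
	return 0;
}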
1649 | |||
1650 | static void bfq_get_entity(struct bfq_entity *entity) | ||
1651 | { | ||
1652 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1653 | |||
1654 | if (bfqq) { | ||
1655 | bfqq->ref++; | ||
1656 | bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", | ||
1657 | bfqq, bfqq->ref); | ||
1658 | } | ||
1659 | } | ||
1660 | |||
1661 | /** | ||
1662 | * bfq_find_deepest - find the deepest node that an extraction can modify. | ||
1663 | * @node: the node being removed. | ||
1664 | * | ||
1665 | * Do the first step of an extraction in an rb tree, looking for the | ||
1666 | * node that will replace @node, and returning the deepest node that | ||
1667 | * the following modifications to the tree can touch. If @node is the | ||
1668 | * last node in the tree, return %NULL. | ||
1669 | */ | ||
1670 | static struct rb_node *bfq_find_deepest(struct rb_node *node) | ||
1671 | { | ||
1672 | struct rb_node *deepest; | ||
1673 | |||
1674 | if (!node->rb_right && !node->rb_left) | ||
1675 | deepest = rb_parent(node); | ||
1676 | else if (!node->rb_right) | ||
1677 | deepest = node->rb_left; | ||
1678 | else if (!node->rb_left) | ||
1679 | deepest = node->rb_right; | ||
1680 | else { | ||
1681 | deepest = rb_next(node); | ||
1682 | if (deepest->rb_right) | ||
1683 | deepest = deepest->rb_right; | ||
1684 | else if (rb_parent(deepest) != node) | ||
1685 | deepest = rb_parent(deepest); | ||
1686 | } | ||
1687 | |||
1688 | return deepest; | ||
1689 | } | ||
1690 | |||
1691 | /** | ||
1692 | * bfq_active_extract - remove an entity from the active tree. | ||
1693 | * @st: the service_tree containing the tree. | ||
1694 | * @entity: the entity being removed. | ||
1695 | */ | ||
1696 | static void bfq_active_extract(struct bfq_service_tree *st, | ||
1697 | struct bfq_entity *entity) | ||
1698 | { | ||
1699 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1700 | struct rb_node *node; | ||
1701 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1702 | struct bfq_sched_data *sd = NULL; | ||
1703 | struct bfq_group *bfqg = NULL; | ||
1704 | struct bfq_data *bfqd = NULL; | ||
1705 | #endif | ||
1706 | |||
1707 | node = bfq_find_deepest(&entity->rb_node); | ||
1708 | bfq_extract(&st->active, entity); | ||
1709 | |||
1710 | if (node) | ||
1711 | bfq_update_active_tree(node); | ||
1712 | |||
1713 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1714 | sd = entity->sched_data; | ||
1715 | bfqg = container_of(sd, struct bfq_group, sched_data); | ||
1716 | bfqd = (struct bfq_data *)bfqg->bfqd; | ||
1717 | #endif | ||
1718 | if (bfqq) | ||
1719 | list_del(&bfqq->bfqq_list); | ||
1720 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1721 | else /* bfq_group */ | ||
1722 | bfq_weights_tree_remove(bfqd, entity, | ||
1723 | &bfqd->group_weights_tree); | ||
1724 | |||
1725 | if (bfqg != bfqd->root_group) | ||
1726 | bfqg->active_entities--; | ||
1727 | #endif | ||
1728 | } | ||
1729 | |||
1730 | /** | ||
1731 | * bfq_idle_insert - insert an entity into the idle tree. | ||
1732 | * @st: the service tree containing the tree. | ||
1733 | * @entity: the entity to insert. | ||
1734 | */ | ||
1735 | static void bfq_idle_insert(struct bfq_service_tree *st, | ||
1736 | struct bfq_entity *entity) | ||
1737 | { | ||
1738 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1739 | struct bfq_entity *first_idle = st->first_idle; | ||
1740 | struct bfq_entity *last_idle = st->last_idle; | ||
1741 | |||
1742 | if (!first_idle || bfq_gt(first_idle->finish, entity->finish)) | ||
1743 | st->first_idle = entity; | ||
1744 | if (!last_idle || bfq_gt(entity->finish, last_idle->finish)) | ||
1745 | st->last_idle = entity; | ||
1746 | |||
1747 | bfq_insert(&st->idle, entity); | ||
1748 | |||
1749 | if (bfqq) | ||
1750 | list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); | ||
1751 | } | ||
1752 | |||
1753 | /** | ||
1754 | * bfq_forget_entity - do not consider entity any longer for scheduling | ||
1755 | * @st: the service tree. | ||
1756 | * @entity: the entity being removed. | ||
1757 | * @is_in_service: true if entity is currently the in-service entity. | ||
1758 | * | ||
1759 | * Forget everything about @entity. In addition, if entity represents | ||
1760 | * a queue, and the latter is not in service, then release the service | ||
1761 | * reference to the queue (the one taken through bfq_get_entity). In | ||
1762 | * fact, in this case, there is really no more service reference to | ||
1763 | * the queue, as the latter is also outside any service tree. If, | ||
1764 | * instead, the queue is in service, then __bfq_bfqd_reset_in_service | ||
1765 | * will take care of putting the reference when the queue finally | ||
1766 | * stops being served. | ||
1767 | */ | ||
1768 | static void bfq_forget_entity(struct bfq_service_tree *st, | ||
1769 | struct bfq_entity *entity, | ||
1770 | bool is_in_service) | ||
1771 | { | ||
1772 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1773 | |||
1774 | entity->on_st = false; | ||
1775 | st->wsum -= entity->weight; | ||
1776 | if (bfqq && !is_in_service) | ||
1777 | bfq_put_queue(bfqq); | ||
1778 | } | ||
1779 | |||
1780 | /** | ||
1781 | * bfq_put_idle_entity - release the idle tree ref of an entity. | ||
1782 | * @st: service tree for the entity. | ||
1783 | * @entity: the entity being released. | ||
1784 | */ | ||
1785 | static void bfq_put_idle_entity(struct bfq_service_tree *st, | ||
1786 | struct bfq_entity *entity) | ||
1787 | { | ||
1788 | bfq_idle_extract(st, entity); | ||
1789 | bfq_forget_entity(st, entity, | ||
1790 | entity == entity->sched_data->in_service_entity); | ||
1791 | } | ||
1792 | |||
1793 | /** | ||
1794 | * bfq_forget_idle - update the idle tree if necessary. | ||
1795 | * @st: the service tree to act upon. | ||
1796 | * | ||
1797 | * To preserve the global O(log N) complexity we only remove one entry here; | ||
1798 | * as the idle tree will not grow indefinitely this can be done safely. | ||
1799 | */ | ||
1800 | static void bfq_forget_idle(struct bfq_service_tree *st) | ||
1801 | { | ||
1802 | struct bfq_entity *first_idle = st->first_idle; | ||
1803 | struct bfq_entity *last_idle = st->last_idle; | ||
1804 | |||
1805 | if (RB_EMPTY_ROOT(&st->active) && last_idle && | ||
1806 | !bfq_gt(last_idle->finish, st->vtime)) { | ||
1807 | /* | ||
1808 | * Forget the whole idle tree, increasing the vtime past | ||
1809 | * the last finish time of idle entities. | ||
1810 | */ | ||
1811 | st->vtime = last_idle->finish; | ||
1812 | } | ||
1813 | |||
1814 | if (first_idle && !bfq_gt(first_idle->finish, st->vtime)) | ||
1815 | bfq_put_idle_entity(st, first_idle); | ||
1816 | } | ||
1817 | |||
1818 | static struct bfq_service_tree * | ||
1819 | __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, | ||
1820 | struct bfq_entity *entity) | ||
1821 | { | ||
1822 | struct bfq_service_tree *new_st = old_st; | ||
1823 | |||
1824 | if (entity->prio_changed) { | ||
1825 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1826 | unsigned int prev_weight, new_weight; | ||
1827 | struct bfq_data *bfqd = NULL; | ||
1828 | struct rb_root *root; | ||
1829 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1830 | struct bfq_sched_data *sd; | ||
1831 | struct bfq_group *bfqg; | ||
1832 | #endif | ||
1833 | |||
1834 | if (bfqq) | ||
1835 | bfqd = bfqq->bfqd; | ||
1836 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
1837 | else { | ||
1838 | sd = entity->my_sched_data; | ||
1839 | bfqg = container_of(sd, struct bfq_group, sched_data); | ||
1840 | bfqd = (struct bfq_data *)bfqg->bfqd; | ||
1841 | } | ||
1842 | #endif | ||
1843 | |||
1844 | old_st->wsum -= entity->weight; | ||
1845 | |||
1846 | if (entity->new_weight != entity->orig_weight) { | ||
1847 | if (entity->new_weight < BFQ_MIN_WEIGHT || | ||
1848 | entity->new_weight > BFQ_MAX_WEIGHT) { | ||
1849 | pr_crit("update_weight_prio: new_weight %d\n", | ||
1850 | entity->new_weight); | ||
1851 | if (entity->new_weight < BFQ_MIN_WEIGHT) | ||
1852 | entity->new_weight = BFQ_MIN_WEIGHT; | ||
1853 | else | ||
1854 | entity->new_weight = BFQ_MAX_WEIGHT; | ||
1855 | } | ||
1856 | entity->orig_weight = entity->new_weight; | ||
1857 | if (bfqq) | ||
1858 | bfqq->ioprio = | ||
1859 | bfq_weight_to_ioprio(entity->orig_weight); | ||
1860 | } | ||
1861 | |||
1862 | if (bfqq) | ||
1863 | bfqq->ioprio_class = bfqq->new_ioprio_class; | ||
1864 | entity->prio_changed = 0; | ||
1865 | |||
1866 | /* | ||
1867 | * NOTE: here we may be changing the weight too early; | ||
1868 | * this will cause unfairness. The correct approach | ||
1869 | * would have required additional complexity to defer | ||
1870 | * weight changes to the proper time instants (i.e., | ||
1871 | * when entity->finish <= old_st->vtime). | ||
1872 | */ | ||
1873 | new_st = bfq_entity_service_tree(entity); | ||
1874 | |||
1875 | prev_weight = entity->weight; | ||
1876 | new_weight = entity->orig_weight * | ||
1877 | (bfqq ? bfqq->wr_coeff : 1); | ||
1878 | /* | ||
1879 | * If the weight of the entity changes, remove the entity | ||
1880 | * from its old weight counter (if there is a counter | ||
1881 | * associated with the entity), and add it to the counter | ||
1882 | * associated with its new weight. | ||
1883 | */ | ||
1884 | if (prev_weight != new_weight) { | ||
1885 | root = bfqq ? &bfqd->queue_weights_tree : | ||
1886 | &bfqd->group_weights_tree; | ||
1887 | bfq_weights_tree_remove(bfqd, entity, root); | ||
1888 | } | ||
1889 | entity->weight = new_weight; | ||
1890 | /* | ||
1891 | * Add the entity to its weights tree only if it is | ||
1892 | * not associated with a weight-raised queue. | ||
1893 | */ | ||
1894 | if (prev_weight != new_weight && | ||
1895 | (bfqq ? bfqq->wr_coeff == 1 : 1)) | ||
1896 | /* If we get here, root has been initialized. */ | ||
1897 | bfq_weights_tree_add(bfqd, entity, root); | ||
1898 | |||
1899 | new_st->wsum += entity->weight; | ||
1900 | |||
1901 | if (new_st != old_st) | ||
1902 | entity->start = new_st->vtime; | ||
1903 | } | ||
1904 | |||
1905 | return new_st; | ||
1906 | } | ||
1907 | |||
1908 | static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg); | ||
1909 | static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); | ||
1910 | |||
1911 | /** | ||
1912 | * bfq_bfqq_served - update the scheduler status after selection for | ||
1913 | * service. | ||
1914 | * @bfqq: the queue being served. | ||
1915 | * @served: bytes to transfer. | ||
1916 | * | ||
1917 | * NOTE: this can be optimized, as the timestamps of upper level entities | ||
1918 | * are synchronized every time a new bfqq is selected for service. For now, | ||
1919 | * we keep it this way to better check consistency. | ||
1920 | */ | ||
1921 | static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) | ||
1922 | { | ||
1923 | struct bfq_entity *entity = &bfqq->entity; | ||
1924 | struct bfq_service_tree *st; | ||
1925 | |||
1926 | for_each_entity(entity) { | ||
1927 | st = bfq_entity_service_tree(entity); | ||
1928 | |||
1929 | entity->service += served; | ||
1930 | |||
1931 | st->vtime += bfq_delta(served, st->wsum); | ||
1932 | bfq_forget_idle(st); | ||
1933 | } | ||
1934 | bfqg_stats_set_start_empty_time(bfqq_group(bfqq)); | ||
1935 | bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served); | ||
1936 | } | ||
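Numerically, bfq_delta(served, wsum) behaves like served / wsum: the more total weight is backlogged on the tree, the more slowly the virtual time advances for a given amount of service. The sketch below approximates bfq_delta() with plain integer division (the kernel version uses a fixed-point shift for precision, so absolute values differ); all numbers are illustrative.

#include <stdio.h>

/* Toy model: vtime advances by served/wsum, so quadrupling the total
 * weight cuts the vtime advance for the same service to a quarter. */
struct toy_st {
	unsigned long long vtime;
	unsigned long wsum;
};

static void toy_served(struct toy_st *st, unsigned long served)
{
	st->vtime += served / st->wsum;	/* kernel uses fixed point here */
}

int main(void)
{
	struct toy_st light = { .vtime = 0, .wsum = 100 };
	struct toy_st heavy = { .vtime = 0, .wsum = 400 };

	toy_served(&light, 8192);
	toy_served(&heavy, 8192);
	printf("vtime: wsum=100 -> %llu, wsum=400 -> %llu\n",
	       light.vtime, heavy.vtime);	/* 81 vs 20 */
	return 0;
}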
1937 | |||
1938 | /** | ||
1939 | * bfq_bfqq_charge_time - charge an amount of service equivalent to the length | ||
1940 | * of the time interval during which bfqq has been in | ||
1941 | * service. | ||
1942 | * @bfqd: the device | ||
1943 | * @bfqq: the queue that needs a service update. | ||
1944 | * @time_ms: the amount of time during which the queue has received service | ||
1945 | * | ||
1946 | * If a queue does not consume its budget fast enough, then providing | ||
1947 | * the queue with service fairness may impair throughput, more or less | ||
1948 | * severely. For this reason, queues that consume their budget slowly | ||
1949 | * are provided with time fairness instead of service fairness. This | ||
1950 | * goal is achieved through the BFQ scheduling engine, even though such an | ||
1951 | * engine works in the service domain, and not in the time domain. The trick | ||
1952 | * is charging these queues with an inflated amount of service, equal | ||
1953 | * to the amount of service that they would have received during their | ||
1954 | * service slot if they had been fast, i.e., if their requests had | ||
1955 | * been dispatched at a rate equal to the estimated peak rate. | ||
1956 | * | ||
1957 | * It is worth noting that time fairness can cause important | ||
1958 | * distortions in terms of bandwidth distribution, on devices with | ||
1959 | * internal queueing. The reason is that I/O requests dispatched | ||
1960 | * during the service slot of a queue may be served after that service | ||
1961 | * slot is finished, and may have a total processing time loosely | ||
1962 | * correlated with the duration of the service slot. This is | ||
1963 | * especially true for short service slots. | ||
1964 | */ | ||
1965 | static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq, | ||
1966 | unsigned long time_ms) | ||
1967 | { | ||
1968 | struct bfq_entity *entity = &bfqq->entity; | ||
1969 | int tot_serv_to_charge = entity->service; | ||
1970 | unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout); | ||
1971 | |||
1972 | if (time_ms > 0 && time_ms < timeout_ms) | ||
1973 | tot_serv_to_charge = | ||
1974 | (bfqd->bfq_max_budget * time_ms) / timeout_ms; | ||
1975 | |||
1976 | if (tot_serv_to_charge < entity->service) | ||
1977 | tot_serv_to_charge = entity->service; | ||
1978 | |||
1979 | /* Increase budget to avoid inconsistencies */ | ||
1980 | if (tot_serv_to_charge > entity->budget) | ||
1981 | entity->budget = tot_serv_to_charge; | ||
1982 | |||
1983 | bfq_bfqq_served(bfqq, | ||
1984 | max_t(int, 0, tot_serv_to_charge - entity->service)); | ||
1985 | } | ||
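A worked example of the charging formula above, with purely illustrative numbers: a queue that held the device for 25 ms of a 125 ms budget timeout, against a 16384-sector maximum budget, is charged 16384 * 25 / 125 = 3276 sectors even if it actually transferred far less.

#include <stdio.h>

/* Toy version of the time-charging rule: a slow queue that occupied the
 * device for time_ms is charged the service a fast queue would have
 * received in that interval, never less than what it really received. */
static int toy_serv_to_charge(int max_budget, unsigned long time_ms,
			      unsigned int timeout_ms, int service_received)
{
	int charge = service_received;

	if (time_ms > 0 && time_ms < timeout_ms)
		charge = (int)((max_budget * time_ms) / timeout_ms);
	if (charge < service_received)
		charge = service_received;
	return charge;
}

int main(void)
{
	/* 25 ms of a 125 ms slot, 16384-sector max budget, 512 served */
	printf("charge = %d sectors\n",
	       toy_serv_to_charge(16384, 25, 125, 512));	/* 3276 */
	return 0;
}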
1986 | |||
1987 | static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, | ||
1988 | struct bfq_service_tree *st, | ||
1989 | bool backshifted) | ||
1990 | { | ||
1991 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
1992 | |||
1993 | st = __bfq_entity_update_weight_prio(st, entity); | ||
1994 | bfq_calc_finish(entity, entity->budget); | ||
1995 | |||
1996 | /* | ||
1997 | * If some queues enjoy backshifting for a while, then their | ||
1998 | * (virtual) finish timestamps may happen to become lower and | ||
1999 | * lower than the system virtual time. In particular, if | ||
2000 | * these queues often happen to be idle for short time | ||
2001 | * periods, and during such time periods other queues with | ||
2002 | * higher timestamps happen to be busy, then the backshifted | ||
2003 | * timestamps of the former queues can become much lower than | ||
2004 | * the system virtual time. In fact, to serve the queues with | ||
2005 | * higher timestamps while the ones with lower timestamps are | ||
2006 | * idle, the system virtual time may be pushed-up to much | ||
2007 | * higher values than the finish timestamps of the idle | ||
2008 | * queues. As a consequence, the finish timestamps of all new | ||
2009 | * or newly activated queues may end up being much larger than | ||
2010 | * those of lucky queues with backshifted timestamps. The | ||
2011 | * latter queues may then monopolize the device for a lot of | ||
2012 | * time. This would simply break service guarantees. | ||
2013 | * | ||
2014 | * To reduce this problem, push up a little bit the | ||
2015 | * backshifted timestamps of the queue associated with this | ||
2016 | * entity (only a queue can happen to have the backshifted | ||
2017 | * flag set): just enough to let the finish timestamp of the | ||
2018 | * queue be equal to the current value of the system virtual | ||
2019 | * time. This may introduce a little unfairness among queues | ||
2020 | * with backshifted timestamps, but it does not break | ||
2021 | * worst-case fairness guarantees. | ||
2022 | * | ||
2023 | * As a special case, if bfqq is weight-raised, push up | ||
2024 | * timestamps much less, to keep very low the probability that | ||
2025 | * this push up causes the backshifted finish timestamps of | ||
2026 | * weight-raised queues to become higher than the backshifted | ||
2027 | * finish timestamps of non weight-raised queues. | ||
2028 | */ | ||
2029 | if (backshifted && bfq_gt(st->vtime, entity->finish)) { | ||
2030 | unsigned long delta = st->vtime - entity->finish; | ||
2031 | |||
2032 | if (bfqq) | ||
2033 | delta /= bfqq->wr_coeff; | ||
2034 | |||
2035 | entity->start += delta; | ||
2036 | entity->finish += delta; | ||
2037 | } | ||
2038 | |||
2039 | bfq_active_insert(st, entity); | ||
2040 | } | ||
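The weight-raising special case above is a one-line division, but its effect is large: with the illustrative numbers below, a plain queue is pushed up by the full vtime gap, while a queue with a (hypothetical) wr_coeff of 30 keeps almost all of its backshift.

#include <stdio.h>

int main(void)
{
	/* Illustrative values: the queue's finish time lags vtime by 600. */
	unsigned long long vtime = 1000, finish = 400;
	unsigned int wr_coeff_plain = 1, wr_coeff_raised = 30;

	unsigned long delta_plain = (vtime - finish) / wr_coeff_plain;
	unsigned long delta_raised = (vtime - finish) / wr_coeff_raised;

	/* The raised queue's timestamps move forward 30x less. */
	printf("push-up: plain %lu, weight-raised %lu\n",
	       delta_plain, delta_raised);	/* 600 vs 20 */
	return 0;
}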
2041 | |||
2042 | /** | ||
2043 | * __bfq_activate_entity - handle activation of entity. | ||
2044 | * @entity: the entity being activated. | ||
2045 | * @non_blocking_wait_rq: true if entity was waiting for a request | ||
2046 | * | ||
2047 | * Called for a 'true' activation, i.e., if entity is not active and | ||
2048 | * one of its children receives a new request. | ||
2049 | * | ||
2050 | * Basically, this function updates the timestamps of entity and | ||
2051 | * inserts entity into its active tree, after possibly extracting it | ||
2052 | * from its idle tree. | ||
2053 | */ | ||
2054 | static void __bfq_activate_entity(struct bfq_entity *entity, | ||
2055 | bool non_blocking_wait_rq) | ||
2056 | { | ||
2057 | struct bfq_service_tree *st = bfq_entity_service_tree(entity); | ||
2058 | bool backshifted = false; | ||
2059 | unsigned long long min_vstart; | ||
2060 | |||
2061 | /* See comments on bfq_bfqq_update_budg_for_activation */ | ||
2062 | if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { | ||
2063 | backshifted = true; | ||
2064 | min_vstart = entity->finish; | ||
2065 | } else | ||
2066 | min_vstart = st->vtime; | ||
2067 | |||
2068 | if (entity->tree == &st->idle) { | ||
2069 | /* | ||
2070 | * Must be on the idle tree, bfq_idle_extract() will | ||
2071 | * check for that. | ||
2072 | */ | ||
2073 | bfq_idle_extract(st, entity); | ||
2074 | entity->start = bfq_gt(min_vstart, entity->finish) ? | ||
2075 | min_vstart : entity->finish; | ||
2076 | } else { | ||
2077 | /* | ||
2078 | * The finish time of the entity may be invalid, and | ||
2079 | * it is in the past for sure, otherwise the queue | ||
2080 | * would have been on the idle tree. | ||
2081 | */ | ||
2082 | entity->start = min_vstart; | ||
2083 | st->wsum += entity->weight; | ||
2084 | /* | ||
2085 | * entity is about to be inserted into a service tree, | ||
2086 | * and then set in service: get a reference to make | ||
2087 | * sure entity does not disappear until it is no | ||
2088 | * longer in service or scheduled for service. | ||
2089 | */ | ||
2090 | bfq_get_entity(entity); | ||
2091 | |||
2092 | entity->on_st = true; | ||
2093 | } | ||
2094 | |||
2095 | bfq_update_fin_time_enqueue(entity, st, backshifted); | ||
2096 | } | ||
2097 | |||
2098 | /** | ||
2099 | * __bfq_requeue_entity - handle requeueing or repositioning of an entity. | ||
2100 | * @entity: the entity being requeued or repositioned. | ||
2101 | * | ||
2102 | * Requeueing is needed if this entity stops being served, which | ||
2103 | * happens if a leaf descendant entity has expired. On the other hand, | ||
2104 | * repositioning is needed if the next_in_service entity for the child | ||
2105 | * entity has changed. See the comments inside the function for | ||
2106 | * details. | ||
2107 | * | ||
2108 | * Basically, this function: 1) removes entity from its active tree if | ||
2109 | * present there, 2) updates the timestamps of entity and 3) inserts | ||
2110 | * entity back into its active tree (in the new, right position for | ||
2111 | * the new values of the timestamps). | ||
2112 | */ | ||
2113 | static void __bfq_requeue_entity(struct bfq_entity *entity) | ||
2114 | { | ||
2115 | struct bfq_sched_data *sd = entity->sched_data; | ||
2116 | struct bfq_service_tree *st = bfq_entity_service_tree(entity); | ||
2117 | |||
2118 | if (entity == sd->in_service_entity) { | ||
2119 | /* | ||
2120 | * We are requeueing the current in-service entity, | ||
2121 | * which may have to be done for one of the following | ||
2122 | * reasons: | ||
2123 | * - entity represents the in-service queue, and the | ||
2124 | * in-service queue is being requeued after an | ||
2125 | * expiration; | ||
2126 | * - entity represents a group, and its budget has | ||
2127 | * changed because one of its child entities has | ||
2128 | * just been either activated or requeued for some | ||
2129 | * reason; the timestamps of the entity need then to | ||
2130 | * be updated, and the entity needs to be enqueued | ||
2131 | * or repositioned accordingly. | ||
2132 | * | ||
2133 | * In particular, before requeueing, the start time of | ||
2134 | * the entity must be moved forward to account for the | ||
2135 | * service that the entity has received while in | ||
2136 | * service. This is done by the next instructions. The | ||
2137 | * finish time will then be updated according to this | ||
2138 | * new value of the start time, and to the budget of | ||
2139 | * the entity. | ||
2140 | */ | ||
2141 | bfq_calc_finish(entity, entity->service); | ||
2142 | entity->start = entity->finish; | ||
2143 | /* | ||
2144 | * In addition, if the entity had more than one child | ||
2145 | * when set in service, then it was not extracted from | ||
2146 | * the active tree. This implies that the position of | ||
2147 | * the entity in the active tree may need to be | ||
2148 | * changed now, because we have just updated the start | ||
2149 | * time of the entity, and we will update its finish | ||
2150 | * time in a moment (the requeueing is then, more | ||
2151 | * precisely, a repositioning in this case). To | ||
2152 | * implement this repositioning, we: 1) dequeue the | ||
2153 | * entity here, 2) update the finish time and | ||
2154 | * requeue the entity according to the new | ||
2155 | * timestamps below. | ||
2156 | */ | ||
2157 | if (entity->tree) | ||
2158 | bfq_active_extract(st, entity); | ||
2159 | } else { /* The entity is already active, and not in service */ | ||
2160 | /* | ||
2161 | * In this case, this function gets called only if the | ||
2162 | * next_in_service entity below this entity has | ||
2163 | * changed, and this change has caused the budget of | ||
2164 | * this entity to change, which finally implies that | ||
2165 | * the finish time of this entity must be | ||
2166 | * updated. Such an update may cause the scheduling, | ||
2167 | * i.e., the position in the active tree, of this | ||
2168 | * entity to change. We handle this change by: 1) | ||
2169 | * dequeueing the entity here, 2) updating the finish | ||
2170 | * time and requeueing the entity according to the new | ||
2171 | * timestamps below. This is the same approach as the | ||
2172 | * non-extracted-entity sub-case above. | ||
2173 | */ | ||
2174 | bfq_active_extract(st, entity); | ||
2175 | } | ||
2176 | |||
2177 | bfq_update_fin_time_enqueue(entity, st, false); | ||
2178 | } | ||
2179 | |||
2180 | static void __bfq_activate_requeue_entity(struct bfq_entity *entity, | ||
2181 | struct bfq_sched_data *sd, | ||
2182 | bool non_blocking_wait_rq) | ||
2183 | { | ||
2184 | struct bfq_service_tree *st = bfq_entity_service_tree(entity); | ||
2185 | |||
2186 | if (sd->in_service_entity == entity || entity->tree == &st->active) | ||
2187 | /* | ||
2188 | * in service or already queued on the active tree, | ||
2189 | * requeue or reposition | ||
2190 | */ | ||
2191 | __bfq_requeue_entity(entity); | ||
2192 | else | ||
2193 | /* | ||
2194 | * Not in service and not queued on its active tree: | ||
2195 | * the entity is idle and this is a true activation. | ||
2196 | */ | ||
2197 | __bfq_activate_entity(entity, non_blocking_wait_rq); | ||
2198 | } | ||
2199 | |||
2200 | |||
2201 | /** | ||
2202 | * bfq_activate_requeue_entity - activate or requeue an entity representing a bfq_queue, | ||
2203 | * and activate, requeue or reposition all ancestors | ||
2204 | * for which such an update becomes necessary. | ||
2205 | * @entity: the entity to activate. | ||
2206 | * @non_blocking_wait_rq: true if this entity was waiting for a request | ||
2207 | * @requeue: true if this is a requeue, which implies that bfqq is | ||
2208 | * being expired; thus ALL its ancestors stop being served and must | ||
2209 | * therefore be requeued | ||
2210 | */ | ||
2211 | static void bfq_activate_requeue_entity(struct bfq_entity *entity, | ||
2212 | bool non_blocking_wait_rq, | ||
2213 | bool requeue) | ||
2214 | { | ||
2215 | struct bfq_sched_data *sd; | ||
2216 | |||
2217 | for_each_entity(entity) { | ||
2218 | sd = entity->sched_data; | ||
2219 | __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); | ||
2220 | |||
2221 | if (!bfq_update_next_in_service(sd, entity) && !requeue) | ||
2222 | break; | ||
2223 | } | ||
2224 | } | ||
2225 | |||
2226 | /** | ||
2227 | * __bfq_deactivate_entity - deactivate an entity from its service tree. | ||
2228 | * @entity: the entity to deactivate. | ||
2229 | * @ins_into_idle_tree: if false, the entity will not be put into the | ||
2230 | * idle tree. | ||
2231 | * | ||
2232 | * Deactivates an entity, regardless of its previous state. Must | ||
2233 | * be invoked only if entity is on a service tree. Extracts the entity | ||
2234 | * from that tree, and if necessary and allowed, puts it on the idle | ||
2235 | * tree. | ||
2236 | */ | ||
2237 | static bool __bfq_deactivate_entity(struct bfq_entity *entity, | ||
2238 | bool ins_into_idle_tree) | ||
2239 | { | ||
2240 | struct bfq_sched_data *sd = entity->sched_data; | ||
2241 | struct bfq_service_tree *st = bfq_entity_service_tree(entity); | ||
2242 | int is_in_service = entity == sd->in_service_entity; | ||
2243 | |||
2244 | if (!entity->on_st) /* entity never activated, or already inactive */ | ||
2245 | return false; | ||
2246 | |||
2247 | if (is_in_service) | ||
2248 | bfq_calc_finish(entity, entity->service); | ||
2249 | |||
2250 | if (entity->tree == &st->active) | ||
2251 | bfq_active_extract(st, entity); | ||
2252 | else if (!is_in_service && entity->tree == &st->idle) | ||
2253 | bfq_idle_extract(st, entity); | ||
2254 | |||
2255 | if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime)) | ||
2256 | bfq_forget_entity(st, entity, is_in_service); | ||
2257 | else | ||
2258 | bfq_idle_insert(st, entity); | ||
2259 | |||
2260 | return true; | ||
2261 | } | ||
2262 | |||
2263 | /** | ||
2264 | * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. | ||
2265 | * @entity: the entity to deactivate. | ||
2266 | * @ins_into_idle_tree: true if the entity can be put on the idle tree | ||
2267 | */ | ||
2268 | static void bfq_deactivate_entity(struct bfq_entity *entity, | ||
2269 | bool ins_into_idle_tree, | ||
2270 | bool expiration) | ||
2271 | { | ||
2272 | struct bfq_sched_data *sd; | ||
2273 | struct bfq_entity *parent = NULL; | ||
2274 | |||
2275 | for_each_entity_safe(entity, parent) { | ||
2276 | sd = entity->sched_data; | ||
2277 | |||
2278 | if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) { | ||
2279 | /* | ||
2280 | * entity is not in any tree any more, so | ||
2281 | * this deactivation is a no-op, and there is | ||
2282 | * nothing to change for upper-level entities | ||
2283 | * (in case of expiration, this can never | ||
2284 | * happen). | ||
2285 | */ | ||
2286 | return; | ||
2287 | } | ||
2288 | |||
2289 | if (sd->next_in_service == entity) | ||
2290 | /* | ||
2291 | * entity was the next_in_service entity, | ||
2292 | * then, since entity has just been | ||
2293 | * deactivated, a new one must be found. | ||
2294 | */ | ||
2295 | bfq_update_next_in_service(sd, NULL); | ||
2296 | |||
2297 | if (sd->next_in_service) | ||
2298 | /* | ||
2299 | * The parent entity is still backlogged, | ||
2300 | * because next_in_service is not NULL. So, no | ||
2301 | * further upwards deactivation must be | ||
2302 | * performed. Yet, next_in_service has | ||
2303 | * changed, so the schedule still needs to be | ||
2304 | * updated upwards (see the requeue loop below). | ||
2305 | */ | ||
2306 | break; | ||
2307 | |||
2308 | /* | ||
2309 | * If we get here, then the parent is no longer | ||
2310 | * backlogged and we need to propagate the | ||
2311 | * deactivation upwards. Thus let the loop go on. | ||
2312 | */ | ||
2313 | |||
2314 | /* | ||
2315 | * Also let parent be queued into the idle tree on | ||
2316 | * deactivation, to preserve service guarantees, and | ||
2317 | * assuming that the caller of this function does not | ||
2318 | * need parent entities to be removed completely as well. | ||
2319 | */ | ||
2320 | ins_into_idle_tree = true; | ||
2321 | } | ||
2322 | |||
2323 | /* | ||
2324 | * If the deactivation loop is fully executed, then there are | ||
2325 | * no more entities to touch and the next loop is not executed at | ||
2326 | * all. Otherwise, requeue remaining entities if they are | ||
2327 | * about to stop receiving service, or reposition them if this | ||
2328 | * is not the case. | ||
2329 | */ | ||
2330 | entity = parent; | ||
2331 | for_each_entity(entity) { | ||
2332 | /* | ||
2333 | * Invoke __bfq_requeue_entity on entity, even if | ||
2334 | * already active, to requeue/reposition it in the | ||
2335 | * active tree (because sd->next_in_service has | ||
2336 | * changed) | ||
2337 | */ | ||
2338 | __bfq_requeue_entity(entity); | ||
2339 | |||
2340 | sd = entity->sched_data; | ||
2341 | if (!bfq_update_next_in_service(sd, entity) && | ||
2342 | !expiration) | ||
2343 | /* | ||
2344 | * next_in_service unchanged or not causing | ||
2345 | * any change in entity->parent->sd, and no | ||
2346 | * requeueing needed for expiration: stop | ||
2347 | * here. | ||
2348 | */ | ||
2349 | break; | ||
2350 | } | ||
2351 | } | ||
2352 | |||
2353 | /** | ||
2354 | * bfq_calc_vtime_jump - compute the value to which the vtime should jump, | ||
2355 | * if needed, to have at least one entity eligible. | ||
2356 | * @st: the service tree to act upon. | ||
2357 | * | ||
2358 | * Assumes that st is not empty. | ||
2359 | */ | ||
2360 | static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) | ||
2361 | { | ||
2362 | struct bfq_entity *root_entity = bfq_root_active_entity(&st->active); | ||
2363 | |||
2364 | if (bfq_gt(root_entity->min_start, st->vtime)) | ||
2365 | return root_entity->min_start; | ||
2366 | |||
2367 | return st->vtime; | ||
2368 | } | ||
2369 | |||
2370 | static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value) | ||
2371 | { | ||
2372 | if (new_value > st->vtime) { | ||
2373 | st->vtime = new_value; | ||
2374 | bfq_forget_idle(st); | ||
2375 | } | ||
2376 | } | ||
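Taken together, the two helpers implement a monotonic clock that can skip forward: if even the smallest start time in the active tree (root_entity->min_start) lies ahead of vtime, vtime jumps to it so that at least one entity becomes eligible, and it never moves backwards. A toy sketch with illustrative values:

#include <stdio.h>

/* Toy reduction of bfq_calc_vtime_jump() + bfq_update_vtime(): jump
 * forward to the tree's minimum start time when nothing is eligible,
 * never move backwards. */
static unsigned long long toy_vtime_jump(unsigned long long vtime,
					 unsigned long long root_min_start)
{
	return root_min_start > vtime ? root_min_start : vtime;
}

int main(void)
{
	printf("%llu\n", toy_vtime_jump(100, 250));	/* jumps to 250 */
	printf("%llu\n", toy_vtime_jump(300, 250));	/* stays at 300 */
	return 0;
}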
2377 | |||
2378 | /** | ||
2379 | * bfq_first_active_entity - find the eligible entity with | ||
2380 | * the smallest finish time | ||
2381 | * @st: the service tree to select from. | ||
2382 | * @vtime: the system virtual time to use as a reference for eligibility | ||
2383 | * | ||
2384 | * This function searches the first schedulable entity, starting from the | ||
2385 | * root of the tree and going left whenever the left subtree contains at | ||
2386 | * least one eligible (start <= vtime) entity. The path on | ||
2387 | * the right is followed only if a) the left subtree contains no eligible | ||
2388 | * entities and b) no eligible entity has been found yet. | ||
2389 | */ | ||
2390 | static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st, | ||
2391 | u64 vtime) | ||
2392 | { | ||
2393 | struct bfq_entity *entry, *first = NULL; | ||
2394 | struct rb_node *node = st->active.rb_node; | ||
2395 | |||
2396 | while (node) { | ||
2397 | entry = rb_entry(node, struct bfq_entity, rb_node); | ||
2398 | left: | ||
2399 | if (!bfq_gt(entry->start, vtime)) | ||
2400 | first = entry; | ||
2401 | |||
2402 | if (node->rb_left) { | ||
2403 | entry = rb_entry(node->rb_left, | ||
2404 | struct bfq_entity, rb_node); | ||
2405 | if (!bfq_gt(entry->min_start, vtime)) { | ||
2406 | node = node->rb_left; | ||
2407 | goto left; | ||
2408 | } | ||
2409 | } | ||
2410 | if (first) | ||
2411 | break; | ||
2412 | node = node->rb_right; | ||
2413 | } | ||
2414 | |||
2415 | return first; | ||
2416 | } | ||
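Stripped of the rb-tree API, the goto, and the wraparound-safe bfq_gt() comparisons, the search above reduces to the following user-space sketch (toy_* names and all values are illustrative): keep walking left, toward smaller finish times, as long as the left subtree is known via min_start to contain an eligible entity; fall back to the right only if nothing eligible has been found yet.

#include <stdio.h>

struct toy_entity {
	unsigned long long start, finish, min_start;
	struct toy_entity *left, *right;
};

static struct toy_entity *toy_first_eligible(struct toy_entity *node,
					     unsigned long long vtime)
{
	struct toy_entity *first = NULL;

	while (node) {
		if (node->start <= vtime)
			first = node;	/* eligible, smallest finish so far */
		if (node->left && node->left->min_start <= vtime) {
			node = node->left;	/* eligible entity on the left */
			continue;
		}
		if (first)
			break;
		node = node->right;
	}
	return first;
}

int main(void)
{
	/* Tree ordered by finish: root (finish 20), left child (finish 10). */
	struct toy_entity a = { .start = 5,  .finish = 10, .min_start = 5 };
	struct toy_entity b = { .start = 30, .finish = 20, .min_start = 5,
				.left = &a };

	struct toy_entity *e = toy_first_eligible(&b, 12);
	printf("picked finish = %llu\n", e ? e->finish : 0ULL);	/* 10 */
	return 0;
}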
2417 | |||
2418 | /** | ||
2419 | * __bfq_lookup_next_entity - return the first eligible entity in @st. | ||
2420 | * @st: the service tree. | ||
2421 | * | ||
2422 | * If there is no in-service entity for the sched_data st belongs to, | ||
2423 | * then return the entity that will be set in service if: | ||
2424 | * 1) the parent entity this st belongs to is set in service; | ||
2425 | * 2) no entity belonging to such parent entity undergoes a state change | ||
2426 | * that would influence the timestamps of the entity (e.g., becomes idle, | ||
2427 | * becomes backlogged, changes its budget, ...). | ||
2428 | * | ||
2429 | * In this first case, update the virtual time in @st too (see the | ||
2430 | * comments on this update inside the function). | ||
2431 | * | ||
2432 | * In contrast, if there is an in-service entity, then return the | ||
2433 | * entity that would be set in service if not only the above | ||
2434 | * conditions, but also the next one held true: the currently | ||
2435 | * in-service entity, on expiration, | ||
2436 | * 1) gets a finish time equal to the current one, or | ||
2437 | * 2) is not eligible any more, or | ||
2438 | * 3) is idle. | ||
2439 | */ | ||
2440 | static struct bfq_entity * | ||
2441 | __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service) | ||
2442 | { | ||
2443 | struct bfq_entity *entity; | ||
2444 | u64 new_vtime; | ||
2445 | |||
2446 | if (RB_EMPTY_ROOT(&st->active)) | ||
2447 | return NULL; | ||
2448 | |||
2449 | /* | ||
2450 | * Get the value of the system virtual time for which at | ||
2451 | * least one entity is eligible. | ||
2452 | */ | ||
2453 | new_vtime = bfq_calc_vtime_jump(st); | ||
2454 | |||
2455 | /* | ||
2456 | * If there is no in-service entity for the sched_data this | ||
2457 | * active tree belongs to, then push the system virtual time | ||
2458 | * up to the value that guarantees that at least one entity is | ||
2459 | * eligible. If, instead, there is an in-service entity, then | ||
2460 | * do not make any such update, because there is already an | ||
2461 | * eligible entity, namely the in-service one (even if the | ||
2462 | * entity is not on st, because it was extracted when set in | ||
2463 | * service). | ||
2464 | */ | ||
2465 | if (!in_service) | ||
2466 | bfq_update_vtime(st, new_vtime); | ||
2467 | |||
2468 | entity = bfq_first_active_entity(st, new_vtime); | ||
2469 | |||
2470 | return entity; | ||
2471 | } | ||
2472 | |||
2473 | /** | ||
2474 | * bfq_lookup_next_entity - return the first eligible entity in @sd. | ||
2475 | * @sd: the sched_data. | ||
2476 | * | ||
2477 | * This function is invoked when there has been a change in the trees | ||
2478 | * for sd, and we need to know what the new next entity is after this | ||
2479 | * change. | ||
2480 | */ | ||
2481 | static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) | ||
2482 | { | ||
2483 | struct bfq_service_tree *st = sd->service_tree; | ||
2484 | struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); | ||
2485 | struct bfq_entity *entity = NULL; | ||
2486 | int class_idx = 0; | ||
2487 | |||
2488 | /* | ||
2489 | * Choose from the idle class, if needed to guarantee a minimum | ||
2490 | * bandwidth to this class (and if there is some active entity | ||
2491 | * in the idle class). This should also mitigate | ||
2492 | * priority-inversion problems in case a low-priority task is | ||
2493 | * holding file system resources. | ||
2494 | */ | ||
2495 | if (time_is_before_jiffies(sd->bfq_class_idle_last_service + | ||
2496 | BFQ_CL_IDLE_TIMEOUT)) { | ||
2497 | if (!RB_EMPTY_ROOT(&idle_class_st->active)) | ||
2498 | class_idx = BFQ_IOPRIO_CLASSES - 1; | ||
2499 | /* About to be served if backlogged, or not yet backlogged */ | ||
2500 | sd->bfq_class_idle_last_service = jiffies; | ||
2501 | } | ||
2502 | |||
2503 | /* | ||
2504 | * Find the next entity to serve for the highest-priority | ||
2505 | * class, unless the idle class needs to be served. | ||
2506 | */ | ||
2507 | for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { | ||
2508 | entity = __bfq_lookup_next_entity(st + class_idx, | ||
2509 | sd->in_service_entity); | ||
2510 | |||
2511 | if (entity) | ||
2512 | break; | ||
2513 | } | ||
2514 | |||
2515 | if (!entity) | ||
2516 | return NULL; | ||
2517 | |||
2518 | return entity; | ||
2519 | } | ||
2520 | |||
2521 | static bool next_queue_may_preempt(struct bfq_data *bfqd) | ||
2522 | { | ||
2523 | struct bfq_sched_data *sd = &bfqd->root_group->sched_data; | ||
2524 | |||
2525 | return sd->next_in_service != sd->in_service_entity; | ||
2526 | } | ||
2527 | |||
2528 | /* | ||
2529 | * Get next queue for service. | ||
2530 | */ | ||
2531 | static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) | ||
2532 | { | ||
2533 | struct bfq_entity *entity = NULL; | ||
2534 | struct bfq_sched_data *sd; | ||
2535 | struct bfq_queue *bfqq; | ||
2536 | |||
2537 | if (bfqd->busy_queues == 0) | ||
2538 | return NULL; | ||
2539 | |||
2540 | /* | ||
2541 | * Traverse the path from the root to the leaf entity to | ||
2542 | * serve. Set in service all the entities visited along the | ||
2543 | * way. | ||
2544 | */ | ||
2545 | sd = &bfqd->root_group->sched_data; | ||
2546 | for (; sd ; sd = entity->my_sched_data) { | ||
2547 | /* | ||
2548 | * WARNING. We are about to set the in-service entity | ||
2549 | * to sd->next_in_service, i.e., to the (cached) value | ||
2550 | * returned by bfq_lookup_next_entity(sd) the last | ||
2551 | * time it was invoked, i.e., the last time when the | ||
2552 | * service order in sd changed as a consequence of the | ||
2553 | * activation or deactivation of an entity. In this | ||
2554 | * respect, if we execute bfq_lookup_next_entity(sd) | ||
2555 | * in this very moment, it may, although with low | ||
2556 | * probability, yield a different entity than that | ||
2557 | * pointed to by sd->next_in_service. This rare event | ||
2558 | * happens in case there was no CLASS_IDLE entity to | ||
2559 | * serve for sd when bfq_lookup_next_entity(sd) was | ||
2560 | * invoked for the last time, while there is now one | ||
2561 | * such entity. | ||
2562 | * | ||
2563 | * If the above event happens, then the scheduling of | ||
2564 | * such entity in CLASS_IDLE is postponed until the | ||
2565 | * service of the sd->next_in_service entity | ||
2566 | * finishes. In fact, when the latter is expired, | ||
2567 | * bfq_lookup_next_entity(sd) gets called again, | ||
2568 | * exactly to update sd->next_in_service. | ||
2569 | */ | ||
2570 | |||
2571 | /* Make next_in_service entity become in_service_entity */ | ||
2572 | entity = sd->next_in_service; | ||
2573 | sd->in_service_entity = entity; | ||
2574 | |||
2575 | /* | ||
2576 | * Reset the accumulator of the amount of service that | ||
2577 | * the entity is about to receive. | ||
2578 | */ | ||
2579 | entity->service = 0; | ||
2580 | |||
2581 | /* | ||
2582 | * If entity is no longer a candidate for next | ||
2583 | * service, then we extract it from its active tree, | ||
2584 | * for the following reason. To further boost the | ||
2585 | * throughput in some special cases, BFQ needs to know | ||
2586 | * which is the next candidate entity to serve, while | ||
2587 | * there is already an entity in service. In this | ||
2588 | * respect, to make it easy to compute/update the next | ||
2589 | * candidate entity to serve after the current | ||
2590 | * candidate has been set in service, there is a case | ||
2591 | * where it is necessary to extract the current | ||
2592 | * candidate from its service tree. Such a case is | ||
2593 | * when the entity just set in service cannot be also | ||
2594 | * a candidate for next service. Details about when | ||
2595 | * this condition holds are reported in the comments | ||
2596 | * on the function bfq_no_longer_next_in_service() | ||
2597 | * invoked below. | ||
2598 | */ | ||
2599 | if (bfq_no_longer_next_in_service(entity)) | ||
2600 | bfq_active_extract(bfq_entity_service_tree(entity), | ||
2601 | entity); | ||
2602 | |||
2603 | /* | ||
2604 | * For the same reason why we may have just extracted | ||
2605 | * entity from its active tree, we may need to update | ||
2606 | * next_in_service for the sched_data of entity too, | ||
2607 | * regardless of whether entity has been extracted. | ||
2608 | * In fact, even if entity has not been extracted, a | ||
2609 | * descendant entity may get extracted. Such an event | ||
2610 | * would cause a change in next_in_service for the | ||
2611 | * level of the descendant entity, and thus possibly | ||
2612 | * back to upper levels. | ||
2613 | * | ||
2614 | * We cannot perform the resulting needed update | ||
2615 | * before the end of this loop, because, to know which | ||
2616 | * is the correct next-to-serve candidate entity for | ||
2617 | * each level, we need first to find the leaf entity | ||
2618 | * to set in service. In fact, only after we know | ||
2619 | * which is the next-to-serve leaf entity, we can | ||
2620 | * discover whether the parent entity of the leaf | ||
2621 | * entity becomes the next-to-serve, and so on. | ||
2622 | */ | ||
2623 | |||
2624 | } | ||
2625 | |||
2626 | bfqq = bfq_entity_to_bfqq(entity); | ||
2627 | |||
2628 | /* | ||
2629 | * We can finally update all next-to-serve entities along the | ||
2630 | * path from the leaf entity just set in service to the root. | ||
2631 | */ | ||
2632 | for_each_entity(entity) { | ||
2633 | struct bfq_sched_data *sd = entity->sched_data; | ||
2634 | |||
2635 | if (!bfq_update_next_in_service(sd, NULL)) | ||
2636 | break; | ||
2637 | } | ||
2638 | |||
2639 | return bfqq; | ||
2640 | } | ||
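Once the extraction and update bookkeeping is stripped away, the in-service walk above has a very simple shape. The sketch below is a user-space reduction with invented toy_* types; like the original (which returns early when busy_queues is 0), it assumes every visited level has a cached, non-NULL next_in_service entity.

struct toy_entity;
struct toy_sched_data {
	struct toy_entity *in_service_entity;
	struct toy_entity *next_in_service;
};
struct toy_entity {
	struct toy_sched_data *my_sched_data;	/* NULL for a leaf/queue */
	int service;
};

/* Root-to-leaf walk: promote the cached next_in_service entity at each
 * level, then descend into that entity's own scheduler. */
static struct toy_entity *toy_get_next(struct toy_sched_data *sd)
{
	struct toy_entity *entity = NULL;

	for (; sd; sd = entity->my_sched_data) {
		entity = sd->next_in_service;	/* assumed non-NULL here */
		sd->in_service_entity = entity;
		entity->service = 0;	/* reset the service accumulator */
	}
	return entity;	/* the leaf entity, i.e., the queue to serve */
}

int main(void)
{
	struct toy_entity leaf = { .my_sched_data = (void *)0 };
	struct toy_sched_data group_sd = { .next_in_service = &leaf };
	struct toy_entity group = { .my_sched_data = &group_sd };
	struct toy_sched_data root_sd = { .next_in_service = &group };

	return toy_get_next(&root_sd) == &leaf ? 0 : 1;
}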
2641 | |||
2642 | static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) | ||
2643 | { | ||
2644 | struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; | ||
2645 | struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; | ||
2646 | struct bfq_entity *entity = in_serv_entity; | ||
2647 | |||
2648 | bfq_clear_bfqq_wait_request(in_serv_bfqq); | ||
2649 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); | ||
2650 | bfqd->in_service_queue = NULL; | ||
2651 | |||
2652 | /* | ||
2653 | * When this function is called, all in-service entities have | ||
2654 | * been properly deactivated or requeued, so we can safely | ||
2655 | * execute the final step: reset in_service_entity along the | ||
2656 | * path from entity to the root. | ||
2657 | */ | ||
2658 | for_each_entity(entity) | ||
2659 | entity->sched_data->in_service_entity = NULL; | ||
2660 | |||
2661 | /* | ||
2662 | * in_serv_entity is no longer in service, so, if it is in no | ||
2663 | * service tree either, then release the service reference to | ||
2664 | * the queue it represents (taken with bfq_get_entity). | ||
2665 | */ | ||
2666 | if (!in_serv_entity->on_st) | ||
2667 | bfq_put_queue(in_serv_bfqq); | ||
2668 | } | ||
2669 | |||
2670 | static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, | ||
2671 | bool ins_into_idle_tree, bool expiration) | ||
2672 | { | ||
2673 | struct bfq_entity *entity = &bfqq->entity; | ||
2674 | |||
2675 | bfq_deactivate_entity(entity, ins_into_idle_tree, expiration); | ||
2676 | } | ||
2677 | |||
2678 | static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) | ||
2679 | { | ||
2680 | struct bfq_entity *entity = &bfqq->entity; | ||
2681 | |||
2682 | bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), | ||
2683 | false); | ||
2684 | bfq_clear_bfqq_non_blocking_wait_rq(bfqq); | ||
2685 | } | ||
2686 | |||
2687 | static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) | ||
2688 | { | ||
2689 | struct bfq_entity *entity = &bfqq->entity; | ||
2690 | |||
2691 | bfq_activate_requeue_entity(entity, false, | ||
2692 | bfqq == bfqd->in_service_queue); | ||
2693 | } | ||
2694 | |||
2695 | static void bfqg_stats_update_dequeue(struct bfq_group *bfqg); | ||
2696 | |||
2697 | /* | ||
2698 | * Called when the bfqq no longer has requests pending; remove it from | ||
2699 | * the service tree. As a special case, it can be invoked during an | ||
2700 | * expiration. | ||
2701 | */ | ||
2702 | static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, | ||
2703 | bool expiration) | ||
2704 | { | ||
2705 | bfq_log_bfqq(bfqd, bfqq, "del from busy"); | ||
2706 | |||
2707 | bfq_clear_bfqq_busy(bfqq); | ||
2708 | |||
2709 | bfqd->busy_queues--; | ||
2710 | |||
2711 | if (!bfqq->dispatched) | ||
2712 | bfq_weights_tree_remove(bfqd, &bfqq->entity, | ||
2713 | &bfqd->queue_weights_tree); | ||
2714 | |||
2715 | if (bfqq->wr_coeff > 1) | ||
2716 | bfqd->wr_busy_queues--; | ||
2717 | |||
2718 | bfqg_stats_update_dequeue(bfqq_group(bfqq)); | ||
2719 | |||
2720 | bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); | ||
2721 | } | ||
2722 | |||
2723 | /* | ||
2724 | * Called when an inactive queue receives a new request. | ||
2725 | */ | ||
2726 | static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) | ||
2727 | { | ||
2728 | bfq_log_bfqq(bfqd, bfqq, "add to busy"); | ||
2729 | |||
2730 | bfq_activate_bfqq(bfqd, bfqq); | ||
2731 | |||
2732 | bfq_mark_bfqq_busy(bfqq); | ||
2733 | bfqd->busy_queues++; | ||
2734 | |||
2735 | if (!bfqq->dispatched) | ||
2736 | if (bfqq->wr_coeff == 1) | ||
2737 | bfq_weights_tree_add(bfqd, &bfqq->entity, | ||
2738 | &bfqd->queue_weights_tree); | ||
2739 | |||
2740 | if (bfqq->wr_coeff > 1) | ||
2741 | bfqd->wr_busy_queues++; | ||
2742 | } | ||
2743 | |||
2744 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
2745 | |||
2746 | /* bfqg stats flags */ | ||
2747 | enum bfqg_stats_flags { | ||
2748 | BFQG_stats_waiting = 0, | ||
2749 | BFQG_stats_idling, | ||
2750 | BFQG_stats_empty, | ||
2751 | }; | ||
2752 | |||
2753 | #define BFQG_FLAG_FNS(name) \ | ||
2754 | static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \ | ||
2755 | { \ | ||
2756 | stats->flags |= (1 << BFQG_stats_##name); \ | ||
2757 | } \ | ||
2758 | static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \ | ||
2759 | { \ | ||
2760 | stats->flags &= ~(1 << BFQG_stats_##name); \ | ||
2761 | } \ | ||
2762 | static int bfqg_stats_##name(struct bfqg_stats *stats) \ | ||
2763 | { \ | ||
2764 | return (stats->flags & (1 << BFQG_stats_##name)) != 0; \ | ||
2765 | } \ | ||
2766 | |||
2767 | BFQG_FLAG_FNS(waiting) | ||
2768 | BFQG_FLAG_FNS(idling) | ||
2769 | BFQG_FLAG_FNS(empty) | ||
2770 | #undef BFQG_FLAG_FNS | ||
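For readability, here is what the first invocation above, BFQG_FLAG_FNS(waiting), expands to (modulo whitespace); the other two invocations generate the same three helpers for the idling and empty bits:

static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
{
	stats->flags |= (1 << BFQG_stats_waiting);
}
static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
{
	stats->flags &= ~(1 << BFQG_stats_waiting);
}
static int bfqg_stats_waiting(struct bfqg_stats *stats)
{
	return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
}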
2771 | |||
2772 | /* This should be called with the queue_lock held. */ | ||
2773 | static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) | ||
2774 | { | ||
2775 | unsigned long long now; | ||
2776 | |||
2777 | if (!bfqg_stats_waiting(stats)) | ||
2778 | return; | ||
2779 | |||
2780 | now = sched_clock(); | ||
2781 | if (time_after64(now, stats->start_group_wait_time)) | ||
2782 | blkg_stat_add(&stats->group_wait_time, | ||
2783 | now - stats->start_group_wait_time); | ||
2784 | bfqg_stats_clear_waiting(stats); | ||
2785 | } | ||
2786 | |||
2787 | /* This should be called with the queue_lock held. */ | ||
2788 | static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, | ||
2789 | struct bfq_group *curr_bfqg) | ||
2790 | { | ||
2791 | struct bfqg_stats *stats = &bfqg->stats; | ||
2792 | |||
2793 | if (bfqg_stats_waiting(stats)) | ||
2794 | return; | ||
2795 | if (bfqg == curr_bfqg) | ||
2796 | return; | ||
2797 | stats->start_group_wait_time = sched_clock(); | ||
2798 | bfqg_stats_mark_waiting(stats); | ||
2799 | } | ||
2800 | |||
2801 | /* This should be called with the queue_lock held. */ | ||
2802 | static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) | ||
2803 | { | ||
2804 | unsigned long long now; | ||
2805 | |||
2806 | if (!bfqg_stats_empty(stats)) | ||
2807 | return; | ||
2808 | |||
2809 | now = sched_clock(); | ||
2810 | if (time_after64(now, stats->start_empty_time)) | ||
2811 | blkg_stat_add(&stats->empty_time, | ||
2812 | now - stats->start_empty_time); | ||
2813 | bfqg_stats_clear_empty(stats); | ||
2814 | } | ||
2815 | |||
2816 | static void bfqg_stats_update_dequeue(struct bfq_group *bfqg) | ||
2817 | { | ||
2818 | blkg_stat_add(&bfqg->stats.dequeue, 1); | ||
2819 | } | ||
2820 | |||
2821 | static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) | ||
2822 | { | ||
2823 | struct bfqg_stats *stats = &bfqg->stats; | ||
2824 | |||
2825 | if (blkg_rwstat_total(&stats->queued)) | ||
2826 | return; | ||
2827 | |||
2828 | /* | ||
2829 | * group is already marked empty. This can happen if bfqq got a new | ||
2830 | * request in the parent group and moved to this group while being added | ||
2831 | * to the service tree. Just ignore the event and move on. | ||
2832 | */ | ||
2833 | if (bfqg_stats_empty(stats)) | ||
2834 | return; | ||
2835 | |||
2836 | stats->start_empty_time = sched_clock(); | ||
2837 | bfqg_stats_mark_empty(stats); | ||
2838 | } | ||
2839 | |||
2840 | static void bfqg_stats_update_idle_time(struct bfq_group *bfqg) | ||
2841 | { | ||
2842 | struct bfqg_stats *stats = &bfqg->stats; | ||
2843 | |||
2844 | if (bfqg_stats_idling(stats)) { | ||
2845 | unsigned long long now = sched_clock(); | ||
2846 | |||
2847 | if (time_after64(now, stats->start_idle_time)) | ||
2848 | blkg_stat_add(&stats->idle_time, | ||
2849 | now - stats->start_idle_time); | ||
2850 | bfqg_stats_clear_idling(stats); | ||
2851 | } | ||
2852 | } | ||
2853 | |||
2854 | static void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) | ||
2855 | { | ||
2856 | struct bfqg_stats *stats = &bfqg->stats; | ||
2857 | |||
2858 | stats->start_idle_time = sched_clock(); | ||
2859 | bfqg_stats_mark_idling(stats); | ||
2860 | } | ||
2861 | |||
2862 | static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) | ||
2863 | { | ||
2864 | struct bfqg_stats *stats = &bfqg->stats; | ||
2865 | |||
2866 | blkg_stat_add(&stats->avg_queue_size_sum, | ||
2867 | blkg_rwstat_total(&stats->queued)); | ||
2868 | blkg_stat_add(&stats->avg_queue_size_samples, 1); | ||
2869 | bfqg_stats_update_group_wait_time(stats); | ||
2870 | } | ||
2871 | |||
2872 | /* | ||
2873 | * blk-cgroup policy-related handlers | ||
2874 | * The following functions help in converting between blk-cgroup | ||
2875 | * internal structures and BFQ-specific structures. | ||
2876 | */ | ||
2877 | |||
2878 | static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd) | ||
2879 | { | ||
2880 | return pd ? container_of(pd, struct bfq_group, pd) : NULL; | ||
2881 | } | ||
2882 | |||
2883 | static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg) | ||
2884 | { | ||
2885 | return pd_to_blkg(&bfqg->pd); | ||
2886 | } | ||
2887 | |||
2888 | static struct blkcg_policy blkcg_policy_bfq; | ||
2889 | |||
2890 | static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) | ||
2891 | { | ||
2892 | return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq)); | ||
2893 | } | ||
2894 | |||
2895 | /* | ||
2896 | * bfq_group handlers | ||
2897 | * The following functions help in navigating the bfq_group hierarchy | ||
2898 | * by making it possible to find the parent of a bfq_group or the bfq_group | ||
2899 | * associated with a bfq_queue. | ||
2900 | */ | ||
2901 | |||
2902 | static struct bfq_group *bfqg_parent(struct bfq_group *bfqg) | ||
2903 | { | ||
2904 | struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent; | ||
2905 | |||
2906 | return pblkg ? blkg_to_bfqg(pblkg) : NULL; | ||
2907 | } | ||
2908 | |||
2909 | static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) | ||
2910 | { | ||
2911 | struct bfq_entity *group_entity = bfqq->entity.parent; | ||
2912 | |||
2913 | return group_entity ? container_of(group_entity, struct bfq_group, | ||
2914 | entity) : | ||
2915 | bfqq->bfqd->root_group; | ||
2916 | } | ||
2917 | |||
2918 | /* | ||
2919 | * The following two functions handle get and put of a bfq_group by | ||
2920 | * wrapping the related blk-cgroup hooks. | ||
2921 | */ | ||
2922 | |||
2923 | static void bfqg_get(struct bfq_group *bfqg) | ||
2924 | { | ||
2925 | blkg_get(bfqg_to_blkg(bfqg)); | ||
2926 | } | ||
2927 | |||
2928 | static void bfqg_put(struct bfq_group *bfqg) | ||
2929 | { | ||
2930 | blkg_put(bfqg_to_blkg(bfqg)); | ||
2931 | } | ||
2932 | |||
2933 | static void bfqg_stats_update_io_add(struct bfq_group *bfqg, | ||
2934 | struct bfq_queue *bfqq, | ||
2935 | unsigned int op) | ||
2936 | { | ||
2937 | blkg_rwstat_add(&bfqg->stats.queued, op, 1); | ||
2938 | bfqg_stats_end_empty_time(&bfqg->stats); | ||
2939 | if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) | ||
2940 | bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); | ||
2941 | } | ||
2942 | |||
2943 | static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) | ||
2944 | { | ||
2945 | blkg_rwstat_add(&bfqg->stats.queued, op, -1); | ||
2946 | } | ||
2947 | |||
2948 | static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) | ||
2949 | { | ||
2950 | blkg_rwstat_add(&bfqg->stats.merged, op, 1); | ||
2951 | } | ||
2952 | |||
2953 | static void bfqg_stats_update_completion(struct bfq_group *bfqg, | ||
2954 | uint64_t start_time, uint64_t io_start_time, | ||
2955 | unsigned int op) | ||
2956 | { | ||
2957 | struct bfqg_stats *stats = &bfqg->stats; | ||
2958 | unsigned long long now = sched_clock(); | ||
2959 | |||
2960 | if (time_after64(now, io_start_time)) | ||
2961 | blkg_rwstat_add(&stats->service_time, op, | ||
2962 | now - io_start_time); | ||
2963 | if (time_after64(io_start_time, start_time)) | ||
2964 | blkg_rwstat_add(&stats->wait_time, op, | ||
2965 | io_start_time - start_time); | ||
2966 | } | ||
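The completion hook splits a request's lifetime at io_start_time: everything before it is scheduler wait time, everything after it is device service time, and each delta is guarded by time_after64() so out-of-order timestamps are simply skipped. A small userspace model of that bookkeeping, with plain comparisons standing in for time_after64():

#include <stdint.h>
#include <stdio.h>

struct latency_stats {
	uint64_t wait_time;	/* queued in the scheduler */
	uint64_t service_time;	/* on the device           */
};

static void update_completion(struct latency_stats *s, uint64_t start_time,
			      uint64_t io_start_time, uint64_t now)
{
	/* Only account a delta when the timestamps are ordered sanely,
	 * mirroring the time_after64() guards in the original. */
	if (now > io_start_time)
		s->service_time += now - io_start_time;
	if (io_start_time > start_time)
		s->wait_time += io_start_time - start_time;
}

int main(void)
{
	struct latency_stats s = { 0, 0 };

	update_completion(&s, 100, 140, 200);
	printf("wait=%llu service=%llu\n",
	       (unsigned long long)s.wait_time,	/* 40 */
	       (unsigned long long)s.service_time);	/* 60 */
	return 0;
}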
2967 | |||
2968 | /* reset all @stats counters to 0 */ | ||
2969 | static void bfqg_stats_reset(struct bfqg_stats *stats) | ||
2970 | { | ||
2971 | /* queued stats shouldn't be cleared */ | ||
2972 | blkg_rwstat_reset(&stats->merged); | ||
2973 | blkg_rwstat_reset(&stats->service_time); | ||
2974 | blkg_rwstat_reset(&stats->wait_time); | ||
2975 | blkg_stat_reset(&stats->time); | ||
2976 | blkg_stat_reset(&stats->avg_queue_size_sum); | ||
2977 | blkg_stat_reset(&stats->avg_queue_size_samples); | ||
2978 | blkg_stat_reset(&stats->dequeue); | ||
2979 | blkg_stat_reset(&stats->group_wait_time); | ||
2980 | blkg_stat_reset(&stats->idle_time); | ||
2981 | blkg_stat_reset(&stats->empty_time); | ||
2982 | } | ||
2983 | |||
2984 | /* @to += @from */ | ||
2985 | static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) | ||
2986 | { | ||
2987 | if (!to || !from) | ||
2988 | return; | ||
2989 | |||
2990 | /* queued stats are deliberately not carried over */ | ||
2991 | blkg_rwstat_add_aux(&to->merged, &from->merged); | ||
2992 | blkg_rwstat_add_aux(&to->service_time, &from->service_time); | ||
2993 | blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); | ||
2994 | blkg_stat_add_aux(&to->time, &from->time); | ||
2995 | blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); | ||
2996 | blkg_stat_add_aux(&to->avg_queue_size_samples, | ||
2997 | &from->avg_queue_size_samples); | ||
2998 | blkg_stat_add_aux(&to->dequeue, &from->dequeue); | ||
2999 | blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); | ||
3000 | blkg_stat_add_aux(&to->idle_time, &from->idle_time); | ||
3001 | blkg_stat_add_aux(&to->empty_time, &from->empty_time); | ||
3002 | } | ||
3003 | |||
3004 | /* | ||
3005 | * Transfer @bfqg's stats to its parent's aux counts so that the ancestors' | ||
3006 | * recursive stats can still account for the amount used by this bfqg after | ||
3007 | * it's gone. | ||
3008 | */ | ||
3009 | static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) | ||
3010 | { | ||
3011 | struct bfq_group *parent; | ||
3012 | |||
3013 | if (!bfqg) /* root_group */ | ||
3014 | return; | ||
3015 | |||
3016 | parent = bfqg_parent(bfqg); | ||
3017 | |||
3018 | lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock); | ||
3019 | |||
3020 | if (unlikely(!parent)) | ||
3021 | return; | ||
3022 | |||
3023 | bfqg_stats_add_aux(&parent->stats, &bfqg->stats); | ||
3024 | bfqg_stats_reset(&bfqg->stats); | ||
3025 | } | ||
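This transfer keeps hierarchy-wide sums stable: a dying group folds its counters into the parent's auxiliary counters before being reset, so recursive reads keep accounting for work the group did while alive. A rough, integer-only model of the idea (names hypothetical):

#include <stdio.h>

struct grp_stats {
	unsigned long long own;	/* charged directly to this group   */
	unsigned long long aux;	/* inherited from departed children */
};

/* Recursive view = own counter plus everything dead children left us. */
static unsigned long long stats_recursive(const struct grp_stats *s)
{
	return s->own + s->aux;
}

/* Model of bfqg_stats_xfer_dead(): fold the child into the parent. */
static void stats_xfer_dead(struct grp_stats *parent, struct grp_stats *child)
{
	parent->aux += child->own + child->aux;
	child->own = child->aux = 0;
}

int main(void)
{
	struct grp_stats parent = { 10, 0 }, child = { 7, 2 };

	stats_xfer_dead(&parent, &child);
	printf("parent recursive: %llu\n", stats_recursive(&parent)); /* 19 */
	return 0;
}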
3026 | |||
3027 | static void bfq_init_entity(struct bfq_entity *entity, | ||
3028 | struct bfq_group *bfqg) | ||
3029 | { | ||
3030 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
3031 | |||
3032 | entity->weight = entity->new_weight; | ||
3033 | entity->orig_weight = entity->new_weight; | ||
3034 | if (bfqq) { | ||
3035 | bfqq->ioprio = bfqq->new_ioprio; | ||
3036 | bfqq->ioprio_class = bfqq->new_ioprio_class; | ||
3037 | bfqg_get(bfqg); | ||
3038 | } | ||
3039 | entity->parent = bfqg->my_entity; /* NULL for root group */ | ||
3040 | entity->sched_data = &bfqg->sched_data; | ||
3041 | } | ||
3042 | |||
3043 | static void bfqg_stats_exit(struct bfqg_stats *stats) | ||
3044 | { | ||
3045 | blkg_rwstat_exit(&stats->merged); | ||
3046 | blkg_rwstat_exit(&stats->service_time); | ||
3047 | blkg_rwstat_exit(&stats->wait_time); | ||
3048 | blkg_rwstat_exit(&stats->queued); | ||
3049 | blkg_stat_exit(&stats->time); | ||
3050 | blkg_stat_exit(&stats->avg_queue_size_sum); | ||
3051 | blkg_stat_exit(&stats->avg_queue_size_samples); | ||
3052 | blkg_stat_exit(&stats->dequeue); | ||
3053 | blkg_stat_exit(&stats->group_wait_time); | ||
3054 | blkg_stat_exit(&stats->idle_time); | ||
3055 | blkg_stat_exit(&stats->empty_time); | ||
3056 | } | ||
3057 | |||
3058 | static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) | ||
3059 | { | ||
3060 | if (blkg_rwstat_init(&stats->merged, gfp) || | ||
3061 | blkg_rwstat_init(&stats->service_time, gfp) || | ||
3062 | blkg_rwstat_init(&stats->wait_time, gfp) || | ||
3063 | blkg_rwstat_init(&stats->queued, gfp) || | ||
3064 | blkg_stat_init(&stats->time, gfp) || | ||
3065 | blkg_stat_init(&stats->avg_queue_size_sum, gfp) || | ||
3066 | blkg_stat_init(&stats->avg_queue_size_samples, gfp) || | ||
3067 | blkg_stat_init(&stats->dequeue, gfp) || | ||
3068 | blkg_stat_init(&stats->group_wait_time, gfp) || | ||
3069 | blkg_stat_init(&stats->idle_time, gfp) || | ||
3070 | blkg_stat_init(&stats->empty_time, gfp)) { | ||
3071 | bfqg_stats_exit(stats); | ||
3072 | return -ENOMEM; | ||
3073 | } | ||
3074 | |||
3075 | return 0; | ||
3076 | } | ||
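bfqg_stats_init() uses the usual all-or-nothing idiom: short-circuit through the sub-initializations and, on the first failure, run the complete exit path, which must therefore tolerate members that were never set up (the containing bfqg is zero-allocated). A userspace sketch where free(NULL) plays that tolerant role:

#include <stdlib.h>

struct stats {
	int *merged;
	int *service_time;
	int *wait_time;
};

/* Exit path is safe on partially initialized stats: free(NULL) is a
 * no-op, just as the blkg_*_exit() helpers tolerate counters that
 * were never initialized in a zeroed structure. */
static void stats_exit(struct stats *s)
{
	free(s->merged);
	free(s->service_time);
	free(s->wait_time);
}

static int stats_init(struct stats *s)
{
	s->merged = calloc(1, sizeof(int));
	s->service_time = calloc(1, sizeof(int));
	s->wait_time = calloc(1, sizeof(int));
	if (!s->merged || !s->service_time || !s->wait_time) {
		stats_exit(s);	/* unwind whatever succeeded */
		return -1;	/* stands in for -ENOMEM */
	}
	return 0;
}

int main(void)
{
	struct stats s = { 0 };	/* zeroed, mirroring kzalloc_node() */

	if (stats_init(&s))
		return 1;
	stats_exit(&s);
	return 0;
}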
3077 | |||
3078 | static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) | ||
3079 | { | ||
3080 | return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL; | ||
3081 | } | ||
3082 | |||
3083 | static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) | ||
3084 | { | ||
3085 | return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq)); | ||
3086 | } | ||
3087 | |||
3088 | static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) | ||
3089 | { | ||
3090 | struct bfq_group_data *bgd; | ||
3091 | |||
3092 | bgd = kzalloc(sizeof(*bgd), gfp); | ||
3093 | if (!bgd) | ||
3094 | return NULL; | ||
3095 | return &bgd->pd; | ||
3096 | } | ||
3097 | |||
3098 | static void bfq_cpd_init(struct blkcg_policy_data *cpd) | ||
3099 | { | ||
3100 | struct bfq_group_data *d = cpd_to_bfqgd(cpd); | ||
3101 | |||
3102 | d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ? | ||
3103 | CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL; | ||
3104 | } | ||
3105 | |||
3106 | static void bfq_cpd_free(struct blkcg_policy_data *cpd) | ||
3107 | { | ||
3108 | kfree(cpd_to_bfqgd(cpd)); | ||
3109 | } | ||
3110 | |||
3111 | static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) | ||
3112 | { | ||
3113 | struct bfq_group *bfqg; | ||
3114 | |||
3115 | bfqg = kzalloc_node(sizeof(*bfqg), gfp, node); | ||
3116 | if (!bfqg) | ||
3117 | return NULL; | ||
3118 | |||
3119 | if (bfqg_stats_init(&bfqg->stats, gfp)) { | ||
3120 | kfree(bfqg); | ||
3121 | return NULL; | ||
3122 | } | ||
3123 | |||
3124 | return &bfqg->pd; | ||
3125 | } | ||
3126 | |||
3127 | static void bfq_pd_init(struct blkg_policy_data *pd) | ||
3128 | { | ||
3129 | struct blkcg_gq *blkg = pd_to_blkg(pd); | ||
3130 | struct bfq_group *bfqg = blkg_to_bfqg(blkg); | ||
3131 | struct bfq_data *bfqd = blkg->q->elevator->elevator_data; | ||
3132 | struct bfq_entity *entity = &bfqg->entity; | ||
3133 | struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg); | ||
3134 | |||
3135 | entity->orig_weight = entity->weight = entity->new_weight = d->weight; | ||
3136 | entity->my_sched_data = &bfqg->sched_data; | ||
3137 | bfqg->my_entity = entity; /* | ||
3138 | * the root_group's will be set to NULL | ||
3139 | * in bfq_init_queue() | ||
3140 | */ | ||
3141 | bfqg->bfqd = bfqd; | ||
3142 | bfqg->active_entities = 0; | ||
3143 | bfqg->rq_pos_tree = RB_ROOT; | ||
3144 | } | ||
3145 | |||
3146 | static void bfq_pd_free(struct blkg_policy_data *pd) | ||
3147 | { | ||
3148 | struct bfq_group *bfqg = pd_to_bfqg(pd); | ||
3149 | |||
3150 | bfqg_stats_exit(&bfqg->stats); | ||
3151 | kfree(bfqg); | ||
3152 | } | ||
3153 | |||
3154 | static void bfq_pd_reset_stats(struct blkg_policy_data *pd) | ||
3155 | { | ||
3156 | struct bfq_group *bfqg = pd_to_bfqg(pd); | ||
3157 | |||
3158 | bfqg_stats_reset(&bfqg->stats); | ||
3159 | } | ||
3160 | |||
3161 | static void bfq_group_set_parent(struct bfq_group *bfqg, | ||
3162 | struct bfq_group *parent) | ||
3163 | { | ||
3164 | struct bfq_entity *entity; | ||
3165 | |||
3166 | entity = &bfqg->entity; | ||
3167 | entity->parent = parent->my_entity; | ||
3168 | entity->sched_data = &parent->sched_data; | ||
3169 | } | ||
3170 | |||
3171 | static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd, | ||
3172 | struct blkcg *blkcg) | ||
3173 | { | ||
3174 | struct blkcg_gq *blkg; | ||
3175 | |||
3176 | blkg = blkg_lookup(blkcg, bfqd->queue); | ||
3177 | if (likely(blkg)) | ||
3178 | return blkg_to_bfqg(blkg); | ||
3179 | return NULL; | ||
3180 | } | ||
3181 | |||
3182 | static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, | ||
3183 | struct blkcg *blkcg) | ||
3184 | { | ||
3185 | struct bfq_group *bfqg, *parent; | ||
3186 | struct bfq_entity *entity; | ||
3187 | |||
3188 | bfqg = bfq_lookup_bfqg(bfqd, blkcg); | ||
3189 | |||
3190 | if (unlikely(!bfqg)) | ||
3191 | return NULL; | ||
3192 | |||
3193 | /* | ||
3194 | * Update chain of bfq_groups as we might be handling a leaf group | ||
3195 | * which, along with some of its relatives, has not yet been hooked | ||
3196 | * to the private hierarchy of BFQ. | ||
3197 | */ | ||
3198 | entity = &bfqg->entity; | ||
3199 | for_each_entity(entity) { | ||
3200 | bfqg = container_of(entity, struct bfq_group, entity); | ||
3201 | if (bfqg != bfqd->root_group) { | ||
3202 | parent = bfqg_parent(bfqg); | ||
3203 | if (!parent) | ||
3204 | parent = bfqd->root_group; | ||
3205 | bfq_group_set_parent(bfqg, parent); | ||
3206 | } | ||
3207 | } | ||
3208 | |||
3209 | return bfqg; | ||
3210 | } | ||
3211 | |||
3212 | static void bfq_pos_tree_add_move(struct bfq_data *bfqd, | ||
3213 | struct bfq_queue *bfqq); | ||
3214 | static void bfq_bfqq_expire(struct bfq_data *bfqd, | ||
3215 | struct bfq_queue *bfqq, | ||
3216 | bool compensate, | ||
3217 | enum bfqq_expiration reason); | ||
3218 | |||
3219 | /** | ||
3220 | * bfq_bfqq_move - migrate @bfqq to @bfqg. | ||
3221 | * @bfqd: queue descriptor. | ||
3222 | * @bfqq: the queue to move. | ||
3223 | * @bfqg: the group to move to. | ||
3224 | * | ||
3225 | * Move @bfqq to @bfqg, deactivating it from its old group and reactivating | ||
3226 | * it on the new one. Avoid putting the entity on the old group idle tree. | ||
3227 | * | ||
3228 | * Must be called under the queue lock; the cgroup owning @bfqg must | ||
3229 | * not disappear (for now this just means that we are called under | ||
3230 | * rcu_read_lock()). | ||
3231 | */ | ||
3232 | static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, | ||
3233 | struct bfq_group *bfqg) | ||
3234 | { | ||
3235 | struct bfq_entity *entity = &bfqq->entity; | ||
3236 | |||
3237 | /* If bfqq is empty, then bfq_bfqq_expire also invokes | ||
3238 | * bfq_del_bfqq_busy, thereby removing bfqq and its entity | ||
3239 | * from data structures related to current group. Otherwise we | ||
3240 | * need to remove bfqq explicitly with bfq_deactivate_bfqq, as | ||
3241 | * we do below. | ||
3242 | */ | ||
3243 | if (bfqq == bfqd->in_service_queue) | ||
3244 | bfq_bfqq_expire(bfqd, bfqd->in_service_queue, | ||
3245 | false, BFQQE_PREEMPTED); | ||
3246 | |||
3247 | if (bfq_bfqq_busy(bfqq)) | ||
3248 | bfq_deactivate_bfqq(bfqd, bfqq, false, false); | ||
3249 | else if (entity->on_st) | ||
3250 | bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); | ||
3251 | bfqg_put(bfqq_group(bfqq)); | ||
3252 | |||
3253 | /* | ||
3254 | * Here we use a reference to bfqg. We don't need a refcounter | ||
3255 | * as the cgroup reference will not be dropped, so that its | ||
3256 | * destroy() callback will not be invoked. | ||
3257 | */ | ||
3258 | entity->parent = bfqg->my_entity; | ||
3259 | entity->sched_data = &bfqg->sched_data; | ||
3260 | bfqg_get(bfqg); | ||
3261 | |||
3262 | if (bfq_bfqq_busy(bfqq)) { | ||
3263 | bfq_pos_tree_add_move(bfqd, bfqq); | ||
3264 | bfq_activate_bfqq(bfqd, bfqq); | ||
3265 | } | ||
3266 | |||
3267 | if (!bfqd->in_service_queue && !bfqd->rq_in_driver) | ||
3268 | bfq_schedule_dispatch(bfqd); | ||
3269 | } | ||
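The core of the move is a reference handoff: the queue is detached and the old group's reference dropped, then parent and sched_data are re-pointed and a reference on the new group is taken before reactivation. Stripped of the scheduler details, the handoff looks roughly like this (types hypothetical):

#include <assert.h>
#include <stdio.h>

struct group {
	int refcnt;
};

struct queue {
	struct group *parent;
};

static void group_get(struct group *g) { g->refcnt++; }
static void group_put(struct group *g) { g->refcnt--; }

/* Echoes the ordering in bfq_bfqq_move(): drop the old group's
 * reference only once the queue is detached, and take the new one
 * before the queue becomes visible under the new parent. */
static void queue_move(struct queue *q, struct group *new_parent)
{
	group_put(q->parent);
	q->parent = new_parent;
	group_get(new_parent);
}

int main(void)
{
	struct group a = { .refcnt = 1 }, b = { .refcnt = 0 };
	struct queue q = { .parent = &a };

	queue_move(&q, &b);
	assert(a.refcnt == 0 && b.refcnt == 1);
	printf("moved\n");
	return 0;
}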
3270 | |||
3271 | /** | ||
3272 | * __bfq_bic_change_cgroup - move @bic to @cgroup. | ||
3273 | * @bfqd: the queue descriptor. | ||
3274 | * @bic: the bic to move. | ||
3275 | * @blkcg: the blk-cgroup to move to. | ||
3276 | * | ||
3277 | * Move bic to blkcg, assuming that bfqd->queue is locked; the caller | ||
3278 | * has to make sure that the reference to cgroup is valid across the call. | ||
3279 | * | ||
3280 | * NOTE: an alternative approach might have been to store the current | ||
3281 | * cgroup in bfqq and get a reference to it, reducing the lookup | ||
3282 | * time here, at the price of slightly more complex code. | ||
3283 | */ | ||
3284 | static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, | ||
3285 | struct bfq_io_cq *bic, | ||
3286 | struct blkcg *blkcg) | ||
3287 | { | ||
3288 | struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); | ||
3289 | struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); | ||
3290 | struct bfq_group *bfqg; | ||
3291 | struct bfq_entity *entity; | ||
3292 | |||
3293 | bfqg = bfq_find_set_group(bfqd, blkcg); | ||
3294 | |||
3295 | if (unlikely(!bfqg)) | ||
3296 | bfqg = bfqd->root_group; | ||
3297 | |||
3298 | if (async_bfqq) { | ||
3299 | entity = &async_bfqq->entity; | ||
3300 | |||
3301 | if (entity->sched_data != &bfqg->sched_data) { | ||
3302 | bic_set_bfqq(bic, NULL, 0); | ||
3303 | bfq_log_bfqq(bfqd, async_bfqq, | ||
3304 | "bic_change_group: %p %d", | ||
3305 | async_bfqq, async_bfqq->ref); | ||
3306 | bfq_put_queue(async_bfqq); | ||
3307 | } | ||
3308 | } | ||
3309 | |||
3310 | if (sync_bfqq) { | ||
3311 | entity = &sync_bfqq->entity; | ||
3312 | if (entity->sched_data != &bfqg->sched_data) | ||
3313 | bfq_bfqq_move(bfqd, sync_bfqq, bfqg); | ||
3314 | } | ||
3315 | |||
3316 | return bfqg; | ||
3317 | } | ||
3318 | |||
3319 | static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) | ||
3320 | { | ||
3321 | struct bfq_data *bfqd = bic_to_bfqd(bic); | ||
3322 | struct bfq_group *bfqg = NULL; | ||
3323 | uint64_t serial_nr; | ||
3324 | |||
3325 | rcu_read_lock(); | ||
3326 | serial_nr = bio_blkcg(bio)->css.serial_nr; | ||
3327 | |||
3328 | /* | ||
3329 | * Check whether blkcg has changed. The condition may trigger | ||
3330 | * spuriously on a newly created bic, but there's no harm. | ||
3331 | */ | ||
3332 | if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) | ||
3333 | goto out; | ||
3334 | |||
3335 | bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); | ||
3336 | bic->blkcg_serial_nr = serial_nr; | ||
3337 | out: | ||
3338 | rcu_read_unlock(); | ||
3339 | } | ||
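The serial-number check is what keeps this hook cheap: each bfq_io_cq caches the css serial number of the blkcg it last saw, and the expensive regrouping runs only when the bio's current serial number differs. A minimal model of that generation-counter technique:

#include <stdint.h>
#include <stdio.h>

static uint64_t current_serial = 1;	/* bumps whenever the cgroup changes */
static uint64_t cached_serial;		/* per-io-context cache (the bic field) */

static void expensive_regroup(void)
{
	printf("regroup performed\n");
}

/* Called on every bio; cheap in the common, unchanged case. */
static void update_cgroup(void)
{
	if (cached_serial == current_serial)
		return;			/* fast path: nothing changed */
	expensive_regroup();
	cached_serial = current_serial;
}

int main(void)
{
	update_cgroup();		/* serial differs: regroups     */
	update_cgroup();		/* cached: fast path, silent    */
	current_serial++;		/* task migrated to a new cgroup */
	update_cgroup();		/* regroups again               */
	return 0;
}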
3340 | |||
3341 | /** | ||
3342 | * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. | ||
3343 | * @st: the service tree being flushed. | ||
3344 | */ | ||
3345 | static void bfq_flush_idle_tree(struct bfq_service_tree *st) | ||
3346 | { | ||
3347 | struct bfq_entity *entity = st->first_idle; | ||
3348 | |||
3349 | for (; entity ; entity = st->first_idle) | ||
3350 | __bfq_deactivate_entity(entity, false); | ||
3351 | } | ||
3352 | |||
3353 | /** | ||
3354 | * bfq_reparent_leaf_entity - move leaf entity to the root_group. | ||
3355 | * @bfqd: the device data structure with the root group. | ||
3356 | * @entity: the entity to move. | ||
3357 | */ | ||
3358 | static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, | ||
3359 | struct bfq_entity *entity) | ||
3360 | { | ||
3361 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
3362 | |||
3363 | bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); | ||
3364 | } | ||
3365 | |||
3366 | /** | ||
3367 | * bfq_reparent_active_entities - move to the root group all active | ||
3368 | * entities. | ||
3369 | * @bfqd: the device data structure with the root group. | ||
3370 | * @bfqg: the group to move from. | ||
3371 | * @st: the service tree with the entities. | ||
3372 | * | ||
3373 | * Needs queue_lock to be taken and reference to be valid over the call. | ||
3374 | */ | ||
3375 | static void bfq_reparent_active_entities(struct bfq_data *bfqd, | ||
3376 | struct bfq_group *bfqg, | ||
3377 | struct bfq_service_tree *st) | ||
3378 | { | ||
3379 | struct rb_root *active = &st->active; | ||
3380 | struct bfq_entity *entity = NULL; | ||
3381 | |||
3382 | if (!RB_EMPTY_ROOT(&st->active)) | ||
3383 | entity = bfq_entity_of(rb_first(active)); | ||
3384 | |||
3385 | for (; entity ; entity = bfq_entity_of(rb_first(active))) | ||
3386 | bfq_reparent_leaf_entity(bfqd, entity); | ||
3387 | |||
3388 | if (bfqg->sched_data.in_service_entity) | ||
3389 | bfq_reparent_leaf_entity(bfqd, | ||
3390 | bfqg->sched_data.in_service_entity); | ||
3391 | } | ||
3392 | |||
3393 | /** | ||
3394 | * bfq_pd_offline - deactivate the entity associated with @pd, | ||
3395 | * and reparent its child entities. | ||
3396 | * @pd: descriptor of the policy going offline. | ||
3397 | * | ||
3398 | * blkio already grabs the queue_lock for us, so no need to use | ||
3399 | * RCU-based magic | ||
3400 | */ | ||
3401 | static void bfq_pd_offline(struct blkg_policy_data *pd) | ||
3402 | { | ||
3403 | struct bfq_service_tree *st; | ||
3404 | struct bfq_group *bfqg = pd_to_bfqg(pd); | ||
3405 | struct bfq_data *bfqd = bfqg->bfqd; | ||
3406 | struct bfq_entity *entity = bfqg->my_entity; | ||
3407 | unsigned long flags; | ||
3408 | int i; | ||
3409 | |||
3410 | if (!entity) /* root group */ | ||
3411 | return; | ||
3412 | |||
3413 | spin_lock_irqsave(&bfqd->lock, flags); | ||
3414 | /* | ||
3415 | * Empty all service_trees belonging to this group before | ||
3416 | * deactivating the group itself. | ||
3417 | */ | ||
3418 | for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { | ||
3419 | st = bfqg->sched_data.service_tree + i; | ||
3420 | |||
3421 | /* | ||
3422 | * The idle tree may still contain bfq_queues belonging | ||
3423 | * to exited tasks, because they never migrated to a different | ||
3424 | * cgroup from the one being destroyed now. No one else | ||
3425 | * can access them so it's safe to act without any lock. | ||
3426 | */ | ||
3427 | bfq_flush_idle_tree(st); | ||
3428 | |||
3429 | /* | ||
3430 | * It may happen that some queues are still active | ||
3431 | * (busy) upon group destruction (if the corresponding | ||
3432 | * processes have been forced to terminate). We move | ||
3433 | * all the leaf entities corresponding to these queues | ||
3434 | * to the root_group. | ||
3435 | * Also, it may happen that the group has an entity | ||
3436 | * in service, which is disconnected from the active | ||
3437 | * tree: it must be moved, too. | ||
3438 | * There is no need to put the sync queues, as the | ||
3439 | * scheduler has taken no reference. | ||
3440 | */ | ||
3441 | bfq_reparent_active_entities(bfqd, bfqg, st); | ||
3442 | } | ||
3443 | |||
3444 | __bfq_deactivate_entity(entity, false); | ||
3445 | bfq_put_async_queues(bfqd, bfqg); | ||
3446 | |||
3447 | spin_unlock_irqrestore(&bfqd->lock, flags); | ||
3448 | /* | ||
3449 | * @blkg is going offline and will be ignored by | ||
3450 | * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so | ||
3451 | * that they don't get lost. If IOs complete after this point, the | ||
3452 | * stats for them will be lost. Oh well... | ||
3453 | */ | ||
3454 | bfqg_stats_xfer_dead(bfqg); | ||
3455 | } | ||
3456 | |||
3457 | static void bfq_end_wr_async(struct bfq_data *bfqd) | ||
3458 | { | ||
3459 | struct blkcg_gq *blkg; | ||
3460 | |||
3461 | list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) { | ||
3462 | struct bfq_group *bfqg = blkg_to_bfqg(blkg); | ||
3463 | |||
3464 | bfq_end_wr_async_queues(bfqd, bfqg); | ||
3465 | } | ||
3466 | bfq_end_wr_async_queues(bfqd, bfqd->root_group); | ||
3467 | } | ||
3468 | |||
3469 | static int bfq_io_show_weight(struct seq_file *sf, void *v) | ||
3470 | { | ||
3471 | struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); | ||
3472 | struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); | ||
3473 | unsigned int val = 0; | ||
3474 | |||
3475 | if (bfqgd) | ||
3476 | val = bfqgd->weight; | ||
3477 | |||
3478 | seq_printf(sf, "%u\n", val); | ||
3479 | |||
3480 | return 0; | ||
3481 | } | ||
3482 | |||
3483 | static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css, | ||
3484 | struct cftype *cftype, | ||
3485 | u64 val) | ||
3486 | { | ||
3487 | struct blkcg *blkcg = css_to_blkcg(css); | ||
3488 | struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); | ||
3489 | struct blkcg_gq *blkg; | ||
3490 | int ret = -ERANGE; | ||
3491 | |||
3492 | if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT) | ||
3493 | return ret; | ||
3494 | |||
3495 | ret = 0; | ||
3496 | spin_lock_irq(&blkcg->lock); | ||
3497 | bfqgd->weight = (unsigned short)val; | ||
3498 | hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { | ||
3499 | struct bfq_group *bfqg = blkg_to_bfqg(blkg); | ||
3500 | |||
3501 | if (!bfqg) | ||
3502 | continue; | ||
3503 | /* | ||
3504 | * Setting the prio_changed flag of the entity | ||
3505 | * to 1 with new_weight == weight would re-set | ||
3506 | * the value of the weight to its ioprio mapping. | ||
3507 | * Set the flag only if necessary. | ||
3508 | */ | ||
3509 | if ((unsigned short)val != bfqg->entity.new_weight) { | ||
3510 | bfqg->entity.new_weight = (unsigned short)val; | ||
3511 | /* | ||
3512 | * Make sure that the above new value has been | ||
3513 | * stored in bfqg->entity.new_weight before | ||
3514 | * setting the prio_changed flag. In fact, | ||
3515 | * this flag may be read asynchronously (in | ||
3516 | * critical sections protected by a different | ||
3517 | * lock than that held here), and finding this | ||
3518 | * flag set may cause the execution of the code | ||
3519 | * for updating parameters whose value may | ||
3520 | * depend also on bfqg->entity.new_weight (in | ||
3521 | * __bfq_entity_update_weight_prio). | ||
3522 | * This barrier makes sure that the new value | ||
3523 | * of bfqg->entity.new_weight is correctly | ||
3524 | * seen in that code. | ||
3525 | */ | ||
3526 | smp_wmb(); | ||
3527 | bfqg->entity.prio_changed = 1; | ||
3528 | } | ||
3529 | } | ||
3530 | spin_unlock_irq(&blkcg->lock); | ||
3531 | |||
3532 | return ret; | ||
3533 | } | ||
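The smp_wmb() above pairs with an ordering constraint on the reader's side: a reader that finds prio_changed set must also observe the new value of new_weight. The sketch below expresses the same publish/observe pairing with C11 release/acquire atomics; this only illustrates the ordering, as the kernel code relies on smp_wmb() and the matching read-side barrier instead:

#include <stdatomic.h>
#include <stdio.h>

static int new_weight;			/* plain payload    */
static atomic_int prio_changed;		/* publication flag */

/* Writer: payload first, then flag with release ordering, so a reader
 * that sees the flag is guaranteed to see the weight (the role played
 * by smp_wmb() in bfq_io_set_weight_legacy()). */
static void set_weight(int w)
{
	new_weight = w;
	atomic_store_explicit(&prio_changed, 1, memory_order_release);
}

/* Reader: acquire on the flag pairs with the writer's release. */
static void maybe_update(void)
{
	if (atomic_load_explicit(&prio_changed, memory_order_acquire))
		printf("applying new weight %d\n", new_weight);
}

int main(void)
{
	set_weight(500);
	maybe_update();
	return 0;
}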
3534 | |||
3535 | static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, | ||
3536 | char *buf, size_t nbytes, | ||
3537 | loff_t off) | ||
3538 | { | ||
3539 | u64 weight; | ||
3540 | /* First unsigned 64-bit value found in the buffer is used */ | ||
3541 | int ret = kstrtoull(strim(buf), 0, &weight); | ||
3542 | |||
3543 | if (ret) | ||
3544 | return ret; | ||
3545 | |||
3546 | return bfq_io_set_weight_legacy(of_css(of), NULL, weight); | ||
3547 | } | ||
3548 | |||
3549 | static int bfqg_print_stat(struct seq_file *sf, void *v) | ||
3550 | { | ||
3551 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, | ||
3552 | &blkcg_policy_bfq, seq_cft(sf)->private, false); | ||
3553 | return 0; | ||
3554 | } | ||
3555 | |||
3556 | static int bfqg_print_rwstat(struct seq_file *sf, void *v) | ||
3557 | { | ||
3558 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, | ||
3559 | &blkcg_policy_bfq, seq_cft(sf)->private, true); | ||
3560 | return 0; | ||
3561 | } | ||
3562 | |||
3563 | static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, | ||
3564 | struct blkg_policy_data *pd, int off) | ||
3565 | { | ||
3566 | u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd), | ||
3567 | &blkcg_policy_bfq, off); | ||
3568 | return __blkg_prfill_u64(sf, pd, sum); | ||
3569 | } | ||
3570 | |||
3571 | static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf, | ||
3572 | struct blkg_policy_data *pd, int off) | ||
3573 | { | ||
3574 | struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd), | ||
3575 | &blkcg_policy_bfq, | ||
3576 | off); | ||
3577 | return __blkg_prfill_rwstat(sf, pd, &sum); | ||
3578 | } | ||
3579 | |||
3580 | static int bfqg_print_stat_recursive(struct seq_file *sf, void *v) | ||
3581 | { | ||
3582 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), | ||
3583 | bfqg_prfill_stat_recursive, &blkcg_policy_bfq, | ||
3584 | seq_cft(sf)->private, false); | ||
3585 | return 0; | ||
3586 | } | ||
3587 | |||
3588 | static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) | ||
3589 | { | ||
3590 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), | ||
3591 | bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq, | ||
3592 | seq_cft(sf)->private, true); | ||
3593 | return 0; | ||
3594 | } | ||
3595 | |||
3596 | static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd, | ||
3597 | int off) | ||
3598 | { | ||
3599 | u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes); | ||
3600 | |||
3601 | return __blkg_prfill_u64(sf, pd, sum >> 9); | ||
3602 | } | ||
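The >> 9 above is the byte-to-sector conversion: the cgroup byte counters divided by the 512-byte sector size. For instance:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bytes = 1 << 20;	/* 1 MiB transferred */

	/* Same conversion as the prfill helpers above: 2^9 = 512. */
	printf("%llu sectors\n", (unsigned long long)(bytes >> 9)); /* 2048 */
	return 0;
}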
3603 | |||
3604 | static int bfqg_print_stat_sectors(struct seq_file *sf, void *v) | ||
3605 | { | ||
3606 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), | ||
3607 | bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false); | ||
3608 | return 0; | ||
3609 | } | ||
3610 | |||
3611 | static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf, | ||
3612 | struct blkg_policy_data *pd, int off) | ||
3613 | { | ||
3614 | struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL, | ||
3615 | offsetof(struct blkcg_gq, stat_bytes)); | ||
3616 | u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + | ||
3617 | atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); | ||
3618 | |||
3619 | return __blkg_prfill_u64(sf, pd, sum >> 9); | ||
3620 | } | ||
3621 | |||
3622 | static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v) | ||
3623 | { | ||
3624 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), | ||
3625 | bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0, | ||
3626 | false); | ||
3627 | return 0; | ||
3628 | } | ||
3629 | |||
3630 | static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, | ||
3631 | struct blkg_policy_data *pd, int off) | ||
3632 | { | ||
3633 | struct bfq_group *bfqg = pd_to_bfqg(pd); | ||
3634 | u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples); | ||
3635 | u64 v = 0; | ||
3636 | |||
3637 | if (samples) { | ||
3638 | v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum); | ||
3639 | v = div64_u64(v, samples); | ||
3640 | } | ||
3641 | __blkg_prfill_u64(sf, pd, v); | ||
3642 | return 0; | ||
3643 | } | ||
3644 | |||
3645 | /* print avg_queue_size */ | ||
3646 | static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v) | ||
3647 | { | ||
3648 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), | ||
3649 | bfqg_prfill_avg_queue_size, &blkcg_policy_bfq, | ||
3650 | 0, false); | ||
3651 | return 0; | ||
3652 | } | ||
3653 | |||
3654 | static struct bfq_group * | ||
3655 | bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) | ||
3656 | { | ||
3657 | int ret; | ||
3658 | |||
3659 | ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq); | ||
3660 | if (ret) | ||
3661 | return NULL; | ||
3662 | |||
3663 | return blkg_to_bfqg(bfqd->queue->root_blkg); | ||
3664 | } | ||
3665 | |||
3666 | static struct cftype bfq_blkcg_legacy_files[] = { | ||
3667 | { | ||
3668 | .name = "bfq.weight", | ||
3669 | .flags = CFTYPE_NOT_ON_ROOT, | ||
3670 | .seq_show = bfq_io_show_weight, | ||
3671 | .write_u64 = bfq_io_set_weight_legacy, | ||
3672 | }, | ||
3673 | |||
3674 | /* statistics that cover only the tasks in the bfqg */ | ||
3675 | { | ||
3676 | .name = "bfq.time", | ||
3677 | .private = offsetof(struct bfq_group, stats.time), | ||
3678 | .seq_show = bfqg_print_stat, | ||
3679 | }, | ||
3680 | { | ||
3681 | .name = "bfq.sectors", | ||
3682 | .seq_show = bfqg_print_stat_sectors, | ||
3683 | }, | ||
3684 | { | ||
3685 | .name = "bfq.io_service_bytes", | ||
3686 | .private = (unsigned long)&blkcg_policy_bfq, | ||
3687 | .seq_show = blkg_print_stat_bytes, | ||
3688 | }, | ||
3689 | { | ||
3690 | .name = "bfq.io_serviced", | ||
3691 | .private = (unsigned long)&blkcg_policy_bfq, | ||
3692 | .seq_show = blkg_print_stat_ios, | ||
3693 | }, | ||
3694 | { | ||
3695 | .name = "bfq.io_service_time", | ||
3696 | .private = offsetof(struct bfq_group, stats.service_time), | ||
3697 | .seq_show = bfqg_print_rwstat, | ||
3698 | }, | ||
3699 | { | ||
3700 | .name = "bfq.io_wait_time", | ||
3701 | .private = offsetof(struct bfq_group, stats.wait_time), | ||
3702 | .seq_show = bfqg_print_rwstat, | ||
3703 | }, | ||
3704 | { | ||
3705 | .name = "bfq.io_merged", | ||
3706 | .private = offsetof(struct bfq_group, stats.merged), | ||
3707 | .seq_show = bfqg_print_rwstat, | ||
3708 | }, | ||
3709 | { | ||
3710 | .name = "bfq.io_queued", | ||
3711 | .private = offsetof(struct bfq_group, stats.queued), | ||
3712 | .seq_show = bfqg_print_rwstat, | ||
3713 | }, | ||
3714 | |||
3715 | /* the same statistics which cover the bfqg and its descendants */ | ||
3716 | { | ||
3717 | .name = "bfq.time_recursive", | ||
3718 | .private = offsetof(struct bfq_group, stats.time), | ||
3719 | .seq_show = bfqg_print_stat_recursive, | ||
3720 | }, | ||
3721 | { | ||
3722 | .name = "bfq.sectors_recursive", | ||
3723 | .seq_show = bfqg_print_stat_sectors_recursive, | ||
3724 | }, | ||
3725 | { | ||
3726 | .name = "bfq.io_service_bytes_recursive", | ||
3727 | .private = (unsigned long)&blkcg_policy_bfq, | ||
3728 | .seq_show = blkg_print_stat_bytes_recursive, | ||
3729 | }, | ||
3730 | { | ||
3731 | .name = "bfq.io_serviced_recursive", | ||
3732 | .private = (unsigned long)&blkcg_policy_bfq, | ||
3733 | .seq_show = blkg_print_stat_ios_recursive, | ||
3734 | }, | ||
3735 | { | ||
3736 | .name = "bfq.io_service_time_recursive", | ||
3737 | .private = offsetof(struct bfq_group, stats.service_time), | ||
3738 | .seq_show = bfqg_print_rwstat_recursive, | ||
3739 | }, | ||
3740 | { | ||
3741 | .name = "bfq.io_wait_time_recursive", | ||
3742 | .private = offsetof(struct bfq_group, stats.wait_time), | ||
3743 | .seq_show = bfqg_print_rwstat_recursive, | ||
3744 | }, | ||
3745 | { | ||
3746 | .name = "bfq.io_merged_recursive", | ||
3747 | .private = offsetof(struct bfq_group, stats.merged), | ||
3748 | .seq_show = bfqg_print_rwstat_recursive, | ||
3749 | }, | ||
3750 | { | ||
3751 | .name = "bfq.io_queued_recursive", | ||
3752 | .private = offsetof(struct bfq_group, stats.queued), | ||
3753 | .seq_show = bfqg_print_rwstat_recursive, | ||
3754 | }, | ||
3755 | { | ||
3756 | .name = "bfq.avg_queue_size", | ||
3757 | .seq_show = bfqg_print_avg_queue_size, | ||
3758 | }, | ||
3759 | { | ||
3760 | .name = "bfq.group_wait_time", | ||
3761 | .private = offsetof(struct bfq_group, stats.group_wait_time), | ||
3762 | .seq_show = bfqg_print_stat, | ||
3763 | }, | ||
3764 | { | ||
3765 | .name = "bfq.idle_time", | ||
3766 | .private = offsetof(struct bfq_group, stats.idle_time), | ||
3767 | .seq_show = bfqg_print_stat, | ||
3768 | }, | ||
3769 | { | ||
3770 | .name = "bfq.empty_time", | ||
3771 | .private = offsetof(struct bfq_group, stats.empty_time), | ||
3772 | .seq_show = bfqg_print_stat, | ||
3773 | }, | ||
3774 | { | ||
3775 | .name = "bfq.dequeue", | ||
3776 | .private = offsetof(struct bfq_group, stats.dequeue), | ||
3777 | .seq_show = bfqg_print_stat, | ||
3778 | }, | ||
3779 | { } /* terminate */ | ||
3780 | }; | ||
3781 | |||
3782 | static struct cftype bfq_blkg_files[] = { | ||
3783 | { | ||
3784 | .name = "bfq.weight", | ||
3785 | .flags = CFTYPE_NOT_ON_ROOT, | ||
3786 | .seq_show = bfq_io_show_weight, | ||
3787 | .write = bfq_io_set_weight, | ||
3788 | }, | ||
3789 | {} /* terminate */ | ||
3790 | }; | ||
3791 | |||
3792 | #else /* CONFIG_BFQ_GROUP_IOSCHED */ | ||
3793 | |||
3794 | static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, | ||
3795 | struct bfq_queue *bfqq, unsigned int op) { } | ||
3796 | static inline void | ||
3797 | bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } | ||
3798 | static inline void | ||
3799 | bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } | ||
3800 | static inline void bfqg_stats_update_completion(struct bfq_group *bfqg, | ||
3801 | uint64_t start_time, uint64_t io_start_time, | ||
3802 | unsigned int op) { } | ||
3803 | static inline void | ||
3804 | bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, | ||
3805 | struct bfq_group *curr_bfqg) { } | ||
3806 | static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { } | ||
3807 | static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } | ||
3808 | static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { } | ||
3809 | static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } | ||
3810 | static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } | ||
3811 | static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } | ||
3812 | |||
3813 | static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, | ||
3814 | struct bfq_group *bfqg) {} | ||
3815 | |||
3816 | static void bfq_init_entity(struct bfq_entity *entity, | ||
3817 | struct bfq_group *bfqg) | ||
3818 | { | ||
3819 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); | ||
3820 | |||
3821 | entity->weight = entity->new_weight; | ||
3822 | entity->orig_weight = entity->new_weight; | ||
3823 | if (bfqq) { | ||
3824 | bfqq->ioprio = bfqq->new_ioprio; | ||
3825 | bfqq->ioprio_class = bfqq->new_ioprio_class; | ||
3826 | } | ||
3827 | entity->sched_data = &bfqg->sched_data; | ||
3828 | } | ||
3829 | |||
3830 | static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} | ||
3831 | |||
3832 | static void bfq_end_wr_async(struct bfq_data *bfqd) | ||
3833 | { | ||
3834 | bfq_end_wr_async_queues(bfqd, bfqd->root_group); | ||
3835 | } | ||
3836 | |||
3837 | static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, | ||
3838 | struct blkcg *blkcg) | ||
3839 | { | ||
3840 | return bfqd->root_group; | ||
3841 | } | ||
3842 | |||
3843 | static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) | ||
3844 | { | ||
3845 | return bfqq->bfqd->root_group; | ||
3846 | } | ||
3847 | |||
3848 | static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, | ||
3849 | int node) | ||
3850 | { | ||
3851 | struct bfq_group *bfqg; | ||
3852 | int i; | ||
3853 | |||
3854 | bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); | ||
3855 | if (!bfqg) | ||
3856 | return NULL; | ||
3857 | |||
3858 | for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) | ||
3859 | bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; | ||
3860 | |||
3861 | return bfqg; | ||
3862 | } | ||
3863 | #endif /* CONFIG_BFQ_GROUP_IOSCHED */ | ||
3864 | |||
3865 | #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) | 301 | #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) |
3866 | #define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) | 302 | #define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) |
3867 | 303 | ||
@@ -4002,7 +438,7 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, | |||
4002 | return bfqq; | 438 | return bfqq; |
4003 | } | 439 | } |
4004 | 440 | ||
4005 | static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) | 441 | void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
4006 | { | 442 | { |
4007 | struct rb_node **p, *parent; | 443 | struct rb_node **p, *parent; |
4008 | struct bfq_queue *__bfqq; | 444 | struct bfq_queue *__bfqq; |
@@ -4091,9 +527,8 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) | |||
4091 | * In most scenarios, the rate at which nodes are created/destroyed | 527 | * In most scenarios, the rate at which nodes are created/destroyed |
4092 | * should be low too. | 528 | * should be low too. |
4093 | */ | 529 | */ |
4094 | static void bfq_weights_tree_add(struct bfq_data *bfqd, | 530 | void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, |
4095 | struct bfq_entity *entity, | 531 | struct rb_root *root) |
4096 | struct rb_root *root) | ||
4097 | { | 532 | { |
4098 | struct rb_node **new = &(root->rb_node), *parent = NULL; | 533 | struct rb_node **new = &(root->rb_node), *parent = NULL; |
4099 | 534 | ||
@@ -4161,9 +596,8 @@ inc_counter: | |||
4161 | * See the comments to the function bfq_weights_tree_add() for considerations | 596 | * See the comments to the function bfq_weights_tree_add() for considerations |
4162 | * about overhead. | 597 | * about overhead. |
4163 | */ | 598 | */ |
4164 | static void bfq_weights_tree_remove(struct bfq_data *bfqd, | 599 | void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity, |
4165 | struct bfq_entity *entity, | 600 | struct rb_root *root) |
4166 | struct rb_root *root) | ||
4167 | { | 601 | { |
4168 | if (!entity->weight_counter) | 602 | if (!entity->weight_counter) |
4169 | return; | 603 | return; |
@@ -4580,11 +1014,6 @@ static int bfq_min_budget(struct bfq_data *bfqd) | |||
4580 | return bfqd->bfq_max_budget / 32; | 1014 | return bfqd->bfq_max_budget / 32; |
4581 | } | 1015 | } |
4582 | 1016 | ||
4583 | static void bfq_bfqq_expire(struct bfq_data *bfqd, | ||
4584 | struct bfq_queue *bfqq, | ||
4585 | bool compensate, | ||
4586 | enum bfqq_expiration reason); | ||
4587 | |||
4588 | /* | 1017 | /* |
4589 | * The next function, invoked after the input queue bfqq switches from | 1018 | * The next function, invoked after the input queue bfqq switches from |
4590 | * idle to busy, updates the budget of bfqq. The function also tells | 1019 | * idle to busy, updates the budget of bfqq. The function also tells |
@@ -5275,8 +1704,8 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) | |||
5275 | bfqq->entity.prio_changed = 1; | 1704 | bfqq->entity.prio_changed = 1; |
5276 | } | 1705 | } |
5277 | 1706 | ||
5278 | static void bfq_end_wr_async_queues(struct bfq_data *bfqd, | 1707 | void bfq_end_wr_async_queues(struct bfq_data *bfqd, |
5279 | struct bfq_group *bfqg) | 1708 | struct bfq_group *bfqg) |
5280 | { | 1709 | { |
5281 | int i, j; | 1710 | int i, j; |
5282 | 1711 | ||
@@ -6495,10 +2924,10 @@ static unsigned long bfq_smallest_from_now(void) | |||
6495 | * former on a timeslice basis, without violating service domain | 2924 | * former on a timeslice basis, without violating service domain |
6496 | * guarantees among the latter. | 2925 | * guarantees among the latter. |
6497 | */ | 2926 | */ |
6498 | static void bfq_bfqq_expire(struct bfq_data *bfqd, | 2927 | void bfq_bfqq_expire(struct bfq_data *bfqd, |
6499 | struct bfq_queue *bfqq, | 2928 | struct bfq_queue *bfqq, |
6500 | bool compensate, | 2929 | bool compensate, |
6501 | enum bfqq_expiration reason) | 2930 | enum bfqq_expiration reason) |
6502 | { | 2931 | { |
6503 | bool slow; | 2932 | bool slow; |
6504 | unsigned long delta = 0; | 2933 | unsigned long delta = 0; |
@@ -7204,7 +3633,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) | |||
7204 | * Scheduler lock must be held here. Recall not to use bfqq after calling | 3633 | * Scheduler lock must be held here. Recall not to use bfqq after calling |
7205 | * this function on it. | 3634 | * this function on it. |
7206 | */ | 3635 | */ |
7207 | static void bfq_put_queue(struct bfq_queue *bfqq) | 3636 | void bfq_put_queue(struct bfq_queue *bfqq) |
7208 | { | 3637 | { |
7209 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 3638 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
7210 | struct bfq_group *bfqg = bfqq_group(bfqq); | 3639 | struct bfq_group *bfqg = bfqq_group(bfqq); |
@@ -7345,6 +3774,10 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) | |||
7345 | bfqq->entity.prio_changed = 1; | 3774 | bfqq->entity.prio_changed = 1; |
7346 | } | 3775 | } |
7347 | 3776 | ||
3777 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, | ||
3778 | struct bio *bio, bool is_sync, | ||
3779 | struct bfq_io_cq *bic); | ||
3780 | |||
7348 | static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) | 3781 | static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) |
7349 | { | 3782 | { |
7350 | struct bfq_data *bfqd = bic_to_bfqd(bic); | 3783 | struct bfq_data *bfqd = bic_to_bfqd(bic); |
@@ -8121,7 +4554,7 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd, | |||
8121 | * we reparent them to the root cgroup (i.e., the only one that will | 4554 | * we reparent them to the root cgroup (i.e., the only one that will |
8122 | * exist for sure until all the requests on a device are gone). | 4555 | * exist for sure until all the requests on a device are gone). |
8123 | */ | 4556 | */ |
8124 | static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) | 4557 | void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) |
8125 | { | 4558 | { |
8126 | int i, j; | 4559 | int i, j; |
8127 | 4560 | ||
@@ -8537,24 +4970,6 @@ static struct elevator_type iosched_bfq_mq = { | |||
8537 | .elevator_owner = THIS_MODULE, | 4970 | .elevator_owner = THIS_MODULE, |
8538 | }; | 4971 | }; |
8539 | 4972 | ||
8540 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | ||
8541 | static struct blkcg_policy blkcg_policy_bfq = { | ||
8542 | .dfl_cftypes = bfq_blkg_files, | ||
8543 | .legacy_cftypes = bfq_blkcg_legacy_files, | ||
8544 | |||
8545 | .cpd_alloc_fn = bfq_cpd_alloc, | ||
8546 | .cpd_init_fn = bfq_cpd_init, | ||
8547 | .cpd_bind_fn = bfq_cpd_init, | ||
8548 | .cpd_free_fn = bfq_cpd_free, | ||
8549 | |||
8550 | .pd_alloc_fn = bfq_pd_alloc, | ||
8551 | .pd_init_fn = bfq_pd_init, | ||
8552 | .pd_offline_fn = bfq_pd_offline, | ||
8553 | .pd_free_fn = bfq_pd_free, | ||
8554 | .pd_reset_stats_fn = bfq_pd_reset_stats, | ||
8555 | }; | ||
8556 | #endif | ||
8557 | |||
8558 | static int __init bfq_init(void) | 4973 | static int __init bfq_init(void) |
8559 | { | 4974 | { |
8560 | int ret; | 4975 | int ret; |