Diffstat (limited to 'block/cfq-iosched.c')

 -rw-r--r--   block/cfq-iosched.c   280

1 file changed, 229 insertions(+), 51 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2e13e9e689bd..49913804e8dd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,6 +15,7 @@
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
 #include "blk.h"
+#include "blk-cgroup.h"
 
 static struct blkio_policy_type blkio_policy_cfq;
 
@@ -365,9 +366,177 @@ CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 
-#include "blk-cgroup.h"
+/* blkg state flags */
+enum blkg_state_flags {
+        BLKG_waiting = 0,
+        BLKG_idling,
+        BLKG_empty,
+};
+
+#define BLKG_FLAG_FNS(name) \
+static inline void blkio_mark_blkg_##name( \
+                struct blkio_group_stats *stats) \
+{ \
+        stats->flags |= (1 << BLKG_##name); \
+} \
+static inline void blkio_clear_blkg_##name( \
+                struct blkio_group_stats *stats) \
+{ \
+        stats->flags &= ~(1 << BLKG_##name); \
+} \
+static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
+{ \
+        return (stats->flags & (1 << BLKG_##name)) != 0; \
+} \
+
+BLKG_FLAG_FNS(waiting)
+BLKG_FLAG_FNS(idling)
+BLKG_FLAG_FNS(empty)
+#undef BLKG_FLAG_FNS
+
+/* This should be called with the queue_lock held. */
+static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+{
+        unsigned long long now;
+
+        if (!blkio_blkg_waiting(stats))
+                return;
+
+        now = sched_clock();
+        if (time_after64(now, stats->start_group_wait_time))
+                blkg_stat_add(&stats->group_wait_time,
+                              now - stats->start_group_wait_time);
+        blkio_clear_blkg_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+                                            struct blkio_policy_type *pol,
+                                            struct blkio_group *curr_blkg)
+{
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
+
+        if (blkio_blkg_waiting(&pd->stats))
+                return;
+        if (blkg == curr_blkg)
+                return;
+        pd->stats.start_group_wait_time = sched_clock();
+        blkio_mark_blkg_waiting(&pd->stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void blkio_end_empty_time(struct blkio_group_stats *stats)
+{
+        unsigned long long now;
+
+        if (!blkio_blkg_empty(stats))
+                return;
+
+        now = sched_clock();
+        if (time_after64(now, stats->start_empty_time))
+                blkg_stat_add(&stats->empty_time,
+                              now - stats->start_empty_time);
+        blkio_clear_blkg_empty(stats);
+}
+
+static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol,
+                                             unsigned long dequeue)
+{
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_stat_add(&pd->stats.dequeue, dequeue);
+}
+
+static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        if (blkg_rwstat_sum(&stats->queued))
+                return;
+
+        /*
+         * group is already marked empty. This can happen if cfqq got new
+         * request in parent group and moved to this group while being added
+         * to service tree. Just ignore the event and move on.
+         */
+        if (blkio_blkg_empty(stats))
+                return;
+
+        stats->start_empty_time = sched_clock();
+        blkio_mark_blkg_empty(stats);
+}
+
+static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                                               struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        if (blkio_blkg_idling(stats)) {
+                unsigned long long now = sched_clock();
+
+                if (time_after64(now, stats->start_idle_time))
+                        blkg_stat_add(&stats->idle_time,
+                                      now - stats->start_idle_time);
+                blkio_clear_blkg_idling(stats);
+        }
+}
+
+static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                                                   struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+        BUG_ON(blkio_blkg_idling(stats));
+
+        stats->start_idle_time = sched_clock();
+        blkio_mark_blkg_idling(stats);
+}
+
+static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                                                    struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_stat_add(&stats->avg_queue_size_sum,
+                      blkg_rwstat_sum(&stats->queued));
+        blkg_stat_add(&stats->avg_queue_size_samples, 1);
+        blkio_update_group_wait_time(stats);
+}
+
+#else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+                                            struct blkio_policy_type *pol,
+                                            struct blkio_group *curr_blkg) { }
+static void blkio_end_empty_time(struct blkio_group_stats *stats) { }
+static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol,
+                                             unsigned long dequeue) { }
+static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                                               struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                                                   struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                                                    struct blkio_policy_type *pol) { }
+
+#endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 
 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
 {
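
Note: BLKG_FLAG_FNS() in the hunk above stamps out three one-line bit helpers
per flag (mark, clear, test). Expanded by hand for the waiting flag, the
generated code looks roughly like this (illustration only, not part of the
patch):

static inline void blkio_mark_blkg_waiting(struct blkio_group_stats *stats)
{
        stats->flags |= (1 << BLKG_waiting);    /* set the waiting bit */
}
static inline void blkio_clear_blkg_waiting(struct blkio_group_stats *stats)
{
        stats->flags &= ~(1 << BLKG_waiting);   /* clear the waiting bit */
}
static inline int blkio_blkg_waiting(struct blkio_group_stats *stats)
{
        return (stats->flags & (1 << BLKG_waiting)) != 0;  /* test the bit */
}
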
@@ -403,75 +572,98 @@ static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                           struct blkio_group *curr_blkg,
                                           bool direction, bool sync)
 {
-        blkiocg_update_io_add_stats(blkg, pol, curr_blkg, direction, sync);
-}
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
 
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-        struct blkio_policy_type *pol, unsigned long dequeue)
-{
-        blkiocg_update_dequeue_stats(blkg, pol, dequeue);
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_rwstat_add(&stats->queued, rw, 1);
+        blkio_end_empty_time(stats);
+        blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
 }
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, unsigned long time,
                 unsigned long unaccounted_time)
 {
-        blkiocg_update_timeslice_used(blkg, pol, time, unaccounted_time);
-}
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-        struct blkio_policy_type *pol)
-{
-        blkiocg_set_start_empty_time(blkg, pol);
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_stat_add(&stats->time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+        blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
+#endif
 }
 
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync)
 {
-        blkiocg_update_io_remove_stats(blkg, pol, direction, sync);
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_rwstat_add(&stats->queued, rw, -1);
 }
 
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync)
 {
-        blkiocg_update_io_merged_stats(blkg, pol, direction, sync);
-}
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
 
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-        struct blkio_policy_type *pol)
-{
-        blkiocg_update_idle_time_stats(blkg, pol);
-}
+        lockdep_assert_held(blkg->q->queue_lock);
 
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol)
-{
-        blkiocg_update_avg_queue_size_stats(blkg, pol);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol)
-{
-        blkiocg_update_set_idle_time_stats(blkg, pol);
+        blkg_rwstat_add(&stats->merged, rw, 1);
 }
 
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, uint64_t bytes,
                 bool direction, bool sync)
 {
-        blkiocg_update_dispatch_stats(blkg, pol, bytes, direction, sync);
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
+        struct blkio_group_stats_cpu *stats_cpu;
+        unsigned long flags;
+
+        /* If per cpu stats are not allocated yet, don't do any accounting. */
+        if (pd->stats_cpu == NULL)
+                return;
+
+        /*
+         * Disabling interrupts to provide mutual exclusion between two
+         * writes on same cpu. It probably is not needed for 64bit. Not
+         * optimizing that case yet.
+         */
+        local_irq_save(flags);
+
+        stats_cpu = this_cpu_ptr(pd->stats_cpu);
+
+        blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
+        blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+        blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+        local_irq_restore(flags);
 }
 
 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, uint64_t start_time,
                 uint64_t io_start_time, bool direction, bool sync)
 {
-        blkiocg_update_completion_stats(blkg, pol, start_time, io_start_time,
-                                        direction, sync);
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        unsigned long long now = sched_clock();
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        if (time_after64(now, io_start_time))
+                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+        if (time_after64(io_start_time, start_time))
+                blkg_rwstat_add(&stats->wait_time, rw,
+                                io_start_time - start_time);
 }
 
 #else   /* CONFIG_CFQ_GROUP_IOSCHED */
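
Aside: cfq_blkiocg_update_completion_stats() above splits a completed
request's lifetime into wait time (queued in the scheduler) and service time
(after dispatch to the driver). A sketch of the arithmetic with hypothetical
sched_clock() timestamps, in nanoseconds:

/*
 * start_time    = 1000    request entered CFQ
 * io_start_time = 1400    request dispatched to the driver
 * now           = 2000    completion being accounted
 *
 * wait_time    += io_start_time - start_time  =  400
 * service_time += now - io_start_time         =  600
 *
 * The time_after64() guards skip an interval when sched_clock() values
 * taken on different CPUs are not monotonic and the unsigned delta
 * would underflow.
 */
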
@@ -489,29 +681,15 @@ static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol,
                 struct blkio_group *curr_blkg, bool direction,
                 bool sync) { }
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol, unsigned long dequeue) { }
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, unsigned long time,
                 unsigned long unaccounted_time) { }
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync) { }
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync) { }
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
-
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, uint64_t bytes,
                 bool direction, bool sync) { }
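
For reference, the idiom repeated throughout this patch,
int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
maps CFQ's two bools onto the request flag bits that the rwstat helpers use
to pick a read/write and sync/async bucket (reference sketch, not part of
the patch):

/*
 * direction  sync    rw
 * ---------  -----   ---------------------
 * false      false   0                      async read
 * false      true    REQ_SYNC               sync read
 * true       false   REQ_WRITE              async write
 * true       true    REQ_WRITE | REQ_SYNC   sync write
 */
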