 block/cfq-iosched.c | 65
 1 file changed, 55 insertions(+), 10 deletions(-)
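In short, the patch wires CFQ into the blktrace message stream: it adds two logging macros built on blk_add_trace_msg() and calls them at the scheduler's main decision points (queue allocation, rr add/del, slice start and expiry, idling, dispatch, completion, preemption). A minimal sketch of the idea, lifted from the hunks below (struct cfq_data, struct cfq_queue and the new pid field are as defined in cfq-iosched.c):

	#include <linux/blktrace_api.h>

	/* per-queue messages are tagged "cfq<pid>", queue-wide ones just "cfq" */
	#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
		blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
	#define cfq_log(cfqd, fmt, args...)	\
		blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

	/* typical call site: log the decision before kicking the dispatch work */
	static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
	{
		if (cfqd->busy_queues) {
			cfq_log(cfqd, "schedule dispatch");
			kblockd_schedule_work(&cfqd->unplug_work);
		}
	}

The pid used in the per-queue prefix is recorded when the cfq_queue is allocated (cfqq->pid = current->pid), so the resulting messages should show up interleaved with the normal block-layer events when the device is traced with blktrace.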
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 32aa3674f8a3..0ebb626a25d3 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -11,6 +11,7 @@
 #include <linux/elevator.h>
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
+#include <linux/blktrace_api.h>
 
 /*
  * tunables
@@ -41,7 +42,7 @@ static int cfq_slice_idle = HZ / 125;
 
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
-#define RQ_CFQQ(rq)		((rq)->elevator_private2)
+#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
@@ -156,6 +157,7 @@ struct cfq_queue {
 	unsigned short ioprio, org_ioprio;
 	unsigned short ioprio_class, org_ioprio_class;
 
+	pid_t pid;
 };
 
 enum cfqq_state_flags {
@@ -199,6 +201,11 @@ CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
 
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
+	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+#define cfq_log(cfqd, fmt, args...)	\
+	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
+
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
 				       struct io_context *, gfp_t);
@@ -235,8 +242,10 @@ static inline int cfq_bio_sync(struct bio *bio)
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-	if (cfqd->busy_queues)
+	if (cfqd->busy_queues) {
+		cfq_log(cfqd, "schedule dispatch");
 		kblockd_schedule_work(&cfqd->unplug_work);
+	}
 }
 
 static int cfq_queue_empty(struct request_queue *q)
@@ -271,6 +280,7 @@ static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 }
 
 /*
@@ -540,6 +550,7 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
@@ -553,6 +564,7 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 	cfq_clear_cfqq_on_rr(cfqq);
 
@@ -639,6 +651,8 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
 	cfqd->rq_in_driver++;
+	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
+						cfqd->rq_in_driver);
 
 	/*
 	 * If the depth is larger 1, it really could be queueing. But lets
@@ -658,6 +672,8 @@ static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 
 	WARN_ON(!cfqd->rq_in_driver);
 	cfqd->rq_in_driver--;
+	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
+						cfqd->rq_in_driver);
 }
 
 static void cfq_remove_request(struct request *rq)
@@ -747,6 +763,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 				   struct cfq_queue *cfqq)
 {
 	if (cfqq) {
+		cfq_log_cfqq(cfqd, cfqq, "set_active");
 		cfqq->slice_end = 0;
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
@@ -764,6 +781,8 @@ static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    int timed_out)
 {
+	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
+
 	if (cfq_cfqq_wait_request(cfqq))
 		del_timer(&cfqd->idle_slice_timer);
 
@@ -773,8 +792,10 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
-	if (timed_out && !cfq_cfqq_slice_new(cfqq))
+	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
 		cfqq->slice_resid = cfqq->slice_end - jiffies;
+		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
+	}
 
 	cfq_resort_rr_list(cfqd, cfqq);
 
@@ -867,6 +888,12 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		return;
 
 	/*
+	 * still requests with the driver, don't idle
+	 */
+	if (cfqd->rq_in_driver)
+		return;
+
+	/*
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
@@ -893,6 +920,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+	cfq_log(cfqd, "arm_idle: %lu", sl);
 }
 
 /*
@@ -903,6 +931,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
+	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
+
 	cfq_remove_request(rq);
 	cfqq->dispatched++;
 	elv_dispatch_sort(q, rq);
@@ -932,8 +962,9 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 	rq = rq_entry_fifo(cfqq->fifo.next);
 
 	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
-		return NULL;
+		rq = NULL;
 
+	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
 	return rq;
 }
 
@@ -1073,6 +1104,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 
 	BUG_ON(cfqd->busy_queues);
 
+	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
 	return dispatched;
 }
 
@@ -1113,6 +1145,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
 	}
 
+	cfq_log(cfqd, "dispatched=%d", dispatched);
 	return dispatched;
 }
 
@@ -1131,6 +1164,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	if (!atomic_dec_and_test(&cfqq->ref))
 		return;
 
+	cfq_log_cfqq(cfqd, cfqq, "put_queue");
 	BUG_ON(rb_first(&cfqq->sort_list));
 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
@@ -1439,6 +1473,8 @@ retry:
 				cfq_mark_cfqq_idle_window(cfqq);
 			cfq_mark_cfqq_sync(cfqq);
 		}
+		cfqq->pid = current->pid;
+		cfq_log_cfqq(cfqd, cfqq, "alloced");
 	}
 
 	if (new_cfqq)
@@ -1687,7 +1723,7 @@ static void
 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		       struct cfq_io_context *cic)
 {
-	int enable_idle;
+	int old_idle, enable_idle;
 
 	/*
 	 * Don't idle for async or idle io prio class
@@ -1695,7 +1731,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
 		return;
 
-	enable_idle = cfq_cfqq_idle_window(cfqq);
+	old_idle = cfq_cfqq_idle_window(cfqq);
 
 	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
 	    (cfqd->hw_tag && CIC_SEEKY(cic)))
@@ -1707,10 +1743,13 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			enable_idle = 1;
 	}
 
-	if (enable_idle)
-		cfq_mark_cfqq_idle_window(cfqq);
-	else
-		cfq_clear_cfqq_idle_window(cfqq);
+	if (old_idle != enable_idle) {
+		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
+		if (enable_idle)
+			cfq_mark_cfqq_idle_window(cfqq);
+		else
+			cfq_clear_cfqq_idle_window(cfqq);
+	}
 }
 
 /*
@@ -1769,6 +1808,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	cfq_log_cfqq(cfqd, cfqq, "preempt");
 	cfq_slice_expired(cfqd, 1);
 
 	/*
@@ -1830,6 +1870,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
+	cfq_log_cfqq(cfqd, cfqq, "insert_request");
 	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
 
 	cfq_add_rq_rb(rq);
@@ -1847,6 +1888,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	unsigned long now;
 
 	now = jiffies;
+	cfq_log_cfqq(cfqd, cfqq, "complete");
 
 	WARN_ON(!cfqd->rq_in_driver);
 	WARN_ON(!cfqq->dispatched);
@@ -2016,6 +2058,7 @@ queue_fail:
 
 	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
+	cfq_log(cfqd, "set_request fail");
 	return 1;
 }
 
@@ -2041,6 +2084,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 	unsigned long flags;
 	int timed_out = 1;
 
+	cfq_log(cfqd, "idle timer fired");
+
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	cfqq = cfqd->active_queue;