author     Jens Axboe <axboe@suse.de>	2006-07-22 10:48:31 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:29:35 -0400
commit     89850f7ee905410c89f9295e89dc4c33502a34ac (patch)
tree       c499b53c052a6e515ec232fdbde4537f20f1d23d /block
parent     e6a1c874a064e7d07f24986aba7cd537b7f4a25d (diff)
[PATCH] cfq-iosched: cleanups, fixes, dead code removal
A collection of little fixes and cleanups:

- We don't use the 'queued' sysfs exported attribute, since the
  may_queue() logic was rewritten. So kill it.
- Remove dead defines.
- cfq_set_active_queue() can be rewritten cleaner with else if conditions.
- Several places had cfq_exit_cfqq() like logic, abstract that out and
  use that.
- Annotate the cfqq kmem_cache_alloc() so the allocator knows that this
  is a repeat allocation if it fails with __GFP_WAIT set. This allows the
  allocator to start freeing some memory, if needed. CFQ already loops
  for this condition, so we might as well pass the hint down.
- Remove the cfqd->rq_starved logic. It's not needed anymore after we
  dropped the crq allocation in cfq_set_request().
- Remove unneeded parameter passing.

Signed-off-by: Jens Axboe <axboe@suse.de>
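To make the allocation bullet concrete: since cfq_get_queue() already loops until it obtains a cfqq when __GFP_WAIT is set, the blocking retry can pass __GFP_NOFAIL as a hint that the allocation will simply be repeated. The sketch below is a simplification (the real code allocates into new_cfqq and redoes the hash lookup via goto retry, see the hunk in the diff below):

	do {
		cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
		if (cfqq || !(gfp_mask & __GFP_WAIT))
			break;
		/*
		 * We would just retry this allocation anyway, so tell the
		 * allocator it may reclaim as hard as it needs to:
		 * __GFP_NOFAIL marks the allocation as must-succeed.
		 */
		spin_unlock_irq(cfqd->queue->queue_lock);
		cfqq = kmem_cache_alloc(cfq_pool, gfp_mask | __GFP_NOFAIL);
		spin_lock_irq(cfqd->queue->queue_lock);
	} while (!cfqq);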
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	184
1 file changed, 70 insertions(+), 114 deletions(-)
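The exit-path consolidation is the other piece worth seeing in isolation. The new cfq_exit_cfqq() helper, shown here as it appears in the patch, expires the slice if the queue being torn down is the active one and then drops the reference; both the ASYNC and SYNC teardown sites in __cfq_exit_single_io_context() now call it instead of open-coding that logic.

	static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
	{
		/* expire the slice if this queue is currently being serviced */
		if (unlikely(cfqq == cfqd->active_queue))
			__cfq_slice_expired(cfqd, cfqq, 0);

		cfq_put_queue(cfqq);
	}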
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2ac35aacbbf9..ec24284e9d39 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -17,7 +17,6 @@
  * tunables
  */
 static const int cfq_quantum = 4;		/* max queue in one round of service */
-static const int cfq_queued = 8;		/* minimum rq allocate limit per-queue*/
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
 static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */
@@ -54,7 +53,6 @@ static struct completion *ioc_gone;
 
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define cfq_class_be(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
 #define ASYNC			(0)
@@ -99,9 +97,6 @@ struct cfq_data {
 	int hw_tag;
 
 	/*
-	 * schedule slice state info
-	 */
-	/*
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
@@ -117,13 +112,10 @@ struct cfq_data {
 	sector_t last_sector;
 	unsigned long last_end_request;
 
-	unsigned int rq_starved;
-
 	/*
 	 * tunables, see top of file
 	 */
 	unsigned int cfq_quantum;
-	unsigned int cfq_queued;
 	unsigned int cfq_fifo_expire[2];
 	unsigned int cfq_back_penalty;
 	unsigned int cfq_back_max;
@@ -484,12 +476,14 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
 	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
-	sector_t sector = bio->bi_sector + bio_sectors(bio);
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
-	if (cfqq)
+	if (cfqq) {
+		sector_t sector = bio->bi_sector + bio_sectors(bio);
+
 		return elv_rb_find(&cfqq->sort_list, sector);
+	}
 
 	return NULL;
 }
@@ -699,26 +693,25 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = NULL;
 
-	/*
-	 * if current list is non-empty, grab first entry. if it is empty,
-	 * get next prio level and grab first entry then if any are spliced
-	 */
-	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
+	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
+		/*
+		 * if current list is non-empty, grab first entry. if it is
+		 * empty, get next prio level and grab first entry then if any
+		 * are spliced
+		 */
 		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
-
-	/*
-	 * If no new queues are available, check if the busy list has some
-	 * before falling back to idle io.
-	 */
-	if (!cfqq && !list_empty(&cfqd->busy_rr))
+	} else if (!list_empty(&cfqd->busy_rr)) {
+		/*
+		 * If no new queues are available, check if the busy list has
+		 * some before falling back to idle io.
+		 */
 		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
-
-	/*
-	 * if we have idle queues and no rt or be queues had pending
-	 * requests, either allow immediate service if the grace period
-	 * has passed or arm the idle grace timer
-	 */
-	if (!cfqq && !list_empty(&cfqd->idle_rr)) {
+	} else if (!list_empty(&cfqd->idle_rr)) {
+		/*
+		 * if we have idle queues and no rt or be queues had pending
+		 * requests, either allow immediate service if the grace period
+		 * has passed or arm the idle grace timer
+		 */
 		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
 
 		if (time_after_eq(jiffies, end))
@@ -793,18 +786,19 @@ static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct request *rq;
+	int fifo;
 
 	if (cfq_cfqq_fifo_expire(cfqq))
 		return NULL;
+	if (list_empty(&cfqq->fifo))
+		return NULL;
 
-	if (!list_empty(&cfqq->fifo)) {
-		int fifo = cfq_cfqq_class_sync(cfqq);
+	fifo = cfq_cfqq_class_sync(cfqq);
+	rq = rq_entry_fifo(cfqq->fifo.next);
 
-		rq = rq_entry_fifo(cfqq->fifo.next);
-		if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
-			cfq_mark_cfqq_fifo_expire(cfqq);
-			return rq;
-		}
+	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
+		cfq_mark_cfqq_fifo_expire(cfqq);
+		return rq;
 	}
 
 	return NULL;
@@ -1096,40 +1090,48 @@ static void cfq_trim(struct io_context *ioc)
 	cfq_free_io_context(ioc);
 }
 
-/*
- * Called with interrupts disabled
- */
-static void cfq_exit_single_io_context(struct cfq_io_context *cic)
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	struct cfq_data *cfqd = cic->key;
-	request_queue_t *q;
-
-	if (!cfqd)
-		return;
-
-	q = cfqd->queue;
-
-	WARN_ON(!irqs_disabled());
+	if (unlikely(cfqq == cfqd->active_queue))
+		__cfq_slice_expired(cfqd, cfqq, 0);
 
-	spin_lock(q->queue_lock);
+	cfq_put_queue(cfqq);
+}
 
+static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
+					 struct cfq_io_context *cic)
+{
 	if (cic->cfqq[ASYNC]) {
-		if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
-			__cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
-		cfq_put_queue(cic->cfqq[ASYNC]);
+		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
 		cic->cfqq[ASYNC] = NULL;
 	}
 
 	if (cic->cfqq[SYNC]) {
-		if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
-			__cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
-		cfq_put_queue(cic->cfqq[SYNC]);
+		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
 		cic->cfqq[SYNC] = NULL;
 	}
 
 	cic->key = NULL;
 	list_del_init(&cic->queue_list);
-	spin_unlock(q->queue_lock);
+}
+
+
+/*
+ * Called with interrupts disabled
+ */
+static void cfq_exit_single_io_context(struct cfq_io_context *cic)
+{
+	struct cfq_data *cfqd = cic->key;
+
+	WARN_ON(!irqs_disabled());
+
+	if (cfqd) {
+		request_queue_t *q = cfqd->queue;
+
+		spin_lock(q->queue_lock);
+		__cfq_exit_single_io_context(cfqd, cic);
+		spin_unlock(q->queue_lock);
+	}
 }
 
 static void cfq_exit_io_context(struct io_context *ioc)
@@ -1286,8 +1288,14 @@ retry:
 		cfqq = new_cfqq;
 		new_cfqq = NULL;
 	} else if (gfp_mask & __GFP_WAIT) {
+		/*
+		 * Inform the allocator of the fact that we will
+		 * just repeat this allocation if it fails, to allow
+		 * the allocator to do whatever it needs to attempt to
+		 * free memory.
+		 */
 		spin_unlock_irq(cfqd->queue->queue_lock);
-		new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+		new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask|__GFP_NOFAIL);
 		spin_lock_irq(cfqd->queue->queue_lock);
 		goto retry;
 	} else {
@@ -1739,9 +1747,7 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
 	cfq_resort_rr_list(cfqq, 0);
 }
 
-static inline int
-__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		struct task_struct *task, int rw)
+static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 {
 	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
 	    !cfq_cfqq_must_alloc_slice(cfqq)) {
@@ -1769,27 +1775,12 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
 
-		return __cfq_may_queue(cfqd, cfqq, tsk, rw);
+		return __cfq_may_queue(cfqq);
 	}
 
 	return ELV_MQUEUE_MAY;
 }
 
-static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	if (unlikely(cfqd->rq_starved)) {
-		struct request_list *rl = &q->rq;
-
-		smp_mb();
-		if (waitqueue_active(&rl->wait[READ]))
-			wake_up(&rl->wait[READ]);
-		if (waitqueue_active(&rl->wait[WRITE]))
-			wake_up(&rl->wait[WRITE]);
-	}
-}
-
 /*
  * queue lock held here
  */
@@ -1808,7 +1799,6 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
 		rq->elevator_private = NULL;
 		rq->elevator_private2 = NULL;
 
-		cfq_check_waiters(q, cfqq);
 		cfq_put_queue(cfqq);
 	}
 }
@@ -1848,7 +1838,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 
 	cfqq->allocated[rw]++;
 	cfq_clear_cfqq_must_alloc(cfqq);
-	cfqd->rq_starved = 0;
 	atomic_inc(&cfqq->ref);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1860,12 +1849,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
-	/*
-	 * mark us rq allocation starved. we need to kickstart the process
-	 * ourselves if there are no pending requests that can do it for us.
-	 * that would be an extremely rare OOM situation
-	 */
-	cfqd->rq_starved = 1;
+
 	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	return 1;
@@ -1874,25 +1858,9 @@ queue_fail:
 static void cfq_kick_queue(void *data)
 {
 	request_queue_t *q = data;
-	struct cfq_data *cfqd = q->elevator->elevator_data;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-
-	if (cfqd->rq_starved) {
-		struct request_list *rl = &q->rq;
-
-		/*
-		 * we aren't guaranteed to get a request after this, but we
-		 * have to be opportunistic
-		 */
-		smp_mb();
-		if (waitqueue_active(&rl->wait[READ]))
-			wake_up(&rl->wait[READ]);
-		if (waitqueue_active(&rl->wait[WRITE]))
-			wake_up(&rl->wait[WRITE]);
-	}
-
 	blk_remove_plug(q);
 	q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1987,16 +1955,8 @@ static void cfq_exit_queue(elevator_t *e)
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
 							struct cfq_io_context,
 							queue_list);
-		if (cic->cfqq[ASYNC]) {
-			cfq_put_queue(cic->cfqq[ASYNC]);
-			cic->cfqq[ASYNC] = NULL;
-		}
-		if (cic->cfqq[SYNC]) {
-			cfq_put_queue(cic->cfqq[SYNC]);
-			cic->cfqq[SYNC] = NULL;
-		}
-		cic->key = NULL;
-		list_del_init(&cic->queue_list);
+
+		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
 	spin_unlock_irq(q->queue_lock);
@@ -2047,7 +2007,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
 
-	cfqd->cfq_queued = cfq_queued;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
@@ -2119,7 +2078,6 @@ static ssize_t __FUNC(elevator_t *e, char *page) \
 	return cfq_var_show(__data, (page));			\
 }
 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
-SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
@@ -2147,7 +2105,6 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
 	return ret;							\
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
@@ -2163,7 +2120,6 @@ STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX,
 
 static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(quantum),
-	CFQ_ATTR(queued),
 	CFQ_ATTR(fifo_expire_sync),
 	CFQ_ATTR(fifo_expire_async),
 	CFQ_ATTR(back_seek_max),