Diffstat (limited to 'block/cfq-iosched.c')
 block/cfq-iosched.c | 167 +++++++++++++++++++++++---------------------------------------
 1 file changed, 67 insertions(+), 100 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e859b4966e4c..94f53a1f4677 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 
@@ -38,14 +37,6 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_SLICE_SCALE		(5)
 
-#define CFQ_KEY_ASYNC		(0)
-
-/*
- * for the hash of cfqq inside the cfqd
- */
-#define CFQ_QHASH_SHIFT		6
-#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
-
 #define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)		((rq)->elevator_private2)
 
@@ -62,8 +53,6 @@ static struct completion *ioc_gone;
 #define ASYNC			(0)
 #define SYNC			(1)
 
-#define cfq_cfqq_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)
-
 #define sample_valid(samples)	((samples) > 80)
 
 /*
@@ -90,11 +79,6 @@ struct cfq_data {
 	struct cfq_rb_root service_tree;
 	unsigned int busy_queues;
 
-	/*
-	 * cfqq lookup hash
-	 */
-	struct hlist_head *cfq_hash;
-
 	int rq_in_driver;
 	int sync_flight;
 	int hw_tag;
@@ -138,10 +122,6 @@ struct cfq_queue {
 	atomic_t ref;
 	/* parent cfq_data */
 	struct cfq_data *cfqd;
-	/* cfqq lookup hash */
-	struct hlist_node cfq_hash;
-	/* hash key */
-	unsigned int key;
 	/* service_tree member */
 	struct rb_node rb_node;
 	/* service_tree key */
@@ -186,6 +166,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
+	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
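Note: the new CFQ_CFQQ_FLAG_sync bit is consumed through the CFQ_CFQQ_FNS() helper generator whose definition starts on the context line above. The macro body is not part of this hunk, but following the pattern this file uses for the other flags, CFQ_CFQQ_FNS(sync) provides roughly the following (a sketch, assuming the flags word is cfqq->flags as for the existing flags):

	static inline void cfq_mark_cfqq_sync(struct cfq_queue *cfqq)
	{
		cfqq->flags |= (1 << CFQ_CFQQ_FLAG_sync);	/* set once at queue setup */
	}
	static inline void cfq_clear_cfqq_sync(struct cfq_queue *cfqq)
	{
		cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_sync);
	}
	static inline int cfq_cfqq_sync(struct cfq_queue *cfqq)
	{
		return cfqq->flags & (1 << CFQ_CFQQ_FLAG_sync);
	}

This flag-based cfq_cfqq_sync() test is what replaces the deleted key-based macro, #define cfq_cfqq_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC).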
@@ -212,11 +193,38 @@ CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
+CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
 
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_insert(request_queue_t *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, unsigned int, struct task_struct *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+				       struct task_struct *, gfp_t);
+static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
+						struct io_context *);
+
+static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
+					    int is_sync)
+{
+	return cic->cfqq[!!is_sync];
+}
+
+static inline void cic_set_cfqq(struct cfq_io_context *cic,
+				struct cfq_queue *cfqq, int is_sync)
+{
+	cic->cfqq[!!is_sync] = cfqq;
+}
+
+/*
+ * We regard a request as SYNC, if it's either a read or has the SYNC bit
+ * set (in which case it could also be direct WRITE).
+ */
+static inline int cfq_bio_sync(struct bio *bio)
+{
+	if (bio_data_dir(bio) == READ || bio_sync(bio))
+		return 1;
+
+	return 0;
+}
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
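The helpers added above are the heart of the change: instead of hashing a pid-derived key into cfqd->cfq_hash, each process's cfq_io_context now caches its two queues directly, indexed by sync (1) or async (0). The !!is_sync normalization matters because callers pass raw truth values; cfq_may_queue() below, for instance, passes rw & REQ_RW_SYNC, which is a nonzero bitmask rather than 0 or 1. A minimal standalone demonstration (the flag value here is hypothetical, for illustration only):

	#include <assert.h>

	#define DEMO_REQ_RW_SYNC	(1 << 4)	/* stand-in for the real flag bit */

	int main(void)
	{
		int rw = DEMO_REQ_RW_SYNC;		/* a sync request */
		int raw = rw & DEMO_REQ_RW_SYNC;	/* == 16: truthy, but not a valid [0,1] index */

		assert(!!raw == 1);			/* !! folds any nonzero value to 1 */
		assert(!!0 == 0);			/* and keeps zero at 0 */
		return 0;
	}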
@@ -235,17 +243,6 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
-{
-	/*
-	 * Use the per-process queue, for read requests and syncronous writes
-	 */
-	if (!(rw & REQ_RW) || is_sync)
-		return task->pid;
-
-	return CFQ_KEY_ASYNC;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
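The deleted cfq_queue_pid() encoded the sync/async split as a hash key: reads and synchronous writes mapped to the per-process key task->pid, async writes to the shared CFQ_KEY_ASYNC. The same split survives as the index into cic->cfqq[]. A sketch of the correspondence (slot_for() is illustrative, not a function in the patch):

	enum { SLOT_ASYNC = 0, SLOT_SYNC = 1 };	/* mirrors cic->cfqq[] indexing */

	static int slot_for(int is_write, int is_sync)
	{
		/* reads and synchronous writes used task->pid before;
		 * they now select the per-cic SYNC slot */
		if (!is_write || is_sync)
			return SLOT_SYNC;

		/* async writes shared CFQ_KEY_ASYNC before; now the ASYNC slot */
		return SLOT_ASYNC;
	}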
@@ -608,10 +605,14 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
+	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
 
-	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
+	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	if (!cic)
+		return NULL;
+
+	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	if (cfqq) {
 		sector_t sector = bio->bi_sector + bio_sectors(bio);
 
@@ -705,23 +706,24 @@ static int cfq_allow_merge(request_queue_t *q, struct request *rq,
 			   struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	const int rw = bio_data_dir(bio);
+	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
-	pid_t key;
 
 	/*
 	 * Disallow merge of a sync bio into an async request.
 	 */
-	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
+	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
 		return 0;
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with. Allow
 	 * merge only if rq is queued there.
 	 */
-	key = cfq_queue_pid(current, rw, bio_sync(bio));
-	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
+	if (!cic)
+		return 0;
 
+	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	if (cfqq == RQ_CFQQ(rq))
 		return 1;
 
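The rewritten cfq_allow_merge() reduces to two checks: never merge a sync bio into an async request, and otherwise merge only when the bio would be queued on the same cfqq as rq. A condensed standalone sketch of that policy (the sketch_* types are illustrative):

	struct sketch_rq {
		int	is_sync;	/* rq_is_sync(rq) */
		void	*cfqq;		/* RQ_CFQQ(rq) */
	};

	static int sketch_allow_merge(int bio_is_sync, void *bio_cfqq,
				      const struct sketch_rq *rq)
	{
		if (bio_is_sync && !rq->is_sync)
			return 0;			/* sync bio, async rq: never */

		return bio_cfqq == rq->cfqq;		/* same queue: ok to merge */
	}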
@@ -1154,37 +1156,9 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 		cfq_schedule_dispatch(cfqd);
 	}
 
-	/*
-	 * it's on the empty list and still hashed
-	 */
-	hlist_del(&cfqq->cfq_hash);
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
-static struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
-		    const int hashval)
-{
-	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
-	struct hlist_node *entry;
-	struct cfq_queue *__cfqq;
-
-	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
-		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
-
-		if (__cfqq->key == key && (__p == prio || !prio))
-			return __cfqq;
-	}
-
-	return NULL;
-}
-
-static struct cfq_queue *
-cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
-{
-	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
-}
-
 static void cfq_free_io_context(struct io_context *ioc)
 {
 	struct cfq_io_context *__cic;
@@ -1342,7 +1316,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 	cfqq = cic->cfqq[ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
-		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
 					 GFP_ATOMIC);
 		if (new_cfqq) {
 			cic->cfqq[ASYNC] = new_cfqq;
@@ -1374,16 +1348,16 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
-	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
-	unsigned short ioprio;
+	struct cfq_io_context *cic;
 
retry:
-	ioprio = tsk->ioprio;
-	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
+	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	/* cic always exists here */
+	cfqq = cic_to_cfqq(cic, is_sync);
 
 	if (!cfqq) {
 		if (new_cfqq) {
@@ -1408,20 +1382,20 @@ retry:
 
 		memset(cfqq, 0, sizeof(*cfqq));
 
-		INIT_HLIST_NODE(&cfqq->cfq_hash);
 		RB_CLEAR_NODE(&cfqq->rb_node);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
-		cfqq->key = key;
-		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
 		atomic_set(&cfqq->ref, 0);
 		cfqq->cfqd = cfqd;
 
-		if (key != CFQ_KEY_ASYNC)
+		if (is_sync) {
 			cfq_mark_cfqq_idle_window(cfqq);
+			cfq_mark_cfqq_sync(cfqq);
+		}
 
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_mark_cfqq_queue_new(cfqq);
+
 		cfq_init_prio_data(cfqq);
 	}
 
@@ -1453,6 +1427,9 @@ cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 	struct cfq_io_context *cic;
 	void *k, *key = cfqd;
 
+	if (unlikely(!ioc))
+		return NULL;
+
restart:
 	n = ioc->cic_root.rb_node;
 	while (n) {
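The new NULL check makes cfq_cic_rb_lookup() safe to call speculatively with current->io_context, which is still NULL for a task that has never done I/O; each lookup path then picks a conservative fallback on a miss (NULL in cfq_find_rq_fmerge(), 0 in cfq_allow_merge(), ELV_MQUEUE_MAY in cfq_may_queue() below). A tiny runnable sketch of that calling pattern (illustrative types only):

	#include <stdio.h>

	struct sketch_ioc { int dummy; };

	/* stand-in for cfq_cic_rb_lookup(): tolerate a missing io context */
	static struct sketch_ioc *sketch_lookup(struct sketch_ioc *ioc)
	{
		if (!ioc)		/* task has never done I/O */
			return NULL;	/* caller falls back to a safe default */
		return ioc;		/* ... real code walks ioc->cic_root here */
	}

	int main(void)
	{
		printf("%s\n", sketch_lookup(NULL) ? "hit" : "miss: fall back");
		return 0;
	}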
@@ -1839,10 +1816,8 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
+	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
-	unsigned int key;
-
-	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1850,7 +1825,11 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
+	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	if (!cic)
+		return ELV_MQUEUE_MAY;
+
+	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1894,7 +1873,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
 	const int is_sync = rq_is_sync(rq);
-	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -1907,14 +1885,15 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	if (!cic)
 		goto queue_fail;
 
-	if (!cic->cfqq[is_sync]) {
-		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
+	cfqq = cic_to_cfqq(cic, is_sync);
+	if (!cfqq) {
+		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
+
 		if (!cfqq)
 			goto queue_fail;
 
-		cic->cfqq[is_sync] = cfqq;
-	} else
-		cfqq = cic->cfqq[is_sync];
+		cic_set_cfqq(cic, cfqq, is_sync);
+	}
 
 	cfqq->allocated[rw]++;
 	cfq_clear_cfqq_must_alloc(cfqq);
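cfq_set_request() now follows a plain get-or-create pattern against the cic cache: look up the slot, allocate on a miss, and store the result for subsequent requests from the same context. A standalone sketch of that shape (the sketch_* names are illustrative):

	struct sketch_cic {
		void *cfqq[2];			/* [0] async, [1] sync */
	};

	static void *sketch_get_or_create(struct sketch_cic *cic, int is_sync,
					  void *(*alloc)(void))
	{
		void *q = cic->cfqq[!!is_sync];

		if (!q) {
			q = alloc();		/* may fail; caller errors out */
			if (q)
				cic->cfqq[!!is_sync] = q;	/* cache for later requests */
		}
		return q;
	}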
@@ -2044,14 +2023,12 @@ static void cfq_exit_queue(elevator_t *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	kfree(cfqd->cfq_hash);
 	kfree(cfqd);
 }
 
 static void *cfq_init_queue(request_queue_t *q)
 {
 	struct cfq_data *cfqd;
-	int i;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
 	if (!cfqd)
@@ -2062,13 +2039,6 @@ static void *cfq_init_queue(request_queue_t *q)
 	cfqd->service_tree = CFQ_RB_ROOT;
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
-	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
-	if (!cfqd->cfq_hash)
-		goto out_free;
-
-	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
-		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
-
 	cfqd->queue = q;
 
 	init_timer(&cfqd->idle_slice_timer);
@@ -2092,9 +2062,6 @@ static void *cfq_init_queue(request_queue_t *q)
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 
 	return cfqd;
-out_free:
-	kfree(cfqd);
-	return NULL;
 }
 
 static void cfq_slab_kill(void)