author		Jens Axboe <axboe@suse.de>	2006-07-13 06:39:25 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:27:02 -0400
commit		5e705374796e72b36e7bb9c59c8d46d2dc5db36a (patch)
tree		94b365b5b56d08dfa1eb127285f7da9019bbf670 /block/cfq-iosched.c
parent		ff7d145fd911266ae42af7552edc32681c01addb (diff)
[PATCH] cfq-iosched: kill crq
Get rid of the cfq_rq request type. With the added elevator_private2, we
have enough room in struct request to get rid of any crq allocation/free
for each request.

Signed-off-by: Jens Axboe <axboe@suse.de>
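To make the mechanism concrete, here is a minimal self-contained userspace sketch of the idea; it is a model, not kernel code, and only the RQ_CIC/RQ_CFQQ macro names are taken from the patch (the stub structs are hypothetical):

#include <assert.h>
#include <stdio.h>

struct cfq_io_context { int refcount; };
struct cfq_queue      { int allocated; };

/* Stand-in for the block layer's struct request with the two opaque
 * slots this patch relies on: elevator_private existed already,
 * elevator_private2 is the newly added one. */
struct request {
	void *elevator_private;
	void *elevator_private2;
};

/* Mirrors the accessor macros introduced by the patch. */
#define RQ_CIC(rq)	((struct cfq_io_context *)(rq)->elevator_private)
#define RQ_CFQQ(rq)	((struct cfq_queue *)(rq)->elevator_private2)

int main(void)
{
	struct cfq_io_context cic = { .refcount = 1 };
	struct cfq_queue cfqq = { .allocated = 0 };
	struct request rq;

	/* set_request path: two pointer stores, no per-request
	 * allocation, hence no allocation-failure unwind. */
	cfqq.allocated++;
	rq.elevator_private  = &cic;
	rq.elevator_private2 = &cfqq;

	/* Wherever the scheduler used to chase crq->io_context or
	 * crq->cfq_queue, it now reads straight off the request. */
	assert(RQ_CIC(&rq) == &cic);
	assert(RQ_CFQQ(&rq) == &cfqq);

	/* put_request path: clear the slots; nothing to free. */
	RQ_CFQQ(&rq)->allocated--;
	rq.elevator_private = rq.elevator_private2 = NULL;

	printf("per-request cfq state carried without a crq allocation\n");
	return 0;
}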
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	239
1 file changed, 95 insertions(+), 144 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 86f0f4796fd..3c5fd9c2c20 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -43,9 +43,9 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 
 #define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)
 
-#define RQ_DATA(rq)		(rq)->elevator_private
+#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
+#define RQ_CFQQ(rq)		((rq)->elevator_private2)
 
-static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
 
@@ -95,8 +95,6 @@ struct cfq_data {
 	 */
 	struct hlist_head *cfq_hash;
 
-	mempool_t *crq_pool;
-
 	int rq_in_driver;
 	int hw_tag;
 
@@ -153,7 +151,7 @@ struct cfq_queue {
 	/* sorted list of pending requests */
 	struct rb_root sort_list;
 	/* if fifo isn't expired, next request to serve */
-	struct cfq_rq *next_crq;
+	struct request *next_rq;
 	/* requests queued in sort_list */
 	int queued[2];
 	/* currently allocated requests */
@@ -177,13 +175,6 @@ struct cfq_queue {
 	unsigned int flags;
 };
 
-struct cfq_rq {
-	struct request *request;
-
-	struct cfq_queue *cfq_queue;
-	struct cfq_io_context *io_context;
-};
-
 enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_on_rr = 0,
 	CFQ_CFQQ_FLAG_wait_request,
@@ -220,7 +211,7 @@ CFQ_CFQQ_FNS(prio_changed);
 #undef CFQ_CFQQ_FNS
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
-static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
+static void cfq_dispatch_insert(request_queue_t *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
 /*
@@ -249,12 +240,12 @@ static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
 }
 
 /*
- * Lifted from AS - choose which of crq1 and crq2 that is best served now.
+ * Lifted from AS - choose which of rq1 and rq2 that is best served now.
  * We choose the request that is closest to the head right now. Distance
  * behind the head is penalized and only allowed to a certain extent.
  */
-static struct cfq_rq *
-cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
+static struct request *
+cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 {
 	sector_t last, s1, s2, d1 = 0, d2 = 0;
 	unsigned long back_max;
@@ -262,18 +253,18 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 #define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
 	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
 
-	if (crq1 == NULL || crq1 == crq2)
-		return crq2;
-	if (crq2 == NULL)
-		return crq1;
+	if (rq1 == NULL || rq1 == rq2)
+		return rq2;
+	if (rq2 == NULL)
+		return rq1;
 
-	if (rq_is_sync(crq1->request) && !rq_is_sync(crq2->request))
-		return crq1;
-	else if (rq_is_sync(crq2->request) && !rq_is_sync(crq1->request))
-		return crq2;
+	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
+		return rq1;
+	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
+		return rq2;
 
-	s1 = crq1->request->sector;
-	s2 = crq2->request->sector;
+	s1 = rq1->sector;
+	s2 = rq2->sector;
 
 	last = cfqd->last_sector;
 
@@ -308,23 +299,23 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 	 * check two variables for all permutations: --> faster!
 	 */
 	switch (wrap) {
-	case 0: /* common case for CFQ: crq1 and crq2 not wrapped */
+	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
 		if (d1 < d2)
-			return crq1;
+			return rq1;
 		else if (d2 < d1)
-			return crq2;
+			return rq2;
 		else {
 			if (s1 >= s2)
-				return crq1;
+				return rq1;
 			else
-				return crq2;
+				return rq2;
 		}
 
 	case CFQ_RQ2_WRAP:
-		return crq1;
+		return rq1;
 	case CFQ_RQ1_WRAP:
-		return crq2;
-	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */
+		return rq2;
+	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
 	default:
 		/*
 		 * Since both rqs are wrapped,
@@ -333,35 +324,34 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 		 * since back seek takes more time than forward.
 		 */
 		if (s1 <= s2)
-			return crq1;
+			return rq1;
 		else
-			return crq2;
+			return rq2;
 	}
 }
 
 /*
  * would be nice to take fifo expire time into account as well
  */
-static struct cfq_rq *
-cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		  struct cfq_rq *last_crq)
+static struct request *
+cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+		 struct request *last)
 {
-	struct request *last = last_crq->request;
 	struct rb_node *rbnext = rb_next(&last->rb_node);
 	struct rb_node *rbprev = rb_prev(&last->rb_node);
-	struct cfq_rq *next = NULL, *prev = NULL;
+	struct request *next = NULL, *prev = NULL;
 
 	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
 	if (rbprev)
-		prev = RQ_DATA(rb_entry_rq(rbprev));
+		prev = rb_entry_rq(rbprev);
 
 	if (rbnext)
-		next = RQ_DATA(rb_entry_rq(rbnext));
+		next = rb_entry_rq(rbnext);
 	else {
 		rbnext = rb_first(&cfqq->sort_list);
 		if (rbnext && rbnext != &last->rb_node)
-			next = RQ_DATA(rb_entry_rq(rbnext));
+			next = rb_entry_rq(rbnext);
 	}
 
 	return cfq_choose_req(cfqd, next, prev);
@@ -450,26 +440,25 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 /*
  * rb tree support functions
  */
-static inline void cfq_del_crq_rb(struct cfq_rq *crq)
+static inline void cfq_del_rq_rb(struct request *rq)
 {
-	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 	struct cfq_data *cfqd = cfqq->cfqd;
-	const int sync = rq_is_sync(crq->request);
+	const int sync = rq_is_sync(rq);
 
 	BUG_ON(!cfqq->queued[sync]);
 	cfqq->queued[sync]--;
 
-	elv_rb_del(&cfqq->sort_list, crq->request);
+	elv_rb_del(&cfqq->sort_list, rq);
 
 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
-static void cfq_add_crq_rb(struct cfq_rq *crq)
+static void cfq_add_rq_rb(struct request *rq)
 {
-	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 	struct cfq_data *cfqd = cfqq->cfqd;
-	struct request *rq = crq->request;
 	struct request *__alias;
 
 	cfqq->queued[rq_is_sync(rq)]++;
@@ -479,17 +468,15 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 	 * if that happens, put the alias on the dispatch list
 	 */
 	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
-		cfq_dispatch_insert(cfqd->queue, RQ_DATA(__alias));
+		cfq_dispatch_insert(cfqd->queue, __alias);
 }
 
 static inline void
-cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
-	struct request *rq = crq->request;
-
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfq_add_crq_rb(crq);
+	cfq_add_rq_rb(rq);
 }
 
 static struct request *
@@ -533,14 +520,13 @@ static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 
 static void cfq_remove_request(struct request *rq)
 {
-	struct cfq_rq *crq = RQ_DATA(rq);
-	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
-	if (cfqq->next_crq == crq)
-		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+	if (cfqq->next_rq == rq)
+		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
 
 	list_del_init(&rq->queuelist);
-	cfq_del_crq_rb(crq);
+	cfq_del_rq_rb(rq);
 }
 
 static int
@@ -561,12 +547,10 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 static void cfq_merged_request(request_queue_t *q, struct request *req,
 			       int type)
 {
-	struct cfq_rq *crq = RQ_DATA(req);
-
 	if (type == ELEVATOR_FRONT_MERGE) {
-		struct cfq_queue *cfqq = crq->cfq_queue;
+		struct cfq_queue *cfqq = RQ_CFQQ(req);
 
-		cfq_reposition_crq_rb(cfqq, crq);
+		cfq_reposition_rq_rb(cfqq, req);
 	}
 }
 
@@ -789,11 +773,10 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	return 1;
 }
 
-static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq = crq->cfq_queue;
-	struct request *rq = crq->request;
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
 	cfq_remove_request(rq);
 	cfqq->on_dispatch[rq_is_sync(rq)]++;
@@ -806,11 +789,10 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 /*
  * return expired entry, or NULL to just start from scratch in rbtree
  */
-static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
+static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct request *rq;
-	struct cfq_rq *crq;
 
 	if (cfq_cfqq_fifo_expire(cfqq))
 		return NULL;
@@ -818,11 +800,10 @@ static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
 	if (!list_empty(&cfqq->fifo)) {
 		int fifo = cfq_cfqq_class_sync(cfqq);
 
-		crq = RQ_DATA(rq_entry_fifo(cfqq->fifo.next));
-		rq = crq->request;
+		rq = rq_entry_fifo(cfqq->fifo.next);
 		if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
 			cfq_mark_cfqq_fifo_expire(cfqq);
-			return crq;
+			return rq;
 		}
 	}
 
@@ -909,25 +890,25 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
 
 	do {
-		struct cfq_rq *crq;
+		struct request *rq;
 
 		/*
 		 * follow expired path, else get first next available
 		 */
-		if ((crq = cfq_check_fifo(cfqq)) == NULL)
-			crq = cfqq->next_crq;
+		if ((rq = cfq_check_fifo(cfqq)) == NULL)
+			rq = cfqq->next_rq;
 
 		/*
 		 * finally, insert request into driver dispatch list
 		 */
-		cfq_dispatch_insert(cfqd->queue, crq);
+		cfq_dispatch_insert(cfqd->queue, rq);
 
 		cfqd->dispatch_slice++;
 		dispatched++;
 
 		if (!cfqd->active_cic) {
-			atomic_inc(&crq->io_context->ioc->refcount);
-			cfqd->active_cic = crq->io_context;
+			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
+			cfqd->active_cic = RQ_CIC(rq);
 		}
 
 		if (RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -958,13 +939,12 @@ static int
 cfq_forced_dispatch_cfqqs(struct list_head *list)
 {
 	struct cfq_queue *cfqq, *next;
-	struct cfq_rq *crq;
 	int dispatched;
 
 	dispatched = 0;
 	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
-		while ((crq = cfqq->next_crq)) {
-			cfq_dispatch_insert(cfqq->cfqd->queue, crq);
+		while (cfqq->next_rq) {
+			cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
 			dispatched++;
 		}
 		BUG_ON(!list_empty(&cfqq->fifo));
@@ -1040,8 +1020,8 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 }
 
 /*
- * task holds one reference to the queue, dropped when task exits. each crq
- * in-flight on this queue also holds a reference, dropped when crq is freed.
+ * task holds one reference to the queue, dropped when task exits. each rq
+ * in-flight on this queue also holds a reference, dropped when rq is freed.
  *
  * queue lock must be held here.
  */
@@ -1486,15 +1466,15 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
 
 static void
 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
-		       struct cfq_rq *crq)
+		       struct request *rq)
 {
 	sector_t sdist;
 	u64 total;
 
-	if (cic->last_request_pos < crq->request->sector)
-		sdist = crq->request->sector - cic->last_request_pos;
+	if (cic->last_request_pos < rq->sector)
+		sdist = rq->sector - cic->last_request_pos;
 	else
-		sdist = cic->last_request_pos - crq->request->sector;
+		sdist = cic->last_request_pos - rq->sector;
 
 	/*
 	 * Don't allow the seek distance to get too large from the
@@ -1545,7 +1525,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  */
 static int
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
-		   struct cfq_rq *crq)
+		   struct request *rq)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -1564,7 +1544,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 */
 	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
 		return 0;
-	if (rq_is_sync(crq->request) && !cfq_cfqq_sync(cfqq))
+	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
 		return 1;
 
 	return 0;
@@ -1603,26 +1583,26 @@ static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
- * Called when a new fs request (crq) is added (to cfqq). Check if there's
+ * Called when a new fs request (rq) is added (to cfqq). Check if there's
  * something we should do about it
  */
 static void
-cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		 struct cfq_rq *crq)
+cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+		struct request *rq)
 {
-	struct cfq_io_context *cic = crq->io_context;
+	struct cfq_io_context *cic = RQ_CIC(rq);
 
 	/*
 	 * check if this request is a better next-serve candidate
 	 */
-	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-	BUG_ON(!cfqq->next_crq);
+	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+	BUG_ON(!cfqq->next_rq);
 
 	/*
 	 * we never wait for an async request and we don't allow preemption
 	 * of an async request. so just return early
 	 */
-	if (!rq_is_sync(crq->request)) {
+	if (!rq_is_sync(rq)) {
 		/*
 		 * sync process issued an async request, if it's waiting
 		 * then expire it and kick rq handling.
@@ -1636,11 +1616,11 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 
 	cfq_update_io_thinktime(cfqd, cic);
-	cfq_update_io_seektime(cfqd, cic, crq);
+	cfq_update_io_seektime(cfqd, cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
 	cic->last_queue = jiffies;
-	cic->last_request_pos = crq->request->sector + crq->request->nr_sectors;
+	cic->last_request_pos = rq->sector + rq->nr_sectors;
 
 	if (cfqq == cfqd->active_queue) {
 		/*
@@ -1653,7 +1633,7 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			del_timer(&cfqd->idle_slice_timer);
 			cfq_start_queueing(cfqd, cfqq);
 		}
-	} else if (cfq_should_preempt(cfqd, cfqq, crq)) {
+	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is
 		 * idle and has expired it's mean thinktime or this new queue
@@ -1668,25 +1648,23 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void cfq_insert_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_rq *crq = RQ_DATA(rq);
-	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
 	cfq_init_prio_data(cfqq);
 
-	cfq_add_crq_rb(crq);
+	cfq_add_rq_rb(rq);
 
 	if (!cfq_cfqq_on_rr(cfqq))
 		cfq_add_cfqq_rr(cfqd, cfqq);
 
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
-	cfq_crq_enqueued(cfqd, cfqq, crq);
+	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
-	struct cfq_rq *crq = RQ_DATA(rq);
-	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 	struct cfq_data *cfqd = cfqq->cfqd;
 	const int sync = rq_is_sync(rq);
 	unsigned long now;
@@ -1709,7 +1687,7 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	}
 
 	if (sync)
-		crq->io_context->last_end_request = now;
+		RQ_CIC(rq)->last_end_request = now;
 
 	/*
 	 * If this is the active queue, check if it needs to be expired,
@@ -1817,20 +1795,18 @@ static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
  */
 static void cfq_put_request(request_queue_t *q, struct request *rq)
 {
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_rq *crq = RQ_DATA(rq);
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
-	if (crq) {
-		struct cfq_queue *cfqq = crq->cfq_queue;
+	if (cfqq) {
 		const int rw = rq_data_dir(rq);
 
 		BUG_ON(!cfqq->allocated[rw]);
 		cfqq->allocated[rw]--;
 
-		put_io_context(crq->io_context->ioc);
+		put_io_context(RQ_CIC(rq)->ioc);
 
-		mempool_free(crq, cfqd->crq_pool);
 		rq->elevator_private = NULL;
+		rq->elevator_private2 = NULL;
 
 		cfq_check_waiters(q, cfqq);
 		cfq_put_queue(cfqq);
@@ -1850,7 +1826,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	const int rw = rq_data_dir(rq);
 	pid_t key = cfq_queue_pid(tsk, rw);
 	struct cfq_queue *cfqq;
-	struct cfq_rq *crq;
 	unsigned long flags;
 	int is_sync = key != CFQ_KEY_ASYNC;
 
@@ -1876,23 +1851,13 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	cfq_clear_cfqq_must_alloc(cfqq);
 	cfqd->rq_starved = 0;
 	atomic_inc(&cfqq->ref);
-	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
-	if (crq) {
-		crq->request = rq;
-		crq->cfq_queue = cfqq;
-		crq->io_context = cic;
+	spin_unlock_irqrestore(q->queue_lock, flags);
 
-		rq->elevator_private = crq;
-		return 0;
-	}
+	rq->elevator_private = cic;
+	rq->elevator_private2 = cfqq;
+	return 0;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-	cfqq->allocated[rw]--;
-	if (!(cfqq->allocated[0] + cfqq->allocated[1]))
-		cfq_mark_cfqq_must_alloc(cfqq);
-	cfq_put_queue(cfqq);
 queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
@@ -2040,7 +2005,6 @@ static void cfq_exit_queue(elevator_t *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	mempool_destroy(cfqd->crq_pool);
 	kfree(cfqd->cfq_hash);
 	kfree(cfqd);
 }
@@ -2067,11 +2031,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 
 	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
 	if (!cfqd->cfq_hash)
-		goto out_crqhash;
-
-	cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
-	if (!cfqd->crq_pool)
-		goto out_crqpool;
+		goto out_free;
 
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
@@ -2100,17 +2060,13 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 
 	return cfqd;
-out_crqpool:
-	kfree(cfqd->cfq_hash);
-out_crqhash:
+out_free:
 	kfree(cfqd);
 	return NULL;
 }
 
 static void cfq_slab_kill(void)
 {
-	if (crq_pool)
-		kmem_cache_destroy(crq_pool);
 	if (cfq_pool)
 		kmem_cache_destroy(cfq_pool);
 	if (cfq_ioc_pool)
@@ -2119,11 +2075,6 @@ static void cfq_slab_kill(void)
 
 static int __init cfq_slab_setup(void)
 {
-	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
-					NULL, NULL);
-	if (!crq_pool)
-		goto fail;
-
 	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
 					NULL, NULL);
 	if (!cfq_pool)
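The shape of the win shows in the cfq_set_request()/cfq_put_request() pair above: set_request is now two pointer stores (the cfq_io_context into elevator_private, the cfq_queue into elevator_private2) that cannot fail, so the mempool allocation, its failure unwind, and the crq_pool slab and mempool setup/teardown all drop out; put_request simply clears the two slots. That symmetry accounts for most of the 144 deleted lines.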