author    Jens Axboe <axboe@suse.de>  2006-07-18 15:07:29 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>  2006-09-30 14:27:02 -0400
commit    8a8e674cb1dafc818ffea93d97e4c1c1f01fdbb6 (patch)
tree      98716fa9974c79928113c887c57fb5d8f7b6cc42  /block/as-iosched.c
parent    5e705374796e72b36e7bb9c59c8d46d2dc5db36a (diff)
[PATCH] as-iosched: kill arq
Get rid of the as_rq request type. With the added elevator_private2, we have
enough room in struct request to get rid of any arq allocation/free for each
request.

Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
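In short, the two pieces of per-request state the scheduler needs (the submitting
task's io_context and the arq_state) now live directly in the two elevator-owned
pointers of struct request, so no separate as_rq ever has to be allocated or freed.
A minimal sketch of that accessor pattern follows; the macros are copied from the
patch below, while the helper function is only a simplified illustration of how a
request gets tagged on add, not the real as_add_request:

    #define RQ_IOC(rq)	((struct io_context *) (rq)->elevator_private)
    #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
    #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)

    /* Simplified illustration only: tag a request without any as_rq allocation */
    static void as_tag_request(struct request *rq)
    {
    	RQ_SET_STATE(rq, AS_RQ_NEW);			/* state stored in elevator_private2 */
    	rq->elevator_private = as_get_io_context();	/* io_context stored in elevator_private */
    	if (RQ_IOC(rq))
    		atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
    }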
Diffstat (limited to 'block/as-iosched.c')
-rw-r--r--  block/as-iosched.c | 313
1 file changed, 118 insertions(+), 195 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index dca0b0563ca0..02eb9333898f 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -92,7 +92,7 @@ struct as_data {
 	struct rb_root sort_list[2];
 	struct list_head fifo_list[2];
 
-	struct as_rq *next_arq[2];	/* next in sort order */
+	struct request *next_rq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
 
 	unsigned long exit_prob;	/* probability a task will exit while
@@ -113,7 +113,6 @@ struct as_data {
 	int write_batch_count;		/* max # of reqs in a write batch */
 	int current_write_count;	/* how many requests left this batch */
 	int write_batch_idled;		/* has the write batch gone idle? */
-	mempool_t *arq_pool;
 
 	enum anticipation_status antic_status;
 	unsigned long antic_start;	/* jiffies: when it started */
@@ -146,22 +145,14 @@ enum arq_state {
 	AS_RQ_POSTSCHED,	/* when they shouldn't be */
 };
 
-struct as_rq {
-	struct request *request;
-
-	struct io_context *io_context;	/* The submitting task */
-
-	enum arq_state state;
-};
-
-#define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)
-
-static kmem_cache_t *arq_pool;
+#define RQ_IOC(rq)	((struct io_context *) (rq)->elevator_private)
+#define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
+#define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
 static atomic_t ioc_count = ATOMIC_INIT(0);
 static struct completion *ioc_gone;
 
-static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
+static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
 static void as_antic_stop(struct as_data *ad);
 
 /*
@@ -231,23 +222,23 @@ static struct io_context *as_get_io_context(void)
 	return ioc;
 }
 
-static void as_put_io_context(struct as_rq *arq)
+static void as_put_io_context(struct request *rq)
 {
 	struct as_io_context *aic;
 
-	if (unlikely(!arq->io_context))
+	if (unlikely(!RQ_IOC(rq)))
 		return;
 
-	aic = arq->io_context->aic;
+	aic = RQ_IOC(rq)->aic;
 
-	if (rq_is_sync(arq->request) && aic) {
+	if (rq_is_sync(rq) && aic) {
 		spin_lock(&aic->lock);
 		set_bit(AS_TASK_IORUNNING, &aic->state);
 		aic->last_end_request = jiffies;
 		spin_unlock(&aic->lock);
 	}
 
-	put_io_context(arq->io_context);
+	put_io_context(RQ_IOC(rq));
 }
 
 /*
@@ -255,17 +246,17 @@ static void as_put_io_context(struct as_rq *arq)
  */
 #define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])
 
-static void as_add_arq_rb(struct as_data *ad, struct request *rq)
+static void as_add_rq_rb(struct as_data *ad, struct request *rq)
 {
 	struct request *alias;
 
 	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
-		as_move_to_dispatch(ad, RQ_DATA(alias));
+		as_move_to_dispatch(ad, alias);
 		as_antic_stop(ad);
 	}
 }
 
-static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
+static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
 {
 	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
 }
@@ -285,26 +276,26 @@ static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
  * as_choose_req selects the preferred one of two requests of the same data_dir
  * ignoring time - eg. timeouts, which is the job of as_dispatch_request
  */
-static struct as_rq *
-as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
+static struct request *
+as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
 {
 	int data_dir;
 	sector_t last, s1, s2, d1, d2;
 	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
 	const sector_t maxback = MAXBACK;
 
-	if (arq1 == NULL || arq1 == arq2)
-		return arq2;
-	if (arq2 == NULL)
-		return arq1;
+	if (rq1 == NULL || rq1 == rq2)
+		return rq2;
+	if (rq2 == NULL)
+		return rq1;
 
-	data_dir = rq_is_sync(arq1->request);
+	data_dir = rq_is_sync(rq1);
 
 	last = ad->last_sector[data_dir];
-	s1 = arq1->request->sector;
-	s2 = arq2->request->sector;
+	s1 = rq1->sector;
+	s2 = rq2->sector;
 
-	BUG_ON(data_dir != rq_is_sync(arq2->request));
+	BUG_ON(data_dir != rq_is_sync(rq2));
 
 	/*
 	 * Strict one way elevator _except_ in the case where we allow
@@ -331,55 +322,55 @@ as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
 
 	/* Found required data */
 	if (!r1_wrap && r2_wrap)
-		return arq1;
+		return rq1;
 	else if (!r2_wrap && r1_wrap)
-		return arq2;
+		return rq2;
 	else if (r1_wrap && r2_wrap) {
 		/* both behind the head */
 		if (s1 <= s2)
-			return arq1;
+			return rq1;
 		else
-			return arq2;
+			return rq2;
 	}
 
 	/* Both requests in front of the head */
 	if (d1 < d2)
-		return arq1;
+		return rq1;
 	else if (d2 < d1)
-		return arq2;
+		return rq2;
 	else {
 		if (s1 >= s2)
-			return arq1;
+			return rq1;
 		else
-			return arq2;
+			return rq2;
 	}
 }
 
 /*
- * as_find_next_arq finds the next request after @prev in elevator order.
+ * as_find_next_rq finds the next request after @prev in elevator order.
  * this with as_choose_req form the basis for how the scheduler chooses
  * what request to process next. Anticipation works on top of this.
  */
-static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq)
+static struct request *
+as_find_next_rq(struct as_data *ad, struct request *last)
 {
-	struct request *last = arq->request;
 	struct rb_node *rbnext = rb_next(&last->rb_node);
 	struct rb_node *rbprev = rb_prev(&last->rb_node);
-	struct as_rq *next = NULL, *prev = NULL;
+	struct request *next = NULL, *prev = NULL;
 
 	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
 	if (rbprev)
-		prev = RQ_DATA(rb_entry_rq(rbprev));
+		prev = rb_entry_rq(rbprev);
 
 	if (rbnext)
-		next = RQ_DATA(rb_entry_rq(rbnext));
+		next = rb_entry_rq(rbnext);
 	else {
 		const int data_dir = rq_is_sync(last);
 
 		rbnext = rb_first(&ad->sort_list[data_dir]);
 		if (rbnext && rbnext != &last->rb_node)
-			next = RQ_DATA(rb_entry_rq(rbnext));
+			next = rb_entry_rq(rbnext);
 	}
 
 	return as_choose_req(ad, next, prev);
@@ -575,11 +566,11 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
  * previous one issued.
  */
 static int as_close_req(struct as_data *ad, struct as_io_context *aic,
-			struct as_rq *arq)
+			struct request *rq)
 {
 	unsigned long delay;	/* milliseconds */
 	sector_t last = ad->last_sector[ad->batch_data_dir];
-	sector_t next = arq->request->sector;
+	sector_t next = rq->sector;
 	sector_t delta;			/* acceptable close offset (in sectors) */
 	sector_t s;
 
@@ -636,7 +627,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
  *
  * If this task has queued some other IO, do not enter enticipation.
  */
-static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
+static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
 {
 	struct io_context *ioc;
 	struct as_io_context *aic;
@@ -644,7 +635,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 	ioc = ad->io_context;
 	BUG_ON(!ioc);
 
-	if (arq && ioc == arq->io_context) {
+	if (rq && ioc == RQ_IOC(rq)) {
 		/* request from same process */
 		return 1;
 	}
@@ -671,7 +662,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 		return 1;
 	}
 
-	if (arq && rq_is_sync(arq->request) && as_close_req(ad, aic, arq)) {
+	if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
 		/*
 		 * Found a close request that is not one of ours.
 		 *
@@ -687,7 +678,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
 		}
 
-		as_update_iohist(ad, aic, arq->request);
+		as_update_iohist(ad, aic, rq);
 		return 1;
 	}
 
@@ -714,10 +705,10 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 }
 
 /*
- * as_can_anticipate indicates whether we should either run arq
+ * as_can_anticipate indicates whether we should either run rq
  * or keep anticipating a better request.
  */
-static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
+static int as_can_anticipate(struct as_data *ad, struct request *rq)
 {
 	if (!ad->io_context)
 		/*
@@ -731,7 +722,7 @@ static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
 		 */
 		return 0;
 
-	if (as_can_break_anticipation(ad, arq))
+	if (as_can_break_anticipation(ad, rq))
 		/*
 		 * This request is a good candidate. Don't keep anticipating,
 		 * run it.
@@ -749,16 +740,16 @@ static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
 }
 
 /*
- * as_update_arq must be called whenever a request (arq) is added to
+ * as_update_rq must be called whenever a request (rq) is added to
  * the sort_list. This function keeps caches up to date, and checks if the
  * request might be one we are "anticipating"
  */
-static void as_update_arq(struct as_data *ad, struct as_rq *arq)
+static void as_update_rq(struct as_data *ad, struct request *rq)
 {
-	const int data_dir = rq_is_sync(arq->request);
+	const int data_dir = rq_is_sync(rq);
 
-	/* keep the next_arq cache up to date */
-	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
+	/* keep the next_rq cache up to date */
+	ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);
 
 	/*
 	 * have we been anticipating this request?
@@ -767,7 +758,7 @@ static void as_update_arq(struct as_data *ad, struct as_rq *arq)
 	 */
 	if (ad->antic_status == ANTIC_WAIT_REQ
 			|| ad->antic_status == ANTIC_WAIT_NEXT) {
-		if (as_can_break_anticipation(ad, arq))
+		if (as_can_break_anticipation(ad, rq))
 			as_antic_stop(ad);
 	}
 }
@@ -807,12 +798,11 @@ static void update_write_batch(struct as_data *ad)
 static void as_completed_request(request_queue_t *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;
-	struct as_rq *arq = RQ_DATA(rq);
 
 	WARN_ON(!list_empty(&rq->queuelist));
 
-	if (arq->state != AS_RQ_REMOVED) {
-		printk("arq->state %d\n", arq->state);
+	if (RQ_STATE(rq) != AS_RQ_REMOVED) {
+		printk("rq->state %d\n", RQ_STATE(rq));
 		WARN_ON(1);
 		goto out;
 	}
@@ -839,7 +829,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 		ad->new_batch = 0;
 	}
 
-	if (ad->io_context == arq->io_context && ad->io_context) {
+	if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
 		ad->antic_start = jiffies;
 		ad->ioc_finished = 1;
 		if (ad->antic_status == ANTIC_WAIT_REQ) {
@@ -851,9 +841,9 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 		}
 	}
 
-	as_put_io_context(arq);
+	as_put_io_context(rq);
 out:
-	arq->state = AS_RQ_POSTSCHED;
+	RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
 }
 
 /*
@@ -864,26 +854,27 @@ out:
  */
 static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 {
-	struct as_rq *arq = RQ_DATA(rq);
 	const int data_dir = rq_is_sync(rq);
 	struct as_data *ad = q->elevator->elevator_data;
+	struct io_context *ioc;
 
-	WARN_ON(arq->state != AS_RQ_QUEUED);
+	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
 
-	if (arq->io_context && arq->io_context->aic) {
-		BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
-		atomic_dec(&arq->io_context->aic->nr_queued);
+	ioc = RQ_IOC(rq);
+	if (ioc && ioc->aic) {
+		BUG_ON(!atomic_read(&ioc->aic->nr_queued));
+		atomic_dec(&ioc->aic->nr_queued);
 	}
 
 	/*
-	 * Update the "next_arq" cache if we are about to remove its
+	 * Update the "next_rq" cache if we are about to remove its
 	 * entry
 	 */
-	if (ad->next_arq[data_dir] == arq)
-		ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
+	if (ad->next_rq[data_dir] == rq)
+		ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
 
 	rq_fifo_clear(rq);
-	as_del_arq_rb(ad, rq);
+	as_del_rq_rb(ad, rq);
 }
 
 /*
@@ -935,9 +926,8 @@ static inline int as_batch_expired(struct as_data *ad)
 /*
  * move an entry to dispatch queue
  */
-static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
+static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
 {
-	struct request *rq = arq->request;
 	const int data_dir = rq_is_sync(rq);
 
 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
@@ -947,13 +937,14 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 
 	/*
 	 * This has to be set in order to be correctly updated by
-	 * as_find_next_arq
+	 * as_find_next_rq
 	 */
 	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
 
 	if (data_dir == REQ_SYNC) {
+		struct io_context *ioc = RQ_IOC(rq);
 		/* In case we have to anticipate after this */
-		copy_io_context(&ad->io_context, &arq->io_context);
+		copy_io_context(&ad->io_context, &ioc);
 	} else {
 		if (ad->io_context) {
 			put_io_context(ad->io_context);
@@ -965,19 +956,19 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	}
 	ad->ioc_finished = 0;
 
-	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
+	ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
 
 	/*
 	 * take it off the sort and fifo list, add to dispatch queue
 	 */
 	as_remove_queued_request(ad->q, rq);
-	WARN_ON(arq->state != AS_RQ_QUEUED);
+	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
 
 	elv_dispatch_sort(ad->q, rq);
 
-	arq->state = AS_RQ_DISPATCHED;
-	if (arq->io_context && arq->io_context->aic)
-		atomic_inc(&arq->io_context->aic->nr_dispatched);
+	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
+	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
+		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
 	ad->nr_dispatched++;
 }
 
@@ -989,9 +980,9 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 static int as_dispatch_request(request_queue_t *q, int force)
 {
 	struct as_data *ad = q->elevator->elevator_data;
-	struct as_rq *arq;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
 	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
+	struct request *rq;
 
 	if (unlikely(force)) {
 		/*
@@ -1007,14 +998,14 @@ static int as_dispatch_request(request_queue_t *q, int force)
 		ad->changed_batch = 0;
 		ad->new_batch = 0;
 
-		while (ad->next_arq[REQ_SYNC]) {
-			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+		while (ad->next_rq[REQ_SYNC]) {
+			as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]);
 			dispatched++;
 		}
 		ad->last_check_fifo[REQ_SYNC] = jiffies;
 
-		while (ad->next_arq[REQ_ASYNC]) {
-			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+		while (ad->next_rq[REQ_ASYNC]) {
+			as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]);
 			dispatched++;
 		}
 		ad->last_check_fifo[REQ_ASYNC] = jiffies;
@@ -1038,19 +1029,19 @@ static int as_dispatch_request(request_queue_t *q, int force)
 	/*
 	 * batch is still running or no reads or no writes
 	 */
-	arq = ad->next_arq[ad->batch_data_dir];
+	rq = ad->next_rq[ad->batch_data_dir];
 
 	if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
 		if (as_fifo_expired(ad, REQ_SYNC))
 			goto fifo_expired;
 
-		if (as_can_anticipate(ad, arq)) {
+		if (as_can_anticipate(ad, rq)) {
 			as_antic_waitreq(ad);
 			return 0;
 		}
 	}
 
-	if (arq) {
+	if (rq) {
 		/* we have a "next request" */
 		if (reads && !writes)
 			ad->current_batch_expires =
@@ -1078,7 +1069,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
 			ad->changed_batch = 1;
 		}
 		ad->batch_data_dir = REQ_SYNC;
-		arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[REQ_SYNC].next));
+		rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next);
 		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
 		goto dispatch_request;
 	}
@@ -1104,7 +1095,7 @@ dispatch_writes:
 		ad->batch_data_dir = REQ_ASYNC;
 		ad->current_write_count = ad->write_batch_count;
 		ad->write_batch_idled = 0;
-		arq = ad->next_arq[ad->batch_data_dir];
+		rq = ad->next_rq[ad->batch_data_dir];
 		goto dispatch_request;
 	}
 
@@ -1118,7 +1109,7 @@ dispatch_request:
 
 	if (as_fifo_expired(ad, ad->batch_data_dir)) {
 fifo_expired:
-		arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next));
+		rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
 	}
 
 	if (ad->changed_batch) {
@@ -1137,34 +1128,33 @@ fifo_expired:
 	}
 
 	/*
-	 * arq is the selected appropriate request.
+	 * rq is the selected appropriate request.
 	 */
-	as_move_to_dispatch(ad, arq);
+	as_move_to_dispatch(ad, rq);
 
 	return 1;
 }
 
 /*
- * add arq to rbtree and fifo
+ * add rq to rbtree and fifo
 */
 static void as_add_request(request_queue_t *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;
-	struct as_rq *arq = RQ_DATA(rq);
 	int data_dir;
 
-	arq->state = AS_RQ_NEW;
+	RQ_SET_STATE(rq, AS_RQ_NEW);
 
 	data_dir = rq_is_sync(rq);
 
-	arq->io_context = as_get_io_context();
+	rq->elevator_private = as_get_io_context();
 
-	if (arq->io_context) {
-		as_update_iohist(ad, arq->io_context->aic, arq->request);
-		atomic_inc(&arq->io_context->aic->nr_queued);
+	if (RQ_IOC(rq)) {
+		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
+		atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
 	}
 
-	as_add_arq_rb(ad, rq);
+	as_add_rq_rb(ad, rq);
 
 	/*
 	 * set expire time (only used for reads) and add to fifo list
@@ -1172,28 +1162,24 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
 	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
 
-	as_update_arq(ad, arq); /* keep state machine up to date */
-	arq->state = AS_RQ_QUEUED;
+	as_update_rq(ad, rq); /* keep state machine up to date */
+	RQ_SET_STATE(rq, AS_RQ_QUEUED);
 }
 
 static void as_activate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_rq *arq = RQ_DATA(rq);
-
-	WARN_ON(arq->state != AS_RQ_DISPATCHED);
-	arq->state = AS_RQ_REMOVED;
-	if (arq->io_context && arq->io_context->aic)
-		atomic_dec(&arq->io_context->aic->nr_dispatched);
+	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
+	RQ_SET_STATE(rq, AS_RQ_REMOVED);
+	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
+		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
 }
 
 static void as_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_rq *arq = RQ_DATA(rq);
-
-	WARN_ON(arq->state != AS_RQ_REMOVED);
-	arq->state = AS_RQ_DISPATCHED;
-	if (arq->io_context && arq->io_context->aic)
-		atomic_inc(&arq->io_context->aic->nr_dispatched);
+	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
+	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
+	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
+		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
 }
 
 /*
@@ -1237,8 +1223,8 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
 	 * if the merge was a front merge, we need to reposition request
 	 */
 	if (type == ELEVATOR_FRONT_MERGE) {
-		as_del_arq_rb(ad, req);
-		as_add_arq_rb(ad, req);
+		as_del_rq_rb(ad, req);
+		as_add_rq_rb(ad, req);
 		/*
 		 * Note! At this stage of this and the next function, our next
 		 * request may not be optimal - eg the request may have "grown"
@@ -1250,25 +1236,22 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
 static void as_merged_requests(request_queue_t *q, struct request *req,
 				struct request *next)
 {
-	struct as_rq *arq = RQ_DATA(req);
-	struct as_rq *anext = RQ_DATA(next);
-
-	BUG_ON(!arq);
-	BUG_ON(!anext);
-
 	/*
-	 * if anext expires before arq, assign its expire time to arq
-	 * and move into anext position (anext will be deleted) in fifo
+	 * if next expires before rq, assign its expire time to arq
+	 * and move into next position (next will be deleted) in fifo
 	 */
 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
 		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+			struct io_context *rioc = RQ_IOC(req);
+			struct io_context *nioc = RQ_IOC(next);
+
 			list_move(&req->queuelist, &next->queuelist);
 			rq_set_fifo_time(req, rq_fifo_time(next));
 			/*
 			 * Don't copy here but swap, because when anext is
 			 * removed below, it must contain the unused context
 			 */
-			swap_io_context(&arq->io_context, &anext->io_context);
+			swap_io_context(&rioc, &nioc);
 		}
 	}
 
@@ -1276,9 +1259,9 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
 	 * kill knowledge of next, this one is a goner
 	 */
 	as_remove_queued_request(q, next);
-	as_put_io_context(anext);
+	as_put_io_context(next);
 
-	anext->state = AS_RQ_MERGED;
+	RQ_SET_STATE(next, AS_RQ_MERGED);
 }
 
 /*
@@ -1301,45 +1284,6 @@ static void as_work_handler(void *data)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void as_put_request(request_queue_t *q, struct request *rq)
-{
-	struct as_data *ad = q->elevator->elevator_data;
-	struct as_rq *arq = RQ_DATA(rq);
-
-	if (!arq) {
-		WARN_ON(1);
-		return;
-	}
-
-	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
-		     arq->state != AS_RQ_PRESCHED &&
-		     arq->state != AS_RQ_MERGED)) {
-		printk("arq->state %d\n", arq->state);
-		WARN_ON(1);
-	}
-
-	mempool_free(arq, ad->arq_pool);
-	rq->elevator_private = NULL;
-}
-
-static int as_set_request(request_queue_t *q, struct request *rq,
-			  struct bio *bio, gfp_t gfp_mask)
-{
-	struct as_data *ad = q->elevator->elevator_data;
-	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
-
-	if (arq) {
-		memset(arq, 0, sizeof(*arq));
-		arq->request = rq;
-		arq->state = AS_RQ_PRESCHED;
-		arq->io_context = NULL;
-		rq->elevator_private = arq;
-		return 0;
-	}
-
-	return 1;
-}
-
 static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
 {
 	int ret = ELV_MQUEUE_MAY;
@@ -1366,22 +1310,17 @@ static void as_exit_queue(elevator_t *e)
 	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
 	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
 
-	mempool_destroy(ad->arq_pool);
 	put_io_context(ad->io_context);
 	kfree(ad);
 }
 
 /*
- * initialize elevator private data (as_data), and alloc a arq for
- * each request on the free lists
+ * initialize elevator private data (as_data).
 */
 static void *as_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct as_data *ad;
 
-	if (!arq_pool)
-		return NULL;
-
 	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
 	if (!ad)
 		return NULL;
@@ -1389,13 +1328,6 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 
 	ad->q = q; /* Identify what queue the data belongs to */
 
-	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-				mempool_free_slab, arq_pool, q->node);
-	if (!ad->arq_pool) {
-		kfree(ad);
-		return NULL;
-	}
-
 	/* anticipatory scheduling helpers */
 	ad->antic_timer.function = as_antic_timeout;
 	ad->antic_timer.data = (unsigned long)q;
@@ -1516,8 +1448,6 @@ static struct elevator_type iosched_as = {
 		.elevator_completed_req_fn =	as_completed_request,
 		.elevator_former_req_fn =	elv_rb_former_request,
 		.elevator_latter_req_fn =	elv_rb_latter_request,
-		.elevator_set_req_fn =		as_set_request,
-		.elevator_put_req_fn =		as_put_request,
 		.elevator_may_queue_fn =	as_may_queue,
 		.elevator_init_fn =		as_init_queue,
 		.elevator_exit_fn =		as_exit_queue,
@@ -1533,11 +1463,6 @@ static int __init as_init(void)
 {
 	int ret;
 
-	arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
-				     0, 0, NULL, NULL);
-	if (!arq_pool)
-		return -ENOMEM;
-
 	ret = elv_register(&iosched_as);
 	if (!ret) {
 		/*
@@ -1549,7 +1474,6 @@ static int __init as_init(void)
 		return 0;
 	}
 
-	kmem_cache_destroy(arq_pool);
 	return ret;
 }
 
@@ -1563,7 +1487,6 @@ static void __exit as_exit(void)
 	if (atomic_read(&ioc_count))
 		wait_for_completion(ioc_gone);
 	synchronize_rcu();
-	kmem_cache_destroy(arq_pool);
 }
 
 module_init(as_init);