author    Jens Axboe <axboe@suse.de>    2006-07-28 03:26:13 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>    2006-09-30 14:27:00 -0400
commit    9e2585a8a23f3a42f815b2a638725d85a921cd65 (patch)
tree      6c318a746596ea8523ccf07aaa270cf4295cf50e /block/as-iosched.c
parent    d4f2f4629ea6a003cd021a9ea1a8a23ec0cd70ac (diff)
[PATCH] as-iosched: remove arq->is_sync member
We can track this in struct request.

Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
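For readers coming to this change cold: the open-coded test deleted from as_add_request() at the bottom of this diff shows exactly what the cached flag held, and therefore what rq_is_sync() must compute — a request counts as synchronous if it is a read, or a write that the submitter flagged REQ_RW_SYNC. A minimal sketch of such a helper, assuming the cmd_flags-era struct request (the real rq_is_sync() is defined in include/linux/blkdev.h; the helper name here is hypothetical):

	#include <linux/blkdev.h>

	/*
	 * Sketch only -- mirrors the open-coded check removed from
	 * as_add_request() below. Reads are always treated as sync;
	 * writes only when the submitter set REQ_RW_SYNC.
	 */
	static inline int as_rq_sync_sketch(struct request *rq)
	{
		return rq_data_dir(rq) == READ ||
			(rq->cmd_flags & REQ_RW_SYNC);
	}

Because the value is derived from rq->cmd_flags on every call, there is no cached copy in struct as_rq to keep coherent with the request.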
Diffstat (limited to 'block/as-iosched.c')
-rw-r--r--  block/as-iosched.c | 36 ++++++++++++++----------------------
1 file changed, 14 insertions(+), 22 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index c2665467950e..dca0b0563ca0 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -151,7 +151,6 @@ struct as_rq {
 
 	struct io_context *io_context;	/* The submitting task */
 
-	unsigned int is_sync;
 	enum arq_state state;
 };
 
@@ -241,7 +240,7 @@ static void as_put_io_context(struct as_rq *arq)
 
 	aic = arq->io_context->aic;
 
-	if (arq->is_sync == REQ_SYNC && aic) {
+	if (rq_is_sync(arq->request) && aic) {
 		spin_lock(&aic->lock);
 		set_bit(AS_TASK_IORUNNING, &aic->state);
 		aic->last_end_request = jiffies;
@@ -254,14 +253,13 @@ static void as_put_io_context(struct as_rq *arq)
 /*
  * rb tree support functions
  */
-#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
+#define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])
 
 static void as_add_arq_rb(struct as_data *ad, struct request *rq)
 {
-	struct as_rq *arq = RQ_DATA(rq);
 	struct request *alias;
 
-	while ((unlikely(alias = elv_rb_add(ARQ_RB_ROOT(ad, arq), rq)))) {
+	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
 		as_move_to_dispatch(ad, RQ_DATA(alias));
 		as_antic_stop(ad);
 	}
@@ -269,7 +267,7 @@ static void as_add_arq_rb(struct as_data *ad, struct request *rq)
 
 static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
 {
-	elv_rb_del(ARQ_RB_ROOT(ad, RQ_DATA(rq)), rq);
+	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
 }
 
 /*
@@ -300,13 +298,13 @@ as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
 	if (arq2 == NULL)
 		return arq1;
 
-	data_dir = arq1->is_sync;
+	data_dir = rq_is_sync(arq1->request);
 
 	last = ad->last_sector[data_dir];
 	s1 = arq1->request->sector;
 	s2 = arq2->request->sector;
 
-	BUG_ON(data_dir != arq2->is_sync);
+	BUG_ON(data_dir != rq_is_sync(arq2->request));
 
 	/*
 	 * Strict one way elevator _except_ in the case where we allow
@@ -377,7 +375,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq)
 	if (rbnext)
 		next = RQ_DATA(rb_entry_rq(rbnext));
 	else {
-		const int data_dir = arq->is_sync;
+		const int data_dir = rq_is_sync(last);
 
 		rbnext = rb_first(&ad->sort_list[data_dir]);
 		if (rbnext && rbnext != &last->rb_node)
@@ -538,8 +536,7 @@ static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
 static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
 			     struct request *rq)
 {
-	struct as_rq *arq = RQ_DATA(rq);
-	int data_dir = arq->is_sync;
+	int data_dir = rq_is_sync(rq);
 	unsigned long thinktime = 0;
 	sector_t seek_dist;
 
@@ -674,7 +671,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 		return 1;
 	}
 
-	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, aic, arq)) {
+	if (arq && rq_is_sync(arq->request) && as_close_req(ad, aic, arq)) {
 		/*
 		 * Found a close request that is not one of ours.
 		 *
@@ -758,7 +755,7 @@ static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
  */
 static void as_update_arq(struct as_data *ad, struct as_rq *arq)
 {
-	const int data_dir = arq->is_sync;
+	const int data_dir = rq_is_sync(arq->request);
 
 	/* keep the next_arq cache up to date */
 	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
@@ -835,7 +832,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	 * actually serviced. This should help devices with big TCQ windows
 	 * and writeback caches
 	 */
-	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
+	if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
 		update_write_batch(ad);
 		ad->current_batch_expires = jiffies +
 				ad->batch_expire[REQ_SYNC];
@@ -868,7 +865,7 @@ out:
 static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 {
 	struct as_rq *arq = RQ_DATA(rq);
-	const int data_dir = arq->is_sync;
+	const int data_dir = rq_is_sync(rq);
 	struct as_data *ad = q->elevator->elevator_data;
 
 	WARN_ON(arq->state != AS_RQ_QUEUED);
@@ -941,7 +938,7 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	const int data_dir = arq->is_sync;
+	const int data_dir = rq_is_sync(rq);
 
 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
 
@@ -1158,12 +1155,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 
 	arq->state = AS_RQ_NEW;
 
-	if (rq_data_dir(arq->request) == READ
-			|| (arq->request->cmd_flags & REQ_RW_SYNC))
-		arq->is_sync = 1;
-	else
-		arq->is_sync = 0;
-	data_dir = arq->is_sync;
+	data_dir = rq_is_sync(rq);
 
 	arq->io_context = as_get_io_context();
 
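Note that every hunk above is a mechanical arq->is_sync to rq_is_sync() substitution except the one in as_find_next_arq(), where the old arq->is_sync becomes rq_is_sync(last); last refers to arq->request earlier in that function (the surrounding &last->rb_node checks operate on the same request), so the value is unchanged and the rewrite merely drops the dead cached copy. The design payoff is one fewer field in struct as_rq, and no possibility of the cached flag drifting out of agreement with the request's actual cmd_flags.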