author		Jens Axboe <axboe@suse.de>			2006-07-13 06:34:24 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:26:58 -0400
commit		b8aca35af5e9fbc2e41a3ba1b2e66f24e80ca9b6
tree		cb25861431d320eade140875bdd326faa0de19e2
parent		21183b07ee4be405362af8454f3647781c77df1b
[PATCH] deadline-iosched: migrate to using the elevator rb functions
This removes the open-coded rbtree handling from deadline and switches it to the shared elevator rb functions (elv_rb_add(), elv_rb_del(), elv_rb_find()), which key the sort tree off struct request directly.
Signed-off-by: Jens Axboe <axboe@suse.de>
-rw-r--r--	block/deadline-iosched.c	170
1 file changed, 34 insertions(+), 136 deletions(-)
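The core of the change: deadline's private sort tree used to hold struct deadline_rq nodes keyed by drq->rb_key, with the insert walk open-coded in the scheduler. The elevator core now owns that logic and keys the tree off rq->sector. A minimal sketch of what elv_rb_add() plausibly looks like, inferred from the removed __deadline_add_drq_rb() in the second hunk below — the helper name and call shape come from the patch itself, but this body is an illustration, not the verified block/elevator.c source:

/*
 * Sketch, not authoritative: insert @rq into @root keyed by start
 * sector; on a collision, leave the tree untouched and return the
 * already-queued alias so the caller can resolve it.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;	/* alias: same start sector */
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}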
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b66e820f544d..8300ba1f15a0 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -57,12 +57,6 @@ struct deadline_data {
  * pre-request data.
  */
 struct deadline_rq {
-	/*
-	 * rbtree index, key is the starting offset
-	 */
-	struct rb_node rb_node;
-	sector_t rb_key;
-
 	struct request *request;
 
 	/*
@@ -78,108 +72,38 @@ static kmem_cache_t *drq_pool;
 
 #define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)
 
-/*
- * rb tree support functions
- */
-#define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
-#define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
-#define rq_rb_key(rq)		(rq)->sector
-
-static struct deadline_rq *
-__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
-{
-	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
-	struct rb_node *parent = NULL;
-	struct deadline_rq *__drq;
-
-	while (*p) {
-		parent = *p;
-		__drq = rb_entry_drq(parent);
-
-		if (drq->rb_key < __drq->rb_key)
-			p = &(*p)->rb_left;
-		else if (drq->rb_key > __drq->rb_key)
-			p = &(*p)->rb_right;
-		else
-			return __drq;
-	}
-
-	rb_link_node(&drq->rb_node, parent, p);
-	return NULL;
-}
+#define RQ_RB_ROOT(dd, rq)	(&(dd)->sort_list[rq_data_dir((rq))])
+#define DRQ_RB_ROOT(dd, drq)	RQ_RB_ROOT((drq)->request)
 
 static void
-deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
 {
-	struct deadline_rq *__alias;
-
-	drq->rb_key = rq_rb_key(drq->request);
+	struct rb_root *root = RQ_RB_ROOT(dd, rq);
+	struct request *__alias;
 
 retry:
-	__alias = __deadline_add_drq_rb(dd, drq);
-	if (!__alias) {
-		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-		return;
+	__alias = elv_rb_add(root, rq);
+	if (unlikely(__alias)) {
+		deadline_move_request(dd, RQ_DATA(__alias));
+		goto retry;
 	}
-
-	deadline_move_request(dd, __alias);
-	goto retry;
 }
 
 static inline void
 deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 {
-	const int data_dir = rq_data_dir(drq->request);
+	struct request *rq = drq->request;
+	const int data_dir = rq_data_dir(rq);
 
 	if (dd->next_drq[data_dir] == drq) {
-		struct rb_node *rbnext = rb_next(&drq->rb_node);
+		struct rb_node *rbnext = rb_next(&rq->rb_node);
 
 		dd->next_drq[data_dir] = NULL;
 		if (rbnext)
-			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
-	}
-
-	BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
-	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-	RB_CLEAR_NODE(&drq->rb_node);
-}
-
-static struct request *
-deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
-{
-	struct rb_node *n = dd->sort_list[data_dir].rb_node;
-	struct deadline_rq *drq;
-
-	while (n) {
-		drq = rb_entry_drq(n);
-
-		if (sector < drq->rb_key)
-			n = n->rb_left;
-		else if (sector > drq->rb_key)
-			n = n->rb_right;
-		else
-			return drq->request;
+			dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
 	}
 
-	return NULL;
-}
-
-/*
- * deadline_find_first_drq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct deadline_rq *
-deadline_find_first_drq(struct deadline_data *dd, int data_dir)
-{
-	struct rb_node *n = dd->sort_list[data_dir].rb_node;
-
-	for (;;) {
-		if (n->rb_left == NULL)
-			return rb_entry_drq(n);
-
-		n = n->rb_left;
-	}
-}
+	elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
+}
 
 /*
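Two things are worth noting in the hunk above. First, the alias handling is inverted: elv_rb_add() completes the insert itself (the removed code needed a separate rb_insert_color() call) and returns an already-queued request only on a start-sector collision, in which case deadline dispatches that alias via deadline_move_request() and retries until the insert succeeds. Second, deletion collapses to a single call; a hedged sketch of elv_rb_del(), assuming it simply wraps the rb_erase()/RB_CLEAR_NODE() pair the removed code did by hand:

/* Hedged sketch, not verified elevator.c source. */
void elv_rb_del(struct rb_root *root, struct request *rq)
{
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);	/* mark @rq as off-tree */
}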
@@ -192,7 +116,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 	struct deadline_rq *drq = RQ_DATA(rq);
 	const int data_dir = rq_data_dir(drq->request);
 
-	deadline_add_drq_rb(dd, drq);
+	deadline_add_drq_rb(dd, rq);
 
 	/*
 	 * set expire time (only used for reads) and add to fifo list
@@ -224,11 +148,11 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	 * check for front merge
 	 */
 	if (dd->front_merges) {
-		sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+		sector_t sector = bio->bi_sector + bio_sectors(bio);
 
-		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
+		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
 		if (__rq) {
-			BUG_ON(rb_key != rq_rb_key(__rq));
+			BUG_ON(sector != __rq->sector);
 
 			if (elv_rq_merge_ok(__rq, bio)) {
 				ret = ELEVATOR_FRONT_MERGE;
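A front merge candidate is a queued request that starts exactly where the incoming bio ends, hence the lookup key bio->bi_sector + bio_sectors(bio). elv_rb_find() replaces the removed deadline_find_drq_rb(); going by that removed helper, it is presumably the plain binary search over the request tree:

/* Sketch inferred from the removed deadline_find_drq_rb(). */
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}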
@@ -243,17 +167,17 @@ out:
 	return ret;
 }
 
-static void deadline_merged_request(request_queue_t *q, struct request *req)
+static void deadline_merged_request(request_queue_t *q, struct request *req,
+				    int type)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
-	struct deadline_rq *drq = RQ_DATA(req);
 
 	/*
 	 * if the merge was a front merge, we need to reposition request
 	 */
-	if (rq_rb_key(req) != drq->rb_key) {
-		deadline_del_drq_rb(dd, drq);
-		deadline_add_drq_rb(dd, drq);
+	if (type == ELEVATOR_FRONT_MERGE) {
+		elv_rb_del(RQ_RB_ROOT(dd, req), req);
+		deadline_add_drq_rb(dd, req);
 	}
 }
 
@@ -261,18 +185,12 @@ static void
 deadline_merged_requests(request_queue_t *q, struct request *req,
 			 struct request *next)
 {
-	struct deadline_data *dd = q->elevator->elevator_data;
 	struct deadline_rq *drq = RQ_DATA(req);
 	struct deadline_rq *dnext = RQ_DATA(next);
 
 	BUG_ON(!drq);
 	BUG_ON(!dnext);
 
-	if (rq_rb_key(req) != drq->rb_key) {
-		deadline_del_drq_rb(dd, drq);
-		deadline_add_drq_rb(dd, drq);
-	}
-
 	/*
 	 * if dnext expires before drq, assign its expire time to drq
 	 * and move into dnext position (dnext will be deleted) in fifo
@@ -308,14 +226,15 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
 static void
 deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
 {
-	const int data_dir = rq_data_dir(drq->request);
-	struct rb_node *rbnext = rb_next(&drq->rb_node);
+	struct request *rq = drq->request;
+	const int data_dir = rq_data_dir(rq);
+	struct rb_node *rbnext = rb_next(&rq->rb_node);
 
 	dd->next_drq[READ] = NULL;
 	dd->next_drq[WRITE] = NULL;
 
 	if (rbnext)
-		dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+		dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
 
 	dd->last_sector = drq->request->sector + drq->request->nr_sectors;
 
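rb_entry_rq() is the request-level counterpart of the removed rb_entry_drq() macro. Its definition is not shown in this patch; presumably it is the usual container_of-style wrapper, along the lines of:

/* Assumed definition, mirroring the removed rb_entry_drq(). */
#define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)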
@@ -426,13 +345,17 @@ dispatch_find_request:
 		 */
 		drq = dd->next_drq[data_dir];
 	} else {
+		struct rb_node *n;
+
 		/*
 		 * The last req was the other direction or we have run out of
 		 * higher-sectored requests. Go back to the lowest sectored
 		 * request (1 way elevator) and start a new batch.
 		 */
 		dd->batching = 0;
-		drq = deadline_find_first_drq(dd, data_dir);
+		n = rb_first(&dd->sort_list[data_dir]);
+		if (n)
+			drq = RQ_DATA(rb_entry_rq(n));
 	}
 
 dispatch_request:
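The sweep back to the lowest sector (the 1-way elevator behaviour) no longer needs a private helper: rb_first() from the generic rbtree library performs the same leftmost descent that the removed deadline_find_first_drq() coded by hand, and the new code also survives an empty tree, which the old helper would have dereferenced. For illustration, rb_first() amounts to:

/* What rb_first() does, per lib/rbtree.c: follow rb_left to the
 * smallest-keyed node, or return NULL for an empty tree. */
struct rb_node *rb_first(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	if (!n)
		return NULL;
	while (n->rb_left)
		n = n->rb_left;
	return n;
}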
@@ -453,30 +376,6 @@ static int deadline_queue_empty(request_queue_t *q)
 		&& list_empty(&dd->fifo_list[READ]);
 }
 
-static struct request *
-deadline_former_request(request_queue_t *q, struct request *rq)
-{
-	struct deadline_rq *drq = RQ_DATA(rq);
-	struct rb_node *rbprev = rb_prev(&drq->rb_node);
-
-	if (rbprev)
-		return rb_entry_drq(rbprev)->request;
-
-	return NULL;
-}
-
-static struct request *
-deadline_latter_request(request_queue_t *q, struct request *rq)
-{
-	struct deadline_rq *drq = RQ_DATA(rq);
-	struct rb_node *rbnext = rb_next(&drq->rb_node);
-
-	if (rbnext)
-		return rb_entry_drq(rbnext)->request;
-
-	return NULL;
-}
-
 static void deadline_exit_queue(elevator_t *e)
 {
 	struct deadline_data *dd = e->elevator_data;
@@ -542,7 +441,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	drq = mempool_alloc(dd->drq_pool, gfp_mask);
 	if (drq) {
 		memset(drq, 0, sizeof(*drq));
-		RB_CLEAR_NODE(&drq->rb_node);
 		drq->request = rq;
 
 		INIT_LIST_HEAD(&drq->fifo);
@@ -633,8 +531,8 @@ static struct elevator_type iosched_deadline = {
 		.elevator_dispatch_fn =		deadline_dispatch_requests,
 		.elevator_add_req_fn =		deadline_add_request,
 		.elevator_queue_empty_fn =	deadline_queue_empty,
-		.elevator_former_req_fn =	deadline_former_request,
-		.elevator_latter_req_fn =	deadline_latter_request,
+		.elevator_former_req_fn =	elv_rb_former_request,
+		.elevator_latter_req_fn =	elv_rb_latter_request,
 		.elevator_set_req_fn =		deadline_set_request,
 		.elevator_put_req_fn =		deadline_put_request,
 		.elevator_init_fn =		deadline_init_queue,
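Finally, the former/latter hooks become generic: with requests sitting in the tree directly, finding the previous or next request in sector order is just rb_prev()/rb_next() on rq->rb_node, so the elevator core can provide elv_rb_former_request() and elv_rb_latter_request() to every rb-based scheduler. A hedged sketch of the latter, mirroring the removed deadline_latter_request() minus the drq indirection (elv_rb_former_request() would be the rb_prev() mirror image):

/* Hedged sketch; mirrors the removed deadline_latter_request(). */
struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}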