Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c        17
-rw-r--r--  block/cfq-iosched.c       22
-rw-r--r--  block/deadline-iosched.c  13
3 files changed, 20 insertions, 32 deletions
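
All three schedulers below carry private copies of the same rbtree helper macros (RB_EMPTY, ON_RB, RB_CLEAR, RB_CLEAR_ROOT); this change drops them in favour of the shared RB_EMPTY_ROOT(), RB_EMPTY_NODE() and RB_CLEAR_NODE() helpers from <linux/rbtree.h>. As a rough sketch of the resulting per-request pattern (illustrative only; the struct and function names here are invented, not taken from the schedulers):

#include <linux/rbtree.h>

/*
 * A request-private tree node: cleared at allocation time so the node
 * starts out in the "not linked into any sort tree" state.
 */
struct my_rq {
	struct rb_node rb_node;
};

static void my_rq_init(struct my_rq *rq)
{
	RB_CLEAR_NODE(&rq->rb_node);	/* mark as not on a tree */
}

static void my_rq_del(struct rb_root *root, struct my_rq *rq)
{
	rb_erase(&rq->rb_node, root);	/* unlink from the sort tree */
	RB_CLEAR_NODE(&rq->rb_node);	/* back to the cleared state */
}

RB_EMPTY_ROOT() takes over the role of the old per-scheduler RB_EMPTY(): it tests whether a whole sort tree is empty, for example before dispatching from it.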
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 56c99fa037df..1ec5df466708 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -347,9 +347,6 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node) (rb_parent(node) != node)
-#define RB_CLEAR(node) (rb_set_parent(node, node))
 #define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
 #define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync])
 #define rq_rb_key(rq) (rq)->sector
@@ -418,13 +415,13 @@ static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
 
 static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
-	if (!ON_RB(&arq->rb_node)) {
+	if (!RB_EMPTY_NODE(&arq->rb_node)) {
 		WARN_ON(1);
 		return;
 	}
 
 	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-	RB_CLEAR(&arq->rb_node);
+	RB_CLEAR_NODE(&arq->rb_node);
 }
 
 static struct request *
@@ -545,7 +542,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
 	struct rb_node *rbprev = rb_prev(&last->rb_node);
 	struct as_rq *arq_next, *arq_prev;
 
-	BUG_ON(!ON_RB(&last->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
 
 	if (rbprev)
 		arq_prev = rb_entry_arq(rbprev);
@@ -1122,7 +1119,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	struct request *rq = arq->request;
 	const int data_dir = arq->is_sync;
 
-	BUG_ON(!ON_RB(&arq->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));
 
 	as_antic_stop(ad);
 	ad->antic_status = ANTIC_OFF;
@@ -1247,7 +1244,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
 	 */
 
 	if (reads) {
-		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
+		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));
 
 		if (writes && ad->batch_data_dir == REQ_SYNC)
 			/*
@@ -1271,7 +1268,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
 
 	if (writes) {
 dispatch_writes:
-		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
+		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));
 
 		if (ad->batch_data_dir == REQ_SYNC) {
 			ad->changed_batch = 1;
@@ -1591,7 +1588,7 @@ static int as_set_request(request_queue_t *q, struct request *rq,
 
 	if (arq) {
 		memset(arq, 0, sizeof(*arq));
-		RB_CLEAR(&arq->rb_node);
+		RB_CLEAR_NODE(&arq->rb_node);
 		arq->request = rq;
 		arq->state = AS_RQ_PRESCHED;
 		arq->io_context = NULL;
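
The hunks above replace the old macros one-for-one at their use sites; insertion into the sort tree (as_add_arq_rb() and its helpers) keeps using the generic rb_link_node()/rb_insert_color() API, which this patch does not change. A minimal sketch of that insertion pattern, with invented names (a my_rq structure carrying a sector-based sort key), not the scheduler code itself:

#include <linux/rbtree.h>
#include <linux/types.h>

struct my_rq {
	struct rb_node rb_node;
	sector_t key;			/* sort key, e.g. the request's start sector */
};

/*
 * Walk down from the root comparing keys, link the new node at the leaf
 * slot that was reached, then let the rbtree core rebalance and recolour.
 */
static void my_rq_add(struct rb_root *root, struct my_rq *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct my_rq *cur = rb_entry(*p, struct my_rq, rb_node);

		parent = *p;
		if (rq->key < cur->key)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}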
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 940364edf2b9..e25223e147a2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -60,11 +60,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 /*
  * rb-tree defines
  */
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR(node) do { \
-	memset(node, 0, sizeof(*node)); \
-} while (0)
-#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
 #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq) (rq)->sector
 
@@ -559,7 +554,7 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 
 	rb_erase(&crq->rb_node, &cfqq->sort_list);
 
-	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
@@ -914,7 +909,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
-	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 	WARN_ON(cfqq != cfqd->active_queue);
 
 	/*
@@ -1042,7 +1037,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * if queue has requests, dispatch one. if not, check if
 	 * enough slice is left to wait for one
 	 */
-	if (!RB_EMPTY(&cfqq->sort_list))
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 		goto keep_queue;
 	else if (cfq_cfqq_dispatched(cfqq)) {
 		cfqq = NULL;
@@ -1066,7 +1061,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int dispatched = 0;
 
-	BUG_ON(RB_EMPTY(&cfqq->sort_list));
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
 
 	do {
 		struct cfq_rq *crq;
@@ -1090,7 +1085,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfqd->active_cic = crq->io_context;
 		}
 
-		if (RB_EMPTY(&cfqq->sort_list))
+		if (RB_EMPTY_ROOT(&cfqq->sort_list))
 			break;
 
 	} while (dispatched < max_dispatch);
@@ -1480,7 +1475,6 @@ retry:
 
 		INIT_HLIST_NODE(&cfqq->cfq_hash);
 		INIT_LIST_HEAD(&cfqq->cfq_list);
-		RB_CLEAR_ROOT(&cfqq->sort_list);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
 		cfqq->key = key;
@@ -1873,7 +1867,7 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	if (cfqd->active_queue == cfqq) {
 		if (time_after(now, cfqq->slice_end))
 			cfq_slice_expired(cfqd, 0);
-		else if (sync && RB_EMPTY(&cfqq->sort_list)) {
+		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
 			if (!cfq_arm_slice_timer(cfqd, cfqq))
 				cfq_schedule_dispatch(cfqd);
 		}
@@ -2059,7 +2053,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 
 	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
 	if (crq) {
-		RB_CLEAR(&crq->rb_node);
+		RB_CLEAR_NODE(&crq->rb_node);
 		crq->rb_key = 0;
 		crq->request = rq;
 		INIT_HLIST_NODE(&crq->hash);
@@ -2151,7 +2145,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 	/*
 	 * not expired and it has a request pending, let it dispatch
 	 */
-	if (!RB_EMPTY(&cfqq->sort_list)) {
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
 		cfq_mark_cfqq_must_dispatch(cfqq);
 		goto out_kick;
 	}
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index e5bccaaed563..4469dd84623c 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -159,9 +159,6 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node) (rb_parent(node) != node)
-#define RB_CLEAR(node) (rb_set_parent(node, node))
 #define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node)
 #define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)])
 #define rq_rb_key(rq) (rq)->sector
@@ -220,9 +217,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 		dd->next_drq[data_dir] = rb_entry_drq(rbnext);
 	}
 
-	BUG_ON(!ON_RB(&drq->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
 	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-	RB_CLEAR(&drq->rb_node);
+	RB_CLEAR_NODE(&drq->rb_node);
 }
 
 static struct request *
@@ -496,7 +493,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
 	 */
 
 	if (reads) {
-		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
 
 		if (writes && (dd->starved++ >= dd->writes_starved))
 			goto dispatch_writes;
@@ -512,7 +509,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
 
 	if (writes) {
 dispatch_writes:
-		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
 
 		dd->starved = 0;
 
@@ -668,7 +665,7 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	drq = mempool_alloc(dd->drq_pool, gfp_mask);
 	if (drq) {
 		memset(drq, 0, sizeof(*drq));
-		RB_CLEAR(&drq->rb_node);
+		RB_CLEAR_NODE(&drq->rb_node);
 		drq->request = rq;
 
 		INIT_HLIST_NODE(&drq->hash);
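
In as-iosched and deadline-iosched the dispatch paths cross-check their "reads/writes pending" flags against the per-direction sort tree with RB_EMPTY_ROOT(), exactly where the local RB_EMPTY() used to sit; cfq-iosched uses the same macro to test a queue's sort_list directly, e.g. in cfq_select_queue() and the idle-slice timer. A hedged sketch of the cross-check variant (names invented, not the real dispatch functions):

#include <linux/bug.h>
#include <linux/rbtree.h>

/*
 * Dispatch from one direction only if the bookkeeping says requests are
 * pending; the corresponding sort tree must then be non-empty, so an
 * empty root here means the counters and the tree have gone out of sync.
 */
static int my_dispatch_dir(struct rb_root *sort_list, unsigned int pending)
{
	if (!pending)
		return 0;

	BUG_ON(RB_EMPTY_ROOT(sort_list));

	/* ... pick the front request off the tree and issue it ... */
	return 1;
}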