about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2008-01-31 18:37:27 -0500
committerJens Axboe <jens.axboe@oracle.com>2008-02-01 03:26:33 -0500
commit4eb166d9874b4917d79ccd14577a60d795a0cb4a (patch)
treeb29e657c7f07ad4a0f852bc4803404a7b15bd4ef
parentfe094d98e79351344c9e0e2c1446794240d247a4 (diff)
block: make elevator lib checkpatch compliant
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--  block/elevator.c | 57
1 file changed, 30 insertions(+), 27 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 8cd5775acd7..bafbae0344d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -45,7 +45,8 @@ static LIST_HEAD(elv_list);
  */
 static const int elv_hash_shift = 6;
 #define ELV_HASH_BLOCK(sec)	((sec) >> 3)
-#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_FN(sec)	\
+		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
@@ -224,15 +225,27 @@ int elevator_init(struct request_queue *q, char *name)
	q->end_sector = 0;
	q->boundary_rq = NULL;
 
-	if (name && !(e = elevator_get(name)))
-		return -EINVAL;
+	if (name) {
+		e = elevator_get(name);
+		if (!e)
+			return -EINVAL;
+	}
 
-	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
-		printk("I/O scheduler %s not found\n", chosen_elevator);
+	if (!e && *chosen_elevator) {
+		e = elevator_get(chosen_elevator);
+		if (!e)
+			printk(KERN_ERR "I/O scheduler %s not found\n",
+							chosen_elevator);
+	}
 
-	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
-		printk("Default I/O scheduler not found, using no-op\n");
-		e = elevator_get("noop");
+	if (!e) {
+		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+		if (!e) {
+			printk(KERN_ERR
+				"Default I/O scheduler not found. " \
+				"Using noop.\n");
+			e = elevator_get("noop");
+		}
	}
 
	eq = elevator_alloc(q, e);
@@ -248,7 +261,6 @@ int elevator_init(struct request_queue *q, char *name)
	elevator_attach(q, eq, data);
	return ret;
 }
-
 EXPORT_SYMBOL(elevator_init);
 
 void elevator_exit(elevator_t *e)
@@ -261,7 +273,6 @@ void elevator_exit(elevator_t *e)
 
	kobject_put(&e->kobj);
 }
-
 EXPORT_SYMBOL(elevator_exit);
 
 static void elv_activate_rq(struct request_queue *q, struct request *rq)
@@ -353,7 +364,6 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
	rb_insert_color(&rq->rb_node, root);
	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_add);
 
 void elv_rb_del(struct rb_root *root, struct request *rq)
@@ -362,7 +372,6 @@ void elv_rb_del(struct rb_root *root, struct request *rq)
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
 }
-
 EXPORT_SYMBOL(elv_rb_del);
 
 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
@@ -383,7 +392,6 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 
	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_find);
 
 /*
@@ -395,6 +403,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
	sector_t boundary;
	struct list_head *entry;
+	int stop_flags;
 
	if (q->last_merge == rq)
		q->last_merge = NULL;
@@ -404,13 +413,13 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
	q->nr_sorted--;
 
	boundary = q->end_sector;
-
+	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);
 
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
-		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & stop_flags)
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
@@ -425,7 +434,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 
	list_add(&rq->queuelist, entry);
 }
-
 EXPORT_SYMBOL(elv_dispatch_sort);
 
 /*
@@ -446,7 +454,6 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
 }
-
 EXPORT_SYMBOL(elv_dispatch_add_tail);
 
 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
@@ -665,7 +672,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;
 
	if (plug)
@@ -673,7 +681,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 
	elv_insert(q, rq, where);
 }
-
 EXPORT_SYMBOL(__elv_add_request);
 
 void elv_add_request(struct request_queue *q, struct request *rq, int where,
@@ -685,7 +692,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(elv_add_request);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
@@ -792,7 +798,6 @@ struct request *elv_next_request(struct request_queue *q)
 
	return rq;
 }
-
 EXPORT_SYMBOL(elv_next_request);
 
 void elv_dequeue_request(struct request_queue *q, struct request *rq)
@@ -810,7 +815,6 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
	if (blk_account_rq(rq))
		q->in_flight++;
 }
-
 EXPORT_SYMBOL(elv_dequeue_request);
 
 int elv_queue_empty(struct request_queue *q)
@@ -825,7 +829,6 @@ int elv_queue_empty(struct request_queue *q)
 
	return 1;
 }
-
 EXPORT_SYMBOL(elv_queue_empty);
 
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
@@ -994,7 +997,8 @@ void elv_register(struct elevator_type *e)
				!strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";
 
-	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
+	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
+								def);
 }
 EXPORT_SYMBOL_GPL(elv_register);
 
@@ -1126,7 +1130,8 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
	}
 
	if (!elevator_switch(q, e))
-		printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
+		printk(KERN_ERR "elevator: switch to %s failed\n",
+							elevator_name);
	return count;
 }
 
@@ -1160,7 +1165,6 @@ struct request *elv_rb_former_request(struct request_queue *q,
 
	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_former_request);
 
 struct request *elv_rb_latter_request(struct request_queue *q,
@@ -1173,5 +1177,4 @@ struct request *elv_rb_latter_request(struct request_queue *q,
 
	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_latter_request);