author     Nick Piggin <npiggin@suse.de>        2008-04-29 08:48:33 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2008-04-29 08:48:33 -0400
commit     75ad23bc0fcb4f992a5d06982bf0857ab1738e9e (patch)
tree       8668ef63b1f420252ae41aed9e13737d49fd8054 /include/linux/blkdev.h
parent     68154e90c9d1492d570671ae181d9a8f8530da55 (diff)
block: make queue flags non-atomic
We can save some atomic ops in the IO path if we clearly define the rules for modifying the queue flags.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
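The rule the new helpers encode: queue_flag_set() and queue_flag_clear() may only be called with q->queue_lock held (enforced by a WARN_ON_ONCE in the patch), which makes the non-atomic __set_bit()/__clear_bit() safe; the *_unlocked variants are for paths where no concurrent access to the flags is possible. A minimal caller sketch, illustrative only and not part of the patch (QUEUE_FLAG_STOPPED is one of the pre-existing queue flags; the function name is invented):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/*
 * Illustrative sketch of the locking rule the new helpers encode;
 * not part of this patch.
 */
static void example_set_stopped(struct request_queue *q)
{
	unsigned long irqflags;

	spin_lock_irqsave(q->queue_lock, irqflags);
	/*
	 * With queue_lock held, no other CPU can modify
	 * q->queue_flags, so the plain __set_bit() inside
	 * queue_flag_set() is safe and avoids a locked RMW.
	 */
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, irqflags);
}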
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  33
1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c5065e3d2ca9..8ca481cd7d73 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -408,6 +408,30 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
 
+static inline void queue_flag_set_unlocked(unsigned int flag,
+					   struct request_queue *q)
+{
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+					     struct request_queue *q)
+{
+	__clear_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__clear_bit(flag, &q->queue_flags);
+}
+
 enum {
 	/*
 	 * Hardbarrier is supported with one of the following methods.
@@ -496,17 +520,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw)
 static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_READFULL, q);
 	else
-		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_READFULL, q);
 	else
-		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }
 
 
@@ -626,6 +650,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
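The saving the commit message refers to: set_bit()/clear_bit() are atomic read-modify-write operations (a lock-prefixed instruction on x86), while __set_bit()/__clear_bit() are plain ones, so callers that already hold queue_lock get the same effect more cheaply. For the *_unlocked variants, a hypothetical initialization-path sketch, assuming the queue is not yet visible to any other context (QUEUE_FLAG_CLUSTER is a pre-existing flag; the function name is invented for illustration):

#include <linux/blkdev.h>

/*
 * Hypothetical initialization-path sketch: the queue has not been
 * registered yet, so nothing else can touch queue_flags and the
 * plain __set_bit() in queue_flag_set_unlocked() needs no lock
 * and no atomic op.
 */
static void example_init_flags(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
}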