Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h  51
 1 file changed, 45 insertions(+), 6 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c5065e3d2ca9..d2a1b71e93c3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -215,8 +215,9 @@ struct request {
 	/*
 	 * when request is used as a packet command carrier
 	 */
-	unsigned int cmd_len;
-	unsigned char cmd[BLK_MAX_CDB];
+	unsigned short cmd_len;
+	unsigned char __cmd[BLK_MAX_CDB];
+	unsigned char *cmd;
 
 	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
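Note on the hunk above: cmd_len shrinks to unsigned short, the fixed array is renamed to __cmd, and a new pointer cmd is added, so a request can reference either the inline __cmd buffer or an external, larger command buffer. A minimal sketch of filling a CDB through the new pointer, assuming rq->cmd has already been pointed at a buffer of at least BLK_MAX_CDB bytes (for example the inline __cmd array) by request initialization; the helper name and the SCSI opcode are for illustration only:

#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi.h>		/* TEST_UNIT_READY, illustration only */

/* Hypothetical helper: build a 6-byte TEST UNIT READY CDB in a request,
 * assuming rq->cmd already points at a valid BLK_MAX_CDB-sized buffer. */
static void example_fill_cdb(struct request *rq)
{
	memset(rq->cmd, 0, BLK_MAX_CDB);
	rq->cmd[0] = TEST_UNIT_READY;
	rq->cmd_len = 6;
}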
@@ -407,6 +408,41 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
+
+static inline int queue_is_locked(struct request_queue *q)
+{
+#ifdef CONFIG_SMP
+	spinlock_t *lock = q->queue_lock;
+	return lock && spin_is_locked(lock);
+#else
+	return 1;
+#endif
+}
+
+static inline void queue_flag_set_unlocked(unsigned int flag,
+					   struct request_queue *q)
+{
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+					     struct request_queue *q)
+{
+	__clear_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+	__clear_bit(flag, &q->queue_flags);
+}
 
 enum {
 	/*
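Note on the hunk above: the new helpers make the locking rule explicit. queue_flag_set()/queue_flag_clear() warn via queue_is_locked() when q->queue_lock is not held (the check is only meaningful on SMP), while the *_unlocked variants are for contexts where the lock is not needed, such as queue setup before the queue is visible elsewhere. A minimal usage sketch under those assumptions; the driver functions are hypothetical:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical: disable merging on a live queue; the locked variant
 * expects q->queue_lock to be held by the caller. */
static void example_disable_merges(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/* Hypothetical: during initialization, before the queue is reachable
 * from other contexts, the unlocked variant avoids taking the lock. */
static void example_init_flags(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);
}

Readers of the flag, such as the blk_queue_nomerges() macro added below, still go through test_bit() and take no lock.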
@@ -451,6 +487,7 @@ enum {
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
@@ -496,17 +533,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw)
 static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_READFULL, q);
 	else
-		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_READFULL, q);
 	else
-		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }
 
 
@@ -583,6 +620,7 @@ extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
+extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
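Note on the hunk above: blk_rq_init() is newly declared. A plausible use, assuming it (re)initializes a caller-owned struct request for the given queue (the prototype alone does not say more), is preparing a driver-private request before it is queued; the helper and the REQ_TYPE_SPECIAL marking below are illustrative only:

#include <linux/blkdev.h>

/* Hypothetical: reset a driver-embedded request before reuse.
 * Assumption: blk_rq_init(q, rq) returns *rq to a clean initial state. */
static void example_prepare_private_rq(struct request_queue *q,
				       struct request *rq)
{
	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_SPECIAL;	/* mark as driver-special */
}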
@@ -626,6 +664,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
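Note on the hunk above: a new __blk_run_queue() is exported next to blk_run_queue(). Following the convention of the other double-underscore variants in this header (and of the queue_flag helpers added earlier), the assumption in this sketch is that the caller already holds q->queue_lock, while blk_run_queue() takes the lock itself; the wrapper function is hypothetical:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical: kick request dispatch from a completion path.
 * Assumption: __blk_run_queue() must be called with q->queue_lock held. */
static void example_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}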
