diff options
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 17 |
1 file changed, 13 insertions, 4 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d2..acb4f7bbbd32 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -378,6 +378,12 @@ struct request_queue { | |||
378 | 378 | ||
379 | unsigned int nr_sorted; | 379 | unsigned int nr_sorted; |
380 | unsigned int in_flight[2]; | 380 | unsigned int in_flight[2]; |
381 | /* | ||
382 | * Number of active block driver functions for which blk_drain_queue() | ||
383 | * must wait. Must be incremented around functions that unlock the | ||
384 | * queue_lock internally, e.g. scsi_request_fn(). | ||
385 | */ | ||
386 | unsigned int request_fn_active; | ||
381 | 387 | ||
382 | unsigned int rq_timeout; | 388 | unsigned int rq_timeout; |
383 | struct timer_list timeout; | 389 | struct timer_list timeout; |
@@ -437,7 +443,7 @@ struct request_queue { | |||
437 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ | 443 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ |
438 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ | 444 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ |
439 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ | 445 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ |
440 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ | 446 | #define QUEUE_FLAG_DYING 5 /* queue being torn down */ |
441 | #define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ | 447 | #define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ |
442 | #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ | 448 | #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ |
443 | #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ | 449 | #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ |
@@ -452,6 +458,7 @@ struct request_queue { | |||
452 | #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ | 458 | #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ |
453 | #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ | 459 | #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ |
454 | #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ | 460 | #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ |
461 | #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ | ||
455 | 462 | ||
456 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 463 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
457 | (1 << QUEUE_FLAG_STACKABLE) | \ | 464 | (1 << QUEUE_FLAG_STACKABLE) | \ |
@@ -521,6 +528,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
521 | 528 | ||
522 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 529 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
523 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 530 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
531 | #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) | ||
524 | #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) | 532 | #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) |
525 | #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) | 533 | #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) |
526 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) | 534 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
@@ -1180,13 +1188,14 @@ static inline int queue_discard_alignment(struct request_queue *q) | |||
1180 | 1188 | ||
1181 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) | 1189 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) |
1182 | { | 1190 | { |
1183 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); | 1191 | sector_t alignment = sector << 9; |
1192 | alignment = sector_div(alignment, lim->discard_granularity); | ||
1184 | 1193 | ||
1185 | if (!lim->max_discard_sectors) | 1194 | if (!lim->max_discard_sectors) |
1186 | return 0; | 1195 | return 0; |
1187 | 1196 | ||
1188 | return (lim->discard_granularity + lim->discard_alignment - alignment) | 1197 | alignment = lim->discard_granularity + lim->discard_alignment - alignment; |
1189 | & (lim->discard_granularity - 1); | 1198 | return sector_div(alignment, lim->discard_granularity); |
1190 | } | 1199 | } |
1191 | 1200 | ||
1192 | static inline int bdev_discard_alignment(struct block_device *bdev) | 1201 | static inline int bdev_discard_alignment(struct block_device *bdev) |