Diffstat (limited to 'include/linux/blkdev.h')

 include/linux/blkdev.h | 28 ++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d2..f94bc83011ed 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -378,6 +378,12 @@ struct request_queue {
 
 	unsigned int		nr_sorted;
 	unsigned int		in_flight[2];
+	/*
+	 * Number of active block driver functions for which blk_drain_queue()
+	 * must wait. Must be incremented around functions that unlock the
+	 * queue_lock internally, e.g. scsi_request_fn().
+	 */
+	unsigned int		request_fn_active;
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
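Note on the new counter: the comment states the contract (increment around request_fn invocations that may drop queue_lock) but the call site lives in block/blk-core.c, not in this header. A minimal sketch of the intended bracketing, assuming a run-queue helper along the lines of the one added next to this field; the function name below is illustrative, not part of this patch:

/* Sketch: bracket the request_fn call so blk_drain_queue() can wait
 * for drivers that unlock queue_lock internally. Caller holds
 * queue_lock, so the bare ++/-- need no extra synchronization. */
static void example_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;			/* never call into a dead queue */

	q->request_fn_active++;		/* visible to blk_drain_queue() */
	q->request_fn(q);		/* may unlock/relock queue_lock */
	q->request_fn_active--;
}

Because the increment and decrement both happen under queue_lock, blk_drain_queue() only needs to recheck the counter under the same lock until it reaches zero.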
@@ -437,7 +443,7 @@ struct request_queue {
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
-#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
+#define QUEUE_FLAG_DYING	5	/* queue being torn down */
 #define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
@@ -452,6 +458,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
+#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -521,6 +528,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
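Note on the flag split: QUEUE_FLAG_DYING takes over the old meaning of DEAD (tear-down has started, in-flight requests still draining), while the re-added QUEUE_FLAG_DEAD at bit 19 marks tear-down as finished. A sketch of how a submission path can use the new predicate; the wrapper below is hypothetical and only illustrates the intent:

/* Sketch: refuse new work once the queue is dying, while the drain
 * of already-queued requests proceeds. Hypothetical caller, not
 * code from this patch. */
static struct request *example_get_request(struct request_queue *q,
					   int rw, gfp_t gfp_mask)
{
	if (blk_queue_dying(q))
		return NULL;	/* shutting down: fail new allocations */
	return blk_get_request(q, rw, gfp_mask);
}

Once blk_queue_dead(q) is true, the queue must not be touched at all, e.g. the request_fn bracketing above bails out before calling into the driver.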
@@ -1180,13 +1188,25 @@ static inline int queue_discard_alignment(struct request_queue *q)
 
 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 {
-	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+	unsigned int alignment, granularity, offset;
 
 	if (!lim->max_discard_sectors)
 		return 0;
 
-	return (lim->discard_granularity + lim->discard_alignment - alignment)
-		& (lim->discard_granularity - 1);
+	/* Why are these in bytes, not sectors? */
+	alignment = lim->discard_alignment >> 9;
+	granularity = lim->discard_granularity >> 9;
+	if (!granularity)
+		return 0;
+
+	/* Offset of the partition start in 'granularity' sectors */
+	offset = sector_div(sector, granularity);
+
+	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
+	offset = (granularity + alignment - offset) % granularity;
+
+	/* Turn it back into bytes, gaah */
+	return offset << 9;
 }
 
 static inline int bdev_discard_alignment(struct block_device *bdev)
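Note on the rewrite: the helper now works in sectors via sector_div(), so it no longer silently assumes a power-of-two granularity the way the old bitmask did, and it returns the byte offset from 'sector' to the next properly aligned discard boundary. A standalone userspace re-implementation with example numbers, for illustration only; sector_div() is replaced by a plain modulus:

#include <stdio.h>

/* Inputs are in 512-byte sectors, the return value is in bytes,
 * mirroring the patched helper. */
static unsigned int discard_alignment_bytes(unsigned long long sector,
					    unsigned int granularity,
					    unsigned int alignment)
{
	unsigned int offset;

	if (!granularity)
		return 0;
	offset = sector % granularity;	/* partition start within a granule */
	offset = (granularity + alignment - offset) % granularity;
	return offset << 9;		/* sectors back to bytes */
}

int main(void)
{
	/* Example: 32 KiB granularity (64 sectors), 4 KiB alignment
	 * offset (8 sectors), partition starting at sector 100. The
	 * next aligned boundary is sector 136, i.e. 36 sectors away. */
	printf("%u\n", discard_alignment_bytes(100, 64, 8)); /* 18432 */
	return 0;
}

Check: aligned sectors are those where (s - 8) % 64 == 0, i.e. 8, 72, 136, ...; from sector 100 the next one is 136, and 36 sectors * 512 = 18432 bytes.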
