diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-08 13:13:35 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-08 13:13:35 -0400 |
commit | 4de13d7aa8f4d02f4dc99d4609575659f92b3c5a (patch) | |
tree | 3bc9729eabe79c6164cd29a5d605000bc82bf837 /include/linux/blkdev.h | |
parent | 5af43c24ca59a448c9312dd4a4a51d27ec3b9a73 (diff) | |
parent | b8d4a5bf6a049303a29a3275f463f09a490b50ea (diff) |
Merge branch 'for-3.10/core' of git://git.kernel.dk/linux-block
Pull block core updates from Jens Axboe:
- Major bit is Kent's prep work for immutable bio vecs.
- Stable candidate fix for a scheduling-while-atomic in the queue
bypass operation.
- Fix for the hang on exceeded rq->datalen 32-bit unsigned when merging
discard bios.
- Tejun's changes to convert the writeback thread pool to the generic
workqueue mechanism.
- Runtime PM framework; SCSI patches exist on top of these in James'
tree.
- A few random fixes.
* 'for-3.10/core' of git://git.kernel.dk/linux-block: (40 commits)
relay: move remove_buf_file inside relay_close_buf
partitions/efi.c: replace useless kzalloc's by kmalloc's
fs/block_dev.c: fix iov_shorten() criteria in blkdev_aio_read()
block: fix max discard sectors limit
blkcg: fix "scheduling while atomic" in blk_queue_bypass_start
Documentation: cfq-iosched: update documentation help for cfq tunables
writeback: expose the bdi_wq workqueue
writeback: replace custom worker pool implementation with unbound workqueue
writeback: remove unused bdi_pending_list
aoe: Fix unitialized var usage
bio-integrity: Add explicit field for owner of bip_buf
block: Add an explicit bio flag for bios that own their bvec
block: Add bio_alloc_pages()
block: Convert some code to bio_for_each_segment_all()
block: Add bio_for_each_segment_all()
bounce: Refactor __blk_queue_bounce to not use bi_io_vec
raid1: use bio_copy_data()
pktcdvd: Use bio_reset() in disabled code to kill bi_idx usage
pktcdvd: use bio_copy_data()
block: Add bio_copy_data()
...
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 29 |
1 file changed, 28 insertions, 1 deletion
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e38cfe77f7f0..2fdb4a451b49 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -361,6 +361,12 @@ struct request_queue { | |||
361 | */ | 361 | */ |
362 | struct kobject kobj; | 362 | struct kobject kobj; |
363 | 363 | ||
364 | #ifdef CONFIG_PM_RUNTIME | ||
365 | struct device *dev; | ||
366 | int rpm_status; | ||
367 | unsigned int nr_pending; | ||
368 | #endif | ||
369 | |||
364 | /* | 370 | /* |
365 | * queue settings | 371 | * queue settings |
366 | */ | 372 | */ |
@@ -838,7 +844,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | |||
838 | unsigned int cmd_flags) | 844 | unsigned int cmd_flags) |
839 | { | 845 | { |
840 | if (unlikely(cmd_flags & REQ_DISCARD)) | 846 | if (unlikely(cmd_flags & REQ_DISCARD)) |
841 | return q->limits.max_discard_sectors; | 847 | return min(q->limits.max_discard_sectors, UINT_MAX >> 9); |
842 | 848 | ||
843 | if (unlikely(cmd_flags & REQ_WRITE_SAME)) | 849 | if (unlikely(cmd_flags & REQ_WRITE_SAME)) |
844 | return q->limits.max_write_same_sectors; | 850 | return q->limits.max_write_same_sectors; |
@@ -961,6 +967,27 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int); | |||
961 | extern void blk_put_queue(struct request_queue *); | 967 | extern void blk_put_queue(struct request_queue *); |
962 | 968 | ||
963 | /* | 969 | /* |
970 | * block layer runtime pm functions | ||
971 | */ | ||
972 | #ifdef CONFIG_PM_RUNTIME | ||
973 | extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); | ||
974 | extern int blk_pre_runtime_suspend(struct request_queue *q); | ||
975 | extern void blk_post_runtime_suspend(struct request_queue *q, int err); | ||
976 | extern void blk_pre_runtime_resume(struct request_queue *q); | ||
977 | extern void blk_post_runtime_resume(struct request_queue *q, int err); | ||
978 | #else | ||
979 | static inline void blk_pm_runtime_init(struct request_queue *q, | ||
980 | struct device *dev) {} | ||
981 | static inline int blk_pre_runtime_suspend(struct request_queue *q) | ||
982 | { | ||
983 | return -ENOSYS; | ||
984 | } | ||
985 | static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} | ||
986 | static inline void blk_pre_runtime_resume(struct request_queue *q) {} | ||
987 | static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} | ||
988 | #endif | ||
989 | |||
990 | /* | ||
964 | * blk_plug permits building a queue of related requests by holding the I/O | 991 | * blk_plug permits building a queue of related requests by holding the I/O |
965 | * fragments for a short period. This allows merging of sequential requests | 992 | * fragments for a short period. This allows merging of sequential requests |
966 | * into single larger request. As the requests are moved from a per-task list to | 993 | * into single larger request. As the requests are moved from a per-task list to |