author		shaohua.li@intel.com <shaohua.li@intel.com>	2011-05-06 13:34:32 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-05-06 13:36:25 -0400
commit		f3876930952390a31c3a7fd68dd621464a36eb80 (patch)
tree		7fe2306a8dc2022ac7724b2d47629e38e3fb354b
parent		490b94be0282c3b67f56453628ff0aaae827a670 (diff)
block: add a non-queueable flush flag
A flush request isn't queueable on some drives. Add a flag so a driver
can notify the block layer about this; with that knowledge the block
layer can optimize flush performance.

Stable: 2.6.39 only

Cc: stable@kernel.org
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
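A driver opts in during queue setup. A minimal sketch, assuming a
hypothetical driver init path (only blk_queue_flush_queueable() itself
comes from this patch; the function below is illustrative):

	#include <linux/blkdev.h>

	/* Hypothetical driver setup: declare that this device cannot
	 * accept a flush command while other requests are queued. */
	static void example_setup_queue(struct request_queue *q)
	{
		/* false == flush is NOT queueable on this device */
		blk_queue_flush_queueable(q, false);
	}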
-rw-r--r--	block/blk-settings.c	6
-rw-r--r--	include/linux/blkdev.h	7
2 files changed, 13 insertions, 0 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1fa769293597..cd3c428e194f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -790,6 +790,12 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush);
 
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+	q->flush_not_queueable = !queueable;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a3..8bd2a271b2d8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -364,6 +364,7 @@ struct request_queue
 	 * for flush operations
 	 */
 	unsigned int		flush_flags;
+	unsigned int		flush_not_queueable:1;
 	unsigned int		flush_pending_idx:1;
 	unsigned int		flush_running_idx:1;
 	unsigned long		flush_pending_since;
@@ -843,6 +844,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1111,6 +1113,11 @@ static inline unsigned int block_size(struct block_device *bdev)
 	return bdev->bd_block_size;
 }
 
+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+	return !q->flush_not_queueable;
+}
+
 typedef struct {struct page *v;} Sector;
 
 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
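The block layer is expected to test the bit through the new
queue_flush_queueable() helper. An illustrative sketch of the kind of
check the flush machinery could make (this consumer is not part of this
patch; the actual optimization lands separately):

	/* Illustrative consumer: if flushes can't be queued, hold a new
	 * flush back while one is already in flight so they can merge. */
	static bool example_may_issue_flush(struct request_queue *q,
					    bool flush_in_flight)
	{
		return queue_flush_queueable(q) || !flush_in_flight;
	}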