path: root/include/linux/blkdev.h
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-22 20:07:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-22 20:07:18 -0400
commit		a2887097f25cd38cadfc11d10769e2b349fb5eca (patch)
tree		cd4adcb305365d6ba9acd2c02d4eb9d0125c6f8d /include/linux/blkdev.h
parent		8abfc6e7a45eb74e51904bbae676fae008b11366 (diff)
parent		005a1d15f5a6b2bb4ada80349513effbf22b4588 (diff)
Merge branch 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block: (46 commits)
  xen-blkfront: disable barrier/flush write support
  Added blk-lib.c and blk-barrier.c was renamed to blk-flush.c
  block: remove BLKDEV_IFL_WAIT
  aic7xxx_old: removed unused 'req' variable
  block: remove the BH_Eopnotsupp flag
  block: remove the BLKDEV_IFL_BARRIER flag
  block: remove the WRITE_BARRIER flag
  swap: do not send discards as barriers
  fat: do not send discards as barriers
  ext4: do not send discards as barriers
  jbd2: replace barriers with explicit flush / FUA usage
  jbd2: Modify ASYNC_COMMIT code to not rely on queue draining on barrier
  jbd: replace barriers with explicit flush / FUA usage
  nilfs2: replace barriers with explicit flush / FUA usage
  reiserfs: replace barriers with explicit flush / FUA usage
  gfs2: replace barriers with explicit flush / FUA usage
  btrfs: replace barriers with explicit flush / FUA usage
  xfs: replace barriers with explicit flush / FUA usage
  block: pass gfp_mask and flags to sb_issue_discard
  dm: convey that all flushes are processed as empty
  ...
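The theme of the series is dropping hard-barrier ordering in favour of explicit cache flushes and FUA writes. As a rough illustration of what the "replace barriers with explicit flush / FUA usage" commits do on the submission side (the function name and bio setup below are hypothetical, not taken from this merge; 2.6.37-era request flags assumed):

	/*
	 * Hypothetical filesystem commit path, for illustration only.
	 */
	#include <linux/bio.h>
	#include <linux/fs.h>

	static void myfs_submit_commit_block(struct bio *bio)
	{
		/* Old style, removed by this series:
		 *	submit_bio(WRITE_BARRIER, bio);
		 * which ordered the write by draining the queue around it.
		 */

		/* New style: ask for an explicit preflush plus a FUA write and
		 * let the block layer map that onto what the device supports.
		 */
		submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
	}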
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	| 101
1 file changed, 21 insertions(+), 80 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 16f7f1be1acf..009b80e49f53 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -360,12 +360,14 @@ struct request_queue
 	struct blk_trace	*blk_trace;
 #endif
 	/*
-	 * reserved for flush operations
+	 * for flush operations
 	 */
-	unsigned int		ordered, next_ordered, ordseq;
-	int			orderr, ordcolor;
-	struct request		pre_flush_rq, bar_rq, post_flush_rq;
-	struct request		*orig_bar_rq;
+	unsigned int		flush_flags;
+	unsigned int		flush_seq;
+	int			flush_err;
+	struct request		flush_rq;
+	struct request		*orig_flush_rq;
+	struct list_head	pending_flushes;
 
 	struct mutex		sysfs_lock;
 
@@ -472,56 +474,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-enum {
-	/*
-	 * Hardbarrier is supported with one of the following methods.
-	 *
-	 * NONE		: hardbarrier unsupported
-	 * DRAIN	: ordering by draining is enough
-	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
-	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
-	 * TAG		: ordering by tag is enough
-	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
-	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
-	 */
-	QUEUE_ORDERED_BY_DRAIN		= 0x01,
-	QUEUE_ORDERED_BY_TAG		= 0x02,
-	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_DO_BAR		= 0x20,
-	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
-	QUEUE_ORDERED_DO_FUA		= 0x80,
-
-	QUEUE_ORDERED_NONE		= 0x00,
-
-	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
-					  QUEUE_ORDERED_DO_BAR,
-	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_FUA,
-
-	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
-					  QUEUE_ORDERED_DO_BAR,
-	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_FUA,
-
-	/*
-	 * Ordered operation sequence
-	 */
-	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
-	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
-	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
-	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
-	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
-	QUEUE_ORDSEQ_DONE	= 0x20,
-};
-
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
@@ -531,7 +483,6 @@ enum {
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
@@ -602,7 +553,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
+	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
+	 REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
 	 (((rq)->cmd_flags & REQ_DISCARD) || \
@@ -891,12 +843,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned);
-extern bool blk_do_ordered(struct request_queue *, struct request **);
-extern unsigned blk_ordered_cur_seq(struct request_queue *);
-extern unsigned blk_ordered_req_seq(struct request *);
-extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
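With blk_queue_ordered() and the blk_do_ordered() machinery gone, a driver only declares which cache-control operations its hardware honours. A minimal sketch, assuming a hypothetical driver (mydrv) and using the blk_queue_flush() prototype added above:

	#include <linux/blkdev.h>

	static void mydrv_init_queue(struct request_queue *q, bool write_cache,
				     bool fua)
	{
		unsigned int flush = 0;

		if (write_cache)
			flush |= REQ_FLUSH;	/* volatile write cache: preflushes needed */
		if (fua)
			flush |= REQ_FUA;	/* device honours forced-unit-access writes */

		blk_queue_flush(q, flush);	/* replaces blk_queue_ordered(q, QUEUE_ORDERED_*) */
	}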
@@ -929,27 +877,20 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 		return NULL;
 	return bqt->tag_index[tag];
 }
-enum{
-	BLKDEV_WAIT,	/* wait for completion */
-	BLKDEV_BARRIER,	/* issue request with barrier */
-	BLKDEV_SECURE,	/* secure discard */
-};
-#define BLKDEV_IFL_WAIT		(1 << BLKDEV_WAIT)
-#define BLKDEV_IFL_BARRIER	(1 << BLKDEV_BARRIER)
-#define BLKDEV_IFL_SECURE	(1 << BLKDEV_SECURE)
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
-			unsigned long);
+
+#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */
+
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-static inline int sb_issue_discard(struct super_block *sb,
-				   sector_t block, sector_t nr_blocks)
+		sector_t nr_sects, gfp_t gfp_mask);
+static inline int sb_issue_discard(struct super_block *sb, sector_t block,
+		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
 {
-	block <<= (sb->s_blocksize_bits - 9);
-	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
-				    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
+				    nr_blocks << (sb->s_blocksize_bits - 9),
+				    gfp_mask, flags);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
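On the caller side, the BLKDEV_IFL_* bits are gone: blkdev_issue_flush() always waits and takes no flags, and sb_issue_discard() now receives gfp_mask and flags from the filesystem ("block: pass gfp_mask and flags to sb_issue_discard" above). A hedged caller sketch with a hypothetical filesystem helper, using only the prototypes shown in this diff:

	#include <linux/blkdev.h>
	#include <linux/fs.h>

	static int myfs_trim_and_flush(struct super_block *sb, sector_t block,
				       sector_t nr_blocks)
	{
		/* 0 = plain discard; BLKDEV_DISCARD_SECURE would ask for a secure discard */
		int ret = sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);

		if (ret)
			return ret;

		/* flags argument dropped; error_sector may be NULL */
		return blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
	}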