aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--include/linux/blkdev.h31
1 files changed, 29 insertions, 2 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1d79b8d4ca6d..7bfcde2d5578 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -157,6 +157,7 @@ enum rq_cmd_type_bits {
157 REQ_TYPE_ATA_CMD, 157 REQ_TYPE_ATA_CMD,
158 REQ_TYPE_ATA_TASK, 158 REQ_TYPE_ATA_TASK,
159 REQ_TYPE_ATA_TASKFILE, 159 REQ_TYPE_ATA_TASKFILE,
160 REQ_TYPE_ATA_PC,
160}; 161};
161 162
162/* 163/*
@@ -650,6 +651,26 @@ extern void blk_recount_segments(request_queue_t *, struct bio *);
650extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *); 651extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
651extern int sg_scsi_ioctl(struct file *, struct request_queue *, 652extern int sg_scsi_ioctl(struct file *, struct request_queue *,
652 struct gendisk *, struct scsi_ioctl_command __user *); 653 struct gendisk *, struct scsi_ioctl_command __user *);
654
655/*
656 * A queue has just exited congestion. Note this in the global counter of
657 * congested queues, and wake up anyone who was waiting for requests to be
658 * put back.
659 */
660static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
661{
662 clear_bdi_congested(&q->backing_dev_info, rw);
663}
664
665/*
666 * A queue has just entered congestion. Flag that in the queue's VM-visible
667 * state flags and increment the global counter of congested queues.
668 */
669static inline void blk_set_queue_congested(request_queue_t *q, int rw)
670{
671 set_bdi_congested(&q->backing_dev_info, rw);
672}
673
653extern void blk_start_queue(request_queue_t *q); 674extern void blk_start_queue(request_queue_t *q);
654extern void blk_stop_queue(request_queue_t *q); 675extern void blk_stop_queue(request_queue_t *q);
655extern void blk_sync_queue(struct request_queue *q); 676extern void blk_sync_queue(struct request_queue *q);
@@ -764,10 +785,16 @@ extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
764extern void blk_queue_free_tags(request_queue_t *); 785extern void blk_queue_free_tags(request_queue_t *);
765extern int blk_queue_resize_tags(request_queue_t *, int); 786extern int blk_queue_resize_tags(request_queue_t *, int);
766extern void blk_queue_invalidate_tags(request_queue_t *); 787extern void blk_queue_invalidate_tags(request_queue_t *);
767extern long blk_congestion_wait(int rw, long timeout);
768extern struct blk_queue_tag *blk_init_tags(int); 788extern struct blk_queue_tag *blk_init_tags(int);
769extern void blk_free_tags(struct blk_queue_tag *); 789extern void blk_free_tags(struct blk_queue_tag *);
770extern void blk_congestion_end(int rw); 790
791static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
792 int tag)
793{
794 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
795 return NULL;
796 return bqt->tag_index[tag];
797}
771 798
772extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *); 799extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
773extern int blkdev_issue_flush(struct block_device *, sector_t *); 800extern int blkdev_issue_flush(struct block_device *, sector_t *);