Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	183
1 file changed, 88 insertions(+), 95 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f78965fc6426..5ed888b04b29 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,6 +1,8 @@
 #ifndef _LINUX_BLKDEV_H
 #define _LINUX_BLKDEV_H
 
+#ifdef CONFIG_BLOCK
+
 #include <linux/sched.h>
 #include <linux/major.h>
 #include <linux/genhd.h>
@@ -18,26 +20,10 @@
 
 #include <asm/scatterlist.h>
 
-#ifdef CONFIG_LBD
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
-	int _res; \
-	_res = (n) % (b); \
-	(n) /= (b); \
-	_res; \
-} \
-)
-#endif
-
-#ifdef CONFIG_BLOCK
-
 struct scsi_ioctl_command;
 
 struct request_queue;
-typedef struct request_queue request_queue_t;
+typedef struct request_queue request_queue_t __deprecated;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
@@ -233,7 +219,7 @@ struct request {
 	struct list_head queuelist;
 	struct list_head donelist;
 
-	request_queue_t *q;
+	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
@@ -337,15 +323,15 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
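
With the typedef retired, driver hooks spell out struct request_queue directly. A minimal sketch of a request_fn under the new prototype, in the style of the era's simple block drivers (the function name and the data-transfer step are illustrative; elv_next_request() and end_request() are the contemporary dequeue and completion helpers):

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* elv_next_request() returns NULL once the queue is drained */
	while ((rq = elv_next_request(q)) != NULL) {
		/* ... perform the data transfer for rq here ... */
		end_request(rq, 1);	/* 1 == success in this era's API */
	}
}
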
@@ -471,7 +457,6 @@ struct request_queue
 	int orderr, ordcolor;
 	struct request pre_flush_rq, bar_rq, post_flush_rq;
 	struct request *orig_bar_rq;
-	unsigned int bi_size;
 
 	struct mutex sysfs_lock;
 
@@ -483,8 +468,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* write queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* read queue has been filled */
+#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
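
These flags are bit numbers within q->queue_flags, intended for the standard bitop helpers rather than direct masking; a hedged sketch of the usual test (the surrounding locking is the driver's responsibility):

	/* e.g. inside a driver's request_fn, under q->queue_lock */
	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
		return;		/* queue stopped; don't issue new commands */
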
@@ -626,34 +611,47 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
 static inline int init_emergency_isa_pool(void)
 {
 	return 0;
 }
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
 #endif /* CONFIG_MMU */
 
-#define rq_for_each_bio(_bio, rq)	\
+struct req_iterator {
+	int i;
+	struct bio *bio;
+};
+
+/* This should not be used directly - use rq_for_each_segment */
+#define __rq_for_each_bio(_bio, rq)	\
 	if ((rq->bio))			\
 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 
+#define rq_for_each_segment(bvl, _rq, _iter)			\
+	__rq_for_each_bio(_iter.bio, _rq)			\
+		bio_for_each_segment(bvl, _iter.bio, _iter.i)
+
+#define rq_iter_last(rq, _iter)					\
+		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
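
The new iterator replaces the open-coded rq_for_each_bio()/bio_for_each_segment() nesting with a single loop. A sketch of a converted driver loop (transfer_segment() and finish_transfer() are hypothetical helpers):

	struct req_iterator iter;
	struct bio_vec *bvec;

	/* visits every segment of every bio attached to rq */
	rq_for_each_segment(bvec, rq, iter) {
		transfer_segment(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
		if (rq_iter_last(rq, iter))
			finish_transfer(rq);
	}
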
@@ -662,14 +660,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 /*
  * Temporary export, until SCSI gets fixed up.
  */
-extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+			     struct bio *bio);
 
 /*
  * A queue has just exitted congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
 {
 	clear_bdi_congested(&q->backing_dev_info, rw);
 }
@@ -678,34 +677,29 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
  * A queue has just entered congestion.  Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
 {
 	set_bdi_congested(&q->backing_dev_info, rw);
 }
 
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_start_queueing(request_queue_t *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
 extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct sg_iovec *, int, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
-extern int blk_fill_sghdr_rq(request_queue_t *, struct request *,
-			     struct sg_io_hdr *, int);
-extern int blk_unmap_sghdr_rq(struct request *, struct sg_io_hdr *);
-extern int blk_complete_sghdr_rq(struct request *, struct sg_io_hdr *,
-				 struct bio *);
 extern int blk_verify_command(unsigned char *, int);
 
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;
 }
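
Taken together, the allocation, execution and release helpers above support a synchronous one-off command. A hedged sketch with error handling trimmed (bdev is assumed to be an opened block device; REQ_TYPE_SPECIAL marks a driver-private request):

	struct request_queue *q = bdev_get_queue(bdev);
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);	/* may sleep */
	rq->cmd_type = REQ_TYPE_SPECIAL;
	err = blk_execute_rq(q, bdev->bd_disk, rq, 0);	/* 0 = insert at tail */
	blk_put_request(rq);
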
@@ -754,41 +748,41 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
 
 /*
  * tag stuff
@@ -796,13 +790,13 @@ extern void blk_put_queue(request_queue_t *);
 #define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
 #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
 extern struct blk_queue_tag *blk_init_tags(int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
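A sketch of how a driver might adopt the tagging interface above (the depth of 64 and the NULL shared-tag map are illustrative; issue_to_hardware() is a hypothetical hook):

	/* one-time setup, typically at queue initialisation */
	if (blk_queue_init_tags(q, 64, NULL))
		goto fail;

	/* per request, under q->queue_lock: returns 0 and sets rq->tag on success */
	if (blk_queue_start_tag(q, rq))
		return;			/* out of tags; retry later */
	issue_to_hardware(rq);

	/* on completion, also under q->queue_lock */
	blk_queue_end_tag(q, rq);
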
@@ -814,7 +808,6 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 	return bqt->tag_index[tag];
 }
 
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
@@ -826,7 +819,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry)	list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
 {
 	int retval = 512;
 
@@ -841,7 +834,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
 {
 	int retval = 511;
 
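
The 511 default means buffers must be 512-byte aligned unless a driver relaxes it with blk_queue_dma_alignment(). A hedged sketch of the kind of check a mapping path performs before direct DMA (ubuf is an illustrative caller-supplied buffer):

	unsigned long uaddr = (unsigned long)ubuf;

	/* misaligned buffers cannot be mapped directly; callers such as
	 * blk_rq_map_user() fall back to a copying path instead */
	if (uaddr & queue_dma_alignment(q))
		use_copy_path = 1;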