about summary refs log tree commit diff stats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2008-08-28 03:17:06 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-10-09 02:56:10 -0400
commit152e283fdfea0cd11e297d982378b55937842dde (patch)
treea97a57108353f167a1e2911e8ee09c527ef42d3e /include/linux/blkdev.h
parenta3bce90edd8f6cafe3f63b1a943800792e830178 (diff)
block: introduce struct rq_map_data to use reserved pages
This patch introduces struct rq_map_data to enable bio_copy_user_iov() to use reserved pages. Currently, bio_copy_user_iov allocates bounce pages, but drivers/scsi/sg.c wants to allocate pages by itself and use them. struct rq_map_data can be used to pass allocated pages to bio_copy_user_iov. The current users of bio_copy_user_iov simply pass NULL (they don't want to use pre-allocated pages). Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Cc: Jens Axboe <jens.axboe@oracle.com> Cc: Douglas Gilbert <dougg@torque.net> Cc: Mike Christie <michaelc@cs.wisc.edu> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--include/linux/blkdev.h12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 00e388d0e22..358ac423ed2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -642,6 +642,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_MMU */
 
+struct rq_map_data {
+	struct page **pages;
+	int page_order;
+	int nr_entries;
+};
+
 struct req_iterator {
 	int i;
 	struct bio *bio;
@@ -711,11 +717,13 @@ extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
-			   void __user *, unsigned long, gfp_t);
+			   struct rq_map_data *, void __user *, unsigned long,
+			   gfp_t);
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-			       struct sg_iovec *, int, unsigned int, gfp_t);
+			       struct rq_map_data *, struct sg_iovec *, int,
+			       unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,