author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-08-28 03:17:05 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-09 02:56:10 -0400
commit	a3bce90edd8f6cafe3f63b1a943800792e830178 (patch)
tree	c07a2962987e57997b4ff6f6c63febe1feaa0a9c /block/blk-map.c
parent	45333d5a31296d0af886d94f1d08f128231cab8e (diff)
block: add gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov
Currently, blk_rq_map_user and blk_rq_map_user_iov always do GFP_KERNEL allocation. This adds a gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov so sg can use it (sg always does GFP_ATOMIC allocation).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Douglas Gilbert <dougg@torque.net>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-map.c')
-rw-r--r--	block/blk-map.c	20
1 file changed, 12 insertions, 8 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index ea1bf53929e4..ac21b7397e15 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,7 +41,8 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-			     void __user *ubuf, unsigned int len)
+			     void __user *ubuf, unsigned int len,
+			     gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	unsigned int alignment;
@@ -57,9 +58,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	uaddr = (unsigned long) ubuf;
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 	if (!(uaddr & alignment) && !(len & alignment))
-		bio = bio_map_user(q, NULL, uaddr, len, reading);
+		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
-		bio = bio_copy_user(q, uaddr, len, reading);
+		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -90,6 +91,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  * @rq:		request structure to fill
  * @ubuf:	the user buffer
  * @len:	length of user data
+ * @gfp_mask:	memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -105,7 +107,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    void __user *ubuf, unsigned long len)
+		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
@@ -132,7 +134,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		if (end - start > BIO_MAX_PAGES)
 			map_len -= PAGE_SIZE;
 
-		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
@@ -160,6 +162,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @iov:	pointer to the iovec
  * @iov_count:	number of elements in the iovec
  * @len:	I/O byte count
+ * @gfp_mask:	memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -175,7 +178,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count, unsigned int len)
+			struct sg_iovec *iov, int iov_count, unsigned int len,
+			gfp_t gfp_mask)
 {
 	struct bio *bio;
 	int i, read = rq_data_dir(rq) == READ;
@@ -194,9 +198,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	}
 
 	if (unaligned || (q->dma_pad_mask & len))
-		bio = bio_copy_user_iov(q, iov, iov_count, read);
+		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
 	else
-		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
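
For context, a minimal caller sketch of how the new gfp_mask argument is meant to be used after this change. The helper name example_map_user_buf and its in_atomic flag are hypothetical illustrations, not code from sg or from this patch; sg itself would simply pass GFP_ATOMIC where other callers keep GFP_KERNEL.

/*
 * Hypothetical caller sketch (not part of this patch): choose a gfp_mask
 * that matches the calling context and pass it to blk_rq_map_user(),
 * which now forwards it to bio_map_user()/bio_copy_user().
 */
#include <linux/blkdev.h>

static int example_map_user_buf(struct request_queue *q, struct request *rq,
				void __user *ubuf, unsigned long len,
				bool in_atomic)
{
	/* sg always allocates with GFP_ATOMIC; most callers keep GFP_KERNEL. */
	gfp_t gfp_mask = in_atomic ? GFP_ATOMIC : GFP_KERNEL;

	return blk_rq_map_user(q, rq, ubuf, len, gfp_mask);
}

The same pattern applies to blk_rq_map_user_iov(), whose gfp_mask is forwarded to bio_map_user_iov()/bio_copy_user_iov() in the hunks above.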