author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2008-08-28 03:17:05 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                 2008-10-09 02:56:10 -0400
commit     a3bce90edd8f6cafe3f63b1a943800792e830178 (patch)
tree       c07a2962987e57997b4ff6f6c63febe1feaa0a9c /block
parent     45333d5a31296d0af886d94f1d08f128231cab8e (diff)
block: add gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov
Currently, blk_rq_map_user and blk_rq_map_user_iov always do
GFP_KERNEL allocation.

This adds a gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov
so that sg can use it (sg always does GFP_ATOMIC allocation).
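
For reference, a minimal sketch of the call pattern this change enables. The sg-side conversion is not part of the 'block' diff below, so the caller here is purely illustrative and the can_sleep switch is an assumption, not code from this patch:

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/blkdev.h>

/*
 * Hypothetical caller (not from this patch): a mapping path that may run
 * in a context that cannot sleep passes GFP_ATOMIC, the way sg does;
 * process context keeps GFP_KERNEL, which is what bsg and scsi_ioctl
 * pass in the hunks below.
 */
static int example_map_user(struct request_queue *q, struct request *rq,
			    void __user *ubuf, unsigned long len,
			    bool can_sleep)
{
	gfp_t gfp_mask = can_sleep ? GFP_KERNEL : GFP_ATOMIC;

	/* New signature after this patch: gfp_mask is the trailing argument. */
	return blk_rq_map_user(q, rq, ubuf, len, gfp_mask);
}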
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Douglas Gilbert <dougg@torque.net>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
 block/blk-map.c    | 20
 block/bsg.c        |  5
 block/scsi_ioctl.c |  5
 3 files changed, 18 insertions, 12 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index ea1bf53929e4..ac21b7397e15 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,7 +41,8 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-			     void __user *ubuf, unsigned int len)
+			     void __user *ubuf, unsigned int len,
+			     gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	unsigned int alignment;
@@ -57,9 +58,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	uaddr = (unsigned long) ubuf;
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 	if (!(uaddr & alignment) && !(len & alignment))
-		bio = bio_map_user(q, NULL, uaddr, len, reading);
+		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
-		bio = bio_copy_user(q, uaddr, len, reading);
+		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -90,6 +91,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
+ * @gfp_mask: memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -105,7 +107,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    void __user *ubuf, unsigned long len)
+		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
@@ -132,7 +134,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		if (end - start > BIO_MAX_PAGES)
 			map_len -= PAGE_SIZE;
 
-		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
@@ -160,6 +162,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @iov: pointer to the iovec
  * @iov_count: number of elements in the iovec
  * @len: I/O byte count
+ * @gfp_mask: memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -175,7 +178,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count, unsigned int len)
+			struct sg_iovec *iov, int iov_count, unsigned int len,
+			gfp_t gfp_mask)
 {
 	struct bio *bio;
 	int i, read = rq_data_dir(rq) == READ;
@@ -194,9 +198,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	}
 
 	if (unaligned || (q->dma_pad_mask & len))
-		bio = bio_copy_user_iov(q, iov, iov_count, read);
+		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
 	else
-		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
diff --git a/block/bsg.c b/block/bsg.c
index 0aae8d7ba99c..e7a142e9916c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -283,7 +283,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
 		next_rq->cmd_type = rq->cmd_type;
 
 		dxferp = (void*)(unsigned long)hdr->din_xferp;
-		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len,
+				      GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
@@ -298,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
 		dxfer_len = 0;
 
 	if (dxfer_len) {
-		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
+		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len, GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 3aab80a4c484..f49d6a11a69e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -315,10 +315,11 @@ static int sg_io(struct file *file, struct request_queue *q,
 		}
 
 		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
-					  hdr->dxfer_len);
+					  hdr->dxfer_len, GFP_KERNEL);
 		kfree(iov);
 	} else if (hdr->dxfer_len)
-		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len,
+				      GFP_KERNEL);
 
 	if (ret)
 		goto out;