author    | Al Viro <viro@zeniv.linux.org.uk> | 2015-11-12 05:09:35 -0500
committer | Al Viro <viro@zeniv.linux.org.uk> | 2016-12-26 21:35:13 -0500
commit    | c9f2b6aeb92286f15ffc80d2ba16bc24e530f560 (patch)
tree      | 8307a262414211afe5ab48747ef4bdee7ff9a285
parent    | c1696cab700588f8493df7b51e096abf5bfb1d40 (diff)
[nbd] pass iov_iter to nbd_xmit()
... and don't mess with kmap() - just use BVEC_ITER for those parts.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r-- | drivers/block/nbd.c | 66
1 file changed, 27 insertions(+), 39 deletions(-)
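The core of the change, as the commit message puts it, is that the common transmit routine now takes a fully formed iov_iter instead of a raw pointer/length pair, so bio_vec payload pages no longer have to be kmap()ed before hitting the socket. Below is a minimal sketch of the pattern the driver moves to; `send_bvec_example()` and its arguments are illustrative stand-ins rather than code from the patch, and the `ITER_BVEC | WRITE` direction flags follow the convention of the kernel tree this patch targets.

```c
#include <linux/bio.h>		/* struct bio_vec */
#include <linux/net.h>		/* struct socket, sock_sendmsg() */
#include <linux/socket.h>	/* MSG_* flags */
#include <linux/uio.h>		/* struct iov_iter, iov_iter_bvec() */

/*
 * Hypothetical helper, for illustration only: send one bio_vec by describing
 * it with a bvec-backed iov_iter instead of kmap()ing the page and passing
 * a kernel pointer + length to the socket layer.
 */
static int send_bvec_example(struct socket *sock, struct bio_vec *bvec, int flags)
{
	struct msghdr msg = { .msg_flags = flags | MSG_NOSIGNAL };
	struct iov_iter from;

	/* the iterator records page, offset and length; no highmem mapping */
	iov_iter_bvec(&from, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
	msg.msg_iter = from;

	return sock_sendmsg(sock, &msg);
}
```

The same transmit routine can equally take a kvec-backed iterator for the fixed-size protocol header, which is what lets the patch fold sock_send_bvec() and sock_recv_bvec() into the single sock_xmit() path.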
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 8e63caecdd00..3c2dbe412c02 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -209,13 +209,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 /*
  * Send or receive packet.
  */
-static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
-		     int size, int msg_flags)
+static int sock_xmit(struct nbd_device *nbd, int index, int send,
+		     struct iov_iter *iter, int msg_flags)
 {
 	struct socket *sock = nbd->socks[index]->sock;
 	int result;
 	struct msghdr msg;
-	struct kvec iov = {.iov_base = buf, .iov_len = size};
 	unsigned long pflags = current->flags;
 
 	if (unlikely(!sock)) {
@@ -225,8 +224,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
 		return -EINVAL;
 	}
 
-	iov_iter_kvec(&msg.msg_iter, (send ? WRITE : READ) | ITER_KVEC,
-		      &iov, 1, size);
+	msg.msg_iter = *iter;
 
 	current->flags |= PF_MEMALLOC;
 	do {
@@ -254,28 +252,21 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
 	return result;
 }
 
-static inline int sock_send_bvec(struct nbd_device *nbd, int index,
-				 struct bio_vec *bvec, int flags)
-{
-	int result;
-	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
-			   bvec->bv_len, flags);
-	kunmap(bvec->bv_page);
-	return result;
-}
-
 /* always call with the tx_lock held */
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	int result, flags;
-	struct nbd_request request;
+	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
+	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
+	struct iov_iter from;
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
 	u32 type;
 	u32 tag = blk_mq_unique_tag(req);
 
+	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+
 	if (req_op(req) == REQ_OP_DISCARD)
 		type = NBD_CMD_TRIM;
 	else if (req_op(req) == REQ_OP_FLUSH)
@@ -285,8 +276,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	else
 		type = NBD_CMD_READ;
 
-	memset(&request, 0, sizeof(request));
-	request.magic = htonl(NBD_REQUEST_MAGIC);
 	request.type = htonl(type);
 	if (type != NBD_CMD_FLUSH) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -297,7 +286,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
 		cmd, nbdcmd_to_ascii(type),
 		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
-	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
+	result = sock_xmit(nbd, index, 1, &from,
 			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -322,7 +311,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 			flags = MSG_MORE;
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 				cmd, bvec.bv_len);
-			result = sock_send_bvec(nbd, index, &bvec, flags);
+			iov_iter_bvec(&from, ITER_BVEC | WRITE,
+				      &bvec, 1, bvec.bv_len);
+			result = sock_xmit(nbd, index, 1, &from, flags);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
@@ -343,17 +334,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	return 0;
 }
 
-static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
-				 struct bio_vec *bvec)
-{
-	int result;
-	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
-			   bvec->bv_len, MSG_WAITALL);
-	kunmap(bvec->bv_page);
-	return result;
-}
-
 /* NULL returned = something went wrong, inform userspace */
 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 {
@@ -363,9 +343,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 	struct request *req = NULL;
 	u16 hwq;
 	u32 tag;
+	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
+	struct iov_iter to;
 
 	reply.magic = 0;
-	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
+	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
+	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
 	if (result <= 0) {
 		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
 		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -405,7 +388,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 		struct bio_vec bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-			result = sock_recv_bvec(nbd, index, &bvec);
+			iov_iter_bvec(&to, ITER_BVEC | READ,
+				      &bvec, 1, bvec.bv_len);
+			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
 					result);
@@ -645,14 +630,17 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
 
 static void send_disconnects(struct nbd_device *nbd)
 {
-	struct nbd_request request = {};
+	struct nbd_request request = {
+		.magic = htonl(NBD_REQUEST_MAGIC),
+		.type = htonl(NBD_CMD_DISC),
+	};
+	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
+	struct iov_iter from;
 	int i, ret;
 
-	request.magic = htonl(NBD_REQUEST_MAGIC);
-	request.type = htonl(NBD_CMD_DISC);
-
 	for (i = 0; i < nbd->num_connections; i++) {
-		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
+		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+		ret = sock_xmit(nbd, i, 1, &from, 0);
 		if (ret <= 0)
 			dev_err(disk_to_dev(nbd->disk),
 				"Send disconnect failed %d\n", ret);
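The receive side of the protocol follows the same shape after this patch: the fixed-size reply header is wrapped in a kvec-backed iterator and read with MSG_WAITALL so a short read cannot leave the stream misaligned. The sketch below is a hedged illustration only; `recv_reply_example()` and the `reply_example` layout are invented for the example, and the `READ | ITER_KVEC` direction flags again follow the convention of this kernel tree.

```c
#include <linux/net.h>		/* struct socket, sock_recvmsg() */
#include <linux/socket.h>	/* MSG_WAITALL, MSG_NOSIGNAL */
#include <linux/types.h>	/* __be32 */
#include <linux/uio.h>		/* struct kvec, iov_iter_kvec() */

/* Illustrative stand-in for a fixed-size on-the-wire reply header. */
struct reply_example {
	__be32 magic;
	__be32 error;
	char   handle[8];
};

/* Hypothetical helper: read the whole header through one kvec segment. */
static int recv_reply_example(struct socket *sock, struct reply_example *reply)
{
	struct kvec iov = { .iov_base = reply, .iov_len = sizeof(*reply) };
	struct msghdr msg = { .msg_flags = MSG_WAITALL | MSG_NOSIGNAL };

	/* describe the destination buffer as a kvec-backed iov_iter */
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, sizeof(*reply));

	return sock_recvmsg(sock, &msg, msg.msg_flags);
}
```

With both the header (kvec) and the payload (bvec) expressed as iterators, one transmit/receive routine can serve every caller, which is exactly the consolidation the diff above performs.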