author	Al Viro <viro@zeniv.linux.org.uk>	2014-11-20 09:21:14 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-11-24 05:16:43 -0500
commit	c310e72c89926e06138e4881f21e4c8da3e7ef18 (patch)
tree	cf40fae1e3639f0ff3607561c1e4fd484b7b53e9 /net/rds
parent	7424ce65065852bdf7a040bf2490da4a8fc4b464 (diff)
rds: switch ->inc_copy_to_user() to passing iov_iter
instances get considerably simpler from that...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
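As a rough illustration of the conversion, a minimal sketch rather than the RDS code itself (the two helper names below are made up; iov_iter_init(), iov_iter_count() and copy_page_to_iter() are the real primitives this patch switches to): the caller packs msg->msg_iov into an iov_iter once, and the copy helper advances the iterator as it copies, so the per-segment bookkeeping (iov++, iov_off, "size - copied") disappears from every instance.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Hypothetical callee: copy one page fragment into the user's buffers. */
static int example_page_to_user(struct page *page, unsigned int offset,
				unsigned int bytes, struct iov_iter *to)
{
	/* copy_page_to_iter() advances 'to' and returns the bytes copied */
	if (copy_page_to_iter(page, offset, bytes, to) != bytes)
		return -EFAULT;
	return bytes;
}

/* Hypothetical caller: build the iterator once per recvmsg() call. */
static int example_recv_copy(struct msghdr *msg, size_t size,
			     struct page *page, unsigned int len)
{
	struct iov_iter to;

	/* READ: data flows from the kernel into the user's iovecs */
	iov_iter_init(&to, READ, msg->msg_iov, msg->msg_iovlen, size);
	return example_page_to_user(page, 0,
				    min_t(unsigned int, len,
					  iov_iter_count(&to)),
				    &to);
}

A short copy shows up as copy_page_to_iter() returning fewer bytes than requested, which is why the converted instances below can simply return -EFAULT instead of threading a partial-copy result back through "copied".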
Diffstat (limited to 'net/rds')
-rw-r--r--  net/rds/ib.h        3
-rw-r--r--  net/rds/ib_recv.c  37
-rw-r--r--  net/rds/iw.h        3
-rw-r--r--  net/rds/iw_recv.c  37
-rw-r--r--  net/rds/message.c  35
-rw-r--r--  net/rds/rds.h       6
-rw-r--r--  net/rds/recv.c      5
-rw-r--r--  net/rds/tcp.h       3
-rw-r--r--  net/rds/tcp_recv.c 38
9 files changed, 47 insertions, 120 deletions
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 7280ab8810c2..c36d713229e0 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -316,8 +316,7 @@ int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
 void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
 void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
 void rds_ib_inc_free(struct rds_incoming *inc);
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
-			    size_t size);
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_ib_recv_tasklet_fn(unsigned long data);
 void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index d67de453c35a..1b981a4e42c2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -472,15 +472,12 @@ static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache
 	return head;
 }
 
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
-			    size_t size)
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
 	struct rds_ib_incoming *ibinc;
 	struct rds_page_frag *frag;
-	struct iovec *iov = first_iov;
 	unsigned long to_copy;
 	unsigned long frag_off = 0;
-	unsigned long iov_off = 0;
 	int copied = 0;
 	int ret;
 	u32 len;
@@ -489,37 +486,25 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
 	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 	len = be32_to_cpu(inc->i_hdr.h_len);
 
-	while (copied < size && copied < len) {
+	while (iov_iter_count(to) && copied < len) {
 		if (frag_off == RDS_FRAG_SIZE) {
 			frag = list_entry(frag->f_item.next,
 					  struct rds_page_frag, f_item);
 			frag_off = 0;
 		}
-		while (iov_off == iov->iov_len) {
-			iov_off = 0;
-			iov++;
-		}
-
-		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
-		to_copy = min_t(size_t, to_copy, size - copied);
+		to_copy = min_t(unsigned long, iov_iter_count(to),
+				RDS_FRAG_SIZE - frag_off);
 		to_copy = min_t(unsigned long, to_copy, len - copied);
 
-		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
-			 "[%p, %u] + %lu\n",
-			 to_copy, iov->iov_base, iov->iov_len, iov_off,
-			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
-
 		/* XXX needs + offset for multiple recvs per page */
-		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
-					    frag->f_sg.offset + frag_off,
-					    iov->iov_base + iov_off,
-					    to_copy);
-		if (ret) {
-			copied = ret;
-			break;
-		}
+		rds_stats_add(s_copy_to_user, to_copy);
+		ret = copy_page_to_iter(sg_page(&frag->f_sg),
+					frag->f_sg.offset + frag_off,
+					to_copy,
+					to);
+		if (ret != to_copy)
+			return -EFAULT;
 
-		iov_off += to_copy;
 		frag_off += to_copy;
 		copied += to_copy;
 	}
diff --git a/net/rds/iw.h b/net/rds/iw.h
index 04ce3b193f79..cbe6674e31ee 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -325,8 +325,7 @@ int rds_iw_recv(struct rds_connection *conn);
 int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
 			gfp_t page_gfp, int prefill);
 void rds_iw_inc_free(struct rds_incoming *inc);
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
-			    size_t size);
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_iw_recv_tasklet_fn(unsigned long data);
 void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index aa8bf6786008..a66d1794b2d0 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -303,15 +303,12 @@ void rds_iw_inc_free(struct rds_incoming *inc)
 	BUG_ON(atomic_read(&rds_iw_allocation) < 0);
 }
 
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
-			    size_t size)
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
 	struct rds_iw_incoming *iwinc;
 	struct rds_page_frag *frag;
-	struct iovec *iov = first_iov;
 	unsigned long to_copy;
 	unsigned long frag_off = 0;
-	unsigned long iov_off = 0;
 	int copied = 0;
 	int ret;
 	u32 len;
@@ -320,37 +317,25 @@ int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
 	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
 	len = be32_to_cpu(inc->i_hdr.h_len);
 
-	while (copied < size && copied < len) {
+	while (iov_iter_count(to) && copied < len) {
 		if (frag_off == RDS_FRAG_SIZE) {
 			frag = list_entry(frag->f_item.next,
 					  struct rds_page_frag, f_item);
 			frag_off = 0;
 		}
-		while (iov_off == iov->iov_len) {
-			iov_off = 0;
-			iov++;
-		}
-
-		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
-		to_copy = min_t(size_t, to_copy, size - copied);
+		to_copy = min_t(unsigned long, iov_iter_count(to),
+				RDS_FRAG_SIZE - frag_off);
 		to_copy = min_t(unsigned long, to_copy, len - copied);
 
-		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
-			 "[%p, %lu] + %lu\n",
-			 to_copy, iov->iov_base, iov->iov_len, iov_off,
-			 frag->f_page, frag->f_offset, frag_off);
-
 		/* XXX needs + offset for multiple recvs per page */
-		ret = rds_page_copy_to_user(frag->f_page,
-					    frag->f_offset + frag_off,
-					    iov->iov_base + iov_off,
-					    to_copy);
-		if (ret) {
-			copied = ret;
-			break;
-		}
+		rds_stats_add(s_copy_to_user, to_copy);
+		ret = copy_page_to_iter(frag->f_page,
+					frag->f_offset + frag_off,
+					to_copy,
+					to);
+		if (ret != to_copy)
+			return -EFAULT;
 
-		iov_off += to_copy;
 		frag_off += to_copy;
 		copied += to_copy;
 	}
diff --git a/net/rds/message.c b/net/rds/message.c
index aba232f9f308..7a546e089a57 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -325,14 +325,11 @@ out:
 	return ret;
 }
 
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
-				 struct iovec *first_iov, size_t size)
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
 	struct rds_message *rm;
-	struct iovec *iov;
 	struct scatterlist *sg;
 	unsigned long to_copy;
-	unsigned long iov_off;
 	unsigned long vec_off;
 	int copied;
 	int ret;
@@ -341,36 +338,20 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
 	rm = container_of(inc, struct rds_message, m_inc);
 	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 
-	iov = first_iov;
-	iov_off = 0;
 	sg = rm->data.op_sg;
 	vec_off = 0;
 	copied = 0;
 
-	while (copied < size && copied < len) {
-		while (iov_off == iov->iov_len) {
-			iov_off = 0;
-			iov++;
-		}
-
-		to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
-		to_copy = min_t(size_t, to_copy, size - copied);
+	while (iov_iter_count(to) && copied < len) {
+		to_copy = min(iov_iter_count(to), sg->length - vec_off);
 		to_copy = min_t(unsigned long, to_copy, len - copied);
 
-		rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
-			 "sg [%p, %u, %u] + %lu\n",
-			 to_copy, iov->iov_base, iov->iov_len, iov_off,
-			 sg_page(sg), sg->offset, sg->length, vec_off);
-
-		ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
-					    iov->iov_base + iov_off,
-					    to_copy);
-		if (ret) {
-			copied = ret;
-			break;
-		}
+		rds_stats_add(s_copy_to_user, to_copy);
+		ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
+					to_copy, to);
+		if (ret != to_copy)
+			return -EFAULT;
 
-		iov_off += to_copy;
 		vec_off += to_copy;
 		copied += to_copy;
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 48f8ffc60f8f..b22dad91697c 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -431,8 +431,7 @@ struct rds_transport {
 	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
 	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
 	int (*recv)(struct rds_connection *conn);
-	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
-				size_t size);
+	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
 	void (*inc_free)(struct rds_incoming *inc);
 
 	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
@@ -667,8 +666,7 @@ int rds_message_add_extension(struct rds_header *hdr,
 int rds_message_next_extension(struct rds_header *hdr,
 			       unsigned int *pos, void *buf, unsigned int *buflen);
 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
-				 struct iovec *first_iov, size_t size);
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_message_inc_free(struct rds_incoming *inc);
 void rds_message_addref(struct rds_message *rm);
 void rds_message_put(struct rds_message *rm);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index bd82522534fc..47d7b1029b33 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -404,6 +404,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
 	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
 	struct rds_incoming *inc = NULL;
+	struct iov_iter to;
 
 	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
 	timeo = sock_rcvtimeo(sk, nonblock);
@@ -449,8 +450,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
 			 &inc->i_conn->c_faddr,
 			 ntohs(inc->i_hdr.h_sport));
-		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
-							     size);
+		iov_iter_init(&to, READ, msg->msg_iov, msg->msg_iovlen, size);
+		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &to);
 		if (ret < 0)
 			break;
 
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 65637491f728..0dbdd37162da 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -69,8 +69,7 @@ void rds_tcp_recv_exit(void);
 void rds_tcp_data_ready(struct sock *sk);
 int rds_tcp_recv(struct rds_connection *conn);
 void rds_tcp_inc_free(struct rds_incoming *inc);
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
-			     size_t size);
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 
 /* tcp_send.c */
 void rds_tcp_xmit_prepare(struct rds_connection *conn);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 9ae6e0a264ec..fbc5ef88bc0e 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -59,50 +59,30 @@ void rds_tcp_inc_free(struct rds_incoming *inc)
 /*
  * this is pretty lame, but, whatever.
  */
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
-			     size_t size)
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
 	struct rds_tcp_incoming *tinc;
-	struct iovec *iov, tmp;
 	struct sk_buff *skb;
-	unsigned long to_copy, skb_off;
 	int ret = 0;
 
-	if (size == 0)
+	if (!iov_iter_count(to))
 		goto out;
 
 	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
-	iov = first_iov;
-	tmp = *iov;
 
 	skb_queue_walk(&tinc->ti_skb_list, skb) {
-		skb_off = 0;
-		while (skb_off < skb->len) {
-			while (tmp.iov_len == 0) {
-				iov++;
-				tmp = *iov;
-			}
-
-			to_copy = min(tmp.iov_len, size);
+		unsigned long to_copy, skb_off;
+		for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
+			to_copy = iov_iter_count(to);
 			to_copy = min(to_copy, skb->len - skb_off);
 
-			rdsdebug("ret %d size %zu skb %p skb_off %lu "
-				 "skblen %d iov_base %p iov_len %zu cpy %lu\n",
-				 ret, size, skb, skb_off, skb->len,
-				 tmp.iov_base, tmp.iov_len, to_copy);
-
-			/* modifies tmp as it copies */
-			if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
-						    to_copy)) {
-				ret = -EFAULT;
-				goto out;
-			}
+			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
+				return -EFAULT;
 
 			rds_stats_add(s_copy_to_user, to_copy);
-			size -= to_copy;
 			ret += to_copy;
-			skb_off += to_copy;
-			if (size == 0)
+
+			if (!iov_iter_count(to))
 				goto out;
 		}
 	}