author		Linus Torvalds <torvalds@linux-foundation.org>	2017-03-02 18:16:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-03-02 18:16:38 -0500
commit		69fd110eb650ea7baa82158f3b89a7d86da1d056 (patch)
tree		091e4e8e5863654042638d4165eecdc856bc2bff
parent		821fd6f6cb6500cd04a6c7e8f701f9b311a5c2b3 (diff)
parent		4038a2a37e3595c299aecdaa20cb01ceb9c78303 (diff)
Merge branch 'work.sendmsg' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs sendmsg updates from Al Viro:
 "More sendmsg work.

  This is a fairly separate isolated stuff (there's a continuation
  around lustre, but that one was too late to soak in -next), thus the
  separate pull request"

* 'work.sendmsg' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ncpfs: switch to sock_sendmsg()
  ncpfs: don't mess with manually advancing iovec on send
  ncpfs: sendmsg does *not* bugger iovec these days
  ceph_tcp_sendpage(): use ITER_BVEC sendmsg
  afs_send_pages(): use ITER_BVEC
  rds: remove dead code
  ceph: switch to sock_recvmsg()
  usbip_recv(): switch to sock_recvmsg()
  iscsi_target: deal with short writes on the tx side
  [nbd] pass iov_iter to nbd_xmit()
  [nbd] switch sock_xmit() to sock_{send,recv}msg()
  [drbd] use sock_sendmsg()
 drivers/block/drbd/drbd_main.c            |  11
 drivers/block/nbd.c                       |  76
 drivers/target/iscsi/iscsi_target_util.c  |  64
 drivers/usb/usbip/usbip_common.c          |  32
 fs/afs/rxrpc.c                            |  15
 fs/ncpfs/sock.c                           | 111
 net/ceph/messenger.c                      |  44
 net/rds/page.c                            |  29
 net/rds/rds.h                             |   7
 9 files changed, 147 insertions(+), 242 deletions(-)
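The pattern repeated across every file in this series: rather than calling kernel_sendmsg()/kernel_recvmsg() and manually advancing a kvec after each short transfer, the caller attaches an iov_iter to msg.msg_iter once and lets sock_sendmsg()/sock_recvmsg() consume it. A minimal before/after sketch of that shape (the helper names are illustrative, not functions from the tree):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Old shape: the caller owns the bookkeeping for short sends. */
static int send_buf_old(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	size_t sent = 0;
	int rv;

	while (sent < len) {
		rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (rv <= 0)
			return rv ? rv : -EPIPE;
		sent += rv;
		iov.iov_base += rv;	/* manual advance after a short send */
		iov.iov_len -= rv;
	}
	return len;
}

/* New shape: msg.msg_iter carries the position, no manual advance. */
static int send_buf_new(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int rv;

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, len);
	while (msg_data_left(&msg)) {
		rv = sock_sendmsg(sock, &msg);
		if (rv <= 0)
			return rv ? rv : -EPIPE;
	}
	return len;
}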
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 116509852a34..37000c6bb7f4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1846,7 +1846,7 @@ int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_requ
 int drbd_send(struct drbd_connection *connection, struct socket *sock,
 	      void *buf, size_t size, unsigned msg_flags)
 {
-	struct kvec iov;
+	struct kvec iov = {.iov_base = buf, .iov_len = size};
 	struct msghdr msg;
 	int rv, sent = 0;
 
@@ -1855,15 +1855,14 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
 
 	/* THINK if (signal_pending) return ... ? */
 
-	iov.iov_base = buf;
-	iov.iov_len = size;
-
 	msg.msg_name = NULL;
 	msg.msg_namelen = 0;
 	msg.msg_control = NULL;
 	msg.msg_controllen = 0;
 	msg.msg_flags = msg_flags | MSG_NOSIGNAL;
 
+	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
+
 	if (sock == connection->data.socket) {
 		rcu_read_lock();
 		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
@@ -1871,7 +1870,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
 		drbd_update_congested(connection);
 	}
 	do {
-		rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+		rv = sock_sendmsg(sock, &msg);
 		if (rv == -EAGAIN) {
 			if (we_should_drop_the_connection(connection, sock))
 				break;
@@ -1885,8 +1884,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
 		if (rv < 0)
 			break;
 		sent += rv;
-		iov.iov_base += rv;
-		iov.iov_len -= rv;
 	} while (sent < size);
 
 	if (sock == connection->data.socket)
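With the iterator in msg.msg_iter, drbd's retry loop above no longer edits iov_base/iov_len by hand: every successful sock_sendmsg() advances the iterator, and an -EAGAIN return has consumed nothing, so the same msghdr can simply be submitted again. A hedged sketch of that retry shape, stripped of drbd's congestion and ko_count handling (names are illustrative):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>

/* Illustrative only: resend until msg->msg_iter is drained. */
static int send_whole_iter(struct socket *sock, struct msghdr *msg)
{
	int rv, sent = 0;

	do {
		rv = sock_sendmsg(sock, msg);
		if (rv == -EAGAIN)
			continue;		/* nothing consumed, retry as-is */
		if (rv <= 0)
			return rv ? rv : -EPIPE;
		sent += rv;			/* msg->msg_iter already advanced */
	} while (msg_data_left(msg));

	return sent;
}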
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 0bf2b21a62cb..1541cb880744 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -201,13 +201,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 /*
  * Send or receive packet.
  */
-static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
-		     int size, int msg_flags)
+static int sock_xmit(struct nbd_device *nbd, int index, int send,
+		     struct iov_iter *iter, int msg_flags)
 {
 	struct socket *sock = nbd->socks[index]->sock;
 	int result;
 	struct msghdr msg;
-	struct kvec iov;
 	unsigned long pflags = current->flags;
 
 	if (unlikely(!sock)) {
@@ -217,11 +216,11 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
 		return -EINVAL;
 	}
 
+	msg.msg_iter = *iter;
+
 	current->flags |= PF_MEMALLOC;
 	do {
 		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
-		iov.iov_base = buf;
-		iov.iov_len = size;
 		msg.msg_name = NULL;
 		msg.msg_namelen = 0;
 		msg.msg_control = NULL;
@@ -229,47 +228,37 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
 		msg.msg_flags = msg_flags | MSG_NOSIGNAL;
 
 		if (send)
-			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
+			result = sock_sendmsg(sock, &msg);
 		else
-			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
-						msg.msg_flags);
+			result = sock_recvmsg(sock, &msg, msg.msg_flags);
 
 		if (result <= 0) {
 			if (result == 0)
 				result = -EPIPE; /* short read */
 			break;
 		}
-		size -= result;
-		buf += result;
-	} while (size > 0);
+	} while (msg_data_left(&msg));
 
 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 
 	return result;
 }
 
-static inline int sock_send_bvec(struct nbd_device *nbd, int index,
-				 struct bio_vec *bvec, int flags)
-{
-	int result;
-	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
-			   bvec->bv_len, flags);
-	kunmap(bvec->bv_page);
-	return result;
-}
-
 /* always call with the tx_lock held */
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	int result;
-	struct nbd_request request;
+	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
+	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
+	struct iov_iter from;
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
 	u32 type;
 	u32 tag = blk_mq_unique_tag(req);
 
+	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+
 	switch (req_op(req)) {
 	case REQ_OP_DISCARD:
 		type = NBD_CMD_TRIM;
@@ -294,8 +283,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 		return -EIO;
 	}
 
-	memset(&request, 0, sizeof(request));
-	request.magic = htonl(NBD_REQUEST_MAGIC);
 	request.type = htonl(type);
 	if (type != NBD_CMD_FLUSH) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -306,7 +293,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
 		cmd, nbdcmd_to_ascii(type),
 		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
-	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
+	result = sock_xmit(nbd, index, 1, &from,
 			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -329,7 +316,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
 		dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 			cmd, bvec.bv_len);
-		result = sock_send_bvec(nbd, index, &bvec, flags);
+		iov_iter_bvec(&from, ITER_BVEC | WRITE,
+			      &bvec, 1, bvec.bv_len);
+		result = sock_xmit(nbd, index, 1, &from, flags);
 		if (result <= 0) {
 			dev_err(disk_to_dev(nbd->disk),
 				"Send data failed (result %d)\n",
@@ -350,17 +339,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	return 0;
 }
 
-static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
-				 struct bio_vec *bvec)
-{
-	int result;
-	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
-			   bvec->bv_len, MSG_WAITALL);
-	kunmap(bvec->bv_page);
-	return result;
-}
-
 /* NULL returned = something went wrong, inform userspace */
 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 {
@@ -370,9 +348,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 	struct request *req = NULL;
 	u16 hwq;
 	u32 tag;
+	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
+	struct iov_iter to;
 
 	reply.magic = 0;
-	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
+	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
+	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
 	if (result <= 0) {
 		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
 		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -412,7 +393,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 		struct bio_vec bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-			result = sock_recv_bvec(nbd, index, &bvec);
+			iov_iter_bvec(&to, ITER_BVEC | READ,
+				      &bvec, 1, bvec.bv_len);
+			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
 					result);
@@ -641,14 +624,17 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
 
 static void send_disconnects(struct nbd_device *nbd)
 {
-	struct nbd_request request = {};
+	struct nbd_request request = {
+		.magic = htonl(NBD_REQUEST_MAGIC),
+		.type = htonl(NBD_CMD_DISC),
+	};
+	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
+	struct iov_iter from;
 	int i, ret;
 
-	request.magic = htonl(NBD_REQUEST_MAGIC);
-	request.type = htonl(NBD_CMD_DISC);
-
 	for (i = 0; i < nbd->num_connections; i++) {
-		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
+		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+		ret = sock_xmit(nbd, i, 1, &from, 0);
 		if (ret <= 0)
 			dev_err(disk_to_dev(nbd->disk),
 				"Send disconnect failed %d\n", ret);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index cc5958882431..5041a9c8bdcb 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1305,39 +1305,6 @@ static int iscsit_do_rx_data(
 	return total_rx;
 }
 
-static int iscsit_do_tx_data(
-	struct iscsi_conn *conn,
-	struct iscsi_data_count *count)
-{
-	int ret, iov_len;
-	struct kvec *iov_p;
-	struct msghdr msg;
-
-	if (!conn || !conn->sock || !conn->conn_ops)
-		return -1;
-
-	if (count->data_length <= 0) {
-		pr_err("Data length is: %d\n", count->data_length);
-		return -1;
-	}
-
-	memset(&msg, 0, sizeof(struct msghdr));
-
-	iov_p = count->iov;
-	iov_len = count->iov_count;
-
-	ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
-			     count->data_length);
-	if (ret != count->data_length) {
-		pr_err("Unexpected ret: %d send data %d\n",
-		       ret, count->data_length);
-		return -EPIPE;
-	}
-	pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
-
-	return ret;
-}
-
 int rx_data(
 	struct iscsi_conn *conn,
 	struct kvec *iov,
@@ -1364,18 +1331,35 @@ int tx_data(
 	int iov_count,
 	int data)
 {
-	struct iscsi_data_count c;
+	struct msghdr msg;
+	int total_tx = 0;
 
 	if (!conn || !conn->sock || !conn->conn_ops)
 		return -1;
 
-	memset(&c, 0, sizeof(struct iscsi_data_count));
-	c.iov = iov;
-	c.iov_count = iov_count;
-	c.data_length = data;
-	c.type = ISCSI_TX_DATA;
+	if (data <= 0) {
+		pr_err("Data length is: %d\n", data);
+		return -1;
+	}
+
+	memset(&msg, 0, sizeof(struct msghdr));
+
+	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
+		      iov, iov_count, data);
+
+	while (msg_data_left(&msg)) {
+		int tx_loop = sock_sendmsg(conn->sock, &msg);
+		if (tx_loop <= 0) {
+			pr_debug("tx_loop: %d total_tx %d\n",
+				 tx_loop, total_tx);
+			return tx_loop;
+		}
+		total_tx += tx_loop;
+		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
+			 tx_loop, total_tx, data);
+	}
 
-	return iscsit_do_tx_data(conn, &c);
+	return total_tx;
 }
 
 void iscsit_collect_login_stats(
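tx_data() now absorbs the old iscsit_do_tx_data(), and the fix named in the series ("deal with short writes on the tx side") is that a short sock_sendmsg() is no longer reported as -EPIPE: the iterator remembers which kvec the transfer stopped in and at what offset, so the loop simply sends again. A sketch of that multi-segment pattern (illustrative names, same idea as the new tx_data()):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative: send an array of kvecs, resuming after short writes. */
static int send_kvec_array(struct socket *sock, struct kvec *iov,
			   int iov_count, size_t total)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	size_t sent = 0;

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, iov_count, total);
	while (msg_data_left(&msg)) {
		int rv = sock_sendmsg(sock, &msg);

		if (rv <= 0)
			return rv ? rv : -EPIPE;
		sent += rv;	/* may stop mid-kvec; the iterator keeps track */
	}
	return sent;
}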
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 1a6f78d7d027..cab2b71a80d0 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -327,13 +327,11 @@ EXPORT_SYMBOL_GPL(usbip_dump_header);
 int usbip_recv(struct socket *sock, void *buf, int size)
 {
 	int result;
-	struct msghdr msg;
-	struct kvec iov;
+	struct kvec iov = {.iov_base = buf, .iov_len = size};
+	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
 	int total = 0;
 
-	/* for blocks of if (usbip_dbg_flag_xmit) */
-	char *bp = buf;
-	int osize = size;
+	iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
 
 	usbip_dbg_xmit("enter\n");
 
@@ -344,26 +342,18 @@ int usbip_recv(struct socket *sock, void *buf, int size)
 	}
 
 	do {
+		int sz = msg_data_left(&msg);
 		sock->sk->sk_allocation = GFP_NOIO;
-		iov.iov_base = buf;
-		iov.iov_len = size;
-		msg.msg_name = NULL;
-		msg.msg_namelen = 0;
-		msg.msg_control = NULL;
-		msg.msg_controllen = 0;
-		msg.msg_flags = MSG_NOSIGNAL;
-
-		result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
+
+		result = sock_recvmsg(sock, &msg, MSG_WAITALL);
 		if (result <= 0) {
 			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
-				 sock, buf, size, result, total);
+				 sock, buf + total, sz, result, total);
 			goto err;
 		}
 
-		size -= result;
-		buf += result;
 		total += result;
-	} while (size > 0);
+	} while (msg_data_left(&msg));
 
 	if (usbip_dbg_flag_xmit) {
 		if (!in_interrupt())
@@ -372,9 +362,9 @@ int usbip_recv(struct socket *sock, void *buf, int size)
 			pr_debug("interrupt :");
 
 		pr_debug("receiving....\n");
-		usbip_dump_buffer(bp, osize);
-		pr_debug("received, osize %d ret %d size %d total %d\n",
-			 osize, result, size, total);
+		usbip_dump_buffer(buf, size);
+		pr_debug("received, osize %d ret %d size %zd total %d\n",
+			 size, result, msg_data_left(&msg), total);
 	}
 
 	return total;
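On the receive side the same idea removes usbip_recv()'s buf/size arithmetic: MSG_WAITALL asks for the whole remainder and msg_data_left() reports what is still outstanding, which is also what the reworked debug print uses. A sketch of such a receive loop in isolation (illustrative names, not the usbip code; GFP tweaking and debug output elided):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative: receive exactly @size bytes into @buf or fail. */
static int recv_exact(struct socket *sock, void *buf, int size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int total = 0;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
	do {
		int rv = sock_recvmsg(sock, &msg, MSG_WAITALL);

		if (rv <= 0)
			return rv ? rv : -ECONNRESET;	/* 0: peer closed */
		total += rv;
	} while (msg_data_left(&msg));

	return total;
}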
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 95f42872b787..f3c1b40eb11f 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -260,8 +260,7 @@ void afs_flat_call_destructor(struct afs_call *call)
 /*
  * attach the data from a bunch of pages on an inode to a call
  */
-static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
-			  struct kvec *iov)
+static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
 {
 	struct page *pages[8];
 	unsigned count, n, loop, offset, to;
@@ -284,20 +283,21 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
 
 	loop = 0;
 	do {
+		struct bio_vec bvec = {.bv_page = pages[loop],
+				       .bv_offset = offset};
 		msg->msg_flags = 0;
 		to = PAGE_SIZE;
 		if (first + loop >= last)
 			to = call->last_to;
 		else
 			msg->msg_flags = MSG_MORE;
-		iov->iov_base = kmap(pages[loop]) + offset;
-		iov->iov_len = to - offset;
+		bvec.bv_len = to - offset;
 		offset = 0;
 
 		_debug("- range %u-%u%s",
 		       offset, to, msg->msg_flags ? " [more]" : "");
-		iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
-			      iov, 1, to - offset);
+		iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
+			      &bvec, 1, to - offset);
 
 		/* have to change the state *before* sending the last
 		 * packet as RxRPC might give us the reply before it
@@ -306,7 +306,6 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
 			call->state = AFS_CALL_AWAIT_REPLY;
 		ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
 					     msg, to - offset);
-		kunmap(pages[loop]);
 		if (ret < 0)
 			break;
 	} while (++loop < count);
@@ -391,7 +390,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 		goto error_do_abort;
 
 	if (call->send_pages) {
-		ret = afs_send_pages(call, &msg, iov);
+		ret = afs_send_pages(call, &msg);
 		if (ret < 0)
 			goto error_do_abort;
 	}
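afs_send_pages() used to kmap() each page into a kvec and had to kunmap() it on every exit path; a stack-local bio_vec per iteration lets the iterator carry the page itself. The sketch below shows that per-page shape against a plain socket; the real code feeds the msghdr to rxrpc_kernel_send_data(), and the names and offset handling here are illustrative assumptions:

#include <linux/bvec.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative: send pages[0..nr-1]; the first page starts at @offset,
 * data in the last page ends at @last_to. */
static int send_page_run(struct socket *sock, struct page **pages,
			 unsigned int nr, unsigned int offset,
			 unsigned int last_to)
{
	struct msghdr msg = { .msg_flags = 0 };
	unsigned int i;

	for (i = 0; i < nr; i++) {
		unsigned int to = (i == nr - 1) ? last_to : PAGE_SIZE;
		struct bio_vec bvec = {
			.bv_page = pages[i],
			.bv_offset = offset,
			.bv_len = to - offset,
		};
		int rv;

		msg.msg_flags = (i == nr - 1) ? 0 : MSG_MORE;
		iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, bvec.bv_len);
		do {
			rv = sock_sendmsg(sock, &msg);
			if (rv <= 0)
				return rv ? rv : -EPIPE;
		} while (msg_data_left(&msg));
		offset = 0;		/* only the first page is partial */
	}
	return 0;
}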
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 97b111d79489..4bfeae289b00 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -40,19 +40,12 @@ static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
 	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
 }
 
-static inline int do_send(struct socket *sock, struct kvec *vec, int count,
-			  int len, unsigned flags)
-{
-	struct msghdr msg = { .msg_flags = flags };
-	return kernel_sendmsg(sock, &msg, vec, count, len);
-}
-
 static int _send(struct socket *sock, const void *buff, int len)
 {
-	struct kvec vec;
-	vec.iov_base = (void *) buff;
-	vec.iov_len = len;
-	return do_send(sock, &vec, 1, len, 0);
+	struct msghdr msg = { .msg_flags = 0 };
+	struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
+	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
+	return sock_sendmsg(sock, &msg);
 }
 
 struct ncp_request_reply {
@@ -63,9 +56,7 @@ struct ncp_request_reply {
 	size_t datalen;
 	int result;
 	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
-	struct kvec* tx_ciov;
-	size_t tx_totallen;
-	size_t tx_iovlen;
+	struct iov_iter from;
 	struct kvec tx_iov[3];
 	u_int16_t tx_type;
 	u_int32_t sign[6];
@@ -205,28 +196,22 @@ static inline void __ncptcp_abort(struct ncp_server *server)
 
 static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
 {
-	struct kvec vec[3];
-	/* sock_sendmsg updates iov pointers for us :-( */
-	memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
-	return do_send(sock, vec, req->tx_iovlen,
-		       req->tx_totallen, MSG_DONTWAIT);
+	struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
+	return sock_sendmsg(sock, &msg);
 }
 
 static void __ncptcp_try_send(struct ncp_server *server)
 {
 	struct ncp_request_reply *rq;
-	struct kvec *iov;
-	struct kvec iovc[3];
+	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
 	int result;
 
 	rq = server->tx.creq;
 	if (!rq)
 		return;
 
-	/* sock_sendmsg updates iov pointers for us :-( */
-	memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
-	result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
-			 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
+	msg.msg_iter = rq->from;
+	result = sock_sendmsg(server->ncp_sock, &msg);
 
 	if (result == -EAGAIN)
 		return;
@@ -236,21 +221,12 @@ static void __ncptcp_try_send(struct ncp_server *server)
 		__ncp_abort_request(server, rq, result);
 		return;
 	}
-	if (result >= rq->tx_totallen) {
+	if (!msg_data_left(&msg)) {
 		server->rcv.creq = rq;
 		server->tx.creq = NULL;
 		return;
 	}
-	rq->tx_totallen -= result;
-	iov = rq->tx_ciov;
-	while (iov->iov_len <= result) {
-		result -= iov->iov_len;
-		iov++;
-		rq->tx_iovlen--;
-	}
-	iov->iov_base += result;
-	iov->iov_len -= result;
-	rq->tx_ciov = iov;
+	rq->from = msg.msg_iter;
 }
 
 static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
@@ -263,22 +239,21 @@ static inline void ncp_init_header(struct ncp_server *server, struct ncp_request
 
 static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
 {
-	size_t signlen;
-	struct ncp_request_header* h;
+	size_t signlen, len = req->tx_iov[1].iov_len;
+	struct ncp_request_header *h = req->tx_iov[1].iov_base;
 
-	req->tx_ciov = req->tx_iov + 1;
-
-	h = req->tx_iov[1].iov_base;
 	ncp_init_header(server, req, h);
-	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
-			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
-			cpu_to_le32(req->tx_totallen), req->sign);
+	signlen = sign_packet(server,
+			req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
+			len - sizeof(struct ncp_request_header) + 1,
+			cpu_to_le32(len), req->sign);
 	if (signlen) {
-		req->tx_ciov[1].iov_base = req->sign;
-		req->tx_ciov[1].iov_len = signlen;
-		req->tx_iovlen += 1;
-		req->tx_totallen += signlen;
+		/* NCP over UDP appends signature */
+		req->tx_iov[2].iov_base = req->sign;
+		req->tx_iov[2].iov_len = signlen;
 	}
+	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
+		      req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
 	server->rcv.creq = req;
 	server->timeout_last = server->m.time_out;
 	server->timeout_retries = server->m.retry_count;
@@ -292,24 +267,23 @@ static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request
 
 static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
 {
-	size_t signlen;
-	struct ncp_request_header* h;
+	size_t signlen, len = req->tx_iov[1].iov_len;
+	struct ncp_request_header *h = req->tx_iov[1].iov_base;
 
-	req->tx_ciov = req->tx_iov;
-	h = req->tx_iov[1].iov_base;
 	ncp_init_header(server, req, h);
 	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
-			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
-			cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;
+			len - sizeof(struct ncp_request_header) + 1,
+			cpu_to_be32(len + 24), req->sign + 4) + 16;
 
 	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
-	req->sign[1] = htonl(req->tx_totallen + signlen);
+	req->sign[1] = htonl(len + signlen);
 	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
 	req->sign[3] = htonl(req->datalen + 8);
+	/* NCP over TCP prepends signature */
 	req->tx_iov[0].iov_base = req->sign;
 	req->tx_iov[0].iov_len = signlen;
-	req->tx_iovlen += 1;
-	req->tx_totallen += signlen;
+	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
+		      req->tx_iov, 2, len + signlen);
 
 	server->tx.creq = req;
 	__ncptcp_try_send(server);
@@ -364,18 +338,17 @@ static void __ncp_next_request(struct ncp_server *server)
 static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
 {
 	if (server->info_sock) {
-		struct kvec iov[2];
-		__be32 hdr[2];
-
-		hdr[0] = cpu_to_be32(len + 8);
-		hdr[1] = cpu_to_be32(id);
-
-		iov[0].iov_base = hdr;
-		iov[0].iov_len = 8;
-		iov[1].iov_base = (void *) data;
-		iov[1].iov_len = len;
+		struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
+		__be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
+		struct kvec iov[2] = {
+			{.iov_base = hdr, .iov_len = 8},
+			{.iov_base = (void *)data, .iov_len = len},
+		};
+
+		iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
+			      iov, 2, len + 8);
 
-		do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
+		sock_sendmsg(server->info_sock, &msg);
 	}
 }
 
@@ -711,8 +684,6 @@ static int do_ncp_rpc_call(struct ncp_server *server, int size,
 	req->datalen = max_reply_size;
 	req->tx_iov[1].iov_base = server->packet;
 	req->tx_iov[1].iov_len = size;
-	req->tx_iovlen = 1;
-	req->tx_totallen = size;
 	req->tx_type = *(u_int16_t*)server->packet;
 
 	result = ncp_add_request(server, req);
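The ncpfs rewrite shows the iterator used as state: the per-send kvec copy plus the tx_ciov/tx_iovlen/tx_totallen bookkeeping collapse into a single iov_iter stored in the request, and because an iov_iter can be copied by plain assignment, the partially-sent position survives in rq->from until the socket is writable again. A reduced sketch of that idea (the request structure and all names here are illustrative, not ncpfs's):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative request: header + payload, with a saved send position. */
struct pending_tx {
	struct kvec vec[2];
	struct iov_iter from;		/* what is still unsent */
};

static void pending_tx_init(struct pending_tx *tx, void *hdr, size_t hdrlen,
			    void *payload, size_t paylen)
{
	tx->vec[0].iov_base = hdr;
	tx->vec[0].iov_len = hdrlen;
	tx->vec[1].iov_base = payload;
	tx->vec[1].iov_len = paylen;
	iov_iter_kvec(&tx->from, WRITE | ITER_KVEC, tx->vec, 2,
		      hdrlen + paylen);
}

/* Returns 0 once everything went out, -EAGAIN if the socket backed up
 * (position saved in tx->from), or another negative error. */
static int pending_tx_push(struct pending_tx *tx, struct socket *sock)
{
	struct msghdr msg = { .msg_iter = tx->from,
			      .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
	int rv = sock_sendmsg(sock, &msg);

	if (rv < 0 && rv != -EAGAIN)
		return rv;
	tx->from = msg.msg_iter;	/* remember how far we got */
	return msg_data_left(&msg) ? -EAGAIN : 0;
}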
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index bad3d4ae43f6..38dcf1eb427d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -520,7 +520,8 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
 	int r;
 
-	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
+	r = sock_recvmsg(sock, &msg, msg.msg_flags);
 	if (r == -EAGAIN)
 		r = 0;
 	return r;
@@ -529,17 +530,20 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
 		     int page_offset, size_t length)
 {
-	void *kaddr;
-	int ret;
+	struct bio_vec bvec = {
+		.bv_page = page,
+		.bv_offset = page_offset,
+		.bv_len = length
+	};
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
 	BUG_ON(page_offset + length > PAGE_SIZE);
-
-	kaddr = kmap(page);
-	BUG_ON(!kaddr);
-	ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
-	kunmap(page);
-
-	return ret;
+	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
+	r = sock_recvmsg(sock, &msg, msg.msg_flags);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 /*
@@ -579,18 +583,28 @@ static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
 		     int offset, size_t size, bool more)
 {
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	struct bio_vec bvec;
 	int ret;
-	struct kvec iov;
 
 	/* sendpage cannot properly handle pages with page_count == 0,
 	 * we need to fallback to sendmsg if that's the case */
 	if (page_count(page) >= 1)
 		return __ceph_tcp_sendpage(sock, page, offset, size, more);
 
-	iov.iov_base = kmap(page) + offset;
-	iov.iov_len = size;
-	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
-	kunmap(page);
+	bvec.bv_page = page;
+	bvec.bv_offset = offset;
+	bvec.bv_len = size;
+
+	if (more)
+		msg.msg_flags |= MSG_MORE;
+	else
+		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */
+
+	iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
+	ret = sock_sendmsg(sock, &msg);
+	if (ret == -EAGAIN)
+		ret = 0;
 
 	return ret;
 }
diff --git a/net/rds/page.c b/net/rds/page.c
index e2b5a5832d3d..7cc57e098ddb 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -45,35 +45,6 @@ struct rds_page_remainder {
 static
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
 
-/*
- * returns 0 on success or -errno on failure.
- *
- * We don't have to worry about flush_dcache_page() as this only works
- * with private pages. If, say, we were to do directed receive to pinned
- * user pages we'd have to worry more about cache coherence. (Though
- * the flush_dcache_page() in get_user_pages() would probably be enough).
- */
-int rds_page_copy_user(struct page *page, unsigned long offset,
-		       void __user *ptr, unsigned long bytes,
-		       int to_user)
-{
-	unsigned long ret;
-	void *addr;
-
-	addr = kmap(page);
-	if (to_user) {
-		rds_stats_add(s_copy_to_user, bytes);
-		ret = copy_to_user(ptr, addr + offset, bytes);
-	} else {
-		rds_stats_add(s_copy_from_user, bytes);
-		ret = copy_from_user(addr + offset, ptr, bytes);
-	}
-	kunmap(page);
-
-	return ret ? -EFAULT : 0;
-}
-EXPORT_SYMBOL_GPL(rds_page_copy_user);
-
 /**
  * rds_page_remainder_alloc - build up regions of a message.
  *
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 07fff73dd4f3..966d2ee1f107 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -798,13 +798,6 @@ static inline int rds_message_verify_checksum(const struct rds_header *hdr)
 /* page.c */
 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 			     gfp_t gfp);
-int rds_page_copy_user(struct page *page, unsigned long offset,
-		       void __user *ptr, unsigned long bytes,
-		       int to_user);
-#define rds_page_copy_to_user(page, offset, ptr, bytes) \
-	rds_page_copy_user(page, offset, ptr, bytes, 1)
-#define rds_page_copy_from_user(page, offset, ptr, bytes) \
-	rds_page_copy_user(page, offset, ptr, bytes, 0)
 void rds_page_exit(void);
 
 /* recv.c */