Diffstat (limited to 'net')
-rw-r--r--  net/Makefile                          2
-rw-r--r--  net/ceph/auth_x.c                    76
-rw-r--r--  net/ceph/auth_x.h                     1
-rw-r--r--  net/ceph/buffer.c                     4
-rw-r--r--  net/ceph/ceph_common.c               21
-rw-r--r--  net/ceph/messenger.c                 34
-rw-r--r--  net/ceph/osd_client.c               118
-rw-r--r--  net/core/net_namespace.c             39
-rw-r--r--  net/nonet.c                          26
-rw-r--r--  net/socket.c                         19
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c     2
-rw-r--r--  net/sunrpc/cache.c                   26
-rw-r--r--  net/sunrpc/svc.c                     42
-rw-r--r--  net/sunrpc/svc_xprt.c               292
-rw-r--r--  net/sunrpc/svcsock.c                  5
-rw-r--r--  net/sunrpc/xdr.c                      9
16 files changed, 423 insertions, 293 deletions
diff --git a/net/Makefile b/net/Makefile
index 95fc694e4ddc..38704bdf941a 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -5,8 +5,6 @@
 # Rewritten to use lists instead of if-statements.
 #
 
-obj-y := nonet.o
-
 obj-$(CONFIG_NET) := socket.o core/
 
 tmp-$(CONFIG_COMPAT) := compat.o
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 7e38b729696a..15845814a0f2 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -8,6 +8,7 @@
 
 #include <linux/ceph/decode.h>
 #include <linux/ceph/auth.h>
+#include <linux/ceph/messenger.h>
 
 #include "crypto.h"
 #include "auth_x.h"
@@ -293,6 +294,11 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
 	dout("build_authorizer for %s %p\n",
 	     ceph_entity_type_name(th->service), au);
 
+	ceph_crypto_key_destroy(&au->session_key);
+	ret = ceph_crypto_key_clone(&au->session_key, &th->session_key);
+	if (ret)
+		return ret;
+
 	maxlen = sizeof(*msg_a) + sizeof(msg_b) +
 		ceph_x_encrypt_buflen(ticket_blob_len);
 	dout(" need len %d\n", maxlen);
@@ -302,8 +308,10 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
 	}
 	if (!au->buf) {
 		au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
-		if (!au->buf)
+		if (!au->buf) {
+			ceph_crypto_key_destroy(&au->session_key);
 			return -ENOMEM;
+		}
 	}
 	au->service = th->service;
 	au->secret_id = th->secret_id;
@@ -329,7 +337,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
 	get_random_bytes(&au->nonce, sizeof(au->nonce));
 	msg_b.struct_v = 1;
 	msg_b.nonce = cpu_to_le64(au->nonce);
-	ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b),
+	ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
 			     p, end - p);
 	if (ret < 0)
 		goto out_buf;
@@ -560,6 +568,8 @@ static int ceph_x_create_authorizer(
 	auth->authorizer_buf_len = au->buf->vec.iov_len;
 	auth->authorizer_reply_buf = au->reply_buf;
 	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+	auth->sign_message = ac->ops->sign_message;
+	auth->check_message_signature = ac->ops->check_message_signature;
 
 	return 0;
 }
@@ -588,17 +598,13 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
 					  struct ceph_authorizer *a, size_t len)
 {
 	struct ceph_x_authorizer *au = (void *)a;
-	struct ceph_x_ticket_handler *th;
 	int ret = 0;
 	struct ceph_x_authorize_reply reply;
 	void *preply = &reply;
 	void *p = au->reply_buf;
 	void *end = p + sizeof(au->reply_buf);
 
-	th = get_ticket_handler(ac, au->service);
-	if (IS_ERR(th))
-		return PTR_ERR(th);
-	ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+	ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply));
 	if (ret < 0)
 		return ret;
 	if (ret != sizeof(reply))
@@ -618,6 +624,7 @@ static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
 {
 	struct ceph_x_authorizer *au = (void *)a;
 
+	ceph_crypto_key_destroy(&au->session_key);
 	ceph_buffer_put(au->buf);
 	kfree(au);
 }
@@ -663,6 +670,59 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
 	memset(&th->validity, 0, sizeof(th->validity));
 }
 
+static int calcu_signature(struct ceph_x_authorizer *au,
+			   struct ceph_msg *msg, __le64 *sig)
+{
+	int ret;
+	char tmp_enc[40];
+	__le32 tmp[5] = {
+		16u, msg->hdr.crc, msg->footer.front_crc,
+		msg->footer.middle_crc, msg->footer.data_crc,
+	};
+	ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
+			     tmp_enc, sizeof(tmp_enc));
+	if (ret < 0)
+		return ret;
+	*sig = *(__le64*)(tmp_enc + 4);
+	return 0;
+}
+
+static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
+			       struct ceph_msg *msg)
+{
+	int ret;
+	if (!auth->authorizer)
+		return 0;
+	ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+			      msg, &msg->footer.sig);
+	if (ret < 0)
+		return ret;
+	msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
+	return 0;
+}
+
+static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
+					  struct ceph_msg *msg)
+{
+	__le64 sig_check;
+	int ret;
+
+	if (!auth->authorizer)
+		return 0;
+	ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+			      msg, &sig_check);
+	if (ret < 0)
+		return ret;
+	if (sig_check == msg->footer.sig)
+		return 0;
+	if (msg->footer.flags & CEPH_MSG_FOOTER_SIGNED)
+		dout("ceph_x_check_message_signature %p has signature %llx "
+		     "expect %llx\n", msg, msg->footer.sig, sig_check);
+	else
+		dout("ceph_x_check_message_signature %p sender did not set "
+		     "CEPH_MSG_FOOTER_SIGNED\n", msg);
+	return -EBADMSG;
+}
 
 static const struct ceph_auth_client_ops ceph_x_ops = {
 	.name = "x",
@@ -677,6 +737,8 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
 	.invalidate_authorizer = ceph_x_invalidate_authorizer,
 	.reset = ceph_x_reset,
 	.destroy = ceph_x_destroy,
+	.sign_message = ceph_x_sign_message,
+	.check_message_signature = ceph_x_check_message_signature,
 };
 
 
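Note: the new .sign_message/.check_message_signature ops are reached from the connection layer through the copies that ceph_x_create_authorizer() stashes on the ceph_auth_handshake above. A minimal sketch of the dispatch helpers, assuming they live in include/linux/ceph/auth.h as part of the companion header change (not shown in this diff):

	/* Assumed shape of the dispatch helpers; they forward to the auth_x
	 * hooks on the handshake and are no-ops for auth methods that do
	 * not sign messages. */
	static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth,
						 struct ceph_msg *msg)
	{
		if (auth->sign_message)
			return auth->sign_message(auth, msg);
		return 0;
	}

	static inline int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
							    struct ceph_msg *msg)
	{
		if (auth->check_message_signature)
			return auth->check_message_signature(auth, msg);
		return 0;
	}

The signature itself is 64 bits taken from the cephx encryption of the four message CRCs; ceph_x_encrypt() emits a 4-byte ciphertext-length header first, which is why calcu_signature() reads the __le64 at tmp_enc + 4.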
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 65ee72082d99..e8b7c6917d47 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
 
 
 struct ceph_x_authorizer {
+	struct ceph_crypto_key session_key;
 	struct ceph_buffer *buf;
 	unsigned int service;
 	u64 nonce;
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 621b5f65407f..add5f921a0ff 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -6,7 +6,7 @@
 
 #include <linux/ceph/buffer.h>
 #include <linux/ceph/decode.h>
-#include <linux/ceph/libceph.h> /* for ceph_kv{malloc,free} */
+#include <linux/ceph/libceph.h> /* for ceph_kvmalloc */
 
 struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
 {
@@ -35,7 +35,7 @@ void ceph_buffer_release(struct kref *kref)
 	struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
 
 	dout("buffer_release %p\n", b);
-	ceph_kvfree(b->vec.iov_base);
+	kvfree(b->vec.iov_base);
 	kfree(b);
 }
 EXPORT_SYMBOL(ceph_buffer_release);
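Note: dropping ceph_kvfree() here (and below in ceph_common.c) is safe because the generic kvfree() performs the same dispatch. Roughly, paraphrased from mm/util.c for context:

	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))	/* came from vmalloc/__vmalloc */
			vfree(addr);
		else				/* came from kmalloc */
			kfree(addr);
	}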
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 58fbfe134f93..5d5ab67f516d 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -184,14 +184,6 @@ void *ceph_kvmalloc(size_t size, gfp_t flags)
 	return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 
-void ceph_kvfree(const void *ptr)
-{
-	if (is_vmalloc_addr(ptr))
-		vfree(ptr);
-	else
-		kfree(ptr);
-}
-
 
 static int parse_fsid(const char *str, struct ceph_fsid *fsid)
 {
@@ -245,6 +237,8 @@ enum {
 	Opt_noshare,
 	Opt_crc,
 	Opt_nocrc,
+	Opt_cephx_require_signatures,
+	Opt_nocephx_require_signatures,
 };
 
 static match_table_t opt_tokens = {
@@ -263,6 +257,8 @@ static match_table_t opt_tokens = {
 	{Opt_noshare, "noshare"},
 	{Opt_crc, "crc"},
 	{Opt_nocrc, "nocrc"},
+	{Opt_cephx_require_signatures, "cephx_require_signatures"},
+	{Opt_nocephx_require_signatures, "nocephx_require_signatures"},
 	{-1, NULL}
 };
 
@@ -461,6 +457,12 @@ ceph_parse_options(char *options, const char *dev_name,
 		case Opt_nocrc:
 			opt->flags |= CEPH_OPT_NOCRC;
 			break;
+		case Opt_cephx_require_signatures:
+			opt->flags &= ~CEPH_OPT_NOMSGAUTH;
+			break;
+		case Opt_nocephx_require_signatures:
+			opt->flags |= CEPH_OPT_NOMSGAUTH;
+			break;
 
 		default:
 			BUG_ON(token);
@@ -504,6 +506,9 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
 	init_waitqueue_head(&client->auth_wq);
 	client->auth_err = 0;
 
+	if (!ceph_test_opt(client, NOMSGAUTH))
+		required_features |= CEPH_FEATURE_MSG_AUTH;
+
 	client->extra_mon_dispatch = NULL;
 	client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
 		supported_features;
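Note: with the parsing above in place, the behaviour can be toggled from the options string at mount/map time; an illustrative invocation (not part of this patch) would be:

	mount -t ceph -o nocephx_require_signatures mon1:6789:/ /mnt/ceph

When the option is absent, CEPH_FEATURE_MSG_AUTH is added to required_features, so peers that cannot sign messages are rejected during the feature handshake.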
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 8d1653caffdb..33a2f201e460 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1196,8 +1196,18 @@ static void prepare_write_message_footer(struct ceph_connection *con)
 	dout("prepare_write_message_footer %p\n", con);
 	con->out_kvec_is_msg = true;
 	con->out_kvec[v].iov_base = &m->footer;
-	con->out_kvec[v].iov_len = sizeof(m->footer);
-	con->out_kvec_bytes += sizeof(m->footer);
+	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+		if (con->ops->sign_message)
+			con->ops->sign_message(con, m);
+		else
+			m->footer.sig = 0;
+		con->out_kvec[v].iov_len = sizeof(m->footer);
+		con->out_kvec_bytes += sizeof(m->footer);
+	} else {
+		m->old_footer.flags = m->footer.flags;
+		con->out_kvec[v].iov_len = sizeof(m->old_footer);
+		con->out_kvec_bytes += sizeof(m->old_footer);
+	}
 	con->out_kvec_left++;
 	con->out_more = m->more_to_follow;
 	con->out_msg_done = true;
@@ -2249,6 +2259,7 @@ static int read_partial_message(struct ceph_connection *con)
 	int ret;
 	unsigned int front_len, middle_len, data_len;
 	bool do_datacrc = !con->msgr->nocrc;
+	bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
 	u64 seq;
 	u32 crc;
 
@@ -2361,12 +2372,21 @@ static int read_partial_message(struct ceph_connection *con)
 	}
 
 	/* footer */
-	size = sizeof (m->footer);
+	if (need_sign)
+		size = sizeof(m->footer);
+	else
+		size = sizeof(m->old_footer);
+
 	end += size;
 	ret = read_partial(con, end, size, &m->footer);
 	if (ret <= 0)
 		return ret;
 
+	if (!need_sign) {
+		m->footer.flags = m->old_footer.flags;
+		m->footer.sig = 0;
+	}
+
 	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
 	     m, front_len, m->footer.front_crc, middle_len,
 	     m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -2390,6 +2410,12 @@ static int read_partial_message(struct ceph_connection *con)
 		return -EBADMSG;
 	}
 
+	if (need_sign && con->ops->check_message_signature &&
+	    con->ops->check_message_signature(con, m)) {
+		pr_err("read_partial_message %p signature check failed\n", m);
+		return -EBADMSG;
+	}
+
 	return 1; /* done! */
 }
 
@@ -3288,7 +3314,7 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
 static void ceph_msg_free(struct ceph_msg *m)
 {
 	dout("%s %p\n", __func__, m);
-	ceph_kvfree(m->front.iov_base);
+	kvfree(m->front.iov_base);
 	kmem_cache_free(ceph_msg_cache, m);
 }
 
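Note: the footer/old_footer handling depends on the wire-format change in include/linux/ceph/msgr.h from the same series; its assumed shape is:

	struct ceph_msg_footer_old {
		__le32 front_crc, middle_crc, data_crc;
		__u8 flags;
	} __attribute__ ((packed));

	struct ceph_msg_footer {
		__le32 front_crc, middle_crc, data_crc;
		__le64 sig;	/* 64 bits of cephx signature */
		__u8 flags;
	} __attribute__ ((packed));

Reading an old-format footer into &m->footer works because the two layouts presumably overlay each other in struct ceph_msg (footer and old_footer sharing storage): the three CRCs lead in both, and the fix-ups above move the flags byte and zero the signature when the peer lacks CEPH_FEATURE_MSG_AUTH.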
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6f164289bde8..53299c7b0ca4 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -292,6 +292,10 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
 		ceph_osd_data_release(&op->cls.request_data);
 		ceph_osd_data_release(&op->cls.response_data);
 		break;
+	case CEPH_OSD_OP_SETXATTR:
+	case CEPH_OSD_OP_CMPXATTR:
+		ceph_osd_data_release(&op->xattr.osd_data);
+		break;
 	default:
 		break;
 	}
@@ -476,8 +480,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
 	size_t payload_len = 0;
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
-	       opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
-	       opcode != CEPH_OSD_OP_TRUNCATE);
+	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
 
 	op->extent.offset = offset;
 	op->extent.length = length;
@@ -545,6 +548,39 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
 }
 EXPORT_SYMBOL(osd_req_op_cls_init);
 
+int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+			  u16 opcode, const char *name, const void *value,
+			  size_t size, u8 cmp_op, u8 cmp_mode)
+{
+	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+	struct ceph_pagelist *pagelist;
+	size_t payload_len;
+
+	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
+
+	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+	if (!pagelist)
+		return -ENOMEM;
+
+	ceph_pagelist_init(pagelist);
+
+	payload_len = strlen(name);
+	op->xattr.name_len = payload_len;
+	ceph_pagelist_append(pagelist, name, payload_len);
+
+	op->xattr.value_len = size;
+	ceph_pagelist_append(pagelist, value, size);
+	payload_len += size;
+
+	op->xattr.cmp_op = cmp_op;
+	op->xattr.cmp_mode = cmp_mode;
+
+	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
+	op->payload_len = payload_len;
+	return 0;
+}
+EXPORT_SYMBOL(osd_req_op_xattr_init);
+
 void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
 			   unsigned int which, u16 opcode,
 			   u64 cookie, u64 version, int flag)
@@ -626,7 +662,6 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
 	case CEPH_OSD_OP_READ:
 	case CEPH_OSD_OP_WRITE:
 	case CEPH_OSD_OP_ZERO:
-	case CEPH_OSD_OP_DELETE:
 	case CEPH_OSD_OP_TRUNCATE:
 		if (src->op == CEPH_OSD_OP_WRITE)
 			request_data_len = src->extent.length;
@@ -676,6 +711,19 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
 		dst->alloc_hint.expected_write_size =
 		    cpu_to_le64(src->alloc_hint.expected_write_size);
 		break;
+	case CEPH_OSD_OP_SETXATTR:
+	case CEPH_OSD_OP_CMPXATTR:
+		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
+		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
+		dst->xattr.cmp_op = src->xattr.cmp_op;
+		dst->xattr.cmp_mode = src->xattr.cmp_mode;
+		osd_data = &src->xattr.osd_data;
+		ceph_osdc_msg_data_add(req->r_request, osd_data);
+		request_data_len = osd_data->pagelist->length;
+		break;
+	case CEPH_OSD_OP_CREATE:
+	case CEPH_OSD_OP_DELETE:
+		break;
 	default:
 		pr_err("unsupported osd opcode %s\n",
 			ceph_osd_op_name(src->op));
@@ -705,7 +753,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 					       struct ceph_file_layout *layout,
 					       struct ceph_vino vino,
-					       u64 off, u64 *plen, int num_ops,
+					       u64 off, u64 *plen,
+					       unsigned int which, int num_ops,
 					       int opcode, int flags,
 					       struct ceph_snap_context *snapc,
 					       u32 truncate_seq,
@@ -716,13 +765,11 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 	u64 objnum = 0;
 	u64 objoff = 0;
 	u64 objlen = 0;
-	u32 object_size;
-	u64 object_base;
 	int r;
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
-	       opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
-	       opcode != CEPH_OSD_OP_TRUNCATE);
+	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
+	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
 
 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
 				      GFP_NOFS);
@@ -738,29 +785,24 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 		return ERR_PTR(r);
 	}
 
-	object_size = le32_to_cpu(layout->fl_object_size);
-	object_base = off - objoff;
-	if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
-		if (truncate_size <= object_base) {
-			truncate_size = 0;
-		} else {
-			truncate_size -= object_base;
-			if (truncate_size > object_size)
-				truncate_size = object_size;
+	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
+		osd_req_op_init(req, which, opcode);
+	} else {
+		u32 object_size = le32_to_cpu(layout->fl_object_size);
+		u32 object_base = off - objoff;
+		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
+			if (truncate_size <= object_base) {
+				truncate_size = 0;
+			} else {
+				truncate_size -= object_base;
+				if (truncate_size > object_size)
+					truncate_size = object_size;
+			}
 		}
+		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
+				       truncate_size, truncate_seq);
 	}
 
-	osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
-				truncate_size, truncate_seq);
-
-	/*
-	 * A second op in the ops array means the caller wants to
-	 * also issue a include a 'startsync' command so that the
-	 * osd will flush data quickly.
-	 */
-	if (num_ops > 1)
-		osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
-
 	req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
 
 	snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
@@ -2626,7 +2668,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 
 	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
 	     vino.snap, off, *plen);
-	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
+	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
 				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 				    NULL, truncate_seq, truncate_size,
 				    false);
@@ -2669,7 +2711,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
 	int page_align = off & ~PAGE_MASK;
 
 	BUG_ON(vino.snap != CEPH_NOSNAP);	/* snapshots aren't writeable */
-	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
+	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 				    CEPH_OSD_OP_WRITE,
 				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
 				    snapc, truncate_seq, truncate_size,
@@ -2920,6 +2962,20 @@ static int invalidate_authorizer(struct ceph_connection *con)
 	return ceph_monc_validate_auth(&osdc->client->monc);
 }
 
+static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
+{
+	struct ceph_osd *o = con->private;
+	struct ceph_auth_handshake *auth = &o->o_auth;
+	return ceph_auth_sign_message(auth, msg);
+}
+
+static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
+{
+	struct ceph_osd *o = con->private;
+	struct ceph_auth_handshake *auth = &o->o_auth;
+	return ceph_auth_check_message_signature(auth, msg);
+}
+
 static const struct ceph_connection_operations osd_con_ops = {
 	.get = get_osd_con,
 	.put = put_osd_con,
@@ -2928,5 +2984,7 @@ static const struct ceph_connection_operations osd_con_ops = {
 	.verify_authorizer_reply = verify_authorizer_reply,
 	.invalidate_authorizer = invalidate_authorizer,
 	.alloc_msg = alloc_msg,
+	.sign_message = sign_message,
+	.check_message_signature = check_message_signature,
 	.fault = osd_reset,
 };
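Note: a sketch of how a filesystem-side caller might use the new xattr op; the surrounding setup is hypothetical and not part of this patch:

	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
	if (!req)
		return -ENOMEM;
	/* cmp_op/cmp_mode are only meaningful for CMPXATTR; pass 0 otherwise */
	ret = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_SETXATTR,
				    "user.example", value, value_len, 0, 0);
	if (ret) {
		ceph_osdc_put_request(req);
		return ret;
	}
	/* ... fill r_base_oid/r_base_oloc and submit the request ... */

Unlike the other osd_req_op_*_init() helpers, this one can fail (the pagelist allocation), so its return value must be checked.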
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7f155175bba8..ce780c722e48 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -337,17 +337,17 @@ EXPORT_SYMBOL_GPL(__put_net);
 
 struct net *get_net_ns_by_fd(int fd)
 {
-	struct proc_ns *ei;
 	struct file *file;
+	struct ns_common *ns;
 	struct net *net;
 
 	file = proc_ns_fget(fd);
 	if (IS_ERR(file))
 		return ERR_CAST(file);
 
-	ei = get_proc_ns(file_inode(file));
-	if (ei->ns_ops == &netns_operations)
-		net = get_net(ei->ns);
+	ns = get_proc_ns(file_inode(file));
+	if (ns->ops == &netns_operations)
+		net = get_net(container_of(ns, struct net, ns));
 	else
 		net = ERR_PTR(-EINVAL);
 
@@ -386,12 +386,15 @@ EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
 
 static __net_init int net_ns_net_init(struct net *net)
 {
-	return proc_alloc_inum(&net->proc_inum);
+#ifdef CONFIG_NET_NS
+	net->ns.ops = &netns_operations;
+#endif
+	return ns_alloc_inum(&net->ns);
 }
 
 static __net_exit void net_ns_net_exit(struct net *net)
 {
-	proc_free_inum(net->proc_inum);
+	ns_free_inum(&net->ns);
 }
 
 static struct pernet_operations __net_initdata net_ns_ops = {
@@ -629,7 +632,7 @@ void unregister_pernet_device(struct pernet_operations *ops)
 EXPORT_SYMBOL_GPL(unregister_pernet_device);
 
 #ifdef CONFIG_NET_NS
-static void *netns_get(struct task_struct *task)
+static struct ns_common *netns_get(struct task_struct *task)
 {
 	struct net *net = NULL;
 	struct nsproxy *nsproxy;
@@ -640,17 +643,22 @@ static void *netns_get(struct task_struct *task)
 		net = get_net(nsproxy->net_ns);
 	task_unlock(task);
 
-	return net;
+	return net ? &net->ns : NULL;
 }
 
-static void netns_put(void *ns)
+static inline struct net *to_net_ns(struct ns_common *ns)
 {
-	put_net(ns);
+	return container_of(ns, struct net, ns);
 }
 
-static int netns_install(struct nsproxy *nsproxy, void *ns)
+static void netns_put(struct ns_common *ns)
 {
-	struct net *net = ns;
+	put_net(to_net_ns(ns));
+}
+
+static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+{
+	struct net *net = to_net_ns(ns);
 
 	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
 	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -661,18 +669,11 @@ static int netns_install(struct nsproxy *nsproxy, void *ns)
 	return 0;
 }
 
-static unsigned int netns_inum(void *ns)
-{
-	struct net *net = ns;
-	return net->proc_inum;
-}
-
 const struct proc_ns_operations netns_operations = {
 	.name = "net",
 	.type = CLONE_NEWNET,
 	.get = netns_get,
 	.put = netns_put,
 	.install = netns_install,
-	.inum = netns_inum,
 };
 #endif
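Note: this conversion assumes the common namespace handle introduced by the same series (include/linux/ns_common.h); abbreviated, the idea is:

	struct ns_common {
		const struct proc_ns_operations *ops;
		unsigned int inum;	/* replaces net->proc_inum */
	};

	struct net {
		...
		struct ns_common ns;	/* embedded, not a pointer */
		...
	};

Because ns is embedded, container_of(ns, struct net, ns) recovers the enclosing struct net from the generic handle — exactly what to_net_ns() above does — and the separate .inum callback becomes unnecessary.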
diff --git a/net/nonet.c b/net/nonet.c
deleted file mode 100644
index b1a73fda9c12..000000000000
--- a/net/nonet.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * net/nonet.c
- *
- * Dummy functions to allow us to configure network support entirely
- * out of the kernel.
- *
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) Matthew Wilcox 2003
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
-	return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
-	.owner = THIS_MODULE,
-	.open = sock_no_open,
-	.llseek = noop_llseek,
-};
diff --git a/net/socket.c b/net/socket.c
index 8809afccf7fa..70bbde65e4ca 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -113,7 +113,6 @@ unsigned int sysctl_net_busy_read __read_mostly;
 unsigned int sysctl_net_busy_poll __read_mostly;
 #endif
 
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			 unsigned long nr_segs, loff_t pos);
 static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -151,7 +150,6 @@ static const struct file_operations socket_file_ops = {
 	.compat_ioctl = compat_sock_ioctl,
 #endif
 	.mmap =		sock_mmap,
-	.open =		sock_no_open,	/* special open code to disallow open via /proc */
 	.release =	sock_close,
 	.fasync =	sock_fasync,
 	.sendpage =	sock_sendpage,
@@ -559,23 +557,6 @@ static struct socket *sock_alloc(void)
 	return sock;
 }
 
-/*
- *	In theory you can't get an open on this inode, but /proc provides
- *	a back door. Remember to keep it shut otherwise you'll let the
- *	creepy crawlies in.
- */
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
-	return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
-	.owner = THIS_MODULE,
-	.open = sock_no_open,
-	.llseek = noop_llseek,
-};
-
 /**
  *	sock_release	-	close a socket
  *	@sock: socket to close
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index de856ddf5fed..224a82f24d3c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -886,7 +886,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
 	u32 priv_len, maj_stat;
 	int pad, saved_len, remaining_len, offset;
 
-	rqstp->rq_splice_ok = false;
+	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
 	priv_len = svc_getnl(&buf->head[0]);
 	if (rqstp->rq_deferred) {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 066362141133..33fb105d4352 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
+#include <linux/string_helpers.h>
 #include <asm/uaccess.h>
 #include <linux/poll.h>
 #include <linux/seq_file.h>
@@ -1067,30 +1068,15 @@ void qword_add(char **bpp, int *lp, char *str)
 {
 	char *bp = *bpp;
 	int len = *lp;
-	char c;
+	int ret;
 
 	if (len < 0) return;
 
-	while ((c=*str++) && len)
-		switch(c) {
-		case ' ':
-		case '\t':
-		case '\n':
-		case '\\':
-			if (len >= 4) {
-				*bp++ = '\\';
-				*bp++ = '0' + ((c & 0300)>>6);
-				*bp++ = '0' + ((c & 0070)>>3);
-				*bp++ = '0' + ((c & 0007)>>0);
-			}
-			len -= 4;
-			break;
-		default:
-			*bp++ = c;
-			len--;
-		}
-	if (c || len <1) len = -1;
+	ret = string_escape_str(str, &bp, len, ESCAPE_OCTAL, "\\ \n\t");
+	if (ret < 0 || ret == len)
+		len = -1;
 	else {
+		len -= ret;
 		*bp++ = ' ';
 		len--;
 	}
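Note: the replacement keeps qword_add()'s output format. As the removed switch shows, each of '\', ' ', '\n' and '\t' is emitted as a backslash plus three octal digits — for example "a b" becomes "a\040b" — which is precisely what string_escape_str() produces with ESCAPE_OCTAL and that escape list. The ret < 0 || ret == len test preserves the old truncation behaviour: if the escaped output fills or would overflow the buffer, len is forced to -1 so the caller treats the write as failed.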
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2783fd80c229..91eaef1844c8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -191,7 +191,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
 		return err;
 
 	for_each_online_cpu(cpu) {
-		BUG_ON(pidx > maxpools);
+		BUG_ON(pidx >= maxpools);
 		m->to_pool[cpu] = pidx;
 		m->pool_to[pidx] = cpu;
 		pidx++;
@@ -476,15 +476,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 				i, serv->sv_name);
 
 		pool->sp_id = i;
-		INIT_LIST_HEAD(&pool->sp_threads);
 		INIT_LIST_HEAD(&pool->sp_sockets);
 		INIT_LIST_HEAD(&pool->sp_all_threads);
 		spin_lock_init(&pool->sp_lock);
 	}
 
-	if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
-		serv->sv_shutdown = svc_rpcb_cleanup;
-
 	return serv;
 }
 
@@ -505,13 +501,15 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 	unsigned int npools = svc_pool_map_get();
 
 	serv = __svc_create(prog, bufsize, npools, shutdown);
+	if (!serv)
+		goto out_err;
 
-	if (serv != NULL) {
-		serv->sv_function = func;
-		serv->sv_module = mod;
-	}
-
+	serv->sv_function = func;
+	serv->sv_module = mod;
 	return serv;
+out_err:
+	svc_pool_map_put();
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(svc_create_pooled);
 
@@ -615,12 +613,14 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 		goto out_enomem;
 
 	serv->sv_nrthreads++;
+	__set_bit(RQ_BUSY, &rqstp->rq_flags);
+	spin_lock_init(&rqstp->rq_lock);
+	rqstp->rq_server = serv;
+	rqstp->rq_pool = pool;
 	spin_lock_bh(&pool->sp_lock);
 	pool->sp_nrthreads++;
-	list_add(&rqstp->rq_all, &pool->sp_all_threads);
+	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
 	spin_unlock_bh(&pool->sp_lock);
-	rqstp->rq_server = serv;
-	rqstp->rq_pool = pool;
 
 	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
 	if (!rqstp->rq_argp)
@@ -685,7 +685,8 @@ found_pool:
 	 * so we don't try to kill it again.
 	 */
 	rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
-	list_del_init(&rqstp->rq_all);
+	set_bit(RQ_VICTIM, &rqstp->rq_flags);
+	list_del_rcu(&rqstp->rq_all);
 	task = rqstp->rq_task;
 }
 spin_unlock_bh(&pool->sp_lock);
@@ -783,10 +784,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
 
 	spin_lock_bh(&pool->sp_lock);
 	pool->sp_nrthreads--;
-	list_del(&rqstp->rq_all);
+	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
+		list_del_rcu(&rqstp->rq_all);
 	spin_unlock_bh(&pool->sp_lock);
 
-	kfree(rqstp);
+	kfree_rcu(rqstp, rq_rcu_head);
 
 	/* Release the server */
 	if (serv)
@@ -1086,10 +1088,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 		goto err_short_len;
 
 	/* Will be turned off only in gss privacy case: */
-	rqstp->rq_splice_ok = true;
+	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 	/* Will be turned off only when NFSv4 Sessions are used */
-	rqstp->rq_usedeferral = true;
-	rqstp->rq_dropme = false;
+	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+	clear_bit(RQ_DROPME, &rqstp->rq_flags);
 
 	/* Setup reply header */
 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1189,7 +1191,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 	*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 
 	/* Encode reply */
-	if (rqstp->rq_dropme) {
+	if (test_bit(RQ_DROPME, &rqstp->rq_flags)) {
 		if (procp->pc_release)
 			procp->pc_release(rqstp, NULL, rqstp->rq_resp);
 		goto dropit;
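Note: the set_bit/clear_bit/test_bit conversions in this file and in svcauth_gss.c rely on the companion change to struct svc_rqst in include/linux/sunrpc/svc.h; its assumed shape is roughly:

	enum {
		RQ_SECURE,	/* was bool rq_secure */
		RQ_LOCAL,	/* was bool rq_local */
		RQ_USEDEFERRAL,	/* was bool rq_usedeferral */
		RQ_DROPME,	/* was bool rq_dropme */
		RQ_SPLICE_OK,	/* was bool rq_splice_ok */
		RQ_VICTIM,	/* new: thread chosen for shutdown */
		RQ_BUSY,	/* new: thread is not parked waiting for work */
	};
	/* plus: unsigned long rq_flags; spinlock_t rq_lock; and a
	 * struct rcu_head rq_rcu_head for the kfree_rcu() above. */

Packing these into one flags word is what makes them safe to test and update atomically from other threads, which the RCU thread search in svc_xprt.c depends on.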
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bbb3b044b877..c69358b3cf7f 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -220,9 +220,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
  */
 static void svc_xprt_received(struct svc_xprt *xprt)
 {
-	WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
-	if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
+	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
+		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
 		return;
+	}
+
 	/* As soon as we clear busy, the xprt could be closed and
 	 * 'put', so we need a reference to call svc_xprt_do_enqueue with:
 	 */
@@ -310,25 +312,6 @@ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
 }
 EXPORT_SYMBOL_GPL(svc_print_addr);
 
-/*
- * Queue up an idle server thread.  Must have pool->sp_lock held.
- * Note: this is really a stack rather than a queue, so that we only
- * use as many different threads as we need, and the rest don't pollute
- * the cache.
- */
-static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
-	list_add(&rqstp->rq_list, &pool->sp_threads);
-}
-
-/*
- * Dequeue an nfsd thread.  Must have pool->sp_lock held.
- */
-static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
-	list_del(&rqstp->rq_list);
-}
-
 static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
 {
 	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
@@ -341,11 +324,12 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
 static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 {
 	struct svc_pool *pool;
-	struct svc_rqst	*rqstp;
+	struct svc_rqst	*rqstp = NULL;
 	int cpu;
+	bool queued = false;
 
 	if (!svc_xprt_has_something_to_do(xprt))
-		return;
+		goto out;
 
 	/* Mark transport as busy. It will remain in this state until
 	 * the provider calls svc_xprt_received. We update XPT_BUSY
@@ -355,43 +339,69 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
 		/* Don't enqueue transport while already enqueued */
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
-		return;
+		goto out;
 	}
 
 	cpu = get_cpu();
 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
-	spin_lock_bh(&pool->sp_lock);
 
-	pool->sp_stats.packets++;
+	atomic_long_inc(&pool->sp_stats.packets);
 
-	if (!list_empty(&pool->sp_threads)) {
-		rqstp = list_entry(pool->sp_threads.next,
-				   struct svc_rqst,
-				   rq_list);
-		dprintk("svc: transport %p served by daemon %p\n",
-			xprt, rqstp);
-		svc_thread_dequeue(pool, rqstp);
-		if (rqstp->rq_xprt)
-			printk(KERN_ERR
-				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
-				rqstp, rqstp->rq_xprt);
-		/* Note the order of the following 3 lines:
-		 * We want to assign xprt to rqstp->rq_xprt only _after_
-		 * we've woken up the process, so that we don't race with
-		 * the lockless check in svc_get_next_xprt().
+redo_search:
+	/* find a thread for this xprt */
+	rcu_read_lock();
+	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+		/* Do a lockless check first */
+		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+			continue;
+
+		/*
+		 * Once the xprt has been queued, it can only be dequeued by
+		 * the task that intends to service it. All we can do at that
+		 * point is to try to wake this thread back up so that it can
+		 * do so.
 		 */
-		svc_xprt_get(xprt);
+		if (!queued) {
+			spin_lock_bh(&rqstp->rq_lock);
+			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
+				/* already busy, move on... */
+				spin_unlock_bh(&rqstp->rq_lock);
+				continue;
+			}
+
+			/* this one will do */
+			rqstp->rq_xprt = xprt;
+			svc_xprt_get(xprt);
+			spin_unlock_bh(&rqstp->rq_lock);
+		}
+		rcu_read_unlock();
+
+		atomic_long_inc(&pool->sp_stats.threads_woken);
 		wake_up_process(rqstp->rq_task);
-		rqstp->rq_xprt = xprt;
-		pool->sp_stats.threads_woken++;
-	} else {
+		put_cpu();
+		goto out;
+	}
+	rcu_read_unlock();
+
+	/*
+	 * We didn't find an idle thread to use, so we need to queue the xprt.
+	 * Do so and then search again. If we find one, we can't hook this one
+	 * up to it directly but we can wake the thread up in the hopes that it
+	 * will pick it up once it searches for a xprt to service.
+	 */
+	if (!queued) {
+		queued = true;
 		dprintk("svc: transport %p put into queue\n", xprt);
+		spin_lock_bh(&pool->sp_lock);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 		pool->sp_stats.sockets_queued++;
+		spin_unlock_bh(&pool->sp_lock);
+		goto redo_search;
 	}
-
-	spin_unlock_bh(&pool->sp_lock);
+	rqstp = NULL;
 	put_cpu();
+out:
+	trace_svc_xprt_do_enqueue(xprt, rqstp);
 }
 
 /*
@@ -408,22 +418,28 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 
 /*
- * Dequeue the first transport.  Must be called with the pool->sp_lock held.
+ * Dequeue the first transport, if there is one.
  */
 static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 {
-	struct svc_xprt	*xprt;
+	struct svc_xprt	*xprt = NULL;
 
 	if (list_empty(&pool->sp_sockets))
-		return NULL;
-
-	xprt = list_entry(pool->sp_sockets.next,
-			  struct svc_xprt, xpt_ready);
-	list_del_init(&xprt->xpt_ready);
+		goto out;
 
-	dprintk("svc: transport %p dequeued, inuse=%d\n",
-		xprt, atomic_read(&xprt->xpt_ref.refcount));
+	spin_lock_bh(&pool->sp_lock);
+	if (likely(!list_empty(&pool->sp_sockets))) {
+		xprt = list_first_entry(&pool->sp_sockets,
+					struct svc_xprt, xpt_ready);
+		list_del_init(&xprt->xpt_ready);
+		svc_xprt_get(xprt);
 
+		dprintk("svc: transport %p dequeued, inuse=%d\n",
+			xprt, atomic_read(&xprt->xpt_ref.refcount));
+	}
+	spin_unlock_bh(&pool->sp_lock);
+out:
+	trace_svc_xprt_dequeue(xprt);
 	return xprt;
 }
 
@@ -484,34 +500,36 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
 }
 
 /*
- * External function to wake up a server waiting for data
- * This really only makes sense for services like lockd
- * which have exactly one thread anyway.
+ * Some svc_serv's will have occasional work to do, even when a xprt is not
+ * waiting to be serviced. This function is there to "kick" a task in one of
+ * those services so that it can wake up and do that work. Note that we only
+ * bother with pool 0 as we don't need to wake up more than one thread for
+ * this purpose.
  */
 void svc_wake_up(struct svc_serv *serv)
 {
 	struct svc_rqst	*rqstp;
-	unsigned int i;
 	struct svc_pool *pool;
 
-	for (i = 0; i < serv->sv_nrpools; i++) {
-		pool = &serv->sv_pools[i];
+	pool = &serv->sv_pools[0];
 
-		spin_lock_bh(&pool->sp_lock);
-		if (!list_empty(&pool->sp_threads)) {
-			rqstp = list_entry(pool->sp_threads.next,
-					   struct svc_rqst,
-					   rq_list);
-			dprintk("svc: daemon %p woken up.\n", rqstp);
-			/*
-			svc_thread_dequeue(pool, rqstp);
-			rqstp->rq_xprt = NULL;
-			 */
-			wake_up_process(rqstp->rq_task);
-		} else
-			pool->sp_task_pending = 1;
-		spin_unlock_bh(&pool->sp_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+		/* skip any that aren't queued */
+		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+			continue;
+		rcu_read_unlock();
+		dprintk("svc: daemon %p woken up.\n", rqstp);
+		wake_up_process(rqstp->rq_task);
+		trace_svc_wake_up(rqstp->rq_task->pid);
+		return;
 	}
+	rcu_read_unlock();
+
+	/* No free entries available */
+	set_bit(SP_TASK_PENDING, &pool->sp_flags);
+	smp_wmb();
+	trace_svc_wake_up(0);
 }
 EXPORT_SYMBOL_GPL(svc_wake_up);
 
@@ -622,75 +640,86 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 	return 0;
 }
 
+static bool
+rqst_should_sleep(struct svc_rqst *rqstp)
+{
+	struct svc_pool *pool = rqstp->rq_pool;
+
+	/* did someone call svc_wake_up? */
+	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
+		return false;
+
+	/* was a socket queued? */
+	if (!list_empty(&pool->sp_sockets))
+		return false;
+
+	/* are we shutting down? */
+	if (signalled() || kthread_should_stop())
+		return false;
+
+	/* are we freezing? */
+	if (freezing(current))
+		return false;
+
+	return true;
+}
+
 static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
 	struct svc_xprt *xprt;
 	struct svc_pool *pool = rqstp->rq_pool;
 	long time_left = 0;
 
+	/* rq_xprt should be clear on entry */
+	WARN_ON_ONCE(rqstp->rq_xprt);
+
 	/* Normally we will wait up to 5 seconds for any required
 	 * cache information to be provided.
 	 */
 	rqstp->rq_chandle.thread_wait = 5*HZ;
 
-	spin_lock_bh(&pool->sp_lock);
 	xprt = svc_xprt_dequeue(pool);
 	if (xprt) {
 		rqstp->rq_xprt = xprt;
-		svc_xprt_get(xprt);
 
 		/* As there is a shortage of threads and this request
 		 * had to be queued, don't allow the thread to wait so
 		 * long for cache updates.
 		 */
 		rqstp->rq_chandle.thread_wait = 1*HZ;
-		pool->sp_task_pending = 0;
-	} else {
-		if (pool->sp_task_pending) {
-			pool->sp_task_pending = 0;
-			xprt = ERR_PTR(-EAGAIN);
-			goto out;
-		}
-		/*
-		 * We have to be able to interrupt this wait
-		 * to bring down the daemons ...
-		 */
-		set_current_state(TASK_INTERRUPTIBLE);
+		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
+		return xprt;
+	}
 
-		/* No data pending. Go to sleep */
-		svc_thread_enqueue(pool, rqstp);
-		spin_unlock_bh(&pool->sp_lock);
+	/*
+	 * We have to be able to interrupt this wait
+	 * to bring down the daemons ...
+	 */
+	set_current_state(TASK_INTERRUPTIBLE);
+	clear_bit(RQ_BUSY, &rqstp->rq_flags);
+	smp_mb();
 
-	if (!(signalled() || kthread_should_stop())) {
+	if (likely(rqst_should_sleep(rqstp)))
 		time_left = schedule_timeout(timeout);
-		__set_current_state(TASK_RUNNING);
+	else
+		__set_current_state(TASK_RUNNING);
 
 	try_to_freeze();
 
-		xprt = rqstp->rq_xprt;
-		if (xprt != NULL)
-			return xprt;
-	} else
-		__set_current_state(TASK_RUNNING);
+	spin_lock_bh(&rqstp->rq_lock);
+	set_bit(RQ_BUSY, &rqstp->rq_flags);
+	spin_unlock_bh(&rqstp->rq_lock);
 
-	spin_lock_bh(&pool->sp_lock);
-	if (!time_left)
-		pool->sp_stats.threads_timedout++;
+	xprt = rqstp->rq_xprt;
+	if (xprt != NULL)
+		return xprt;
 
-	xprt = rqstp->rq_xprt;
-	if (!xprt) {
-		svc_thread_dequeue(pool, rqstp);
-		spin_unlock_bh(&pool->sp_lock);
-		dprintk("svc: server %p, no data yet\n", rqstp);
-		if (signalled() || kthread_should_stop())
-			return ERR_PTR(-EINTR);
-		else
-			return ERR_PTR(-EAGAIN);
-	}
-	}
-out:
-	spin_unlock_bh(&pool->sp_lock);
-	return xprt;
+	if (!time_left)
+		atomic_long_inc(&pool->sp_stats.threads_timedout);
+
+	if (signalled() || kthread_should_stop())
+		return ERR_PTR(-EINTR);
+	return ERR_PTR(-EAGAIN);
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -719,7 +748,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
 		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_xprt(xprt);
 		/* Leave XPT_BUSY set on the dead xprt: */
-		return 0;
+		goto out;
 	}
 	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
@@ -750,6 +779,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
 	}
 	/* clear XPT_BUSY: */
 	svc_xprt_received(xprt);
+out:
+	trace_svc_handle_xprt(xprt, len);
 	return len;
 }
 
@@ -797,7 +828,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
 	clear_bit(XPT_OLD, &xprt->xpt_flags);
 
-	rqstp->rq_secure = xprt->xpt_ops->xpo_secure_port(rqstp);
+	if (xprt->xpt_ops->xpo_secure_port(rqstp))
+		set_bit(RQ_SECURE, &rqstp->rq_flags);
+	else
+		clear_bit(RQ_SECURE, &rqstp->rq_flags);
 	rqstp->rq_chandle.defer = svc_defer;
 	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);
 
@@ -895,7 +929,6 @@ static void svc_age_temp_xprts(unsigned long closure)
 			continue;
 		list_del_init(le);
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		set_bit(XPT_DETACHED, &xprt->xpt_flags);
 		dprintk("queuing xprt %p for closing\n", xprt);
 
 		/* a thread will dequeue and close it soon */
@@ -935,8 +968,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
 	xprt->xpt_ops->xpo_detach(xprt);
 
 	spin_lock_bh(&serv->sv_lock);
-	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
-		list_del_init(&xprt->xpt_list);
+	list_del_init(&xprt->xpt_list);
 	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
 		serv->sv_tmpcnt--;
@@ -1080,7 +1112,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
 	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
 	struct svc_deferred_req *dr;
 
-	if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
+	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
 		return NULL; /* if more than a page, give up FIXME */
 	if (rqstp->rq_deferred) {
 		dr = rqstp->rq_deferred;
@@ -1109,7 +1141,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
 	}
 	svc_xprt_get(rqstp->rq_xprt);
 	dr->xprt = rqstp->rq_xprt;
-	rqstp->rq_dropme = true;
+	set_bit(RQ_DROPME, &rqstp->rq_flags);
 
 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -1311,10 +1343,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
 
 	seq_printf(m, "%u %lu %lu %lu %lu\n",
 		pool->sp_id,
-		pool->sp_stats.packets,
+		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
 		pool->sp_stats.sockets_queued,
-		pool->sp_stats.threads_woken,
-		pool->sp_stats.threads_timedout);
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
 
 	return 0;
 }
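Note: two companion changes to include/linux/sunrpc/svc.h (assumed shape) underpin this file's rework — stats counters bumped without sp_lock become atomics, and per-pool state moves into a flags word:

	struct svc_pool_stats {
		atomic_long_t	packets;
		unsigned long	sockets_queued;		/* still under sp_lock */
		atomic_long_t	threads_woken;
		atomic_long_t	threads_timedout;
	};

	enum {
		SP_TASK_PENDING,	/* svc_wake_up() found no idle thread */
	};

The RQ_BUSY handshake is the core of the lockless scheme: a thread going idle clears RQ_BUSY before checking rqst_should_sleep(), while svc_xprt_do_enqueue() claims a thread with test_and_set_bit(RQ_BUSY) under rq_lock before writing rq_xprt and calling wake_up_process(), so a wakeup issued between the thread's final check and its schedule_timeout() cannot be lost.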
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f9c052d508f0..cc331b6cf573 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1145,7 +1145,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 
 	rqstp->rq_xprt_ctxt   = NULL;
 	rqstp->rq_prot	      = IPPROTO_TCP;
-	rqstp->rq_local	      = !!test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags);
+	if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
+		set_bit(RQ_LOCAL, &rqstp->rq_flags);
+	else
+		clear_bit(RQ_LOCAL, &rqstp->rq_flags);
 
 	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
 	calldir = p[1];
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 290af97bf6f9..1cb61242e55e 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -617,9 +617,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
 	fraglen = min_t(int, buf->len - len, tail->iov_len);
 	tail->iov_len -= fraglen;
 	buf->len -= fraglen;
-	if (tail->iov_len && buf->len == len) {
+	if (tail->iov_len) {
 		xdr->p = tail->iov_base + tail->iov_len;
-		/* xdr->end, xdr->iov should be set already */
+		WARN_ON_ONCE(!xdr->end);
+		WARN_ON_ONCE(!xdr->iov);
 		return;
 	}
 	WARN_ON_ONCE(fraglen);
@@ -631,11 +632,11 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
 	old = new + fraglen;
 	xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
 
-	if (buf->page_len && buf->len == len) {
+	if (buf->page_len) {
 		xdr->p = page_address(*xdr->page_ptr);
 		xdr->end = (void *)xdr->p + PAGE_SIZE;
 		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
-		/* xdr->iov should already be NULL */
+		WARN_ON_ONCE(xdr->iov);
 		return;
 	}
 	if (fraglen) {
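Note on why the "buf->len == len" half of both conditions could be dropped (my reading of the surrounding code, not stated in the patch): fraglen is min_t(int, buf->len - len, tail->iov_len), so if any tail bytes survive "tail->iov_len -= fraglen", the min() must have chosen buf->len - len, and "buf->len -= fraglen" then leaves buf->len exactly equal to len. The same argument holds for the page-data branch, so the new WARN_ON_ONCE() calls simply promote the old comments' assumptions into runtime checks.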