Diffstat (limited to 'net')
-rw-r--r--	net/core/skbuff.c                  |  3
-rw-r--r--	net/sunrpc/auth_gss/svcauth_gss.c  | 67
-rw-r--r--	net/sunrpc/svc.c                   | 79
-rw-r--r--	net/sunrpc/svcauth_unix.c          | 47
-rw-r--r--	net/sunrpc/svcsock.c               | 51
5 files changed, 161 insertions(+), 86 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c448c7f6fde2..3c23760c5827 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -156,7 +156,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
+			gfp_mask);
 	if (!data)
 		goto nodata;
 
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 638c0b576203..447d9aef4605 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -903,9 +903,9 @@ out_seq:
 struct gss_svc_data {
 	/* decoded gss client cred: */
 	struct rpc_gss_wire_cred	clcred;
-	/* pointer to the beginning of the procedure-specific results,
-	 * which may be encrypted/checksummed in svcauth_gss_release: */
-	__be32				*body_start;
+	/* save a pointer to the beginning of the encoded verifier,
+	 * for use in encryption/checksumming in svcauth_gss_release: */
+	__be32				*verf_start;
 	struct rsc			*rsci;
 };
 
@@ -968,7 +968,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 	if (!svcdata)
 		goto auth_err;
 	rqstp->rq_auth_data = svcdata;
-	svcdata->body_start = NULL;
+	svcdata->verf_start = NULL;
 	svcdata->rsci = NULL;
 	gc = &svcdata->clcred;
 
@@ -1097,6 +1097,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 		goto complete;
 	case RPC_GSS_PROC_DATA:
 		*authp = rpcsec_gsserr_ctxproblem;
+		svcdata->verf_start = resv->iov_base + resv->iov_len;
 		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
 			goto auth_err;
 		rqstp->rq_cred = rsci->cred;
@@ -1110,7 +1111,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 					gc->gc_seq, rsci->mechctx))
 			goto auth_err;
 		/* placeholders for length and seq. number: */
-		svcdata->body_start = resv->iov_base + resv->iov_len;
 		svc_putnl(resv, 0);
 		svc_putnl(resv, 0);
 		break;
@@ -1119,7 +1119,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 					gc->gc_seq, rsci->mechctx))
 			goto auth_err;
 		/* placeholders for length and seq. number: */
-		svcdata->body_start = resv->iov_base + resv->iov_len;
 		svc_putnl(resv, 0);
 		svc_putnl(resv, 0);
 		break;
@@ -1147,6 +1146,32 @@ out:
 	return ret;
 }
 
+u32 *
+svcauth_gss_prepare_to_wrap(struct xdr_buf *resbuf, struct gss_svc_data *gsd)
+{
+	u32 *p, verf_len;
+
+	p = gsd->verf_start;
+	gsd->verf_start = NULL;
+
+	/* If the reply stat is nonzero, don't wrap: */
+	if (*(p-1) != rpc_success)
+		return NULL;
+	/* Skip the verifier: */
+	p += 1;
+	verf_len = ntohl(*p++);
+	p += XDR_QUADLEN(verf_len);
+	/* move accept_stat to right place: */
+	memcpy(p, p + 2, 4);
+	/* Also don't wrap if the accept stat is nonzero: */
+	if (*p != rpc_success) {
+		resbuf->head[0].iov_len -= 2 * 4;
+		return NULL;
+	}
+	p++;
+	return p;
+}
+
 static inline int
 svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 {
@@ -1160,17 +1185,9 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 	int integ_offset, integ_len;
 	int stat = -EINVAL;
 
-	p = gsd->body_start;
-	gsd->body_start = NULL;
-	/* move accept_stat to right place: */
-	memcpy(p, p + 2, 4);
-	/* Don't wrap in failure case: */
-	/* Counting on not getting here if call was not even accepted! */
-	if (*p != rpc_success) {
-		resbuf->head[0].iov_len -= 2 * 4;
+	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
+	if (p == NULL)
 		goto out;
-	}
-	p++;
 	integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
 	integ_len = resbuf->len - integ_offset;
 	BUG_ON(integ_len % 4);
@@ -1191,7 +1208,6 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
 						+ resbuf->head[0].iov_len;
 		resbuf->tail[0].iov_len = 0;
-		rqstp->rq_restailpage = 0;
 		resv = &resbuf->tail[0];
 	} else {
 		resv = &resbuf->tail[0];
@@ -1223,24 +1239,16 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
 	int offset;
 	int pad;
 
-	p = gsd->body_start;
-	gsd->body_start = NULL;
-	/* move accept_stat to right place: */
-	memcpy(p, p + 2, 4);
-	/* Don't wrap in failure case: */
-	/* Counting on not getting here if call was not even accepted! */
-	if (*p != rpc_success) {
-		resbuf->head[0].iov_len -= 2 * 4;
+	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
+	if (p == NULL)
 		return 0;
-	}
-	p++;
 	len = p++;
 	offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
 	*p++ = htonl(gc->gc_seq);
 	inpages = resbuf->pages;
 	/* XXX: Would be better to write some xdr helper functions for
 	 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
-	if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) {
+	if (resbuf->tail[0].iov_base) {
 		BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
 							+ PAGE_SIZE);
 		BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
@@ -1258,7 +1266,6 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
 		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
 			+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
 		resbuf->tail[0].iov_len = 0;
-		rqstp->rq_restailpage = 0;
 	}
 	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
 		return -ENOMEM;
@@ -1282,7 +1289,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
 		goto out;
 	/* Release can be called twice, but we only wrap once. */
-	if (gsd->body_start == NULL)
+	if (gsd->verf_start == NULL)
 		goto out;
 	/* normally not set till svc_send, but we need it here: */
 	/* XXX: what for?  Do we mess it up the moment we call svc_putu32
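
As an aside on the helper introduced above: a minimal user-space sketch (not part of this patch) of the XDR words that svcauth_gss_prepare_to_wrap() steps over, assuming the standard ONC RPC accepted-reply layout of reply stat, then verifier (flavor, length, opaque body), then accept stat. The skip_xdr_verifier() name and the user-space headers are assumptions made only for this illustration.

#include <stdint.h>
#include <arpa/inet.h>

/* XDR_QUADLEN() in the kernel rounds a byte count up to 4-byte XDR words. */
#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

/* Given a pointer at the verifier flavor word (what gsd->verf_start records),
 * step past the whole verifier.  In a plain accepted reply the accept status
 * follows directly; in the GSS data path above it sits two words later, past
 * the length/seq placeholders, which is why the helper then does
 * memcpy(p, p + 2, 4) before testing it. */
static uint32_t *skip_xdr_verifier(uint32_t *p)
{
	uint32_t verf_len;

	p += 1;				/* verifier flavor */
	verf_len = ntohl(*p++);		/* opaque body length in bytes */
	p += XDR_QUADLEN(verf_len);	/* padded opaque body */
	return p;
}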
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a99e67b164c1..c2c8bb20d07f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -417,18 +417,15 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 	if (size > RPCSVC_MAXPAYLOAD)
 		size = RPCSVC_MAXPAYLOAD;
 	pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE;
-	rqstp->rq_argused = 0;
-	rqstp->rq_resused = 0;
 	arghi = 0;
 	BUG_ON(pages > RPCSVC_MAXPAGES);
 	while (pages) {
 		struct page *p = alloc_page(GFP_KERNEL);
 		if (!p)
 			break;
-		rqstp->rq_argpages[arghi++] = p;
+		rqstp->rq_pages[arghi++] = p;
 		pages--;
 	}
-	rqstp->rq_arghi = arghi;
 	return ! pages;
 }
 
@@ -438,14 +435,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 static void
 svc_release_buffer(struct svc_rqst *rqstp)
 {
-	while (rqstp->rq_arghi)
-		put_page(rqstp->rq_argpages[--rqstp->rq_arghi]);
-	while (rqstp->rq_resused) {
-		if (rqstp->rq_respages[--rqstp->rq_resused] == NULL)
-			continue;
-		put_page(rqstp->rq_respages[rqstp->rq_resused]);
-	}
-	rqstp->rq_argused = 0;
+	int i;
+	for (i=0; i<ARRAY_SIZE(rqstp->rq_pages); i++)
+		if (rqstp->rq_pages[i])
+			put_page(rqstp->rq_pages[i]);
 }
 
 /*
@@ -651,23 +644,32 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
 	unsigned long flags;
 	int i, error = 0, dummy;
 
-	progp = serv->sv_program;
-
-	dprintk("RPC: svc_register(%s, %s, %d)\n",
-		progp->pg_name, proto == IPPROTO_UDP? "udp" : "tcp", port);
-
 	if (!port)
 		clear_thread_flag(TIF_SIGPENDING);
 
-	for (i = 0; i < progp->pg_nvers; i++) {
-		if (progp->pg_vers[i] == NULL)
-			continue;
-		error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
-		if (error < 0)
-			break;
-		if (port && !dummy) {
-			error = -EACCES;
-			break;
+	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+		for (i = 0; i < progp->pg_nvers; i++) {
+			if (progp->pg_vers[i] == NULL)
+				continue;
+
+			dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n",
+					progp->pg_name,
+					proto == IPPROTO_UDP? "udp" : "tcp",
+					port,
+					i,
+					progp->pg_vers[i]->vs_hidden?
+						" (but not telling portmap)" : "");
+
+			if (progp->pg_vers[i]->vs_hidden)
+				continue;
+
+			error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
+			if (error < 0)
+				break;
+			if (port && !dummy) {
+				error = -EACCES;
+				break;
+			}
 		}
 	}
 
@@ -697,7 +699,7 @@ svc_process(struct svc_rqst *rqstp)
 	u32		dir, prog, vers, proc;
 	__be32		auth_stat, rpc_stat;
 	int		auth_res;
-	__be32		*accept_statp;
+	__be32		*reply_statp;
 
 	rpc_stat = rpc_success;
 
@@ -707,10 +709,10 @@ svc_process(struct svc_rqst *rqstp)
 	/* setup response xdr_buf.
 	 * Initially it has just one page
 	 */
-	svc_take_page(rqstp); /* must succeed */
+	rqstp->rq_resused = 1;
 	resv->iov_base = page_address(rqstp->rq_respages[0]);
 	resv->iov_len = 0;
-	rqstp->rq_res.pages = rqstp->rq_respages+1;
+	rqstp->rq_res.pages = rqstp->rq_respages + 1;
 	rqstp->rq_res.len = 0;
 	rqstp->rq_res.page_base = 0;
 	rqstp->rq_res.page_len = 0;
@@ -738,7 +740,7 @@ svc_process(struct svc_rqst *rqstp)
 		goto err_bad_rpc;
 
 	/* Save position in case we later decide to reject: */
-	accept_statp = resv->iov_base + resv->iov_len;
+	reply_statp = resv->iov_base + resv->iov_len;
 
 	svc_putnl(resv, 0);		/* ACCEPT */
 
@@ -886,7 +888,7 @@ err_bad_auth:
886 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat)); 888 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
887 serv->sv_stats->rpcbadauth++; 889 serv->sv_stats->rpcbadauth++;
888 /* Restore write pointer to location of accept status: */ 890 /* Restore write pointer to location of accept status: */
889 xdr_ressize_check(rqstp, accept_statp); 891 xdr_ressize_check(rqstp, reply_statp);
890 svc_putnl(resv, 1); /* REJECT */ 892 svc_putnl(resv, 1); /* REJECT */
891 svc_putnl(resv, 1); /* AUTH_ERROR */ 893 svc_putnl(resv, 1); /* AUTH_ERROR */
892 svc_putnl(resv, ntohl(auth_stat)); /* status */ 894 svc_putnl(resv, ntohl(auth_stat)); /* status */
@@ -926,3 +928,18 @@ err_bad:
 	svc_putnl(resv, ntohl(rpc_stat));
 	goto sendit;
 }
+
+/*
+ * Return (transport-specific) limit on the rpc payload.
+ */
+u32 svc_max_payload(const struct svc_rqst *rqstp)
+{
+	int max = RPCSVC_MAXPAYLOAD_TCP;
+
+	if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
+		max = RPCSVC_MAXPAYLOAD_UDP;
+	if (rqstp->rq_server->sv_bufsz < max)
+		max = rqstp->rq_server->sv_bufsz;
+	return max;
+}
+EXPORT_SYMBOL_GPL(svc_max_payload);
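
A small, hypothetical in-kernel caller (not in this patch) showing how the newly exported svc_max_payload() might be consulted to clamp a service's advertised maximum request/reply size; the example_max_rwsize() name is illustrative, and the include assumes the declaration lands in linux/sunrpc/svc.h.

#include <linux/sunrpc/svc.h>

/* Hypothetical helper: cap a wanted I/O size at what the transport
 * (UDP vs TCP) and the server's sv_bufsz actually allow. */
static unsigned int example_max_rwsize(struct svc_rqst *rqstp,
				       unsigned int wanted)
{
	u32 limit = svc_max_payload(rqstp);

	return wanted < limit ? wanted : limit;
}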
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 40d41a2831d7..e1bd933629fe 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -9,6 +9,7 @@
 #include <linux/seq_file.h>
 #include <linux/hash.h>
 #include <linux/string.h>
+#include <net/sock.h>
 
 #define RPCDBG_FACILITY	RPCDBG_AUTH
 
@@ -375,6 +376,44 @@ void svcauth_unix_purge(void)
 	cache_purge(&ip_map_cache);
 }
 
+static inline struct ip_map *
+ip_map_cached_get(struct svc_rqst *rqstp)
+{
+	struct ip_map *ipm = rqstp->rq_sock->sk_info_authunix;
+	if (ipm != NULL) {
+		if (!cache_valid(&ipm->h)) {
+			/*
+			 * The entry has been invalidated since it was
+			 * remembered, e.g. by a second mount from the
+			 * same IP address.
+			 */
+			rqstp->rq_sock->sk_info_authunix = NULL;
+			cache_put(&ipm->h, &ip_map_cache);
+			return NULL;
+		}
+		cache_get(&ipm->h);
+	}
+	return ipm;
+}
+
+static inline void
+ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
+{
+	struct svc_sock *svsk = rqstp->rq_sock;
+
+	if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL)
+		svsk->sk_info_authunix = ipm;	/* newly cached, keep the reference */
+	else
+		cache_put(&ipm->h, &ip_map_cache);
+}
+
+void
+svcauth_unix_info_release(void *info)
+{
+	struct ip_map *ipm = info;
+	cache_put(&ipm->h, &ip_map_cache);
+}
+
 static int
 svcauth_unix_set_client(struct svc_rqst *rqstp)
 {
@@ -384,8 +423,10 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	if (rqstp->rq_proc == 0)
 		return SVC_OK;
 
-	ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
-			    rqstp->rq_addr.sin_addr);
+	ipm = ip_map_cached_get(rqstp);
+	if (ipm == NULL)
+		ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
+			    rqstp->rq_addr.sin_addr);
 
 	if (ipm == NULL)
 		return SVC_DENIED;
@@ -400,7 +441,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	case 0:
 		rqstp->rq_client = &ipm->m_client->h;
 		kref_get(&rqstp->rq_client->ref);
-		cache_put(&ipm->h, &ip_map_cache);
+		ip_map_cached_put(rqstp, ipm);
 		break;
 	}
 	return SVC_OK;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cba85d195222..b39e7e2b648f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -313,7 +313,7 @@ svc_sock_release(struct svc_rqst *rqstp)
 
 	svc_release_skb(rqstp);
 
-	svc_free_allpages(rqstp);
+	svc_free_res_pages(rqstp);
 	rqstp->rq_res.page_len = 0;
 	rqstp->rq_res.page_base = 0;
 
@@ -412,7 +412,8 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 	/* send head */
 	if (slen == xdr->head[0].iov_len)
 		flags = 0;
-	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
+	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
+				  xdr->head[0].iov_len, flags);
 	if (len != xdr->head[0].iov_len)
 		goto out;
 	slen -= xdr->head[0].iov_len;
@@ -437,8 +438,9 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 	}
 	/* send tail */
 	if (xdr->tail[0].iov_len) {
-		result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
-					     ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
+		result = kernel_sendpage(sock, rqstp->rq_respages[0],
+					     ((unsigned long)xdr->tail[0].iov_base)
+						& (PAGE_SIZE-1),
 					     xdr->tail[0].iov_len, 0);
 
 		if (result > 0)
@@ -492,7 +494,12 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
 	}
 	spin_unlock(&serv->sv_lock);
 	if (closesk)
+		/* Should unregister with portmap, but you cannot
+		 * unregister just one protocol...
+		 */
 		svc_delete_socket(closesk);
+	else if (toclose)
+		return -ENOENT;
 	return len;
 }
 EXPORT_SYMBOL(svc_sock_names);
@@ -703,9 +710,11 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	if (len <= rqstp->rq_arg.head[0].iov_len) {
 		rqstp->rq_arg.head[0].iov_len = len;
 		rqstp->rq_arg.page_len = 0;
+		rqstp->rq_respages = rqstp->rq_pages+1;
 	} else {
 		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
-		rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
+		rqstp->rq_respages = rqstp->rq_pages + 1 +
+			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
 	}
 
 	if (serv->sv_stats)
@@ -946,7 +955,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	struct svc_sock	*svsk = rqstp->rq_sock;
 	struct svc_serv	*serv = svsk->sk_server;
 	int		len;
-	struct kvec vec[RPCSVC_MAXPAGES];
+	struct kvec *vec;
 	int pnum, vlen;
 
 	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
@@ -1044,15 +1053,17 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	len = svsk->sk_reclen;
 	set_bit(SK_DATA, &svsk->sk_flags);
 
+	vec = rqstp->rq_vec;
 	vec[0] = rqstp->rq_arg.head[0];
 	vlen = PAGE_SIZE;
 	pnum = 1;
 	while (vlen < len) {
-		vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
+		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
 		vec[pnum].iov_len = PAGE_SIZE;
 		pnum++;
 		vlen += PAGE_SIZE;
 	}
+	rqstp->rq_respages = &rqstp->rq_pages[pnum];
 
 	/* Now receive data */
 	len = svc_recvfrom(rqstp, vec, pnum, len);
@@ -1204,7 +1215,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	struct svc_sock		*svsk =NULL;
 	struct svc_serv		*serv = rqstp->rq_server;
 	struct svc_pool		*pool = rqstp->rq_pool;
-	int			len;
+	int			len, i;
 	int			pages;
 	struct xdr_buf		*arg;
 	DECLARE_WAITQUEUE(wait, current);
@@ -1221,27 +1232,22 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1221 "svc_recv: service %p, wait queue active!\n", 1232 "svc_recv: service %p, wait queue active!\n",
1222 rqstp); 1233 rqstp);
1223 1234
1224 /* Initialize the buffers */
1225 /* first reclaim pages that were moved to response list */
1226 svc_pushback_allpages(rqstp);
1227 1235
1228 /* now allocate needed pages. If we get a failure, sleep briefly */ 1236 /* now allocate needed pages. If we get a failure, sleep briefly */
1229 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE; 1237 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
1230 while (rqstp->rq_arghi < pages) { 1238 for (i=0; i < pages ; i++)
1231 struct page *p = alloc_page(GFP_KERNEL); 1239 while (rqstp->rq_pages[i] == NULL) {
1232 if (!p) { 1240 struct page *p = alloc_page(GFP_KERNEL);
1233 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1241 if (!p)
1234 continue; 1242 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1243 rqstp->rq_pages[i] = p;
1235 } 1244 }
1236 rqstp->rq_argpages[rqstp->rq_arghi++] = p;
1237 }
1238 1245
1239 /* Make arg->head point to first page and arg->pages point to rest */ 1246 /* Make arg->head point to first page and arg->pages point to rest */
1240 arg = &rqstp->rq_arg; 1247 arg = &rqstp->rq_arg;
1241 arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]); 1248 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
1242 arg->head[0].iov_len = PAGE_SIZE; 1249 arg->head[0].iov_len = PAGE_SIZE;
1243 rqstp->rq_argused = 1; 1250 arg->pages = rqstp->rq_pages + 1;
1244 arg->pages = rqstp->rq_argpages + 1;
1245 arg->page_base = 0; 1251 arg->page_base = 0;
1246 /* save at least one page for response */ 1252 /* save at least one page for response */
1247 arg->page_len = (pages-2)*PAGE_SIZE; 1253 arg->page_len = (pages-2)*PAGE_SIZE;
@@ -1604,6 +1610,8 @@ svc_delete_socket(struct svc_sock *svsk)
 			sockfd_put(svsk->sk_sock);
 		else
 			sock_release(svsk->sk_sock);
+		if (svsk->sk_info_authunix != NULL)
+			svcauth_unix_info_release(svsk->sk_info_authunix);
 		kfree(svsk);
 	} else {
 		spin_unlock_bh(&serv->sv_lock);
@@ -1699,6 +1707,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
 	rqstp->rq_prot        = dr->prot;
 	rqstp->rq_addr        = dr->addr;
 	rqstp->rq_daddr       = dr->daddr;
+	rqstp->rq_respages    = rqstp->rq_pages;
 	return dr->argslen<<2;
 }
 