aboutsummaryrefslogtreecommitdiffstats
path: root/net/sunrpc
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2006-10-04 05:15:46 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-10-04 10:55:15 -0400
commit4452435948424e5322c2a2fefbdc2cf3732cc45d (patch)
treed2082c68d33298e85298852cafde7999ccca3364 /net/sunrpc
parent5680c44632053a6c9464bca43083f01776d318da (diff)
[PATCH] knfsd: Replace two page lists in struct svc_rqst with one
We are planning to increase RPCSVC_MAXPAGES from about 8 to about 256. This means we need to be a bit careful about arrays of size RPCSVC_MAXPAGES. struct svc_rqst contains two such arrays. However there are never more than RPCSVC_MAXPAGES pages in the two arrays together, so only one array is needed. The two arrays are for the pages holding the request, and the pages holding the reply. Instead of two arrays, we can simply keep an index into where the first reply page is. This patch also removes a number of small inline functions that probably serve to obscure what is going on rather than clarify it, and opencodes the needed functionality. Also remove the 'rq_restailpage' variable as it is *always* 0. i.e. if the response 'xdr' structure has a non-empty tail it is always in the same pages as the head. Review notes: check that counters are initialised and incremented properly; check for consistent usage of ++ etc.; maybe extract some inlines for a common approach; general review. Signed-off-by: Neil Brown <neilb@suse.de> Cc: Magnus Maatta <novell@kiruna.se> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c4
-rw-r--r--net/sunrpc/svc.c21
-rw-r--r--net/sunrpc/svcsock.c41
3 files changed, 29 insertions, 37 deletions
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 638c0b576203..558692d7e465 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1191,7 +1191,6 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1191 resbuf->tail[0].iov_base = resbuf->head[0].iov_base 1191 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1192 + resbuf->head[0].iov_len; 1192 + resbuf->head[0].iov_len;
1193 resbuf->tail[0].iov_len = 0; 1193 resbuf->tail[0].iov_len = 0;
1194 rqstp->rq_restailpage = 0;
1195 resv = &resbuf->tail[0]; 1194 resv = &resbuf->tail[0];
1196 } else { 1195 } else {
1197 resv = &resbuf->tail[0]; 1196 resv = &resbuf->tail[0];
@@ -1240,7 +1239,7 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1240 inpages = resbuf->pages; 1239 inpages = resbuf->pages;
1241 /* XXX: Would be better to write some xdr helper functions for 1240 /* XXX: Would be better to write some xdr helper functions for
1242 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ 1241 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
1243 if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) { 1242 if (resbuf->tail[0].iov_base) {
1244 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base 1243 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
1245 + PAGE_SIZE); 1244 + PAGE_SIZE);
1246 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base); 1245 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
@@ -1258,7 +1257,6 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1258 resbuf->tail[0].iov_base = resbuf->head[0].iov_base 1257 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1259 + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE; 1258 + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
1260 resbuf->tail[0].iov_len = 0; 1259 resbuf->tail[0].iov_len = 0;
1261 rqstp->rq_restailpage = 0;
1262 } 1260 }
1263 if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages)) 1261 if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
1264 return -ENOMEM; 1262 return -ENOMEM;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a99e67b164c1..f4a509a925b5 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -417,18 +417,15 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
417 if (size > RPCSVC_MAXPAYLOAD) 417 if (size > RPCSVC_MAXPAYLOAD)
418 size = RPCSVC_MAXPAYLOAD; 418 size = RPCSVC_MAXPAYLOAD;
419 pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE; 419 pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE;
420 rqstp->rq_argused = 0;
421 rqstp->rq_resused = 0;
422 arghi = 0; 420 arghi = 0;
423 BUG_ON(pages > RPCSVC_MAXPAGES); 421 BUG_ON(pages > RPCSVC_MAXPAGES);
424 while (pages) { 422 while (pages) {
425 struct page *p = alloc_page(GFP_KERNEL); 423 struct page *p = alloc_page(GFP_KERNEL);
426 if (!p) 424 if (!p)
427 break; 425 break;
428 rqstp->rq_argpages[arghi++] = p; 426 rqstp->rq_pages[arghi++] = p;
429 pages--; 427 pages--;
430 } 428 }
431 rqstp->rq_arghi = arghi;
432 return ! pages; 429 return ! pages;
433} 430}
434 431
@@ -438,14 +435,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
438static void 435static void
439svc_release_buffer(struct svc_rqst *rqstp) 436svc_release_buffer(struct svc_rqst *rqstp)
440{ 437{
441 while (rqstp->rq_arghi) 438 int i;
442 put_page(rqstp->rq_argpages[--rqstp->rq_arghi]); 439 for (i=0; i<ARRAY_SIZE(rqstp->rq_pages); i++)
443 while (rqstp->rq_resused) { 440 if (rqstp->rq_pages[i])
444 if (rqstp->rq_respages[--rqstp->rq_resused] == NULL) 441 put_page(rqstp->rq_pages[i]);
445 continue;
446 put_page(rqstp->rq_respages[rqstp->rq_resused]);
447 }
448 rqstp->rq_argused = 0;
449} 442}
450 443
451/* 444/*
@@ -707,10 +700,10 @@ svc_process(struct svc_rqst *rqstp)
707 /* setup response xdr_buf. 700 /* setup response xdr_buf.
708 * Initially it has just one page 701 * Initially it has just one page
709 */ 702 */
710 svc_take_page(rqstp); /* must succeed */ 703 rqstp->rq_resused = 1;
711 resv->iov_base = page_address(rqstp->rq_respages[0]); 704 resv->iov_base = page_address(rqstp->rq_respages[0]);
712 resv->iov_len = 0; 705 resv->iov_len = 0;
713 rqstp->rq_res.pages = rqstp->rq_respages+1; 706 rqstp->rq_res.pages = rqstp->rq_respages + 1;
714 rqstp->rq_res.len = 0; 707 rqstp->rq_res.len = 0;
715 rqstp->rq_res.page_base = 0; 708 rqstp->rq_res.page_base = 0;
716 rqstp->rq_res.page_len = 0; 709 rqstp->rq_res.page_len = 0;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 4de8626e4f54..25096d53667a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -313,7 +313,7 @@ svc_sock_release(struct svc_rqst *rqstp)
313 313
314 svc_release_skb(rqstp); 314 svc_release_skb(rqstp);
315 315
316 svc_free_allpages(rqstp); 316 svc_free_res_pages(rqstp);
317 rqstp->rq_res.page_len = 0; 317 rqstp->rq_res.page_len = 0;
318 rqstp->rq_res.page_base = 0; 318 rqstp->rq_res.page_base = 0;
319 319
@@ -412,7 +412,8 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
412 /* send head */ 412 /* send head */
413 if (slen == xdr->head[0].iov_len) 413 if (slen == xdr->head[0].iov_len)
414 flags = 0; 414 flags = 0;
415 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags); 415 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
416 xdr->head[0].iov_len, flags);
416 if (len != xdr->head[0].iov_len) 417 if (len != xdr->head[0].iov_len)
417 goto out; 418 goto out;
418 slen -= xdr->head[0].iov_len; 419 slen -= xdr->head[0].iov_len;
@@ -437,8 +438,9 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
437 } 438 }
438 /* send tail */ 439 /* send tail */
439 if (xdr->tail[0].iov_len) { 440 if (xdr->tail[0].iov_len) {
440 result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage], 441 result = kernel_sendpage(sock, rqstp->rq_respages[0],
441 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1), 442 ((unsigned long)xdr->tail[0].iov_base)
443 & (PAGE_SIZE-1),
442 xdr->tail[0].iov_len, 0); 444 xdr->tail[0].iov_len, 0);
443 445
444 if (result > 0) 446 if (result > 0)
@@ -708,9 +710,11 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
708 if (len <= rqstp->rq_arg.head[0].iov_len) { 710 if (len <= rqstp->rq_arg.head[0].iov_len) {
709 rqstp->rq_arg.head[0].iov_len = len; 711 rqstp->rq_arg.head[0].iov_len = len;
710 rqstp->rq_arg.page_len = 0; 712 rqstp->rq_arg.page_len = 0;
713 rqstp->rq_respages = rqstp->rq_pages+1;
711 } else { 714 } else {
712 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; 715 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
713 rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE; 716 rqstp->rq_respages = rqstp->rq_pages + 1 +
717 (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
714 } 718 }
715 719
716 if (serv->sv_stats) 720 if (serv->sv_stats)
@@ -1053,11 +1057,12 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
1053 vlen = PAGE_SIZE; 1057 vlen = PAGE_SIZE;
1054 pnum = 1; 1058 pnum = 1;
1055 while (vlen < len) { 1059 while (vlen < len) {
1056 vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]); 1060 vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
1057 vec[pnum].iov_len = PAGE_SIZE; 1061 vec[pnum].iov_len = PAGE_SIZE;
1058 pnum++; 1062 pnum++;
1059 vlen += PAGE_SIZE; 1063 vlen += PAGE_SIZE;
1060 } 1064 }
1065 rqstp->rq_respages = &rqstp->rq_pages[pnum];
1061 1066
1062 /* Now receive data */ 1067 /* Now receive data */
1063 len = svc_recvfrom(rqstp, vec, pnum, len); 1068 len = svc_recvfrom(rqstp, vec, pnum, len);
@@ -1209,7 +1214,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1209 struct svc_sock *svsk =NULL; 1214 struct svc_sock *svsk =NULL;
1210 struct svc_serv *serv = rqstp->rq_server; 1215 struct svc_serv *serv = rqstp->rq_server;
1211 struct svc_pool *pool = rqstp->rq_pool; 1216 struct svc_pool *pool = rqstp->rq_pool;
1212 int len; 1217 int len, i;
1213 int pages; 1218 int pages;
1214 struct xdr_buf *arg; 1219 struct xdr_buf *arg;
1215 DECLARE_WAITQUEUE(wait, current); 1220 DECLARE_WAITQUEUE(wait, current);
@@ -1226,27 +1231,22 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1226 "svc_recv: service %p, wait queue active!\n", 1231 "svc_recv: service %p, wait queue active!\n",
1227 rqstp); 1232 rqstp);
1228 1233
1229 /* Initialize the buffers */
1230 /* first reclaim pages that were moved to response list */
1231 svc_pushback_allpages(rqstp);
1232 1234
1233 /* now allocate needed pages. If we get a failure, sleep briefly */ 1235 /* now allocate needed pages. If we get a failure, sleep briefly */
1234 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE; 1236 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
1235 while (rqstp->rq_arghi < pages) { 1237 for (i=0; i < pages ; i++)
1236 struct page *p = alloc_page(GFP_KERNEL); 1238 while (rqstp->rq_pages[i] == NULL) {
1237 if (!p) { 1239 struct page *p = alloc_page(GFP_KERNEL);
1238 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1240 if (!p)
1239 continue; 1241 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1242 rqstp->rq_pages[i] = p;
1240 } 1243 }
1241 rqstp->rq_argpages[rqstp->rq_arghi++] = p;
1242 }
1243 1244
1244 /* Make arg->head point to first page and arg->pages point to rest */ 1245 /* Make arg->head point to first page and arg->pages point to rest */
1245 arg = &rqstp->rq_arg; 1246 arg = &rqstp->rq_arg;
1246 arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]); 1247 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
1247 arg->head[0].iov_len = PAGE_SIZE; 1248 arg->head[0].iov_len = PAGE_SIZE;
1248 rqstp->rq_argused = 1; 1249 arg->pages = rqstp->rq_pages + 1;
1249 arg->pages = rqstp->rq_argpages + 1;
1250 arg->page_base = 0; 1250 arg->page_base = 0;
1251 /* save at least one page for response */ 1251 /* save at least one page for response */
1252 arg->page_len = (pages-2)*PAGE_SIZE; 1252 arg->page_len = (pages-2)*PAGE_SIZE;
@@ -1704,6 +1704,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1704 rqstp->rq_prot = dr->prot; 1704 rqstp->rq_prot = dr->prot;
1705 rqstp->rq_addr = dr->addr; 1705 rqstp->rq_addr = dr->addr;
1706 rqstp->rq_daddr = dr->daddr; 1706 rqstp->rq_daddr = dr->daddr;
1707 rqstp->rq_respages = rqstp->rq_pages;
1707 return dr->argslen<<2; 1708 return dr->argslen<<2;
1708} 1709}
1709 1710