Diffstat (limited to 'net/sunrpc/svcsock.c')
 net/sunrpc/svcsock.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 4de8626e4f54..25096d53667a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -313,7 +313,7 @@ svc_sock_release(struct svc_rqst *rqstp)
 
 	svc_release_skb(rqstp);
 
-	svc_free_allpages(rqstp);
+	svc_free_res_pages(rqstp);
 	rqstp->rq_res.page_len = 0;
 	rqstp->rq_res.page_base = 0;
 
@@ -412,7 +412,8 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 	/* send head */
 	if (slen == xdr->head[0].iov_len)
 		flags = 0;
-	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
+	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
+				  xdr->head[0].iov_len, flags);
 	if (len != xdr->head[0].iov_len)
 		goto out;
 	slen -= xdr->head[0].iov_len;
@@ -437,8 +438,9 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 	}
 	/* send tail */
 	if (xdr->tail[0].iov_len) {
-		result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
-					     ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
+		result = kernel_sendpage(sock, rqstp->rq_respages[0],
+					     ((unsigned long)xdr->tail[0].iov_base)
+						& (PAGE_SIZE-1),
 					     xdr->tail[0].iov_len, 0);
 
 		if (result > 0)
@@ -708,9 +710,11 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	if (len <= rqstp->rq_arg.head[0].iov_len) {
 		rqstp->rq_arg.head[0].iov_len = len;
 		rqstp->rq_arg.page_len = 0;
+		rqstp->rq_respages = rqstp->rq_pages+1;
 	} else {
 		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
-		rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
+		rqstp->rq_respages = rqstp->rq_pages + 1 +
+			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
 	}
 
 	if (serv->sv_stats)
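
Both branches above end by recording where the reply may begin in the single page array. A small standalone sketch of that arithmetic follows; the struct, array size and PAGE_SIZE value are illustrative stand-ins, not the kernel's definitions.

#include <stddef.h>

#define PAGE_SIZE 4096UL   /* illustrative; the real constant is per-architecture */

struct page;               /* opaque here; only pointers to it are used */

/* Hypothetical stand-in for the svc_rqst fields this patch touches. */
struct rqst_sketch {
	struct page  *rq_pages[32];
	struct page **rq_respages;   /* first page that the reply may use */
};

/*
 * Mirrors the arithmetic in svc_udp_recvfrom() above: pages before
 * rq_respages hold received data, pages from rq_respages onward are
 * free for the response.
 */
static void mark_respages(struct rqst_sketch *rq, size_t head_len, size_t total_len)
{
	if (total_len <= head_len) {
		/* the whole datagram fit in the head page */
		rq->rq_respages = rq->rq_pages + 1;
	} else {
		size_t page_len = total_len - head_len;

		rq->rq_respages = rq->rq_pages + 1 +
			(page_len + PAGE_SIZE - 1) / PAGE_SIZE;
	}
}
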
@@ -1053,11 +1057,12 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	vlen = PAGE_SIZE;
 	pnum = 1;
 	while (vlen < len) {
-		vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
+		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
 		vec[pnum].iov_len = PAGE_SIZE;
 		pnum++;
 		vlen += PAGE_SIZE;
 	}
+	rqstp->rq_respages = &rqstp->rq_pages[pnum];
 
 	/* Now receive data */
 	len = svc_recvfrom(rqstp, vec, pnum, len);
@@ -1209,7 +1214,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	struct svc_sock		*svsk =NULL;
 	struct svc_serv		*serv = rqstp->rq_server;
 	struct svc_pool		*pool = rqstp->rq_pool;
-	int			len;
+	int			len, i;
 	int			pages;
 	struct xdr_buf		*arg;
 	DECLARE_WAITQUEUE(wait, current);
@@ -1226,27 +1231,22 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1226 "svc_recv: service %p, wait queue active!\n", 1231 "svc_recv: service %p, wait queue active!\n",
1227 rqstp); 1232 rqstp);
1228 1233
1229 /* Initialize the buffers */
1230 /* first reclaim pages that were moved to response list */
1231 svc_pushback_allpages(rqstp);
1232 1234
1233 /* now allocate needed pages. If we get a failure, sleep briefly */ 1235 /* now allocate needed pages. If we get a failure, sleep briefly */
1234 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE; 1236 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
1235 while (rqstp->rq_arghi < pages) { 1237 for (i=0; i < pages ; i++)
1236 struct page *p = alloc_page(GFP_KERNEL); 1238 while (rqstp->rq_pages[i] == NULL) {
1237 if (!p) { 1239 struct page *p = alloc_page(GFP_KERNEL);
1238 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1240 if (!p)
1239 continue; 1241 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1242 rqstp->rq_pages[i] = p;
1240 } 1243 }
1241 rqstp->rq_argpages[rqstp->rq_arghi++] = p;
1242 }
1243 1244
1244 /* Make arg->head point to first page and arg->pages point to rest */ 1245 /* Make arg->head point to first page and arg->pages point to rest */
1245 arg = &rqstp->rq_arg; 1246 arg = &rqstp->rq_arg;
1246 arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]); 1247 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
1247 arg->head[0].iov_len = PAGE_SIZE; 1248 arg->head[0].iov_len = PAGE_SIZE;
1248 rqstp->rq_argused = 1; 1249 arg->pages = rqstp->rq_pages + 1;
1249 arg->pages = rqstp->rq_argpages + 1;
1250 arg->page_base = 0; 1250 arg->page_base = 0;
1251 /* save at least one page for response */ 1251 /* save at least one page for response */
1252 arg->page_len = (pages-2)*PAGE_SIZE; 1252 arg->page_len = (pages-2)*PAGE_SIZE;
@@ -1704,6 +1704,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
 	rqstp->rq_prot        = dr->prot;
 	rqstp->rq_addr        = dr->addr;
 	rqstp->rq_daddr       = dr->daddr;
+	rqstp->rq_respages    = rqstp->rq_pages;
 	return dr->argslen<<2;
 }
 
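
As far as this file shows, the patch folds the old rq_argpages/rq_argused/rq_arghi/rq_restailpage bookkeeping into a single rq_pages[] array, with rq_respages now a plain pointer into that array marking where request data ends and reply pages begin. Roughly, as a layout sketch (assuming the argument spilled into N pages beyond the head):

  rq_pages[]:  [ head ][ arg page 0 ] ... [ arg page N-1 ][ pages available for the reply ... ]
                                                            ^
                                                            rq_respages
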