Diffstat (limited to 'net/sunrpc')
 net/sunrpc/svc.c     | 17
 net/sunrpc/svcsock.c | 28
 2 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c2c8bb20d07f..2807fa0eab40 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -282,7 +282,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 	serv->sv_program   = prog;
 	serv->sv_nrthreads = 1;
 	serv->sv_stats     = prog->pg_stats;
-	serv->sv_bufsz	   = bufsize? bufsize : 4096;
+	if (bufsize > RPCSVC_MAXPAYLOAD)
+		bufsize = RPCSVC_MAXPAYLOAD;
+	serv->sv_max_payload = bufsize? bufsize : 4096;
+	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
 	serv->sv_shutdown  = shutdown;
 	xdrsize = 0;
 	while (prog) {
@@ -414,9 +417,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 	int pages;
 	int arghi;
 
-	if (size > RPCSVC_MAXPAYLOAD)
-		size = RPCSVC_MAXPAYLOAD;
-	pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE;
+	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
+				       * We assume one is at most one page
+				       */
 	arghi = 0;
 	BUG_ON(pages > RPCSVC_MAXPAGES);
 	while (pages) {
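With the assumed values above, a 32 KB payload gives sv_max_mesg = 36864, so svc_init_buffer() now asks for 36864/4096 + 1 = 10 pages: nine pages covering the largest message, plus the extra page described in the new comment (request and reply are held together, with one of them assumed to fit in a single page).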
@@ -463,7 +466,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
 
 	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
 	    || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
-	    || !svc_init_buffer(rqstp, serv->sv_bufsz))
+	    || !svc_init_buffer(rqstp, serv->sv_max_mesg))
 		goto out_thread;
 
 	serv->sv_nrthreads++;
@@ -938,8 +941,8 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
 
 	if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
 		max = RPCSVC_MAXPAYLOAD_UDP;
-	if (rqstp->rq_server->sv_bufsz < max)
-		max = rqstp->rq_server->sv_bufsz;
+	if (rqstp->rq_server->sv_max_payload < max)
+		max = rqstp->rq_server->sv_max_payload;
 	return max;
 }
 EXPORT_SYMBOL_GPL(svc_max_payload);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b39e7e2b648f..61e307cca13d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -192,13 +192,13 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		svsk->sk_pool = pool;
 
 		set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-		if (((atomic_read(&svsk->sk_reserved) + serv->sv_bufsz)*2
+		if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
 		     > svc_sock_wspace(svsk))
 		    && !test_bit(SK_CLOSE, &svsk->sk_flags)
 		    && !test_bit(SK_CONN, &svsk->sk_flags)) {
 			/* Don't enqueue while not enough space for reply */
 			dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
-				svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
+				svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
 				svc_sock_wspace(svsk));
 			svsk->sk_pool = NULL;
 			clear_bit(SK_BUSY, &svsk->sk_flags);
@@ -220,7 +220,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 			rqstp, rqstp->rq_sock);
 		rqstp->rq_sock = svsk;
 		atomic_inc(&svsk->sk_inuse);
-		rqstp->rq_reserved = serv->sv_bufsz;
+		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
 		BUG_ON(svsk->sk_pool != pool);
 		wake_up(&rqstp->rq_wait);
@@ -639,8 +639,8 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	     * which will access the socket.
 	     */
 	    svc_sock_setbufsize(svsk->sk_sock,
-			    (serv->sv_nrthreads+3) * serv->sv_bufsz,
-			    (serv->sv_nrthreads+3) * serv->sv_bufsz);
+			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+			    (serv->sv_nrthreads+3) * serv->sv_max_mesg);
 
 	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
 		svc_sock_received(svsk);
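For illustration, with the 36864-byte sv_max_mesg from the example above and, say, eight server threads, this call asks for send and receive buffers of (8+3) * 36864 = 405504 bytes each; the TCP receive path further down requests (nrthreads+3) * sv_max_mesg for the receive buffer but only 3 * sv_max_mesg for the send buffer.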
@@ -749,8 +749,8 @@ svc_udp_init(struct svc_sock *svsk)
 	 * svc_udp_recvfrom will re-adjust if necessary
 	 */
 	svc_sock_setbufsize(svsk->sk_sock,
-			    3 * svsk->sk_server->sv_bufsz,
-			    3 * svsk->sk_server->sv_bufsz);
+			    3 * svsk->sk_server->sv_max_mesg,
+			    3 * svsk->sk_server->sv_max_mesg);
 
 	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
 	set_bit(SK_CHNGBUF, &svsk->sk_flags);
@@ -993,8 +993,8 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		 * as soon a a complete request arrives.
 		 */
 		svc_sock_setbufsize(svsk->sk_sock,
-				    (serv->sv_nrthreads+3) * serv->sv_bufsz,
-				    3 * serv->sv_bufsz);
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+				    3 * serv->sv_max_mesg);
 
 	clear_bit(SK_DATA, &svsk->sk_flags);
 
@@ -1032,7 +1032,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		}
 		svsk->sk_reclen &= 0x7fffffff;
 		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
-		if (svsk->sk_reclen > serv->sv_bufsz) {
+		if (svsk->sk_reclen > serv->sv_max_mesg) {
 			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
 			       (unsigned long) svsk->sk_reclen);
 			goto err_delete;
@@ -1171,8 +1171,8 @@ svc_tcp_init(struct svc_sock *svsk)
 	 * svc_tcp_recvfrom will re-adjust if necessary
 	 */
 	svc_sock_setbufsize(svsk->sk_sock,
-			    3 * svsk->sk_server->sv_bufsz,
-			    3 * svsk->sk_server->sv_bufsz);
+			    3 * svsk->sk_server->sv_max_mesg,
+			    3 * svsk->sk_server->sv_max_mesg);
 
 	set_bit(SK_CHNGBUF, &svsk->sk_flags);
 	set_bit(SK_DATA, &svsk->sk_flags);
@@ -1234,7 +1234,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 
 
 	/* now allocate needed pages. If we get a failure, sleep briefly */
-	pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
+	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
 	for (i=0; i < pages ; i++)
 		while (rqstp->rq_pages[i] == NULL) {
 			struct page *p = alloc_page(GFP_KERNEL);
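Because sv_max_mesg is rounded up to a whole number of pages, (sv_max_mesg + PAGE_SIZE) / PAGE_SIZE equals sv_max_mesg / PAGE_SIZE + 1, so svc_recv() tops rqstp->rq_pages back up to the same page count that svc_init_buffer() uses (10 pages in the 32 KB example above).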
@@ -1263,7 +1263,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
 		rqstp->rq_sock = svsk;
 		atomic_inc(&svsk->sk_inuse);
-		rqstp->rq_reserved = serv->sv_bufsz;
+		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
 	} else {
 		/* No data pending. Go to sleep */