path: root/net/sunrpc/clnt.c
author	Chuck Lever <cel@netapp.com>	2006-01-03 03:55:49 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-01-06 14:58:55 -0500
commit	02107148349f31eee7c0fb06fd7a880df73dbd20 (patch)
tree	37bffd81e08b8e50394ce89a1aa7a3961f0ffbe7 /net/sunrpc/clnt.c
parent	03c21733938aad0758f5f88e1cc7ede69fc3c910 (diff)
SUNRPC: switchable buffer allocation
Add RPC client transport switch support for replacing buffer management
on a per-transport basis.

In the current IPv4 socket transport implementation, RPC buffers are
allocated as needed for each RPC message that is sent. Some transport
implementations may choose to use pre-allocated buffers for encoding,
sending, receiving, and unmarshalling RPC messages, however. For
transports capable of direct data placement, the buffers can be carved
out of a pre-registered area of memory rather than from a slab cache.

Test-plan:
Millions of fsx operations. Performance characterization with "sio" and
"iozone". Use oprofile and other tools to look for significant regression
in CPU utilization.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
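The hunks below show only the clnt.c call sites; the switch itself (a
buf_alloc method on the transport's ops vector, presumably paired with a
matching free method) lands in files outside this diffstat. As a minimal,
self-contained sketch of the pattern, using illustrative names rather than
the kernel's real structures:

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative stand-ins only -- not the kernel's structures. The real
 * switch hangs buf_alloc off struct rpc_xprt_ops, as the first hunk
 * below shows via xprt->ops->buf_alloc().
 */
struct task;				/* opaque, like struct rpc_task */

struct xprt_ops {
	void *(*buf_alloc)(struct task *tsk, size_t size);
	void  (*buf_free)(struct task *tsk, void *buf);
};

/* Default policy: allocate per message, as the IPv4 socket transport does. */
static void *sock_buf_alloc(struct task *tsk, size_t size)
{
	(void)tsk;
	return malloc(size);
}

static void sock_buf_free(struct task *tsk, void *buf)
{
	(void)tsk;
	free(buf);
}

static const struct xprt_ops socket_ops = {
	.buf_alloc = sock_buf_alloc,
	.buf_free  = sock_buf_free,
};

/*
 * A direct-data-placement transport would install different methods that
 * carve buffers out of a pre-registered memory region instead; callers
 * are unchanged because they only ever go through the ops vector.
 */
int main(void)
{
	const struct xprt_ops *ops = &socket_ops;
	void *buf = ops->buf_alloc(NULL, 4096);

	if (buf == NULL)
		return 1;
	printf("buffer at %p came from the transport's allocator\n", buf);
	ops->buf_free(NULL, buf);
	return 0;
}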
Diffstat (limited to 'net/sunrpc/clnt.c')
-rw-r--r--	net/sunrpc/clnt.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b23c0d328c9c..25cba94c5683 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -644,24 +644,26 @@ call_reserveresult(struct rpc_task *task)
 
 /*
  * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
- *	(Note: buffer memory is freed in rpc_task_release).
+ *	(Note: buffer memory is freed in xprt_release).
  */
 static void
 call_allocate(struct rpc_task *task)
 {
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_xprt *xprt = task->tk_xprt;
 	unsigned int	bufsiz;
 
 	dprintk("RPC: %4d call_allocate (status %d)\n",
 			task->tk_pid, task->tk_status);
 	task->tk_action = call_bind;
-	if (task->tk_buffer)
+	if (req->rq_buffer)
 		return;
 
 	/* FIXME: compute buffer requirements more exactly using
 	 * auth->au_wslack */
 	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;
 
-	if (rpc_malloc(task, bufsiz << 1) != NULL)
+	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
 		return;
 	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
 
@@ -704,14 +706,14 @@ call_encode(struct rpc_task *task)
 			task->tk_pid, task->tk_status);
 
 	/* Default buffer setup */
-	bufsiz = task->tk_bufsize >> 1;
-	sndbuf->head[0].iov_base = (void *)task->tk_buffer;
+	bufsiz = req->rq_bufsize >> 1;
+	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
 	sndbuf->head[0].iov_len  = bufsiz;
 	sndbuf->tail[0].iov_len  = 0;
 	sndbuf->page_len	 = 0;
 	sndbuf->len		 = 0;
 	sndbuf->buflen		 = bufsiz;
-	rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
+	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
 	rcvbuf->head[0].iov_len  = bufsiz;
 	rcvbuf->tail[0].iov_len  = 0;
 	rcvbuf->page_len	 = 0;
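A note on the second hunk: call_allocate requests bufsiz << 1 bytes from
the transport, and call_encode splits that single allocation in half, the
low half backing the send xdr_buf and the high half the receive xdr_buf.
A tiny runnable sketch of that arithmetic (illustrative names and values,
not kernel code):

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

int main(void)
{
	/* Stands in for p_bufsiz + RPC_SLACK_SPACE; value is arbitrary here. */
	size_t bufsiz = 4096;

	/* What req->rq_buffer holds after buf_alloc(task, bufsiz << 1). */
	char *rq_buffer = malloc(bufsiz << 1);
	if (rq_buffer == NULL)
		return 1;

	/* call_encode's split: send half first, receive half second. */
	char *snd_head = rq_buffer;		/* sndbuf->head[0].iov_base */
	char *rcv_head = rq_buffer + bufsiz;	/* rcvbuf->head[0].iov_base */

	assert((size_t)(rcv_head - snd_head) == bufsiz);

	free(rq_buffer);
	return 0;
}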