author	Chuck Lever <cel@netapp.com>	2006-01-03 03:55:49 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-01-06 14:58:55 -0500
commit	02107148349f31eee7c0fb06fd7a880df73dbd20
tree	37bffd81e08b8e50394ce89a1aa7a3961f0ffbe7	/net/sunrpc/sched.c
parent	03c21733938aad0758f5f88e1cc7ede69fc3c910
SUNRPC: switchable buffer allocation
Add RPC client transport switch support for replacing buffer management
on a per-transport basis.

In the current IPv4 socket transport implementation, RPC buffers are
allocated as needed for each RPC message that is sent. Some transport
implementations may choose to use pre-allocated buffers for encoding,
sending, receiving, and unmarshalling RPC messages, however. For
transports capable of direct data placement, the buffers can be carved
out of a pre-registered area of memory rather than from a slab cache.

Test-plan:
Millions of fsx operations. Performance characterization with "sio" and
"iozone". Use oprofile and other tools to look for significant regression
in CPU utilization.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
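The sched.c half of the patch (below) only converts rpc_malloc()/rpc_free()
to operate on the per-request buffer; the per-transport switch itself lives
in the transport ops table, which is outside this file. As a rough sketch of
the idea, assuming the ops table gained buf_alloc/buf_free hooks in the rest
of this patch, a direct-data-placement transport could supply its own
handlers while a socket transport keeps the generic ones. The example_rdma_*
names below are hypothetical, invented for illustration only:

/* Hypothetical transport-side sketch; not part of this diff. */
static void *example_rdma_buf_alloc(struct rpc_task *task, size_t size)
{
	/* carve the buffer out of a pre-registered memory region
	 * instead of allocating it from a slab cache */
	return example_rdma_get_registered_buf(task->tk_rqstp, size);
}

static void example_rdma_buf_free(struct rpc_task *task)
{
	/* hand the buffer back to the pre-registered pool */
	example_rdma_put_registered_buf(task->tk_rqstp);
}

static struct rpc_xprt_ops example_rdma_ops = {
	/* ... other transport methods ... */
	.buf_alloc	= example_rdma_buf_alloc,	/* assumed hook name */
	.buf_free	= example_rdma_buf_free,	/* assumed hook name */
};

/* a socket transport, by contrast, would simply point the same hooks at
 * the generic allocator shown in the diff below:
 *	.buf_alloc	= rpc_malloc,
 *	.buf_free	= rpc_free,
 */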
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c	50
1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 48510e3ffa02..7415406aa1ae 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,8 +41,6 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
 
 static void __rpc_default_timer(struct rpc_task *task);
 static void rpciod_killall(void);
-static void rpc_free(struct rpc_task *task);
-
 static void rpc_async_schedule(void *);
 
 /*
@@ -599,7 +597,6 @@ void rpc_exit_task(struct rpc_task *task)
 			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
 			xprt_release(task);
-			rpc_free(task);
 		}
 	}
 }
@@ -724,17 +721,19 @@ static void rpc_async_schedule(void *arg)
 	__rpc_execute((struct rpc_task *)arg);
 }
 
-/*
- * Allocate memory for RPC purposes.
+/**
+ * rpc_malloc - allocate an RPC buffer
+ * @task: RPC task that will use this buffer
+ * @size: requested byte size
  *
  * We try to ensure that some NFS reads and writes can always proceed
  * by using a mempool when allocating 'small' buffers.
  * In order to avoid memory starvation triggering more writebacks of
  * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
  */
-void *
-rpc_malloc(struct rpc_task *task, size_t size)
+void * rpc_malloc(struct rpc_task *task, size_t size)
 {
+	struct rpc_rqst *req = task->tk_rqstp;
 	gfp_t gfp;
 
 	if (task->tk_flags & RPC_TASK_SWAPPER)
@@ -743,27 +742,33 @@ rpc_malloc(struct rpc_task *task, size_t size)
 		gfp = GFP_NOFS;
 
 	if (size > RPC_BUFFER_MAXSIZE) {
-		task->tk_buffer = kmalloc(size, gfp);
-		if (task->tk_buffer)
-			task->tk_bufsize = size;
+		req->rq_buffer = kmalloc(size, gfp);
+		if (req->rq_buffer)
+			req->rq_bufsize = size;
 	} else {
-		task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
-		if (task->tk_buffer)
-			task->tk_bufsize = RPC_BUFFER_MAXSIZE;
+		req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
+		if (req->rq_buffer)
+			req->rq_bufsize = RPC_BUFFER_MAXSIZE;
 	}
-	return task->tk_buffer;
+	return req->rq_buffer;
 }
 
-static void
-rpc_free(struct rpc_task *task)
+/**
+ * rpc_free - free buffer allocated via rpc_malloc
+ * @task: RPC task with a buffer to be freed
+ *
+ */
+void rpc_free(struct rpc_task *task)
 {
-	if (task->tk_buffer) {
-		if (task->tk_bufsize == RPC_BUFFER_MAXSIZE)
-			mempool_free(task->tk_buffer, rpc_buffer_mempool);
+	struct rpc_rqst *req = task->tk_rqstp;
+
+	if (req->rq_buffer) {
+		if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
+			mempool_free(req->rq_buffer, rpc_buffer_mempool);
 		else
-			kfree(task->tk_buffer);
-		task->tk_buffer = NULL;
-		task->tk_bufsize = 0;
+			kfree(req->rq_buffer);
+		req->rq_buffer = NULL;
+		req->rq_bufsize = 0;
 	}
 }
 
@@ -887,7 +892,6 @@ void rpc_release_task(struct rpc_task *task)
 	xprt_release(task);
 	if (task->tk_msg.rpc_cred)
 		rpcauth_unbindcred(task);
-	rpc_free(task);
 	if (task->tk_client) {
 		rpc_release_client(task->tk_client);
 		task->tk_client = NULL;
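
The two rpc_free() calls removed above are not a leak: once the buffer hangs
off the rpc_rqst rather than the rpc_task, it can be released on the
xprt_release() path together with the request slot, through whatever free
routine the transport registered. Below is a minimal sketch of that caller
side, assuming buf_alloc/buf_free hooks on the transport ops; the actual
changes live in clnt.c and xprt.c, which are not part of this diff, and the
example_* function names are invented for illustration:

/* Sketch only; hook names and call sites are assumptions about the
 * parts of the patch not shown in net/sunrpc/sched.c. */
static void example_call_allocate(struct rpc_task *task, size_t bufsiz)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	/* sockets end up in rpc_malloc(); an RDMA-style transport could
	 * return a slice of a pre-registered region instead */
	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;

	/* allocation failed: back off and retry later */
	rpc_delay(task, HZ >> 4);
}

static void example_xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	/* the buffer is torn down with the request slot, so sched.c no
	 * longer needs to call rpc_free() directly */
	xprt->ops->buf_free(task);
	/* ... then return the request slot, etc. ... */
}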