author     Chuck Lever <chuck.lever@oracle.com>          2007-03-29 16:47:58 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2007-05-01 01:17:11 -0400
commit     c5a4dd8b7c15927a8fbff83171b57cad675a79b9 (patch)
tree       2d3b1930449b31f69dc70a6e1d4e0f0532f3f118 /net
parent     2bea90d43a050bbc4021d44e59beb34f384438db (diff)
SUNRPC: Eliminate side effects from rpc_malloc

Currently rpc_malloc sets req->rq_buffer internally. Make this a more
generic interface: return a pointer to the new buffer (or NULL) and make
the caller set req->rq_buffer and req->rq_bufsize. This looks much more
like kmalloc and eliminates the side effects.

To fix a potential deadlock, this patch also replaces GFP_NOFS with
GFP_NOWAIT in rpc_malloc. This prevents async RPCs from sleeping outside
the RPC's task scheduler while allocating their buffer.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
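The caller-side convention this introduces is simple: buf_alloc hands back a pointer that the caller stores itself, and NULL means the request cannot be satisfied right now. The user-space sketch below illustrates that convention only; demo_rqst, demo_buf_alloc and demo_allocate are hypothetical names, and plain malloc() stands in for the mempool/kmalloc path the patch actually uses.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct rpc_rqst: the caller owns rq_buffer. */
struct demo_rqst {
	void   *rq_buffer;
	size_t  rq_bufsize;
};

/* Stand-in for the reworked buf_alloc: return a buffer or NULL, no side effects. */
static void *demo_buf_alloc(size_t size)
{
	return malloc(size);	/* the kernel path would use a mempool or kmalloc */
}

/* Roughly what a caller such as call_allocate now does with the return value. */
static int demo_allocate(struct demo_rqst *req, size_t callsize, size_t rcvsize)
{
	req->rq_buffer = demo_buf_alloc(callsize + rcvsize);
	if (req->rq_buffer == NULL)
		return -1;	/* caller decides whether to retry later or fail */
	req->rq_bufsize = callsize + rcvsize;
	return 0;
}

int main(void)
{
	struct demo_rqst req = { 0 };

	if (demo_allocate(&req, 1024, 1024) == 0) {
		printf("allocated %zu bytes\n", req.rq_bufsize);
		free(req.rq_buffer);
	}
	return 0;
}

Returning NULL instead of sleeping lets an async caller defer and retry from the RPC task scheduler rather than blocking inside rpciod.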
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/clnt.c    3
-rw-r--r--   net/sunrpc/sched.c  65
-rw-r--r--   net/sunrpc/xprt.c    2
3 files changed, 37 insertions(+), 33 deletions(-)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 12487aafaab5..e7dc09ecc470 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -774,7 +774,8 @@ call_allocate(struct rpc_task *task)
 	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
 	req->rq_rcvsize <<= 2;
 
-	xprt->ops->buf_alloc(task, req->rq_callsize + req->rq_rcvsize);
+	req->rq_buffer = xprt->ops->buf_alloc(task,
+					req->rq_callsize + req->rq_rcvsize);
 	if (req->rq_buffer != NULL)
 		return;
 
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6d87320074b1..4a53e94f8134 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -741,50 +741,53 @@ static void rpc_async_schedule(struct work_struct *work)
  * @task: RPC task that will use this buffer
  * @size: requested byte size
  *
- * We try to ensure that some NFS reads and writes can always proceed
- * by using a mempool when allocating 'small' buffers.
+ * To prevent rpciod from hanging, this allocator never sleeps,
+ * returning NULL if the request cannot be serviced immediately.
+ * The caller can arrange to sleep in a way that is safe for rpciod.
+ *
+ * Most requests are 'small' (under 2KiB) and can be serviced from a
+ * mempool, ensuring that NFS reads and writes can always proceed,
+ * and that there is good locality of reference for these buffers.
+ *
  * In order to avoid memory starvation triggering more writebacks of
- * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
+ * NFS requests, we avoid using GFP_KERNEL.
  */
-void * rpc_malloc(struct rpc_task *task, size_t size)
+void *rpc_malloc(struct rpc_task *task, size_t size)
 {
-	struct rpc_rqst *req = task->tk_rqstp;
-	gfp_t gfp;
+	size_t *buf;
+	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
 
-	if (task->tk_flags & RPC_TASK_SWAPPER)
-		gfp = GFP_ATOMIC;
+	size += sizeof(size_t);
+	if (size <= RPC_BUFFER_MAXSIZE)
+		buf = mempool_alloc(rpc_buffer_mempool, gfp);
 	else
-		gfp = GFP_NOFS;
-
-	if (size > RPC_BUFFER_MAXSIZE) {
-		req->rq_buffer = kmalloc(size, gfp);
-		if (req->rq_buffer)
-			req->rq_bufsize = size;
-	} else {
-		req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
-		if (req->rq_buffer)
-			req->rq_bufsize = RPC_BUFFER_MAXSIZE;
-	}
-	return req->rq_buffer;
+		buf = kmalloc(size, gfp);
+	*buf = size;
+	dprintk("RPC: %5u allocated buffer of size %u at %p\n",
+			task->tk_pid, size, buf);
+	return (void *) ++buf;
 }
 
 /**
  * rpc_free - free buffer allocated via rpc_malloc
- * @task: RPC task with a buffer to be freed
+ * @buffer: buffer to free
  *
  */
-void rpc_free(struct rpc_task *task)
+void rpc_free(void *buffer)
 {
-	struct rpc_rqst *req = task->tk_rqstp;
+	size_t size, *buf = (size_t *) buffer;
 
-	if (req->rq_buffer) {
-		if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
-			mempool_free(req->rq_buffer, rpc_buffer_mempool);
-		else
-			kfree(req->rq_buffer);
-		req->rq_buffer = NULL;
-		req->rq_bufsize = 0;
-	}
+	if (!buffer)
+		return;
+	size = *buf;
+	buf--;
+
+	dprintk("RPC: freeing buffer of size %u at %p\n",
+			size, buf);
+	if (size <= RPC_BUFFER_MAXSIZE)
+		mempool_free(buf, rpc_buffer_mempool);
+	else
+		kfree(buf);
 }
 
 /*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 432ee92cf262..81fe830da8aa 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -854,7 +854,7 @@ void xprt_release(struct rpc_task *task)
 	mod_timer(&xprt->timer,
 			xprt->last_used + xprt->idle_timeout);
 	spin_unlock_bh(&xprt->transport_lock);
-	xprt->ops->buf_free(task);
+	xprt->ops->buf_free(req->rq_buffer);
 	task->tk_rqstp = NULL;
 	if (req->rq_release_snd_buf)
 		req->rq_release_snd_buf(req);