diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-27 16:23:02 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-27 16:23:02 -0400 |
commit | 28890d3598c352ae065b560e0fded3e79c800ba1 (patch) | |
tree | 93267c5b29b9e81185e66a6c2e70e67dc626b63f /net | |
parent | 91d41fdf31f74e6e2e5f3cb018eca4200e36e202 (diff) | |
parent | ed1e6211a0a134ff23592c6f057af982ad5dab52 (diff) |
Merge branch 'nfs-for-3.1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
* 'nfs-for-3.1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (44 commits)
NFSv4: Don't use the delegation->inode in nfs_mark_return_delegation()
nfs: don't use d_move in nfs_async_rename_done
RDMA: Increasing RPCRDMA_MAX_DATA_SEGS
SUNRPC: Replace xprt->resend and xprt->sending with a priority queue
SUNRPC: Allow caller of rpc_sleep_on() to select priority levels
SUNRPC: Support dynamic slot allocation for TCP connections
SUNRPC: Clean up the slot table allocation
SUNRPC: Initialise the struct xprt upon allocation
SUNRPC: Ensure that we grab the XPRT_LOCK before calling xprt_alloc_slot
pnfs: simplify pnfs files module autoloading
nfs: document nfsv4 sillyrename issues
NFS: Convert nfs4_set_ds_client to EXPORT_SYMBOL_GPL
SUNRPC: Convert the backchannel exports to EXPORT_SYMBOL_GPL
SUNRPC: sunrpc should not explicitly depend on NFS config options
NFS: Clean up - simplify the switch to read/write-through-MDS
NFS: Move the pnfs write code into pnfs.c
NFS: Move the pnfs read code into pnfs.c
NFS: Allow the nfs_pageio_descriptor to signal that a re-coalesce is needed
NFS: Use the nfs_pageio_descriptor->pg_bsize in the read/write request
NFS: Cache rpc_ops in struct nfs_pageio_descriptor
...
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/Kconfig | 4 | ||||
-rw-r--r-- | net/sunrpc/Makefile | 2 | ||||
-rw-r--r-- | net/sunrpc/backchannel_rqst.c | 7 | ||||
-rw-r--r-- | net/sunrpc/bc_svc.c | 3 | ||||
-rw-r--r-- | net/sunrpc/clnt.c | 15 | ||||
-rw-r--r-- | net/sunrpc/sched.c | 38 | ||||
-rw-r--r-- | net/sunrpc/svc.c | 6 | ||||
-rw-r--r-- | net/sunrpc/svcsock.c | 14 | ||||
-rw-r--r-- | net/sunrpc/xdr.c | 2 | ||||
-rw-r--r-- | net/sunrpc/xprt.c | 257 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/transport.c | 6 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/xprt_rdma.h | 2 | ||||
-rw-r--r-- | net/sunrpc/xprtsock.c | 57 |
13 files changed, 275 insertions, 138 deletions
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index b2198e65d8bb..ffd243d09188 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig | |||
@@ -4,6 +4,10 @@ config SUNRPC | |||
4 | config SUNRPC_GSS | 4 | config SUNRPC_GSS |
5 | tristate | 5 | tristate |
6 | 6 | ||
7 | config SUNRPC_BACKCHANNEL | ||
8 | bool | ||
9 | depends on SUNRPC | ||
10 | |||
7 | config SUNRPC_XPRT_RDMA | 11 | config SUNRPC_XPRT_RDMA |
8 | tristate | 12 | tristate |
9 | depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL | 13 | depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL |
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index 9d2fca5ad14a..8209a0411bca 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile | |||
@@ -13,6 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ | |||
13 | addr.o rpcb_clnt.o timer.o xdr.o \ | 13 | addr.o rpcb_clnt.o timer.o xdr.o \ |
14 | sunrpc_syms.o cache.o rpc_pipe.o \ | 14 | sunrpc_syms.o cache.o rpc_pipe.o \ |
15 | svc_xprt.o | 15 | svc_xprt.o |
16 | sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o | 16 | sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o |
17 | sunrpc-$(CONFIG_PROC_FS) += stats.o | 17 | sunrpc-$(CONFIG_PROC_FS) += stats.o |
18 | sunrpc-$(CONFIG_SYSCTL) += sysctl.o | 18 | sunrpc-$(CONFIG_SYSCTL) += sysctl.o |
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index cf06af3b63c6..91eaa26e4c42 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
@@ -29,8 +29,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
29 | #define RPCDBG_FACILITY RPCDBG_TRANS | 29 | #define RPCDBG_FACILITY RPCDBG_TRANS |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #if defined(CONFIG_NFS_V4_1) | ||
33 | |||
34 | /* | 32 | /* |
35 | * Helper routines that track the number of preallocation elements | 33 | * Helper routines that track the number of preallocation elements |
36 | * on the transport. | 34 | * on the transport. |
@@ -174,7 +172,7 @@ out_free: | |||
174 | dprintk("RPC: setup backchannel transport failed\n"); | 172 | dprintk("RPC: setup backchannel transport failed\n"); |
175 | return -1; | 173 | return -1; |
176 | } | 174 | } |
177 | EXPORT_SYMBOL(xprt_setup_backchannel); | 175 | EXPORT_SYMBOL_GPL(xprt_setup_backchannel); |
178 | 176 | ||
179 | /* | 177 | /* |
180 | * Destroys the backchannel preallocated structures. | 178 | * Destroys the backchannel preallocated structures. |
@@ -204,7 +202,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs) | |||
204 | dprintk("RPC: backchannel list empty= %s\n", | 202 | dprintk("RPC: backchannel list empty= %s\n", |
205 | list_empty(&xprt->bc_pa_list) ? "true" : "false"); | 203 | list_empty(&xprt->bc_pa_list) ? "true" : "false"); |
206 | } | 204 | } |
207 | EXPORT_SYMBOL(xprt_destroy_backchannel); | 205 | EXPORT_SYMBOL_GPL(xprt_destroy_backchannel); |
208 | 206 | ||
209 | /* | 207 | /* |
210 | * One or more rpc_rqst structure have been preallocated during the | 208 | * One or more rpc_rqst structure have been preallocated during the |
@@ -279,4 +277,3 @@ void xprt_free_bc_request(struct rpc_rqst *req) | |||
279 | spin_unlock_bh(&xprt->bc_pa_lock); | 277 | spin_unlock_bh(&xprt->bc_pa_lock); |
280 | } | 278 | } |
281 | 279 | ||
282 | #endif /* CONFIG_NFS_V4_1 */ | ||
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c index 1dd1a6890007..0b2eb388cbda 100644 --- a/net/sunrpc/bc_svc.c +++ b/net/sunrpc/bc_svc.c | |||
@@ -27,8 +27,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
27 | * reply over an existing open connection previously established by the client. | 27 | * reply over an existing open connection previously established by the client. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #if defined(CONFIG_NFS_V4_1) | ||
31 | |||
32 | #include <linux/module.h> | 30 | #include <linux/module.h> |
33 | 31 | ||
34 | #include <linux/sunrpc/xprt.h> | 32 | #include <linux/sunrpc/xprt.h> |
@@ -63,4 +61,3 @@ int bc_send(struct rpc_rqst *req) | |||
63 | return ret; | 61 | return ret; |
64 | } | 62 | } |
65 | 63 | ||
66 | #endif /* CONFIG_NFS_V4_1 */ | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index c50818f0473b..c5347d29cfb7 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -64,9 +64,9 @@ static void call_decode(struct rpc_task *task); | |||
64 | static void call_bind(struct rpc_task *task); | 64 | static void call_bind(struct rpc_task *task); |
65 | static void call_bind_status(struct rpc_task *task); | 65 | static void call_bind_status(struct rpc_task *task); |
66 | static void call_transmit(struct rpc_task *task); | 66 | static void call_transmit(struct rpc_task *task); |
67 | #if defined(CONFIG_NFS_V4_1) | 67 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
68 | static void call_bc_transmit(struct rpc_task *task); | 68 | static void call_bc_transmit(struct rpc_task *task); |
69 | #endif /* CONFIG_NFS_V4_1 */ | 69 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
70 | static void call_status(struct rpc_task *task); | 70 | static void call_status(struct rpc_task *task); |
71 | static void call_transmit_status(struct rpc_task *task); | 71 | static void call_transmit_status(struct rpc_task *task); |
72 | static void call_refresh(struct rpc_task *task); | 72 | static void call_refresh(struct rpc_task *task); |
@@ -715,7 +715,7 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, | |||
715 | } | 715 | } |
716 | EXPORT_SYMBOL_GPL(rpc_call_async); | 716 | EXPORT_SYMBOL_GPL(rpc_call_async); |
717 | 717 | ||
718 | #if defined(CONFIG_NFS_V4_1) | 718 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
719 | /** | 719 | /** |
720 | * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run | 720 | * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run |
721 | * rpc_execute against it | 721 | * rpc_execute against it |
@@ -758,7 +758,7 @@ out: | |||
758 | dprintk("RPC: rpc_run_bc_task: task= %p\n", task); | 758 | dprintk("RPC: rpc_run_bc_task: task= %p\n", task); |
759 | return task; | 759 | return task; |
760 | } | 760 | } |
761 | #endif /* CONFIG_NFS_V4_1 */ | 761 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
762 | 762 | ||
763 | void | 763 | void |
764 | rpc_call_start(struct rpc_task *task) | 764 | rpc_call_start(struct rpc_task *task) |
@@ -1361,7 +1361,7 @@ call_transmit_status(struct rpc_task *task) | |||
1361 | } | 1361 | } |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | #if defined(CONFIG_NFS_V4_1) | 1364 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
1365 | /* | 1365 | /* |
1366 | * 5b. Send the backchannel RPC reply. On error, drop the reply. In | 1366 | * 5b. Send the backchannel RPC reply. On error, drop the reply. In |
1367 | * addition, disconnect on connectivity errors. | 1367 | * addition, disconnect on connectivity errors. |
@@ -1425,7 +1425,7 @@ call_bc_transmit(struct rpc_task *task) | |||
1425 | } | 1425 | } |
1426 | rpc_wake_up_queued_task(&req->rq_xprt->pending, task); | 1426 | rpc_wake_up_queued_task(&req->rq_xprt->pending, task); |
1427 | } | 1427 | } |
1428 | #endif /* CONFIG_NFS_V4_1 */ | 1428 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
1429 | 1429 | ||
1430 | /* | 1430 | /* |
1431 | * 6. Sort out the RPC call status | 1431 | * 6. Sort out the RPC call status |
@@ -1550,8 +1550,7 @@ call_decode(struct rpc_task *task) | |||
1550 | kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; | 1550 | kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; |
1551 | __be32 *p; | 1551 | __be32 *p; |
1552 | 1552 | ||
1553 | dprintk("RPC: %5u call_decode (status %d)\n", | 1553 | dprint_status(task); |
1554 | task->tk_pid, task->tk_status); | ||
1555 | 1554 | ||
1556 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { | 1555 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { |
1557 | if (clnt->cl_chatty) | 1556 | if (clnt->cl_chatty) |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 4814e246a874..d12ffa545811 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -97,14 +97,16 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) | |||
97 | /* | 97 | /* |
98 | * Add new request to a priority queue. | 98 | * Add new request to a priority queue. |
99 | */ | 99 | */ |
100 | static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task) | 100 | static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, |
101 | struct rpc_task *task, | ||
102 | unsigned char queue_priority) | ||
101 | { | 103 | { |
102 | struct list_head *q; | 104 | struct list_head *q; |
103 | struct rpc_task *t; | 105 | struct rpc_task *t; |
104 | 106 | ||
105 | INIT_LIST_HEAD(&task->u.tk_wait.links); | 107 | INIT_LIST_HEAD(&task->u.tk_wait.links); |
106 | q = &queue->tasks[task->tk_priority]; | 108 | q = &queue->tasks[queue_priority]; |
107 | if (unlikely(task->tk_priority > queue->maxpriority)) | 109 | if (unlikely(queue_priority > queue->maxpriority)) |
108 | q = &queue->tasks[queue->maxpriority]; | 110 | q = &queue->tasks[queue->maxpriority]; |
109 | list_for_each_entry(t, q, u.tk_wait.list) { | 111 | list_for_each_entry(t, q, u.tk_wait.list) { |
110 | if (t->tk_owner == task->tk_owner) { | 112 | if (t->tk_owner == task->tk_owner) { |
@@ -123,12 +125,14 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct r | |||
123 | * improve overall performance. | 125 | * improve overall performance. |
124 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. | 126 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. |
125 | */ | 127 | */ |
126 | static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) | 128 | static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, |
129 | struct rpc_task *task, | ||
130 | unsigned char queue_priority) | ||
127 | { | 131 | { |
128 | BUG_ON (RPC_IS_QUEUED(task)); | 132 | BUG_ON (RPC_IS_QUEUED(task)); |
129 | 133 | ||
130 | if (RPC_IS_PRIORITY(queue)) | 134 | if (RPC_IS_PRIORITY(queue)) |
131 | __rpc_add_wait_queue_priority(queue, task); | 135 | __rpc_add_wait_queue_priority(queue, task, queue_priority); |
132 | else if (RPC_IS_SWAPPER(task)) | 136 | else if (RPC_IS_SWAPPER(task)) |
133 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); | 137 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); |
134 | else | 138 | else |
@@ -311,13 +315,15 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
311 | * NB: An RPC task will only receive interrupt-driven events as long | 315 | * NB: An RPC task will only receive interrupt-driven events as long |
312 | * as it's on a wait queue. | 316 | * as it's on a wait queue. |
313 | */ | 317 | */ |
314 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 318 | static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, |
315 | rpc_action action) | 319 | struct rpc_task *task, |
320 | rpc_action action, | ||
321 | unsigned char queue_priority) | ||
316 | { | 322 | { |
317 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", | 323 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", |
318 | task->tk_pid, rpc_qname(q), jiffies); | 324 | task->tk_pid, rpc_qname(q), jiffies); |
319 | 325 | ||
320 | __rpc_add_wait_queue(q, task); | 326 | __rpc_add_wait_queue(q, task, queue_priority); |
321 | 327 | ||
322 | BUG_ON(task->tk_callback != NULL); | 328 | BUG_ON(task->tk_callback != NULL); |
323 | task->tk_callback = action; | 329 | task->tk_callback = action; |
@@ -334,11 +340,25 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
334 | * Protect the queue operations. | 340 | * Protect the queue operations. |
335 | */ | 341 | */ |
336 | spin_lock_bh(&q->lock); | 342 | spin_lock_bh(&q->lock); |
337 | __rpc_sleep_on(q, task, action); | 343 | __rpc_sleep_on_priority(q, task, action, task->tk_priority); |
338 | spin_unlock_bh(&q->lock); | 344 | spin_unlock_bh(&q->lock); |
339 | } | 345 | } |
340 | EXPORT_SYMBOL_GPL(rpc_sleep_on); | 346 | EXPORT_SYMBOL_GPL(rpc_sleep_on); |
341 | 347 | ||
348 | void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, | ||
349 | rpc_action action, int priority) | ||
350 | { | ||
351 | /* We shouldn't ever put an inactive task to sleep */ | ||
352 | BUG_ON(!RPC_IS_ACTIVATED(task)); | ||
353 | |||
354 | /* | ||
355 | * Protect the queue operations. | ||
356 | */ | ||
357 | spin_lock_bh(&q->lock); | ||
358 | __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); | ||
359 | spin_unlock_bh(&q->lock); | ||
360 | } | ||
361 | |||
342 | /** | 362 | /** |
343 | * __rpc_do_wake_up_task - wake up a single rpc_task | 363 | * __rpc_do_wake_up_task - wake up a single rpc_task |
344 | * @queue: wait queue | 364 | * @queue: wait queue |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 2b90292e9505..6a69a1131fb7 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -1252,7 +1252,7 @@ svc_process(struct svc_rqst *rqstp) | |||
1252 | } | 1252 | } |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | #if defined(CONFIG_NFS_V4_1) | 1255 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
1256 | /* | 1256 | /* |
1257 | * Process a backchannel RPC request that arrived over an existing | 1257 | * Process a backchannel RPC request that arrived over an existing |
1258 | * outbound connection | 1258 | * outbound connection |
@@ -1300,8 +1300,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
1300 | return 0; | 1300 | return 0; |
1301 | } | 1301 | } |
1302 | } | 1302 | } |
1303 | EXPORT_SYMBOL(bc_svc_process); | 1303 | EXPORT_SYMBOL_GPL(bc_svc_process); |
1304 | #endif /* CONFIG_NFS_V4_1 */ | 1304 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
1305 | 1305 | ||
1306 | /* | 1306 | /* |
1307 | * Return (transport-specific) limit on the rpc payload. | 1307 | * Return (transport-specific) limit on the rpc payload. |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index f2cb5b881dea..767d494de7a2 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -68,12 +68,12 @@ static void svc_sock_free(struct svc_xprt *); | |||
68 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, | 68 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, |
69 | struct net *, struct sockaddr *, | 69 | struct net *, struct sockaddr *, |
70 | int, int); | 70 | int, int); |
71 | #if defined(CONFIG_NFS_V4_1) | 71 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
72 | static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, | 72 | static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, |
73 | struct net *, struct sockaddr *, | 73 | struct net *, struct sockaddr *, |
74 | int, int); | 74 | int, int); |
75 | static void svc_bc_sock_free(struct svc_xprt *xprt); | 75 | static void svc_bc_sock_free(struct svc_xprt *xprt); |
76 | #endif /* CONFIG_NFS_V4_1 */ | 76 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
77 | 77 | ||
78 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 78 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
79 | static struct lock_class_key svc_key[2]; | 79 | static struct lock_class_key svc_key[2]; |
@@ -1243,7 +1243,7 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, | |||
1243 | return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); | 1243 | return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | #if defined(CONFIG_NFS_V4_1) | 1246 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
1247 | static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, | 1247 | static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, |
1248 | struct net *, struct sockaddr *, | 1248 | struct net *, struct sockaddr *, |
1249 | int, int); | 1249 | int, int); |
@@ -1284,7 +1284,7 @@ static void svc_cleanup_bc_xprt_sock(void) | |||
1284 | { | 1284 | { |
1285 | svc_unreg_xprt_class(&svc_tcp_bc_class); | 1285 | svc_unreg_xprt_class(&svc_tcp_bc_class); |
1286 | } | 1286 | } |
1287 | #else /* CONFIG_NFS_V4_1 */ | 1287 | #else /* CONFIG_SUNRPC_BACKCHANNEL */ |
1288 | static void svc_init_bc_xprt_sock(void) | 1288 | static void svc_init_bc_xprt_sock(void) |
1289 | { | 1289 | { |
1290 | } | 1290 | } |
@@ -1292,7 +1292,7 @@ static void svc_init_bc_xprt_sock(void) | |||
1292 | static void svc_cleanup_bc_xprt_sock(void) | 1292 | static void svc_cleanup_bc_xprt_sock(void) |
1293 | { | 1293 | { |
1294 | } | 1294 | } |
1295 | #endif /* CONFIG_NFS_V4_1 */ | 1295 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
1296 | 1296 | ||
1297 | static struct svc_xprt_ops svc_tcp_ops = { | 1297 | static struct svc_xprt_ops svc_tcp_ops = { |
1298 | .xpo_create = svc_tcp_create, | 1298 | .xpo_create = svc_tcp_create, |
@@ -1623,7 +1623,7 @@ static void svc_sock_free(struct svc_xprt *xprt) | |||
1623 | kfree(svsk); | 1623 | kfree(svsk); |
1624 | } | 1624 | } |
1625 | 1625 | ||
1626 | #if defined(CONFIG_NFS_V4_1) | 1626 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
1627 | /* | 1627 | /* |
1628 | * Create a back channel svc_xprt which shares the fore channel socket. | 1628 | * Create a back channel svc_xprt which shares the fore channel socket. |
1629 | */ | 1629 | */ |
@@ -1662,4 +1662,4 @@ static void svc_bc_sock_free(struct svc_xprt *xprt) | |||
1662 | if (xprt) | 1662 | if (xprt) |
1663 | kfree(container_of(xprt, struct svc_sock, sk_xprt)); | 1663 | kfree(container_of(xprt, struct svc_sock, sk_xprt)); |
1664 | } | 1664 | } |
1665 | #endif /* CONFIG_NFS_V4_1 */ | 1665 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index f008c14ad34c..277ebd4bf095 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -126,7 +126,7 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len) | |||
126 | kaddr[buf->page_base + len] = '\0'; | 126 | kaddr[buf->page_base + len] = '\0'; |
127 | kunmap_atomic(kaddr, KM_USER0); | 127 | kunmap_atomic(kaddr, KM_USER0); |
128 | } | 128 | } |
129 | EXPORT_SYMBOL(xdr_terminate_string); | 129 | EXPORT_SYMBOL_GPL(xdr_terminate_string); |
130 | 130 | ||
131 | void | 131 | void |
132 | xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, | 132 | xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index ce5eb68a9664..9b6a4d1ea8f8 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -62,6 +62,7 @@ | |||
62 | /* | 62 | /* |
63 | * Local functions | 63 | * Local functions |
64 | */ | 64 | */ |
65 | static void xprt_init(struct rpc_xprt *xprt, struct net *net); | ||
65 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); | 66 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); |
66 | static void xprt_connect_status(struct rpc_task *task); | 67 | static void xprt_connect_status(struct rpc_task *task); |
67 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); | 68 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); |
@@ -191,10 +192,10 @@ EXPORT_SYMBOL_GPL(xprt_load_transport); | |||
191 | * transport connects from colliding with writes. No congestion control | 192 | * transport connects from colliding with writes. No congestion control |
192 | * is provided. | 193 | * is provided. |
193 | */ | 194 | */ |
194 | int xprt_reserve_xprt(struct rpc_task *task) | 195 | int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) |
195 | { | 196 | { |
196 | struct rpc_rqst *req = task->tk_rqstp; | 197 | struct rpc_rqst *req = task->tk_rqstp; |
197 | struct rpc_xprt *xprt = req->rq_xprt; | 198 | int priority; |
198 | 199 | ||
199 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | 200 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
200 | if (task == xprt->snd_task) | 201 | if (task == xprt->snd_task) |
@@ -202,8 +203,10 @@ int xprt_reserve_xprt(struct rpc_task *task) | |||
202 | goto out_sleep; | 203 | goto out_sleep; |
203 | } | 204 | } |
204 | xprt->snd_task = task; | 205 | xprt->snd_task = task; |
205 | req->rq_bytes_sent = 0; | 206 | if (req != NULL) { |
206 | req->rq_ntrans++; | 207 | req->rq_bytes_sent = 0; |
208 | req->rq_ntrans++; | ||
209 | } | ||
207 | 210 | ||
208 | return 1; | 211 | return 1; |
209 | 212 | ||
@@ -212,10 +215,13 @@ out_sleep: | |||
212 | task->tk_pid, xprt); | 215 | task->tk_pid, xprt); |
213 | task->tk_timeout = 0; | 216 | task->tk_timeout = 0; |
214 | task->tk_status = -EAGAIN; | 217 | task->tk_status = -EAGAIN; |
215 | if (req->rq_ntrans) | 218 | if (req == NULL) |
216 | rpc_sleep_on(&xprt->resend, task, NULL); | 219 | priority = RPC_PRIORITY_LOW; |
220 | else if (!req->rq_ntrans) | ||
221 | priority = RPC_PRIORITY_NORMAL; | ||
217 | else | 222 | else |
218 | rpc_sleep_on(&xprt->sending, task, NULL); | 223 | priority = RPC_PRIORITY_HIGH; |
224 | rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); | ||
219 | return 0; | 225 | return 0; |
220 | } | 226 | } |
221 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt); | 227 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt); |
@@ -239,22 +245,24 @@ static void xprt_clear_locked(struct rpc_xprt *xprt) | |||
239 | * integrated into the decision of whether a request is allowed to be | 245 | * integrated into the decision of whether a request is allowed to be |
240 | * woken up and given access to the transport. | 246 | * woken up and given access to the transport. |
241 | */ | 247 | */ |
242 | int xprt_reserve_xprt_cong(struct rpc_task *task) | 248 | int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) |
243 | { | 249 | { |
244 | struct rpc_xprt *xprt = task->tk_xprt; | ||
245 | struct rpc_rqst *req = task->tk_rqstp; | 250 | struct rpc_rqst *req = task->tk_rqstp; |
251 | int priority; | ||
246 | 252 | ||
247 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | 253 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
248 | if (task == xprt->snd_task) | 254 | if (task == xprt->snd_task) |
249 | return 1; | 255 | return 1; |
250 | goto out_sleep; | 256 | goto out_sleep; |
251 | } | 257 | } |
258 | if (req == NULL) { | ||
259 | xprt->snd_task = task; | ||
260 | return 1; | ||
261 | } | ||
252 | if (__xprt_get_cong(xprt, task)) { | 262 | if (__xprt_get_cong(xprt, task)) { |
253 | xprt->snd_task = task; | 263 | xprt->snd_task = task; |
254 | if (req) { | 264 | req->rq_bytes_sent = 0; |
255 | req->rq_bytes_sent = 0; | 265 | req->rq_ntrans++; |
256 | req->rq_ntrans++; | ||
257 | } | ||
258 | return 1; | 266 | return 1; |
259 | } | 267 | } |
260 | xprt_clear_locked(xprt); | 268 | xprt_clear_locked(xprt); |
@@ -262,10 +270,13 @@ out_sleep: | |||
262 | dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); | 270 | dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); |
263 | task->tk_timeout = 0; | 271 | task->tk_timeout = 0; |
264 | task->tk_status = -EAGAIN; | 272 | task->tk_status = -EAGAIN; |
265 | if (req && req->rq_ntrans) | 273 | if (req == NULL) |
266 | rpc_sleep_on(&xprt->resend, task, NULL); | 274 | priority = RPC_PRIORITY_LOW; |
275 | else if (!req->rq_ntrans) | ||
276 | priority = RPC_PRIORITY_NORMAL; | ||
267 | else | 277 | else |
268 | rpc_sleep_on(&xprt->sending, task, NULL); | 278 | priority = RPC_PRIORITY_HIGH; |
279 | rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); | ||
269 | return 0; | 280 | return 0; |
270 | } | 281 | } |
271 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); | 282 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); |
@@ -275,7 +286,7 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) | |||
275 | int retval; | 286 | int retval; |
276 | 287 | ||
277 | spin_lock_bh(&xprt->transport_lock); | 288 | spin_lock_bh(&xprt->transport_lock); |
278 | retval = xprt->ops->reserve_xprt(task); | 289 | retval = xprt->ops->reserve_xprt(xprt, task); |
279 | spin_unlock_bh(&xprt->transport_lock); | 290 | spin_unlock_bh(&xprt->transport_lock); |
280 | return retval; | 291 | return retval; |
281 | } | 292 | } |
@@ -288,12 +299,9 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt) | |||
288 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 299 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
289 | return; | 300 | return; |
290 | 301 | ||
291 | task = rpc_wake_up_next(&xprt->resend); | 302 | task = rpc_wake_up_next(&xprt->sending); |
292 | if (!task) { | 303 | if (task == NULL) |
293 | task = rpc_wake_up_next(&xprt->sending); | 304 | goto out_unlock; |
294 | if (!task) | ||
295 | goto out_unlock; | ||
296 | } | ||
297 | 305 | ||
298 | req = task->tk_rqstp; | 306 | req = task->tk_rqstp; |
299 | xprt->snd_task = task; | 307 | xprt->snd_task = task; |
@@ -310,24 +318,25 @@ out_unlock: | |||
310 | static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) | 318 | static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) |
311 | { | 319 | { |
312 | struct rpc_task *task; | 320 | struct rpc_task *task; |
321 | struct rpc_rqst *req; | ||
313 | 322 | ||
314 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 323 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
315 | return; | 324 | return; |
316 | if (RPCXPRT_CONGESTED(xprt)) | 325 | if (RPCXPRT_CONGESTED(xprt)) |
317 | goto out_unlock; | 326 | goto out_unlock; |
318 | task = rpc_wake_up_next(&xprt->resend); | 327 | task = rpc_wake_up_next(&xprt->sending); |
319 | if (!task) { | 328 | if (task == NULL) |
320 | task = rpc_wake_up_next(&xprt->sending); | 329 | goto out_unlock; |
321 | if (!task) | 330 | |
322 | goto out_unlock; | 331 | req = task->tk_rqstp; |
332 | if (req == NULL) { | ||
333 | xprt->snd_task = task; | ||
334 | return; | ||
323 | } | 335 | } |
324 | if (__xprt_get_cong(xprt, task)) { | 336 | if (__xprt_get_cong(xprt, task)) { |
325 | struct rpc_rqst *req = task->tk_rqstp; | ||
326 | xprt->snd_task = task; | 337 | xprt->snd_task = task; |
327 | if (req) { | 338 | req->rq_bytes_sent = 0; |
328 | req->rq_bytes_sent = 0; | 339 | req->rq_ntrans++; |
329 | req->rq_ntrans++; | ||
330 | } | ||
331 | return; | 340 | return; |
332 | } | 341 | } |
333 | out_unlock: | 342 | out_unlock: |
@@ -852,7 +861,7 @@ int xprt_prepare_transmit(struct rpc_task *task) | |||
852 | err = req->rq_reply_bytes_recvd; | 861 | err = req->rq_reply_bytes_recvd; |
853 | goto out_unlock; | 862 | goto out_unlock; |
854 | } | 863 | } |
855 | if (!xprt->ops->reserve_xprt(task)) | 864 | if (!xprt->ops->reserve_xprt(xprt, task)) |
856 | err = -EAGAIN; | 865 | err = -EAGAIN; |
857 | out_unlock: | 866 | out_unlock: |
858 | spin_unlock_bh(&xprt->transport_lock); | 867 | spin_unlock_bh(&xprt->transport_lock); |
@@ -928,28 +937,66 @@ void xprt_transmit(struct rpc_task *task) | |||
928 | spin_unlock_bh(&xprt->transport_lock); | 937 | spin_unlock_bh(&xprt->transport_lock); |
929 | } | 938 | } |
930 | 939 | ||
940 | static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags) | ||
941 | { | ||
942 | struct rpc_rqst *req = ERR_PTR(-EAGAIN); | ||
943 | |||
944 | if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs)) | ||
945 | goto out; | ||
946 | req = kzalloc(sizeof(struct rpc_rqst), gfp_flags); | ||
947 | if (req != NULL) | ||
948 | goto out; | ||
949 | atomic_dec(&xprt->num_reqs); | ||
950 | req = ERR_PTR(-ENOMEM); | ||
951 | out: | ||
952 | return req; | ||
953 | } | ||
954 | |||
955 | static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | ||
956 | { | ||
957 | if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) { | ||
958 | kfree(req); | ||
959 | return true; | ||
960 | } | ||
961 | return false; | ||
962 | } | ||
963 | |||
931 | static void xprt_alloc_slot(struct rpc_task *task) | 964 | static void xprt_alloc_slot(struct rpc_task *task) |
932 | { | 965 | { |
933 | struct rpc_xprt *xprt = task->tk_xprt; | 966 | struct rpc_xprt *xprt = task->tk_xprt; |
967 | struct rpc_rqst *req; | ||
934 | 968 | ||
935 | task->tk_status = 0; | ||
936 | if (task->tk_rqstp) | ||
937 | return; | ||
938 | if (!list_empty(&xprt->free)) { | 969 | if (!list_empty(&xprt->free)) { |
939 | struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); | 970 | req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); |
940 | list_del_init(&req->rq_list); | 971 | list_del(&req->rq_list); |
941 | task->tk_rqstp = req; | 972 | goto out_init_req; |
942 | xprt_request_init(task, xprt); | 973 | } |
943 | return; | 974 | req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT); |
975 | if (!IS_ERR(req)) | ||
976 | goto out_init_req; | ||
977 | switch (PTR_ERR(req)) { | ||
978 | case -ENOMEM: | ||
979 | rpc_delay(task, HZ >> 2); | ||
980 | dprintk("RPC: dynamic allocation of request slot " | ||
981 | "failed! Retrying\n"); | ||
982 | break; | ||
983 | case -EAGAIN: | ||
984 | rpc_sleep_on(&xprt->backlog, task, NULL); | ||
985 | dprintk("RPC: waiting for request slot\n"); | ||
944 | } | 986 | } |
945 | dprintk("RPC: waiting for request slot\n"); | ||
946 | task->tk_status = -EAGAIN; | 987 | task->tk_status = -EAGAIN; |
947 | task->tk_timeout = 0; | 988 | return; |
948 | rpc_sleep_on(&xprt->backlog, task, NULL); | 989 | out_init_req: |
990 | task->tk_status = 0; | ||
991 | task->tk_rqstp = req; | ||
992 | xprt_request_init(task, xprt); | ||
949 | } | 993 | } |
950 | 994 | ||
951 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | 995 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) |
952 | { | 996 | { |
997 | if (xprt_dynamic_free_slot(xprt, req)) | ||
998 | return; | ||
999 | |||
953 | memset(req, 0, sizeof(*req)); /* mark unused */ | 1000 | memset(req, 0, sizeof(*req)); /* mark unused */ |
954 | 1001 | ||
955 | spin_lock(&xprt->reserve_lock); | 1002 | spin_lock(&xprt->reserve_lock); |
@@ -958,25 +1005,49 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | |||
958 | spin_unlock(&xprt->reserve_lock); | 1005 | spin_unlock(&xprt->reserve_lock); |
959 | } | 1006 | } |
960 | 1007 | ||
961 | struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req) | 1008 | static void xprt_free_all_slots(struct rpc_xprt *xprt) |
1009 | { | ||
1010 | struct rpc_rqst *req; | ||
1011 | while (!list_empty(&xprt->free)) { | ||
1012 | req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); | ||
1013 | list_del(&req->rq_list); | ||
1014 | kfree(req); | ||
1015 | } | ||
1016 | } | ||
1017 | |||
1018 | struct rpc_xprt *xprt_alloc(struct net *net, size_t size, | ||
1019 | unsigned int num_prealloc, | ||
1020 | unsigned int max_alloc) | ||
962 | { | 1021 | { |
963 | struct rpc_xprt *xprt; | 1022 | struct rpc_xprt *xprt; |
1023 | struct rpc_rqst *req; | ||
1024 | int i; | ||
964 | 1025 | ||
965 | xprt = kzalloc(size, GFP_KERNEL); | 1026 | xprt = kzalloc(size, GFP_KERNEL); |
966 | if (xprt == NULL) | 1027 | if (xprt == NULL) |
967 | goto out; | 1028 | goto out; |
968 | atomic_set(&xprt->count, 1); | ||
969 | 1029 | ||
970 | xprt->max_reqs = max_req; | 1030 | xprt_init(xprt, net); |
971 | xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); | 1031 | |
972 | if (xprt->slot == NULL) | 1032 | for (i = 0; i < num_prealloc; i++) { |
1033 | req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); | ||
1034 | if (!req) | ||
1035 | break; | ||
1036 | list_add(&req->rq_list, &xprt->free); | ||
1037 | } | ||
1038 | if (i < num_prealloc) | ||
973 | goto out_free; | 1039 | goto out_free; |
1040 | if (max_alloc > num_prealloc) | ||
1041 | xprt->max_reqs = max_alloc; | ||
1042 | else | ||
1043 | xprt->max_reqs = num_prealloc; | ||
1044 | xprt->min_reqs = num_prealloc; | ||
1045 | atomic_set(&xprt->num_reqs, num_prealloc); | ||
974 | 1046 | ||
975 | xprt->xprt_net = get_net(net); | ||
976 | return xprt; | 1047 | return xprt; |
977 | 1048 | ||
978 | out_free: | 1049 | out_free: |
979 | kfree(xprt); | 1050 | xprt_free(xprt); |
980 | out: | 1051 | out: |
981 | return NULL; | 1052 | return NULL; |
982 | } | 1053 | } |
@@ -985,7 +1056,7 @@ EXPORT_SYMBOL_GPL(xprt_alloc); | |||
985 | void xprt_free(struct rpc_xprt *xprt) | 1056 | void xprt_free(struct rpc_xprt *xprt) |
986 | { | 1057 | { |
987 | put_net(xprt->xprt_net); | 1058 | put_net(xprt->xprt_net); |
988 | kfree(xprt->slot); | 1059 | xprt_free_all_slots(xprt); |
989 | kfree(xprt); | 1060 | kfree(xprt); |
990 | } | 1061 | } |
991 | EXPORT_SYMBOL_GPL(xprt_free); | 1062 | EXPORT_SYMBOL_GPL(xprt_free); |
@@ -1001,10 +1072,24 @@ void xprt_reserve(struct rpc_task *task) | |||
1001 | { | 1072 | { |
1002 | struct rpc_xprt *xprt = task->tk_xprt; | 1073 | struct rpc_xprt *xprt = task->tk_xprt; |
1003 | 1074 | ||
1004 | task->tk_status = -EIO; | 1075 | task->tk_status = 0; |
1076 | if (task->tk_rqstp != NULL) | ||
1077 | return; | ||
1078 | |||
1079 | /* Note: grabbing the xprt_lock_write() here is not strictly needed, | ||
1080 | * but ensures that we throttle new slot allocation if the transport | ||
1081 | * is congested (e.g. if reconnecting or if we're out of socket | ||
1082 | * write buffer space). | ||
1083 | */ | ||
1084 | task->tk_timeout = 0; | ||
1085 | task->tk_status = -EAGAIN; | ||
1086 | if (!xprt_lock_write(xprt, task)) | ||
1087 | return; | ||
1088 | |||
1005 | spin_lock(&xprt->reserve_lock); | 1089 | spin_lock(&xprt->reserve_lock); |
1006 | xprt_alloc_slot(task); | 1090 | xprt_alloc_slot(task); |
1007 | spin_unlock(&xprt->reserve_lock); | 1091 | spin_unlock(&xprt->reserve_lock); |
1092 | xprt_release_write(xprt, task); | ||
1008 | } | 1093 | } |
1009 | 1094 | ||
1010 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) | 1095 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) |
@@ -1021,6 +1106,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | |||
1021 | { | 1106 | { |
1022 | struct rpc_rqst *req = task->tk_rqstp; | 1107 | struct rpc_rqst *req = task->tk_rqstp; |
1023 | 1108 | ||
1109 | INIT_LIST_HEAD(&req->rq_list); | ||
1024 | req->rq_timeout = task->tk_client->cl_timeout->to_initval; | 1110 | req->rq_timeout = task->tk_client->cl_timeout->to_initval; |
1025 | req->rq_task = task; | 1111 | req->rq_task = task; |
1026 | req->rq_xprt = xprt; | 1112 | req->rq_xprt = xprt; |
@@ -1073,6 +1159,34 @@ void xprt_release(struct rpc_task *task) | |||
1073 | xprt_free_bc_request(req); | 1159 | xprt_free_bc_request(req); |
1074 | } | 1160 | } |
1075 | 1161 | ||
1162 | static void xprt_init(struct rpc_xprt *xprt, struct net *net) | ||
1163 | { | ||
1164 | atomic_set(&xprt->count, 1); | ||
1165 | |||
1166 | spin_lock_init(&xprt->transport_lock); | ||
1167 | spin_lock_init(&xprt->reserve_lock); | ||
1168 | |||
1169 | INIT_LIST_HEAD(&xprt->free); | ||
1170 | INIT_LIST_HEAD(&xprt->recv); | ||
1171 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | ||
1172 | spin_lock_init(&xprt->bc_pa_lock); | ||
1173 | INIT_LIST_HEAD(&xprt->bc_pa_list); | ||
1174 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ | ||
1175 | |||
1176 | xprt->last_used = jiffies; | ||
1177 | xprt->cwnd = RPC_INITCWND; | ||
1178 | xprt->bind_index = 0; | ||
1179 | |||
1180 | rpc_init_wait_queue(&xprt->binding, "xprt_binding"); | ||
1181 | rpc_init_wait_queue(&xprt->pending, "xprt_pending"); | ||
1182 | rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending"); | ||
1183 | rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); | ||
1184 | |||
1185 | xprt_init_xid(xprt); | ||
1186 | |||
1187 | xprt->xprt_net = get_net(net); | ||
1188 | } | ||
1189 | |||
1076 | /** | 1190 | /** |
1077 | * xprt_create_transport - create an RPC transport | 1191 | * xprt_create_transport - create an RPC transport |
1078 | * @args: rpc transport creation arguments | 1192 | * @args: rpc transport creation arguments |
@@ -1081,7 +1195,6 @@ void xprt_release(struct rpc_task *task) | |||
1081 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | 1195 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) |
1082 | { | 1196 | { |
1083 | struct rpc_xprt *xprt; | 1197 | struct rpc_xprt *xprt; |
1084 | struct rpc_rqst *req; | ||
1085 | struct xprt_class *t; | 1198 | struct xprt_class *t; |
1086 | 1199 | ||
1087 | spin_lock(&xprt_list_lock); | 1200 | spin_lock(&xprt_list_lock); |
@@ -1100,46 +1213,17 @@ found: | |||
1100 | if (IS_ERR(xprt)) { | 1213 | if (IS_ERR(xprt)) { |
1101 | dprintk("RPC: xprt_create_transport: failed, %ld\n", | 1214 | dprintk("RPC: xprt_create_transport: failed, %ld\n", |
1102 | -PTR_ERR(xprt)); | 1215 | -PTR_ERR(xprt)); |
1103 | return xprt; | 1216 | goto out; |
1104 | } | 1217 | } |
1105 | if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state)) | ||
1106 | /* ->setup returned a pre-initialized xprt: */ | ||
1107 | return xprt; | ||
1108 | |||
1109 | spin_lock_init(&xprt->transport_lock); | ||
1110 | spin_lock_init(&xprt->reserve_lock); | ||
1111 | |||
1112 | INIT_LIST_HEAD(&xprt->free); | ||
1113 | INIT_LIST_HEAD(&xprt->recv); | ||
1114 | #if defined(CONFIG_NFS_V4_1) | ||
1115 | spin_lock_init(&xprt->bc_pa_lock); | ||
1116 | INIT_LIST_HEAD(&xprt->bc_pa_list); | ||
1117 | #endif /* CONFIG_NFS_V4_1 */ | ||
1118 | |||
1119 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); | 1218 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
1120 | if (xprt_has_timer(xprt)) | 1219 | if (xprt_has_timer(xprt)) |
1121 | setup_timer(&xprt->timer, xprt_init_autodisconnect, | 1220 | setup_timer(&xprt->timer, xprt_init_autodisconnect, |
1122 | (unsigned long)xprt); | 1221 | (unsigned long)xprt); |
1123 | else | 1222 | else |
1124 | init_timer(&xprt->timer); | 1223 | init_timer(&xprt->timer); |
1125 | xprt->last_used = jiffies; | ||
1126 | xprt->cwnd = RPC_INITCWND; | ||
1127 | xprt->bind_index = 0; | ||
1128 | |||
1129 | rpc_init_wait_queue(&xprt->binding, "xprt_binding"); | ||
1130 | rpc_init_wait_queue(&xprt->pending, "xprt_pending"); | ||
1131 | rpc_init_wait_queue(&xprt->sending, "xprt_sending"); | ||
1132 | rpc_init_wait_queue(&xprt->resend, "xprt_resend"); | ||
1133 | rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); | ||
1134 | |||
1135 | /* initialize free list */ | ||
1136 | for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--) | ||
1137 | list_add(&req->rq_list, &xprt->free); | ||
1138 | |||
1139 | xprt_init_xid(xprt); | ||
1140 | |||
1141 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 1224 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
1142 | xprt->max_reqs); | 1225 | xprt->max_reqs); |
1226 | out: | ||
1143 | return xprt; | 1227 | return xprt; |
1144 | } | 1228 | } |
1145 | 1229 | ||
@@ -1157,7 +1241,6 @@ static void xprt_destroy(struct rpc_xprt *xprt) | |||
1157 | rpc_destroy_wait_queue(&xprt->binding); | 1241 | rpc_destroy_wait_queue(&xprt->binding); |
1158 | rpc_destroy_wait_queue(&xprt->pending); | 1242 | rpc_destroy_wait_queue(&xprt->pending); |
1159 | rpc_destroy_wait_queue(&xprt->sending); | 1243 | rpc_destroy_wait_queue(&xprt->sending); |
1160 | rpc_destroy_wait_queue(&xprt->resend); | ||
1161 | rpc_destroy_wait_queue(&xprt->backlog); | 1244 | rpc_destroy_wait_queue(&xprt->backlog); |
1162 | cancel_work_sync(&xprt->task_cleanup); | 1245 | cancel_work_sync(&xprt->task_cleanup); |
1163 | /* | 1246 | /* |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 0867070bb5ca..b446e100286f 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -283,6 +283,7 @@ xprt_setup_rdma(struct xprt_create *args) | |||
283 | } | 283 | } |
284 | 284 | ||
285 | xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), | 285 | xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), |
286 | xprt_rdma_slot_table_entries, | ||
286 | xprt_rdma_slot_table_entries); | 287 | xprt_rdma_slot_table_entries); |
287 | if (xprt == NULL) { | 288 | if (xprt == NULL) { |
288 | dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", | 289 | dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", |
@@ -452,9 +453,8 @@ xprt_rdma_connect(struct rpc_task *task) | |||
452 | } | 453 | } |
453 | 454 | ||
454 | static int | 455 | static int |
455 | xprt_rdma_reserve_xprt(struct rpc_task *task) | 456 | xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) |
456 | { | 457 | { |
457 | struct rpc_xprt *xprt = task->tk_xprt; | ||
458 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 458 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
459 | int credits = atomic_read(&r_xprt->rx_buf.rb_credits); | 459 | int credits = atomic_read(&r_xprt->rx_buf.rb_credits); |
460 | 460 | ||
@@ -466,7 +466,7 @@ xprt_rdma_reserve_xprt(struct rpc_task *task) | |||
466 | BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0); | 466 | BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0); |
467 | } | 467 | } |
468 | xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale; | 468 | xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale; |
469 | return xprt_reserve_xprt_cong(task); | 469 | return xprt_reserve_xprt_cong(xprt, task); |
470 | } | 470 | } |
471 | 471 | ||
472 | /* | 472 | /* |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index ddf05288d9f1..08c5d5a128fc 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -109,7 +109,7 @@ struct rpcrdma_ep { | |||
109 | */ | 109 | */ |
110 | 110 | ||
111 | /* temporary static scatter/gather max */ | 111 | /* temporary static scatter/gather max */ |
112 | #define RPCRDMA_MAX_DATA_SEGS (8) /* max scatter/gather */ | 112 | #define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */ |
113 | #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ | 113 | #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ |
114 | #define MAX_RPCRDMAHDR (\ | 114 | #define MAX_RPCRDMAHDR (\ |
115 | /* max supported RPC/RDMA header */ \ | 115 | /* max supported RPC/RDMA header */ \ |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 72abb7358933..d7f97ef26590 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/sunrpc/svcsock.h> | 37 | #include <linux/sunrpc/svcsock.h> |
38 | #include <linux/sunrpc/xprtsock.h> | 38 | #include <linux/sunrpc/xprtsock.h> |
39 | #include <linux/file.h> | 39 | #include <linux/file.h> |
40 | #ifdef CONFIG_NFS_V4_1 | 40 | #ifdef CONFIG_SUNRPC_BACKCHANNEL |
41 | #include <linux/sunrpc/bc_xprt.h> | 41 | #include <linux/sunrpc/bc_xprt.h> |
42 | #endif | 42 | #endif |
43 | 43 | ||
@@ -54,7 +54,8 @@ static void xs_close(struct rpc_xprt *xprt); | |||
54 | * xprtsock tunables | 54 | * xprtsock tunables |
55 | */ | 55 | */ |
56 | unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; | 56 | unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; |
57 | unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; | 57 | unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE; |
58 | unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE; | ||
58 | 59 | ||
59 | unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; | 60 | unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; |
60 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; | 61 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; |
@@ -75,6 +76,7 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; | |||
75 | 76 | ||
76 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | 77 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; |
77 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | 78 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; |
79 | static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT; | ||
78 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; | 80 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; |
79 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; | 81 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; |
80 | 82 | ||
@@ -104,6 +106,15 @@ static ctl_table xs_tunables_table[] = { | |||
104 | .extra2 = &max_slot_table_size | 106 | .extra2 = &max_slot_table_size |
105 | }, | 107 | }, |
106 | { | 108 | { |
109 | .procname = "tcp_max_slot_table_entries", | ||
110 | .data = &xprt_max_tcp_slot_table_entries, | ||
111 | .maxlen = sizeof(unsigned int), | ||
112 | .mode = 0644, | ||
113 | .proc_handler = proc_dointvec_minmax, | ||
114 | .extra1 = &min_slot_table_size, | ||
115 | .extra2 = &max_tcp_slot_table_limit | ||
116 | }, | ||
117 | { | ||
107 | .procname = "min_resvport", | 118 | .procname = "min_resvport", |
108 | .data = &xprt_min_resvport, | 119 | .data = &xprt_min_resvport, |
109 | .maxlen = sizeof(unsigned int), | 120 | .maxlen = sizeof(unsigned int), |
@@ -755,6 +766,8 @@ static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) | |||
755 | if (task == NULL) | 766 | if (task == NULL) |
756 | goto out_release; | 767 | goto out_release; |
757 | req = task->tk_rqstp; | 768 | req = task->tk_rqstp; |
769 | if (req == NULL) | ||
770 | goto out_release; | ||
758 | if (req->rq_bytes_sent == 0) | 771 | if (req->rq_bytes_sent == 0) |
759 | goto out_release; | 772 | goto out_release; |
760 | if (req->rq_bytes_sent == req->rq_snd_buf.len) | 773 | if (req->rq_bytes_sent == req->rq_snd_buf.len) |
@@ -1236,7 +1249,7 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt, | |||
1236 | return 0; | 1249 | return 0; |
1237 | } | 1250 | } |
1238 | 1251 | ||
1239 | #if defined(CONFIG_NFS_V4_1) | 1252 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
1240 | /* | 1253 | /* |
1241 | * Obtains an rpc_rqst previously allocated and invokes the common | 1254 | * Obtains an rpc_rqst previously allocated and invokes the common |
1242 | * tcp read code to read the data. The result is placed in the callback | 1255 | * tcp read code to read the data. The result is placed in the callback |
@@ -1299,7 +1312,7 @@ static inline int _xs_tcp_read_data(struct rpc_xprt *xprt, | |||
1299 | { | 1312 | { |
1300 | return xs_tcp_read_reply(xprt, desc); | 1313 | return xs_tcp_read_reply(xprt, desc); |
1301 | } | 1314 | } |
1302 | #endif /* CONFIG_NFS_V4_1 */ | 1315 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
1303 | 1316 | ||
1304 | /* | 1317 | /* |
1305 | * Read data off the transport. This can be either an RPC_CALL or an | 1318 | * Read data off the transport. This can be either an RPC_CALL or an |
@@ -2489,7 +2502,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap) | |||
2489 | } | 2502 | } |
2490 | 2503 | ||
2491 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | 2504 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, |
2492 | unsigned int slot_table_size) | 2505 | unsigned int slot_table_size, |
2506 | unsigned int max_slot_table_size) | ||
2493 | { | 2507 | { |
2494 | struct rpc_xprt *xprt; | 2508 | struct rpc_xprt *xprt; |
2495 | struct sock_xprt *new; | 2509 | struct sock_xprt *new; |
@@ -2499,7 +2513,8 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | |||
2499 | return ERR_PTR(-EBADF); | 2513 | return ERR_PTR(-EBADF); |
2500 | } | 2514 | } |
2501 | 2515 | ||
2502 | xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size); | 2516 | xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, |
2517 | max_slot_table_size); | ||
2503 | if (xprt == NULL) { | 2518 | if (xprt == NULL) { |
2504 | dprintk("RPC: xs_setup_xprt: couldn't allocate " | 2519 | dprintk("RPC: xs_setup_xprt: couldn't allocate " |
2505 | "rpc_xprt\n"); | 2520 | "rpc_xprt\n"); |
@@ -2541,7 +2556,8 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args) | |||
2541 | struct rpc_xprt *xprt; | 2556 | struct rpc_xprt *xprt; |
2542 | struct rpc_xprt *ret; | 2557 | struct rpc_xprt *ret; |
2543 | 2558 | ||
2544 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2559 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, |
2560 | xprt_max_tcp_slot_table_entries); | ||
2545 | if (IS_ERR(xprt)) | 2561 | if (IS_ERR(xprt)) |
2546 | return xprt; | 2562 | return xprt; |
2547 | transport = container_of(xprt, struct sock_xprt, xprt); | 2563 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2605,7 +2621,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2605 | struct sock_xprt *transport; | 2621 | struct sock_xprt *transport; |
2606 | struct rpc_xprt *ret; | 2622 | struct rpc_xprt *ret; |
2607 | 2623 | ||
2608 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); | 2624 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries, |
2625 | xprt_udp_slot_table_entries); | ||
2609 | if (IS_ERR(xprt)) | 2626 | if (IS_ERR(xprt)) |
2610 | return xprt; | 2627 | return xprt; |
2611 | transport = container_of(xprt, struct sock_xprt, xprt); | 2628 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2681,7 +2698,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2681 | struct sock_xprt *transport; | 2698 | struct sock_xprt *transport; |
2682 | struct rpc_xprt *ret; | 2699 | struct rpc_xprt *ret; |
2683 | 2700 | ||
2684 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2701 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, |
2702 | xprt_max_tcp_slot_table_entries); | ||
2685 | if (IS_ERR(xprt)) | 2703 | if (IS_ERR(xprt)) |
2686 | return xprt; | 2704 | return xprt; |
2687 | transport = container_of(xprt, struct sock_xprt, xprt); | 2705 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2760,7 +2778,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2760 | */ | 2778 | */ |
2761 | return args->bc_xprt->xpt_bc_xprt; | 2779 | return args->bc_xprt->xpt_bc_xprt; |
2762 | } | 2780 | } |
2763 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2781 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, |
2782 | xprt_tcp_slot_table_entries); | ||
2764 | if (IS_ERR(xprt)) | 2783 | if (IS_ERR(xprt)) |
2765 | return xprt; | 2784 | return xprt; |
2766 | transport = container_of(xprt, struct sock_xprt, xprt); | 2785 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2947,8 +2966,26 @@ static struct kernel_param_ops param_ops_slot_table_size = { | |||
2947 | #define param_check_slot_table_size(name, p) \ | 2966 | #define param_check_slot_table_size(name, p) \ |
2948 | __param_check(name, p, unsigned int); | 2967 | __param_check(name, p, unsigned int); |
2949 | 2968 | ||
2969 | static int param_set_max_slot_table_size(const char *val, | ||
2970 | const struct kernel_param *kp) | ||
2971 | { | ||
2972 | return param_set_uint_minmax(val, kp, | ||
2973 | RPC_MIN_SLOT_TABLE, | ||
2974 | RPC_MAX_SLOT_TABLE_LIMIT); | ||
2975 | } | ||
2976 | |||
2977 | static struct kernel_param_ops param_ops_max_slot_table_size = { | ||
2978 | .set = param_set_max_slot_table_size, | ||
2979 | .get = param_get_uint, | ||
2980 | }; | ||
2981 | |||
2982 | #define param_check_max_slot_table_size(name, p) \ | ||
2983 | __param_check(name, p, unsigned int); | ||
2984 | |||
2950 | module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, | 2985 | module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, |
2951 | slot_table_size, 0644); | 2986 | slot_table_size, 0644); |
2987 | module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries, | ||
2988 | max_slot_table_size, 0644); | ||
2952 | module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, | 2989 | module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, |
2953 | slot_table_size, 0644); | 2990 | slot_table_size, 0644); |
2954 | 2991 | ||