author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-06 13:09:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-06 13:09:38 -0400
commit		2b3a8fd735f86ebeb2b9d061054003000c36b654 (patch)
tree		209c969216b0f46315fc1f53a6c999a980d3ed15 /net
parent		6f4c98e1c22c28e00b8f050cce895a6b74db15d1 (diff)
parent		9581a4ae75517099bc87e1c43d1a8f35b55741b9 (diff)
Merge tag 'nfs-for-3.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
Pull NFS client updates from Trond Myklebust:
 "Highlights include:

  - Stable fix for a use after free issue in the NFSv4.1 open code
  - Fix the SUNRPC bi-directional RPC code to account for TCP segmentation
  - Optimise usage of readdirplus when confronted with 'ls -l' situations
  - Soft mount bugfixes
  - NFS over RDMA bugfixes
  - NFSv4 close locking fixes
  - Various NFSv4.x client state management optimisations
  - Rename/unlink code cleanups"

* tag 'nfs-for-3.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (28 commits)
  nfs: pass string length to pr_notice message about readdir loops
  NFSv4: Fix a use-after-free problem in open()
  SUNRPC: rpc_restart_call/rpc_restart_call_prepare should clear task->tk_status
  SUNRPC: Don't let rpc_delay() clobber non-timeout errors
  SUNRPC: Ensure call_connect_status() deals correctly with SOFTCONN tasks
  SUNRPC: Ensure call_status() deals correctly with SOFTCONN tasks
  NFSv4: Ensure we respect soft mount timeouts during trunking discovery
  NFSv4: Schedule recovery if nfs40_walk_client_list() is interrupted
  NFS: advertise only supported callback netids
  SUNRPC: remove KERN_INFO from dprintk() call sites
  SUNRPC: Fix large reads on NFS/RDMA
  NFS: Clean up: revert increase in READDIR RPC buffer max size
  SUNRPC: Ensure that call_bind times out correctly
  SUNRPC: Ensure that call_connect times out correctly
  nfs: emit a fsnotify_nameremove call in sillyrename codepath
  nfs: remove synchronous rename code
  nfs: convert nfs_rename to use async_rename infrastructure
  nfs: make nfs_async_rename non-static
  nfs: abstract out code needed to complete a sillyrename
  NFSv4: Clear the open state flags if the new stateid does not match
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/backchannel_rqst.c	93
-rw-r--r--	net/sunrpc/clnt.c	23
-rw-r--r--	net/sunrpc/sched.c	3
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	4
-rw-r--r--	net/sunrpc/xprtrdma/transport.c	10
-rw-r--r--	net/sunrpc/xprtsock.c	28
6 files changed, 93 insertions, 68 deletions
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index e860d4f7ed2a..3513d559bc45 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -212,39 +212,23 @@ out:
 }
 EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
 
-/*
- * One or more rpc_rqst structure have been preallocated during the
- * backchannel setup. Buffer space for the send and private XDR buffers
- * has been preallocated as well. Use xprt_alloc_bc_request to allocate
- * to this request. Use xprt_free_bc_request to return it.
- *
- * We know that we're called in soft interrupt context, grab the spin_lock
- * since there is no need to grab the bottom half spin_lock.
- *
- * Return an available rpc_rqst, otherwise NULL if non are available.
- */
-struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
+static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 {
-	struct rpc_rqst *req;
+	struct rpc_rqst *req = NULL;
 
 	dprintk("RPC: allocate a backchannel request\n");
-	spin_lock(&xprt->bc_pa_lock);
-	if (!list_empty(&xprt->bc_pa_list)) {
-		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
-				rq_bc_pa_list);
-		list_del(&req->rq_bc_pa_list);
-	} else {
-		req = NULL;
-	}
-	spin_unlock(&xprt->bc_pa_lock);
+	if (list_empty(&xprt->bc_pa_list))
+		goto not_found;
 
-	if (req != NULL) {
-		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
-		req->rq_reply_bytes_recvd = 0;
-		req->rq_bytes_sent = 0;
-		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
+			rq_bc_pa_list);
+	req->rq_reply_bytes_recvd = 0;
+	req->rq_bytes_sent = 0;
+	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
 			sizeof(req->rq_private_buf));
-	}
+	req->rq_xid = xid;
+	req->rq_connect_cookie = xprt->connect_cookie;
+not_found:
 	dprintk("RPC: backchannel req=%p\n", req);
 	return req;
 }
@@ -259,6 +243,7 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 
 	dprintk("RPC: free backchannel req=%p\n", req);
 
+	req->rq_connect_cookie = xprt->connect_cookie - 1;
 	smp_mb__before_clear_bit();
 	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
 	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
@@ -281,7 +266,57 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 	 * may be reused by a new callback request.
 	 */
 	spin_lock_bh(&xprt->bc_pa_lock);
-	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+	list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
 	spin_unlock_bh(&xprt->bc_pa_lock);
 }
 
+/*
+ * One or more rpc_rqst structure have been preallocated during the
+ * backchannel setup. Buffer space for the send and private XDR buffers
+ * has been preallocated as well. Use xprt_alloc_bc_request to allocate
+ * to this request. Use xprt_free_bc_request to return it.
+ *
+ * We know that we're called in soft interrupt context, grab the spin_lock
+ * since there is no need to grab the bottom half spin_lock.
+ *
+ * Return an available rpc_rqst, otherwise NULL if non are available.
+ */
+struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
+{
+	struct rpc_rqst *req;
+
+	spin_lock(&xprt->bc_pa_lock);
+	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
+		if (req->rq_connect_cookie != xprt->connect_cookie)
+			continue;
+		if (req->rq_xid == xid)
+			goto found;
+	}
+	req = xprt_alloc_bc_request(xprt, xid);
+found:
+	spin_unlock(&xprt->bc_pa_lock);
+	return req;
+}
+
+/*
+ * Add callback request to callback list. The callback
+ * service sleeps on the sv_cb_waitq waiting for new
+ * requests. Wake it up after adding enqueing the
+ * request.
+ */
+void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
+{
+	struct rpc_xprt *xprt = req->rq_xprt;
+	struct svc_serv *bc_serv = xprt->bc_serv;
+
+	req->rq_private_buf.len = copied;
+	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+
+	dprintk("RPC: add callback request to list\n");
+	spin_lock(&bc_serv->sv_cb_lock);
+	list_del(&req->rq_bc_pa_list);
+	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+	wake_up(&bc_serv->sv_cb_waitq);
+	spin_unlock(&bc_serv->sv_cb_lock);
+}
+
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 0edada973434..f400445d1a44 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1363,6 +1363,7 @@ rpc_restart_call_prepare(struct rpc_task *task)
 	if (RPC_ASSASSINATED(task))
 		return 0;
 	task->tk_action = call_start;
+	task->tk_status = 0;
 	if (task->tk_ops->rpc_call_prepare != NULL)
 		task->tk_action = rpc_prepare_task;
 	return 1;
@@ -1379,6 +1380,7 @@ rpc_restart_call(struct rpc_task *task)
 	if (RPC_ASSASSINATED(task))
 		return 0;
 	task->tk_action = call_start;
+	task->tk_status = 0;
 	return 1;
 }
 EXPORT_SYMBOL_GPL(rpc_restart_call);
@@ -1728,9 +1730,7 @@ call_bind_status(struct rpc_task *task)
 	case -EPROTONOSUPPORT:
 		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
 				task->tk_pid);
-		task->tk_status = 0;
-		task->tk_action = call_bind;
-		return;
+		goto retry_timeout;
 	case -ECONNREFUSED:		/* connection problems */
 	case -ECONNRESET:
 	case -ECONNABORTED:
@@ -1756,6 +1756,7 @@ call_bind_status(struct rpc_task *task)
 		return;
 
 retry_timeout:
+	task->tk_status = 0;
 	task->tk_action = call_timeout;
 }
 
@@ -1798,21 +1799,19 @@ call_connect_status(struct rpc_task *task)
 	trace_rpc_connect_status(task, status);
 	task->tk_status = 0;
 	switch (status) {
-		/* if soft mounted, test if we've timed out */
-	case -ETIMEDOUT:
-		task->tk_action = call_timeout;
-		return;
 	case -ECONNREFUSED:
 	case -ECONNRESET:
 	case -ECONNABORTED:
 	case -ENETUNREACH:
 	case -EHOSTUNREACH:
-		/* retry with existing socket, after a delay */
-		rpc_delay(task, 3*HZ);
 		if (RPC_IS_SOFTCONN(task))
 			break;
+		/* retry with existing socket, after a delay */
+		rpc_delay(task, 3*HZ);
 	case -EAGAIN:
-		task->tk_action = call_bind;
+		/* Check for timeouts before looping back to call_bind */
+	case -ETIMEDOUT:
+		task->tk_action = call_timeout;
 		return;
 	case 0:
 		clnt->cl_stats->netreconn++;
@@ -2007,6 +2006,10 @@ call_status(struct rpc_task *task)
 	case -EHOSTDOWN:
 	case -EHOSTUNREACH:
 	case -ENETUNREACH:
+		if (RPC_IS_SOFTCONN(task)) {
+			rpc_exit(task, status);
+			break;
+		}
 		/*
 		 * Delay any retries for 3 seconds, then handle as if it
 		 * were a timeout.
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ff3cc4bf4b24..25578afe1548 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -637,7 +637,8 @@ static void __rpc_queue_timer_fn(unsigned long ptr)
 
 static void __rpc_atrun(struct rpc_task *task)
 {
-	task->tk_status = 0;
+	if (task->tk_status == -ETIMEDOUT)
+		task->tk_status = 0;
 }
 
 /*
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e03725bfe2b8..96ead526b125 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -649,9 +649,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 				break;
 			page_base = 0;
 		}
-		rqst->rq_rcv_buf.page_len = olen - copy_len;
-	} else
-		rqst->rq_rcv_buf.page_len = 0;
+	}
 
 	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
 		curlen = copy_len;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 285dc0884115..1eb9c468d0c9 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -733,7 +733,7 @@ static void __exit xprt_rdma_cleanup(void)
 {
 	int rc;
 
-	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
+	dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
 #ifdef RPC_DEBUG
 	if (sunrpc_table_header) {
 		unregister_sysctl_table(sunrpc_table_header);
@@ -755,14 +755,14 @@ static int __init xprt_rdma_init(void)
 	if (rc)
 		return rc;
 
-	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");
+	dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");
 
-	dprintk(KERN_INFO "Defaults:\n");
-	dprintk(KERN_INFO "\tSlots %d\n"
+	dprintk("Defaults:\n");
+	dprintk("\tSlots %d\n"
 		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
 		xprt_rdma_slot_table_entries,
 		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
-	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
+	dprintk("\tPadding %d\n\tMemreg %d\n",
 		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);
 
 #ifdef RPC_DEBUG
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 0addefca8e77..966763d735e9 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1306,41 +1306,29 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
  * If we're unable to obtain the rpc_rqst we schedule the closing of the
  * connection and return -1.
  */
-static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
+static int xs_tcp_read_callback(struct rpc_xprt *xprt,
 					struct xdr_skb_reader *desc)
 {
 	struct sock_xprt *transport =
 				container_of(xprt, struct sock_xprt, xprt);
 	struct rpc_rqst *req;
 
-	req = xprt_alloc_bc_request(xprt);
+	/* Look up and lock the request corresponding to the given XID */
+	spin_lock(&xprt->transport_lock);
+	req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
 	if (req == NULL) {
+		spin_unlock(&xprt->transport_lock);
 		printk(KERN_WARNING "Callback slot table overflowed\n");
 		xprt_force_disconnect(xprt);
 		return -1;
 	}
 
-	req->rq_xid = transport->tcp_xid;
 	dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
 	xs_tcp_read_common(xprt, desc, req);
 
-	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
-		struct svc_serv *bc_serv = xprt->bc_serv;
-
-		/*
-		 * Add callback request to callback list. The callback
-		 * service sleeps on the sv_cb_waitq waiting for new
-		 * requests. Wake it up after adding enqueing the
-		 * request.
-		 */
-		dprintk("RPC: add callback request to list\n");
-		spin_lock(&bc_serv->sv_cb_lock);
-		list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
-		spin_unlock(&bc_serv->sv_cb_lock);
-		wake_up(&bc_serv->sv_cb_waitq);
-	}
-
-	req->rq_private_buf.len = transport->tcp_copied;
+	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
+		xprt_complete_bc_request(req, transport->tcp_copied);
+	spin_unlock(&xprt->transport_lock);
 
 	return 0;
 }