author     J. Bruce Fields <bfields@redhat.com>	2013-04-26 11:37:29 -0400
committer  J. Bruce Fields <bfields@redhat.com>	2013-04-26 11:37:43 -0400
commit     c85b03ab200f6d2c2f80588d96d03c1f8fcaedc3 (patch)
tree       34f4f3200ef5db6ca93360879fffda050c04e1ca /net/sunrpc
parent     bf8d909705e9d9bac31d9b8eac6734d2b51332a7 (diff)
parent     fd068b200fb86e8fa52368e6f6088d2ab297a5d7 (diff)
Merge Trond's nfs-for-next
Merging Trond's nfs-for-next branch, mainly to get b7993cebb841b0da7a33e9d5ce301a9fd3209165 "SUNRPC: Allow rpc_create() to request that TCP slots be unlimited", which a small piece of the gss-proxy work depends on.
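For context, a minimal sketch (not part of this merge) of how an in-kernel caller might use the feature the merge pulls in: passing RPC_CLNT_CREATE_INFINITE_SLOTS in rpc_create_args.flags makes rpc_create() set XPRT_CREATE_INFINITE_SLOTS on the transport, which the xprtsock.c hunk below then maps to RPC_MAX_SLOT_TABLE_LIMIT for TCP. The helper name, program definition, address, version, and auth flavor are placeholders, not code from this series.

/*
 * Illustrative only: a hypothetical caller opting in to an unlimited
 * TCP slot table. Everything except .flags is a placeholder.
 */
#include <linux/sunrpc/clnt.h>

static struct rpc_clnt *example_create_tcp_client(struct net *net,
						   struct sockaddr *addr,
						   size_t addrlen,
						   const struct rpc_program *prog)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= addr,
		.addrsize	= addrlen,
		.program	= prog,
		.version	= 1,
		.authflavor	= RPC_AUTH_NULL,
		/* let the TCP slot table grow up to RPC_MAX_SLOT_TABLE_LIMIT */
		.flags		= RPC_CLNT_CREATE_INFINITE_SLOTS,
	};

	return rpc_create(&args);	/* returns ERR_PTR() on failure */
}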
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/Kconfig	 1
-rw-r--r--	net/sunrpc/clnt.c	43
-rw-r--r--	net/sunrpc/sched.c	 9
-rw-r--r--	net/sunrpc/xprt.c	61
-rw-r--r--	net/sunrpc/xprtsock.c	14
5 files changed, 106 insertions(+), 22 deletions(-)
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 516fe2caac2c..262caf03bd5f 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -24,7 +24,6 @@ config SUNRPC_XPRT_RDMA
 config SUNRPC_SWAP
 	bool
 	depends on SUNRPC
-	select NETVM
 
 config RPCSEC_GSS_KRB5
 	tristate "Secure RPC: Kerberos V mechanism"
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dcc446e7fbf6..651245aa829a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -414,6 +414,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 	};
 	char servername[48];
 
+	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
+		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
 	/*
 	 * If the caller chooses not to specify a hostname, whip
 	 * up a string representation of the passed-in address.
@@ -1306,6 +1308,8 @@ call_reserve(struct rpc_task *task)
 	xprt_reserve(task);
 }
 
+static void call_retry_reserve(struct rpc_task *task);
+
 /*
  * 1b.	Grok the result of xprt_reserve()
  */
@@ -1347,7 +1351,7 @@ call_reserveresult(struct rpc_task *task)
 	case -ENOMEM:
 		rpc_delay(task, HZ >> 2);
 	case -EAGAIN:	/* woken up; retry */
-		task->tk_action = call_reserve;
+		task->tk_action = call_retry_reserve;
 		return;
 	case -EIO:	/* probably a shutdown */
 		break;
@@ -1360,6 +1364,19 @@ call_reserveresult(struct rpc_task *task)
 }
 
 /*
+ * 1c.	Retry reserving an RPC call slot
+ */
+static void
+call_retry_reserve(struct rpc_task *task)
+{
+	dprint_status(task);
+
+	task->tk_status = 0;
+	task->tk_action = call_reserveresult;
+	xprt_retry_reserve(task);
+}
+
+/*
  * 2.	Bind and/or refresh the credentials
  */
 static void
@@ -1644,22 +1661,26 @@ call_connect_status(struct rpc_task *task)
 
 	dprint_status(task);
 
-	task->tk_status = 0;
-	if (status >= 0 || status == -EAGAIN) {
-		clnt->cl_stats->netreconn++;
-		task->tk_action = call_transmit;
-		return;
-	}
-
 	trace_rpc_connect_status(task, status);
 	switch (status) {
 		/* if soft mounted, test if we've timed out */
 	case -ETIMEDOUT:
 		task->tk_action = call_timeout;
-		break;
-	default:
-		rpc_exit(task, -EIO);
+		return;
+	case -ECONNREFUSED:
+	case -ECONNRESET:
+	case -ENETUNREACH:
+		if (RPC_IS_SOFTCONN(task))
+			break;
+		/* retry with existing socket, after a delay */
+	case 0:
+	case -EAGAIN:
+		task->tk_status = 0;
+		clnt->cl_stats->netreconn++;
+		task->tk_action = call_transmit;
+		return;
 	}
+	rpc_exit(task, status);
 }
 
 /*
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fb20f25ddec9..f8529fc8e542 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 	task->tk_waitqueue = queue;
 	queue->qlen++;
+	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
+	smp_wmb();
 	rpc_set_queued(task);
 
 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
  */
 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
-		__rpc_do_wake_up_task(queue, task);
+	if (RPC_IS_QUEUED(task)) {
+		smp_rmb();
+		if (task->tk_waitqueue == queue)
+			__rpc_do_wake_up_task(queue, task);
+	}
 }
 
 /*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index b7478d5e7ffd..745fca3cfd36 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -948,6 +948,34 @@ void xprt_transmit(struct rpc_task *task)
 	spin_unlock_bh(&xprt->transport_lock);
 }
 
+static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	set_bit(XPRT_CONGESTED, &xprt->state);
+	rpc_sleep_on(&xprt->backlog, task, NULL);
+}
+
+static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+{
+	if (rpc_wake_up_next(&xprt->backlog) == NULL)
+		clear_bit(XPRT_CONGESTED, &xprt->state);
+}
+
+static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	bool ret = false;
+
+	if (!test_bit(XPRT_CONGESTED, &xprt->state))
+		goto out;
+	spin_lock(&xprt->reserve_lock);
+	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
+		rpc_sleep_on(&xprt->backlog, task, NULL);
+		ret = true;
+	}
+	spin_unlock(&xprt->reserve_lock);
+out:
+	return ret;
+}
+
 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
 {
 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
@@ -992,7 +1020,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 		task->tk_status = -ENOMEM;
 		break;
 	case -EAGAIN:
-		rpc_sleep_on(&xprt->backlog, task, NULL);
+		xprt_add_backlog(xprt, task);
 		dprintk("RPC:       waiting for request slot\n");
 	default:
 		task->tk_status = -EAGAIN;
@@ -1028,7 +1056,7 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 		memset(req, 0, sizeof(*req));	/* mark unused */
 		list_add(&req->rq_list, &xprt->free);
 	}
-	rpc_wake_up_next(&xprt->backlog);
+	xprt_wake_up_backlog(xprt);
 	spin_unlock(&xprt->reserve_lock);
 }
 
@@ -1092,7 +1120,8 @@ EXPORT_SYMBOL_GPL(xprt_free);
  * xprt_reserve - allocate an RPC request slot
  * @task: RPC task requesting a slot allocation
  *
- * If no more slots are available, place the task on the transport's
+ * If the transport is marked as being congested, or if no more
+ * slots are available, place the task on the transport's
  * backlog queue.
  */
 void xprt_reserve(struct rpc_task *task)
@@ -1107,6 +1136,32 @@ void xprt_reserve(struct rpc_task *task)
 	task->tk_status = -EAGAIN;
 	rcu_read_lock();
 	xprt = rcu_dereference(task->tk_client->cl_xprt);
+	if (!xprt_throttle_congested(xprt, task))
+		xprt->ops->alloc_slot(xprt, task);
+	rcu_read_unlock();
+}
+
+/**
+ * xprt_retry_reserve - allocate an RPC request slot
+ * @task: RPC task requesting a slot allocation
+ *
+ * If no more slots are available, place the task on the transport's
+ * backlog queue.
+ * Note that the only difference with xprt_reserve is that we now
+ * ignore the value of the XPRT_CONGESTED flag.
+ */
+void xprt_retry_reserve(struct rpc_task *task)
+{
+	struct rpc_xprt *xprt;
+
+	task->tk_status = 0;
+	if (task->tk_rqstp != NULL)
+		return;
+
+	task->tk_timeout = 0;
+	task->tk_status = -EAGAIN;
+	rcu_read_lock();
+	xprt = rcu_dereference(task->tk_client->cl_xprt);
 	xprt->ops->alloc_slot(xprt, task);
 	rcu_read_unlock();
 }
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3d02130828da..9c2825827dec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2207,10 +2207,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 		 */
 		xs_tcp_force_close(xprt);
 		break;
-	case -ECONNREFUSED:
-	case -ECONNRESET:
-	case -ENETUNREACH:
-		/* retry with existing socket, after a delay */
 	case 0:
 	case -EINPROGRESS:
 	case -EALREADY:
@@ -2221,6 +2217,10 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 		/* Happens, for instance, if the user specified a link
 		 * local IPv6 address without a scope-id.
 		 */
+	case -ECONNREFUSED:
+	case -ECONNRESET:
+	case -ENETUNREACH:
+		/* retry with existing socket, after a delay */
 		goto out;
 	}
 out_eagain:
@@ -2767,9 +2767,13 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
 	struct rpc_xprt *ret;
+	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
+
+	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
+		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
-			xprt_max_tcp_slot_table_entries);
+			max_slot_table_size);
 	if (IS_ERR(xprt))
 		return xprt;
 	transport = container_of(xprt, struct sock_xprt, xprt);