Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/Kconfig                        |   4
-rw-r--r--  net/sunrpc/Makefile                       |   2
-rw-r--r--  net/sunrpc/auth.c                         |   2
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c            |  70
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c       |   7
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c     |  32
-rw-r--r--  net/sunrpc/auth_unix.c                    |   3
-rw-r--r--  net/sunrpc/backchannel_rqst.c             |   7
-rw-r--r--  net/sunrpc/bc_svc.c                       |   3
-rw-r--r--  net/sunrpc/clnt.c                         |  51
-rw-r--r--  net/sunrpc/rpc_pipe.c                     |  34
-rw-r--r--  net/sunrpc/sched.c                        |  38
-rw-r--r--  net/sunrpc/svc.c                          |   6
-rw-r--r--  net/sunrpc/svc_xprt.c                     |  11
-rw-r--r--  net/sunrpc/svcauth_unix.c                 |  79
-rw-r--r--  net/sunrpc/svcsock.c                      |  16
-rw-r--r--  net/sunrpc/xdr.c                          |   5
-rw-r--r--  net/sunrpc/xprt.c                         | 262
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  |   1
-rw-r--r--  net/sunrpc/xprtrdma/transport.c           |   6
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c               |   1
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h           |   4
-rw-r--r--  net/sunrpc/xprtsock.c                     |  60
23 files changed, 399 insertions, 305 deletions
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index b2198e65d8b..ffd243d0918 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -4,6 +4,10 @@ config SUNRPC
 config SUNRPC_GSS
 	tristate
 
+config SUNRPC_BACKCHANNEL
+	bool
+	depends on SUNRPC
+
 config SUNRPC_XPRT_RDMA
 	tristate
 	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 9d2fca5ad14..8209a0411bc 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,6 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
 	    addr.o rpcb_clnt.o timer.o xdr.o \
 	    sunrpc_syms.o cache.o rpc_pipe.o \
 	    svc_xprt.o
-sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
+sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o
 sunrpc-$(CONFIG_PROC_FS) += stats.o
 sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index cd6e4aa19db..727e506cacd 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -626,7 +626,7 @@ rpcauth_refreshcred(struct rpc_task *task)
 		if (err < 0)
 			goto out;
 		cred = task->tk_rqstp->rq_cred;
-	};
+	}
 	dprintk("RPC: %5u refreshing %s cred %p\n",
 		task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
 
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 5daf6cc4fae..e9b76939268 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -603,26 +603,6 @@ out:
 	return err;
 }
 
-static ssize_t
-gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
-		char __user *dst, size_t buflen)
-{
-	char *data = (char *)msg->data + msg->copied;
-	size_t mlen = min(msg->len, buflen);
-	unsigned long left;
-
-	left = copy_to_user(dst, data, mlen);
-	if (left == mlen) {
-		msg->errno = -EFAULT;
-		return -EFAULT;
-	}
-
-	mlen -= left;
-	msg->copied += mlen;
-	msg->errno = 0;
-	return mlen;
-}
-
 #define MSG_BUF_MAXSIZE 1024
 
 static ssize_t
@@ -1421,18 +1401,16 @@ gss_wrap_req(struct rpc_task *task,
 		goto out;
 	}
 	switch (gss_cred->gc_service) {
-		case RPC_GSS_SVC_NONE:
-			gss_wrap_req_encode(encode, rqstp, p, obj);
-			status = 0;
-			break;
-		case RPC_GSS_SVC_INTEGRITY:
-			status = gss_wrap_req_integ(cred, ctx, encode,
-					rqstp, p, obj);
-			break;
-		case RPC_GSS_SVC_PRIVACY:
-			status = gss_wrap_req_priv(cred, ctx, encode,
-					rqstp, p, obj);
-			break;
+	case RPC_GSS_SVC_NONE:
+		gss_wrap_req_encode(encode, rqstp, p, obj);
+		status = 0;
+		break;
+	case RPC_GSS_SVC_INTEGRITY:
+		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
+		break;
+	case RPC_GSS_SVC_PRIVACY:
+		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
+		break;
 	}
 out:
 	gss_put_ctx(ctx);
@@ -1531,18 +1509,18 @@ gss_unwrap_resp(struct rpc_task *task,
 	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
 		goto out_decode;
 	switch (gss_cred->gc_service) {
-		case RPC_GSS_SVC_NONE:
-			break;
-		case RPC_GSS_SVC_INTEGRITY:
-			status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
-			if (status)
-				goto out;
-			break;
-		case RPC_GSS_SVC_PRIVACY:
-			status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
-			if (status)
-				goto out;
-			break;
+	case RPC_GSS_SVC_NONE:
+		break;
+	case RPC_GSS_SVC_INTEGRITY:
+		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
+		if (status)
+			goto out;
+		break;
+	case RPC_GSS_SVC_PRIVACY:
+		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
+		if (status)
+			goto out;
+		break;
 	}
 	/* take into account extra slack for integrity and privacy cases: */
 	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
@@ -1592,7 +1570,7 @@ static const struct rpc_credops gss_nullops = {
 };
 
 static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
-	.upcall		= gss_pipe_upcall,
+	.upcall		= rpc_pipe_generic_upcall,
 	.downcall	= gss_pipe_downcall,
 	.destroy_msg	= gss_pipe_destroy_msg,
 	.open_pipe	= gss_pipe_open_v0,
@@ -1600,7 +1578,7 @@ static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
 };
 
 static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
-	.upcall		= gss_pipe_upcall,
+	.upcall		= rpc_pipe_generic_upcall,
 	.downcall	= gss_pipe_downcall,
 	.destroy_msg	= gss_pipe_destroy_msg,
 	.open_pipe	= gss_pipe_open_v1,
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index c3b75333b82..8c67890de42 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -744,6 +744,13 @@ static struct pf_desc gss_kerberos_pfs[] = {
 	},
 };
 
+MODULE_ALIAS("rpc-auth-gss-krb5");
+MODULE_ALIAS("rpc-auth-gss-krb5i");
+MODULE_ALIAS("rpc-auth-gss-krb5p");
+MODULE_ALIAS("rpc-auth-gss-390003");
+MODULE_ALIAS("rpc-auth-gss-390004");
+MODULE_ALIAS("rpc-auth-gss-390005");
+
 static struct gss_api_mech gss_kerberos_mech = {
 	.gm_name	= "krb5",
 	.gm_owner	= THIS_MODULE,
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index e3c36a27441..ca8cad8251c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -141,7 +141,7 @@ gss_mech_get(struct gss_api_mech *gm)
 EXPORT_SYMBOL_GPL(gss_mech_get);
 
 struct gss_api_mech *
-gss_mech_get_by_name(const char *name)
+_gss_mech_get_by_name(const char *name)
 {
 	struct gss_api_mech *pos, *gm = NULL;
 
@@ -158,6 +158,17 @@ gss_mech_get_by_name(const char *name)
 
 }
 
+struct gss_api_mech * gss_mech_get_by_name(const char *name)
+{
+	struct gss_api_mech *gm = NULL;
+
+	gm = _gss_mech_get_by_name(name);
+	if (!gm) {
+		request_module("rpc-auth-gss-%s", name);
+		gm = _gss_mech_get_by_name(name);
+	}
+	return gm;
+}
 EXPORT_SYMBOL_GPL(gss_mech_get_by_name);
 
 struct gss_api_mech *
@@ -194,10 +205,9 @@ mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
 	return 0;
 }
 
-struct gss_api_mech *
-gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
+struct gss_api_mech *_gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
 {
-	struct gss_api_mech *pos, *gm = NULL;
+	struct gss_api_mech *gm = NULL, *pos;
 
 	spin_lock(&registered_mechs_lock);
 	list_for_each_entry(pos, &registered_mechs, gm_list) {
@@ -213,6 +223,20 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
 	return gm;
 }
 
+struct gss_api_mech *
+gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
+{
+	struct gss_api_mech *gm;
+
+	gm = _gss_mech_get_by_pseudoflavor(pseudoflavor);
+
+	if (!gm) {
+		request_module("rpc-auth-gss-%u", pseudoflavor);
+		gm = _gss_mech_get_by_pseudoflavor(pseudoflavor);
+	}
+	return gm;
+}
+
 EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
 
 int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4cb70dc6e7a..e50502d8ceb 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -129,6 +129,9 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
 	for (i = 0; i < groups ; i++)
 		if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
 			return 0;
+	if (groups < NFS_NGROUPS &&
+	    cred->uc_gids[groups] != NOGROUP)
+		return 0;
 	return 1;
 }
 
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index cf06af3b63c..91eaa26e4c4 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -29,8 +29,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-#if defined(CONFIG_NFS_V4_1)
-
 /*
  * Helper routines that track the number of preallocation elements
  * on the transport.
@@ -174,7 +172,7 @@ out_free:
 	dprintk("RPC: setup backchannel transport failed\n");
 	return -1;
 }
-EXPORT_SYMBOL(xprt_setup_backchannel);
+EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
 
 /*
  * Destroys the backchannel preallocated structures.
@@ -204,7 +202,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
 	dprintk("RPC: backchannel list empty= %s\n",
 		list_empty(&xprt->bc_pa_list) ? "true" : "false");
 }
-EXPORT_SYMBOL(xprt_destroy_backchannel);
+EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
 
 /*
  * One or more rpc_rqst structure have been preallocated during the
@@ -279,4 +277,3 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 	spin_unlock_bh(&xprt->bc_pa_lock);
 }
 
-#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 1dd1a689000..0b2eb388cbd 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -27,8 +27,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  * reply over an existing open connection previously established by the client.
  */
 
-#if defined(CONFIG_NFS_V4_1)
-
 #include <linux/module.h>
 
 #include <linux/sunrpc/xprt.h>
@@ -63,4 +61,3 @@ int bc_send(struct rpc_rqst *req)
 	return ret;
 }
 
-#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8c9141583d6..c5347d29cfb 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -64,9 +64,9 @@ static void call_decode(struct rpc_task *task);
 static void	call_bind(struct rpc_task *task);
 static void	call_bind_status(struct rpc_task *task);
 static void	call_transmit(struct rpc_task *task);
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 static void	call_bc_transmit(struct rpc_task *task);
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 static void	call_status(struct rpc_task *task);
 static void	call_transmit_status(struct rpc_task *task);
 static void	call_refresh(struct rpc_task *task);
@@ -97,8 +97,7 @@ static int
 rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
 {
 	static uint32_t clntid;
-	struct nameidata nd;
-	struct path path;
+	struct path path, dir;
 	char name[15];
 	struct qstr q = {
 		.name = name,
@@ -113,7 +112,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
 	path.mnt = rpc_get_mount();
 	if (IS_ERR(path.mnt))
 		return PTR_ERR(path.mnt);
-	error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
+	error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &dir);
 	if (error)
 		goto err;
 
@@ -121,7 +120,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
 		q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
 		name[sizeof(name) - 1] = '\0';
 		q.hash = full_name_hash(q.name, q.len);
-		path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
+		path.dentry = rpc_create_client_dir(dir.dentry, &q, clnt);
 		if (!IS_ERR(path.dentry))
 			break;
 		error = PTR_ERR(path.dentry);
@@ -132,11 +131,11 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
 			goto err_path_put;
 		}
 	}
-	path_put(&nd.path);
+	path_put(&dir);
 	clnt->cl_path = path;
 	return 0;
 err_path_put:
-	path_put(&nd.path);
+	path_put(&dir);
 err:
 	rpc_put_mount();
 	return error;
@@ -716,7 +715,7 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
 }
 EXPORT_SYMBOL_GPL(rpc_call_async);
 
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /**
  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
  * rpc_execute against it
@@ -759,7 +758,7 @@ out:
 	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
 	return task;
 }
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
 void
 rpc_call_start(struct rpc_task *task)
@@ -1362,7 +1361,7 @@ call_transmit_status(struct rpc_task *task)
 	}
 }
 
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /*
  * 5b. Send the backchannel RPC reply. On error, drop the reply. In
  * addition, disconnect on connectivity errors.
@@ -1426,7 +1425,7 @@ call_bc_transmit(struct rpc_task *task)
 	}
 	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
 }
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
 /*
  * 6. Sort out the RPC call status
@@ -1551,8 +1550,7 @@ call_decode(struct rpc_task *task)
 	kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
 	__be32 *p;
 
-	dprintk("RPC: %5u call_decode (status %d)\n",
-			task->tk_pid, task->tk_status);
+	dprint_status(task);
 
 	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
 		if (clnt->cl_chatty)
@@ -1666,19 +1664,18 @@ rpc_verify_header(struct rpc_task *task)
 	if (--len < 0)
 		goto out_overflow;
 	switch ((n = ntohl(*p++))) {
-		case RPC_AUTH_ERROR:
-			break;
-		case RPC_MISMATCH:
-			dprintk("RPC: %5u %s: RPC call version "
-				"mismatch!\n",
-				task->tk_pid, __func__);
-			error = -EPROTONOSUPPORT;
-			goto out_err;
-		default:
-			dprintk("RPC: %5u %s: RPC call rejected, "
-				"unknown error: %x\n",
-				task->tk_pid, __func__, n);
-			goto out_eio;
+	case RPC_AUTH_ERROR:
+		break;
+	case RPC_MISMATCH:
+		dprintk("RPC: %5u %s: RPC call version mismatch!\n",
+			task->tk_pid, __func__);
+		error = -EPROTONOSUPPORT;
+		goto out_err;
+	default:
+		dprintk("RPC: %5u %s: RPC call rejected, "
+			"unknown error: %x\n",
+			task->tk_pid, __func__, n);
+		goto out_eio;
 	}
 	if (--len < 0)
 		goto out_overflow;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 72bc5368396..67dbc188438 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -77,6 +77,26 @@ rpc_timeout_upcall_queue(struct work_struct *work)
 	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
 }
 
+ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+				char __user *dst, size_t buflen)
+{
+	char *data = (char *)msg->data + msg->copied;
+	size_t mlen = min(msg->len - msg->copied, buflen);
+	unsigned long left;
+
+	left = copy_to_user(dst, data, mlen);
+	if (left == mlen) {
+		msg->errno = -EFAULT;
+		return -EFAULT;
+	}
+
+	mlen -= left;
+	msg->copied += mlen;
+	msg->errno = 0;
+	return mlen;
+}
+EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
+
 /**
  * rpc_queue_upcall - queue an upcall message to userspace
  * @inode: inode of upcall pipe on which to queue given message
@@ -456,13 +476,13 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
 	inode->i_ino = get_next_ino();
 	inode->i_mode = mode;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	switch(mode & S_IFMT) {
-		case S_IFDIR:
-			inode->i_fop = &simple_dir_operations;
-			inode->i_op = &simple_dir_inode_operations;
-			inc_nlink(inode);
-		default:
-			break;
+	switch (mode & S_IFMT) {
+	case S_IFDIR:
+		inode->i_fop = &simple_dir_operations;
+		inode->i_op = &simple_dir_inode_operations;
+		inc_nlink(inode);
+	default:
+		break;
 	}
 	return inode;
 }
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4814e246a87..d12ffa54581 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -97,14 +97,16 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 /*
  * Add new request to a priority queue.
  */
-static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
+		struct rpc_task *task,
+		unsigned char queue_priority)
 {
 	struct list_head *q;
 	struct rpc_task *t;
 
 	INIT_LIST_HEAD(&task->u.tk_wait.links);
-	q = &queue->tasks[task->tk_priority];
-	if (unlikely(task->tk_priority > queue->maxpriority))
+	q = &queue->tasks[queue_priority];
+	if (unlikely(queue_priority > queue->maxpriority))
 		q = &queue->tasks[queue->maxpriority];
 	list_for_each_entry(t, q, u.tk_wait.list) {
 		if (t->tk_owner == task->tk_owner) {
@@ -123,12 +125,14 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct r
  * improve overall performance.
  * Everyone else gets appended to the queue to ensure proper FIFO behavior.
  */
-static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+		struct rpc_task *task,
+		unsigned char queue_priority)
 {
 	BUG_ON (RPC_IS_QUEUED(task));
 
 	if (RPC_IS_PRIORITY(queue))
-		__rpc_add_wait_queue_priority(queue, task);
+		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 	else if (RPC_IS_SWAPPER(task))
 		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 	else
@@ -311,13 +315,15 @@ static void rpc_make_runnable(struct rpc_task *task)
  * NB: An RPC task will only receive interrupt-driven events as long
  * as it's on a wait queue.
  */
-static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-			rpc_action action)
+static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+		struct rpc_task *task,
+		rpc_action action,
+		unsigned char queue_priority)
 {
 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 			task->tk_pid, rpc_qname(q), jiffies);
 
-	__rpc_add_wait_queue(q, task);
+	__rpc_add_wait_queue(q, task, queue_priority);
 
 	BUG_ON(task->tk_callback != NULL);
 	task->tk_callback = action;
@@ -334,11 +340,25 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 	 * Protect the queue operations.
 	 */
 	spin_lock_bh(&q->lock);
-	__rpc_sleep_on(q, task, action);
+	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
 
+void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
+		rpc_action action, int priority)
+{
+	/* We shouldn't ever put an inactive task to sleep */
+	BUG_ON(!RPC_IS_ACTIVATED(task));
+
+	/*
+	 * Protect the queue operations.
+	 */
+	spin_lock_bh(&q->lock);
+	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
+	spin_unlock_bh(&q->lock);
+}
+
 /**
  * __rpc_do_wake_up_task - wake up a single rpc_task
  * @queue: wait queue
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2b90292e950..6a69a1131fb 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1252,7 +1252,7 @@ svc_process(struct svc_rqst *rqstp)
 	}
 }
 
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /*
  * Process a backchannel RPC request that arrived over an existing
  * outbound connection
@@ -1300,8 +1300,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 		return 0;
 	}
 }
-EXPORT_SYMBOL(bc_svc_process);
-#endif /* CONFIG_NFS_V4_1 */
+EXPORT_SYMBOL_GPL(bc_svc_process);
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
 /*
  * Return (transport-specific) limit on the rpc payload.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index ab86b7927f8..bd31208bbb6 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -902,12 +902,13 @@ void svc_delete_xprt(struct svc_xprt *xprt)
 	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
 		list_del_init(&xprt->xpt_list);
 	/*
-	 * We used to delete the transport from whichever list
-	 * it's sk_xprt.xpt_ready node was on, but we don't actually
-	 * need to. This is because the only time we're called
-	 * while still attached to a queue, the queue itself
-	 * is about to be destroyed (in svc_destroy).
+	 * The only time we're called while xpt_ready is still on a list
+	 * is while the list itself is about to be destroyed (in
+	 * svc_destroy). BUT svc_xprt_enqueue could still be attempting
+	 * to add new entries to the sp_sockets list, so we can't leave
+	 * a freed xprt on it.
 	 */
+	list_del_init(&xprt->xpt_ready);
 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
 		serv->sv_tmpcnt--;
 	spin_unlock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index c8e10216c11..ce136323da8 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -30,12 +30,10 @@
 
 struct unix_domain {
 	struct auth_domain	h;
-#ifdef CONFIG_NFSD_DEPRECATED
-	int	addr_changes;
-#endif /* CONFIG_NFSD_DEPRECATED */
 	/* other stuff later */
 };
 
+extern struct auth_ops svcauth_null;
 extern struct auth_ops svcauth_unix;
 
 static void svcauth_unix_domain_release(struct auth_domain *dom)
@@ -74,9 +72,6 @@ struct auth_domain *unix_domain_find(char *name)
 			return NULL;
 		}
 		new->h.flavour = &svcauth_unix;
-#ifdef CONFIG_NFSD_DEPRECATED
-		new->addr_changes = 0;
-#endif /* CONFIG_NFSD_DEPRECATED */
 		rv = auth_domain_lookup(name, &new->h);
 	}
 }
@@ -95,9 +90,6 @@ struct ip_map {
 	char			m_class[8]; /* e.g. "nfsd" */
 	struct in6_addr		m_addr;
 	struct unix_domain	*m_client;
-#ifdef CONFIG_NFSD_DEPRECATED
-	int			m_add_change;
-#endif /* CONFIG_NFSD_DEPRECATED */
 };
 
 static void ip_map_put(struct kref *kref)
@@ -151,9 +143,6 @@ static void update(struct cache_head *cnew, struct cache_head *citem)
 
 	kref_get(&item->m_client->h.ref);
 	new->m_client = item->m_client;
-#ifdef CONFIG_NFSD_DEPRECATED
-	new->m_add_change = item->m_add_change;
-#endif /* CONFIG_NFSD_DEPRECATED */
 }
 static struct cache_head *ip_map_alloc(void)
 {
@@ -338,16 +327,6 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
 	ip.h.flags = 0;
 	if (!udom)
 		set_bit(CACHE_NEGATIVE, &ip.h.flags);
-#ifdef CONFIG_NFSD_DEPRECATED
-	else {
-		ip.m_add_change = udom->addr_changes;
-		/* if this is from the legacy set_client system call,
-		 * we need m_add_change to be one higher
-		 */
-		if (expiry == NEVER)
-			ip.m_add_change++;
-	}
-#endif /* CONFIG_NFSD_DEPRECATED */
 	ip.h.expiry_time = expiry;
 	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
@@ -367,62 +346,6 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
 	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
-#ifdef CONFIG_NFSD_DEPRECATED
-int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
-{
-	struct unix_domain *udom;
-	struct ip_map *ipmp;
-
-	if (dom->flavour != &svcauth_unix)
-		return -EINVAL;
-	udom = container_of(dom, struct unix_domain, h);
-	ipmp = ip_map_lookup(net, "nfsd", addr);
-
-	if (ipmp)
-		return ip_map_update(net, ipmp, udom, NEVER);
-	else
-		return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(auth_unix_add_addr);
-
-int auth_unix_forget_old(struct auth_domain *dom)
-{
-	struct unix_domain *udom;
-
-	if (dom->flavour != &svcauth_unix)
-		return -EINVAL;
-	udom = container_of(dom, struct unix_domain, h);
-	udom->addr_changes++;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(auth_unix_forget_old);
-
-struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
-{
-	struct ip_map *ipm;
-	struct auth_domain *rv;
-	struct sunrpc_net *sn;
-
-	sn = net_generic(net, sunrpc_net_id);
-	ipm = ip_map_lookup(net, "nfsd", addr);
-
-	if (!ipm)
-		return NULL;
-	if (cache_check(sn->ip_map_cache, &ipm->h, NULL))
-		return NULL;
-
-	if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
-		sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
-		rv = NULL;
-	} else {
-		rv = &ipm->m_client->h;
-		kref_get(&rv->ref);
-	}
-	cache_put(&ipm->h, sn->ip_map_cache);
-	return rv;
-}
-EXPORT_SYMBOL_GPL(auth_unix_lookup);
-#endif /* CONFIG_NFSD_DEPRECATED */
 
 void svcauth_unix_purge(void)
 {
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index af04f779ce9..767d494de7a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -51,6 +51,8 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/xprt.h>
 
+#include "sunrpc.h"
+
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
 
@@ -66,12 +68,12 @@ static void svc_sock_free(struct svc_xprt *);
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
 					  struct net *, struct sockaddr *,
 					  int, int);
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
 					     struct net *, struct sockaddr *,
 					     int, int);
 static void svc_bc_sock_free(struct svc_xprt *xprt);
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key svc_key[2];
@@ -1241,7 +1243,7 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
 	return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
 }
 
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
 					     struct net *, struct sockaddr *,
 					     int, int);
@@ -1282,7 +1284,7 @@ static void svc_cleanup_bc_xprt_sock(void)
 {
 	svc_unreg_xprt_class(&svc_tcp_bc_class);
 }
-#else /* CONFIG_NFS_V4_1 */
+#else /* CONFIG_SUNRPC_BACKCHANNEL */
 static void svc_init_bc_xprt_sock(void)
 {
 }
@@ -1290,7 +1292,7 @@ static void svc_init_bc_xprt_sock(void)
 static void svc_cleanup_bc_xprt_sock(void)
 {
 }
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
 static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_create = svc_tcp_create,
@@ -1621,7 +1623,7 @@ static void svc_sock_free(struct svc_xprt *xprt)
 	kfree(svsk);
 }
 
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /*
  * Create a back channel svc_xprt which shares the fore channel socket.
  */
@@ -1660,4 +1662,4 @@ static void svc_bc_sock_free(struct svc_xprt *xprt)
 	if (xprt)
 		kfree(container_of(xprt, struct svc_sock, sk_xprt));
 }
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index f008c14ad34..593f4c60530 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -126,7 +126,7 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
 	kaddr[buf->page_base + len] = '\0';
 	kunmap_atomic(kaddr, KM_USER0);
 }
-EXPORT_SYMBOL(xdr_terminate_string);
+EXPORT_SYMBOL_GPL(xdr_terminate_string);
 
 void
 xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
  * Copies data into an arbitrary memory location from an array of pages
  * The copy is assumed to be non-overlapping.
  */
-static void
+void
 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 {
 	struct page **pgfrom;
@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 
 	} while ((len -= copy) != 0);
 }
+EXPORT_SYMBOL_GPL(_copy_from_pages);
 
 /*
  * xdr_shrink_bufhead
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index ce5eb68a966..c64c0ef519b 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -62,6 +62,7 @@ | |||
62 | /* | 62 | /* |
63 | * Local functions | 63 | * Local functions |
64 | */ | 64 | */ |
65 | static void xprt_init(struct rpc_xprt *xprt, struct net *net); | ||
65 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); | 66 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); |
66 | static void xprt_connect_status(struct rpc_task *task); | 67 | static void xprt_connect_status(struct rpc_task *task); |
67 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); | 68 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); |
@@ -186,15 +187,16 @@ EXPORT_SYMBOL_GPL(xprt_load_transport); | |||
186 | /** | 187 | /** |
187 | * xprt_reserve_xprt - serialize write access to transports | 188 | * xprt_reserve_xprt - serialize write access to transports |
188 | * @task: task that is requesting access to the transport | 189 | * @task: task that is requesting access to the transport |
190 | * @xprt: pointer to the target transport | ||
189 | * | 191 | * |
190 | * This prevents mixing the payload of separate requests, and prevents | 192 | * This prevents mixing the payload of separate requests, and prevents |
191 | * transport connects from colliding with writes. No congestion control | 193 | * transport connects from colliding with writes. No congestion control |
192 | * is provided. | 194 | * is provided. |
193 | */ | 195 | */ |
194 | int xprt_reserve_xprt(struct rpc_task *task) | 196 | int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) |
195 | { | 197 | { |
196 | struct rpc_rqst *req = task->tk_rqstp; | 198 | struct rpc_rqst *req = task->tk_rqstp; |
197 | struct rpc_xprt *xprt = req->rq_xprt; | 199 | int priority; |
198 | 200 | ||
199 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | 201 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
200 | if (task == xprt->snd_task) | 202 | if (task == xprt->snd_task) |
@@ -202,8 +204,10 @@ int xprt_reserve_xprt(struct rpc_task *task) | |||
202 | goto out_sleep; | 204 | goto out_sleep; |
203 | } | 205 | } |
204 | xprt->snd_task = task; | 206 | xprt->snd_task = task; |
205 | req->rq_bytes_sent = 0; | 207 | if (req != NULL) { |
206 | req->rq_ntrans++; | 208 | req->rq_bytes_sent = 0; |
209 | req->rq_ntrans++; | ||
210 | } | ||
207 | 211 | ||
208 | return 1; | 212 | return 1; |
209 | 213 | ||
@@ -212,10 +216,13 @@ out_sleep: | |||
212 | task->tk_pid, xprt); | 216 | task->tk_pid, xprt); |
213 | task->tk_timeout = 0; | 217 | task->tk_timeout = 0; |
214 | task->tk_status = -EAGAIN; | 218 | task->tk_status = -EAGAIN; |
215 | if (req->rq_ntrans) | 219 | if (req == NULL) |
216 | rpc_sleep_on(&xprt->resend, task, NULL); | 220 | priority = RPC_PRIORITY_LOW; |
221 | else if (!req->rq_ntrans) | ||
222 | priority = RPC_PRIORITY_NORMAL; | ||
217 | else | 223 | else |
218 | rpc_sleep_on(&xprt->sending, task, NULL); | 224 | priority = RPC_PRIORITY_HIGH; |
225 | rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); | ||
219 | return 0; | 226 | return 0; |
220 | } | 227 | } |
221 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt); | 228 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt); |
@@ -239,22 +246,24 @@ static void xprt_clear_locked(struct rpc_xprt *xprt) | |||
239 | * integrated into the decision of whether a request is allowed to be | 246 | * integrated into the decision of whether a request is allowed to be |
240 | * woken up and given access to the transport. | 247 | * woken up and given access to the transport. |
241 | */ | 248 | */ |
242 | int xprt_reserve_xprt_cong(struct rpc_task *task) | 249 | int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) |
243 | { | 250 | { |
244 | struct rpc_xprt *xprt = task->tk_xprt; | ||
245 | struct rpc_rqst *req = task->tk_rqstp; | 251 | struct rpc_rqst *req = task->tk_rqstp; |
252 | int priority; | ||
246 | 253 | ||
247 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | 254 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
248 | if (task == xprt->snd_task) | 255 | if (task == xprt->snd_task) |
249 | return 1; | 256 | return 1; |
250 | goto out_sleep; | 257 | goto out_sleep; |
251 | } | 258 | } |
259 | if (req == NULL) { | ||
260 | xprt->snd_task = task; | ||
261 | return 1; | ||
262 | } | ||
252 | if (__xprt_get_cong(xprt, task)) { | 263 | if (__xprt_get_cong(xprt, task)) { |
253 | xprt->snd_task = task; | 264 | xprt->snd_task = task; |
254 | if (req) { | 265 | req->rq_bytes_sent = 0; |
255 | req->rq_bytes_sent = 0; | 266 | req->rq_ntrans++; |
256 | req->rq_ntrans++; | ||
257 | } | ||
258 | return 1; | 267 | return 1; |
259 | } | 268 | } |
260 | xprt_clear_locked(xprt); | 269 | xprt_clear_locked(xprt); |
@@ -262,10 +271,13 @@ out_sleep: | |||
262 | dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); | 271 | dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); |
263 | task->tk_timeout = 0; | 272 | task->tk_timeout = 0; |
264 | task->tk_status = -EAGAIN; | 273 | task->tk_status = -EAGAIN; |
265 | if (req && req->rq_ntrans) | 274 | if (req == NULL) |
266 | rpc_sleep_on(&xprt->resend, task, NULL); | 275 | priority = RPC_PRIORITY_LOW; |
276 | else if (!req->rq_ntrans) | ||
277 | priority = RPC_PRIORITY_NORMAL; | ||
267 | else | 278 | else |
268 | rpc_sleep_on(&xprt->sending, task, NULL); | 279 | priority = RPC_PRIORITY_HIGH; |
280 | rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); | ||
269 | return 0; | 281 | return 0; |
270 | } | 282 | } |
271 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); | 283 | EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); |
@@ -275,7 +287,7 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) | |||
275 | int retval; | 287 | int retval; |
276 | 288 | ||
277 | spin_lock_bh(&xprt->transport_lock); | 289 | spin_lock_bh(&xprt->transport_lock); |
278 | retval = xprt->ops->reserve_xprt(task); | 290 | retval = xprt->ops->reserve_xprt(xprt, task); |
279 | spin_unlock_bh(&xprt->transport_lock); | 291 | spin_unlock_bh(&xprt->transport_lock); |
280 | return retval; | 292 | return retval; |
281 | } | 293 | } |
@@ -288,12 +300,9 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt) | |||
288 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 300 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
289 | return; | 301 | return; |
290 | 302 | ||
291 | task = rpc_wake_up_next(&xprt->resend); | 303 | task = rpc_wake_up_next(&xprt->sending); |
292 | if (!task) { | 304 | if (task == NULL) |
293 | task = rpc_wake_up_next(&xprt->sending); | 305 | goto out_unlock; |
294 | if (!task) | ||
295 | goto out_unlock; | ||
296 | } | ||
297 | 306 | ||
298 | req = task->tk_rqstp; | 307 | req = task->tk_rqstp; |
299 | xprt->snd_task = task; | 308 | xprt->snd_task = task; |
@@ -310,24 +319,25 @@ out_unlock: | |||
310 | static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) | 319 | static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) |
311 | { | 320 | { |
312 | struct rpc_task *task; | 321 | struct rpc_task *task; |
322 | struct rpc_rqst *req; | ||
313 | 323 | ||
314 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 324 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
315 | return; | 325 | return; |
316 | if (RPCXPRT_CONGESTED(xprt)) | 326 | if (RPCXPRT_CONGESTED(xprt)) |
317 | goto out_unlock; | 327 | goto out_unlock; |
318 | task = rpc_wake_up_next(&xprt->resend); | 328 | task = rpc_wake_up_next(&xprt->sending); |
319 | if (!task) { | 329 | if (task == NULL) |
320 | task = rpc_wake_up_next(&xprt->sending); | 330 | goto out_unlock; |
321 | if (!task) | 331 | |
322 | goto out_unlock; | 332 | req = task->tk_rqstp; |
333 | if (req == NULL) { | ||
334 | xprt->snd_task = task; | ||
335 | return; | ||
323 | } | 336 | } |
324 | if (__xprt_get_cong(xprt, task)) { | 337 | if (__xprt_get_cong(xprt, task)) { |
325 | struct rpc_rqst *req = task->tk_rqstp; | ||
326 | xprt->snd_task = task; | 338 | xprt->snd_task = task; |
327 | if (req) { | 339 | req->rq_bytes_sent = 0; |
328 | req->rq_bytes_sent = 0; | 340 | req->rq_ntrans++; |
329 | req->rq_ntrans++; | ||
330 | } | ||
331 | return; | 341 | return; |
332 | } | 342 | } |
333 | out_unlock: | 343 | out_unlock: |
@@ -852,7 +862,7 @@ int xprt_prepare_transmit(struct rpc_task *task) | |||
852 | err = req->rq_reply_bytes_recvd; | 862 | err = req->rq_reply_bytes_recvd; |
853 | goto out_unlock; | 863 | goto out_unlock; |
854 | } | 864 | } |
855 | if (!xprt->ops->reserve_xprt(task)) | 865 | if (!xprt->ops->reserve_xprt(xprt, task)) |
856 | err = -EAGAIN; | 866 | err = -EAGAIN; |
857 | out_unlock: | 867 | out_unlock: |
858 | spin_unlock_bh(&xprt->transport_lock); | 868 | spin_unlock_bh(&xprt->transport_lock); |
@@ -928,55 +938,115 @@ void xprt_transmit(struct rpc_task *task) | |||
928 | spin_unlock_bh(&xprt->transport_lock); | 938 | spin_unlock_bh(&xprt->transport_lock); |
929 | } | 939 | } |
930 | 940 | ||
941 | static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags) | ||
942 | { | ||
943 | struct rpc_rqst *req = ERR_PTR(-EAGAIN); | ||
944 | |||
945 | if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs)) | ||
946 | goto out; | ||
947 | req = kzalloc(sizeof(struct rpc_rqst), gfp_flags); | ||
948 | if (req != NULL) | ||
949 | goto out; | ||
950 | atomic_dec(&xprt->num_reqs); | ||
951 | req = ERR_PTR(-ENOMEM); | ||
952 | out: | ||
953 | return req; | ||
954 | } | ||
955 | |||
956 | static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | ||
957 | { | ||
958 | if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) { | ||
959 | kfree(req); | ||
960 | return true; | ||
961 | } | ||
962 | return false; | ||
963 | } | ||
964 | |||
931 | static void xprt_alloc_slot(struct rpc_task *task) | 965 | static void xprt_alloc_slot(struct rpc_task *task) |
932 | { | 966 | { |
933 | struct rpc_xprt *xprt = task->tk_xprt; | 967 | struct rpc_xprt *xprt = task->tk_xprt; |
968 | struct rpc_rqst *req; | ||
934 | 969 | ||
935 | task->tk_status = 0; | ||
936 | if (task->tk_rqstp) | ||
937 | return; | ||
938 | if (!list_empty(&xprt->free)) { | 970 | if (!list_empty(&xprt->free)) { |
939 | struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); | 971 | req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); |
940 | list_del_init(&req->rq_list); | 972 | list_del(&req->rq_list); |
941 | task->tk_rqstp = req; | 973 | goto out_init_req; |
942 | xprt_request_init(task, xprt); | 974 | } |
943 | return; | 975 | req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT); |
976 | if (!IS_ERR(req)) | ||
977 | goto out_init_req; | ||
978 | switch (PTR_ERR(req)) { | ||
979 | case -ENOMEM: | ||
980 | rpc_delay(task, HZ >> 2); | ||
981 | dprintk("RPC: dynamic allocation of request slot " | ||
982 | "failed! Retrying\n"); | ||
983 | break; | ||
984 | case -EAGAIN: | ||
985 | rpc_sleep_on(&xprt->backlog, task, NULL); | ||
986 | dprintk("RPC: waiting for request slot\n"); | ||
944 | } | 987 | } |
945 | dprintk("RPC: waiting for request slot\n"); | ||
946 | task->tk_status = -EAGAIN; | 988 | task->tk_status = -EAGAIN; |
947 | task->tk_timeout = 0; | 989 | return; |
948 | rpc_sleep_on(&xprt->backlog, task, NULL); | 990 | out_init_req: |
991 | task->tk_status = 0; | ||
992 | task->tk_rqstp = req; | ||
993 | xprt_request_init(task, xprt); | ||
949 | } | 994 | } |
950 | 995 | ||
951 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | 996 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) |
952 | { | 997 | { |
953 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
954 | |||
955 | spin_lock(&xprt->reserve_lock); | 998 | spin_lock(&xprt->reserve_lock); |
956 | list_add(&req->rq_list, &xprt->free); | 999 | if (!xprt_dynamic_free_slot(xprt, req)) { |
1000 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
1001 | list_add(&req->rq_list, &xprt->free); | ||
1002 | } | ||
957 | rpc_wake_up_next(&xprt->backlog); | 1003 | rpc_wake_up_next(&xprt->backlog); |
958 | spin_unlock(&xprt->reserve_lock); | 1004 | spin_unlock(&xprt->reserve_lock); |
959 | } | 1005 | } |
960 | 1006 | ||
961 | struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req) | 1007 | static void xprt_free_all_slots(struct rpc_xprt *xprt) |
1008 | { | ||
1009 | struct rpc_rqst *req; | ||
1010 | while (!list_empty(&xprt->free)) { | ||
1011 | req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); | ||
1012 | list_del(&req->rq_list); | ||
1013 | kfree(req); | ||
1014 | } | ||
1015 | } | ||
1016 | |||
1017 | struct rpc_xprt *xprt_alloc(struct net *net, size_t size, | ||
1018 | unsigned int num_prealloc, | ||
1019 | unsigned int max_alloc) | ||
962 | { | 1020 | { |
963 | struct rpc_xprt *xprt; | 1021 | struct rpc_xprt *xprt; |
1022 | struct rpc_rqst *req; | ||
1023 | int i; | ||
964 | 1024 | ||
965 | xprt = kzalloc(size, GFP_KERNEL); | 1025 | xprt = kzalloc(size, GFP_KERNEL); |
966 | if (xprt == NULL) | 1026 | if (xprt == NULL) |
967 | goto out; | 1027 | goto out; |
968 | atomic_set(&xprt->count, 1); | ||
969 | 1028 | ||
970 | xprt->max_reqs = max_req; | 1029 | xprt_init(xprt, net); |
971 | xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); | 1030 | |
972 | if (xprt->slot == NULL) | 1031 | for (i = 0; i < num_prealloc; i++) { |
1032 | req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); | ||
1033 | if (!req) | ||
1034 | break; | ||
1035 | list_add(&req->rq_list, &xprt->free); | ||
1036 | } | ||
1037 | if (i < num_prealloc) | ||
973 | goto out_free; | 1038 | goto out_free; |
1039 | if (max_alloc > num_prealloc) | ||
1040 | xprt->max_reqs = max_alloc; | ||
1041 | else | ||
1042 | xprt->max_reqs = num_prealloc; | ||
1043 | xprt->min_reqs = num_prealloc; | ||
1044 | atomic_set(&xprt->num_reqs, num_prealloc); | ||
974 | 1045 | ||
975 | xprt->xprt_net = get_net(net); | ||
976 | return xprt; | 1046 | return xprt; |
977 | 1047 | ||
978 | out_free: | 1048 | out_free: |
979 | kfree(xprt); | 1049 | xprt_free(xprt); |
980 | out: | 1050 | out: |
981 | return NULL; | 1051 | return NULL; |
982 | } | 1052 | } |
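
Note: the slot-allocation hunk above relies on two helpers, xprt_dynamic_alloc_slot() and xprt_dynamic_free_slot(), that are introduced in an earlier hunk of this patch and are not visible here. The sketch below only illustrates the behavior implied by this hunk -- an atomic num_reqs counter bounded between the preallocated minimum (min_reqs) and the configured maximum (max_reqs) -- and is not the patch's verbatim code; it assumes the usual kernel headers (<linux/slab.h>, <linux/err.h>, <linux/sunrpc/xprt.h>).

static struct rpc_rqst *example_dynamic_alloc_slot(struct rpc_xprt *xprt,
						   gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Refuse to grow the slot table past max_reqs. */
	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		return ERR_PTR(-EAGAIN);	/* caller waits on xprt->backlog */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req != NULL)
		return req;
	atomic_dec(&xprt->num_reqs);
	return ERR_PTR(-ENOMEM);		/* caller delays and retries */
}

static bool example_dynamic_free_slot(struct rpc_xprt *xprt,
				      struct rpc_rqst *req)
{
	/* Shrink back toward the preallocated minimum ... */
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;	/* ... otherwise recycle req onto xprt->free */
}

With GFP_NOWAIT the allocation never sleeps while holding reserve_lock; -ENOMEM is treated as transient (rpc_delay(task, HZ >> 2) above), whereas -EAGAIN parks the task on the backlog queue until xprt_free_slot() calls rpc_wake_up_next().
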
@@ -985,7 +1055,7 @@ EXPORT_SYMBOL_GPL(xprt_alloc); | |||
985 | void xprt_free(struct rpc_xprt *xprt) | 1055 | void xprt_free(struct rpc_xprt *xprt) |
986 | { | 1056 | { |
987 | put_net(xprt->xprt_net); | 1057 | put_net(xprt->xprt_net); |
988 | kfree(xprt->slot); | 1058 | xprt_free_all_slots(xprt); |
989 | kfree(xprt); | 1059 | kfree(xprt); |
990 | } | 1060 | } |
991 | EXPORT_SYMBOL_GPL(xprt_free); | 1061 | EXPORT_SYMBOL_GPL(xprt_free); |
@@ -1001,10 +1071,24 @@ void xprt_reserve(struct rpc_task *task) | |||
1001 | { | 1071 | { |
1002 | struct rpc_xprt *xprt = task->tk_xprt; | 1072 | struct rpc_xprt *xprt = task->tk_xprt; |
1003 | 1073 | ||
1004 | task->tk_status = -EIO; | 1074 | task->tk_status = 0; |
1075 | if (task->tk_rqstp != NULL) | ||
1076 | return; | ||
1077 | |||
1078 | /* Note: grabbing the xprt_lock_write() here is not strictly needed, | ||
1079 | * but ensures that we throttle new slot allocation if the transport | ||
1080 | * is congested (e.g. if reconnecting or if we're out of socket | ||
1081 | * write buffer space). | ||
1082 | */ | ||
1083 | task->tk_timeout = 0; | ||
1084 | task->tk_status = -EAGAIN; | ||
1085 | if (!xprt_lock_write(xprt, task)) | ||
1086 | return; | ||
1087 | |||
1005 | spin_lock(&xprt->reserve_lock); | 1088 | spin_lock(&xprt->reserve_lock); |
1006 | xprt_alloc_slot(task); | 1089 | xprt_alloc_slot(task); |
1007 | spin_unlock(&xprt->reserve_lock); | 1090 | spin_unlock(&xprt->reserve_lock); |
1091 | xprt_release_write(xprt, task); | ||
1008 | } | 1092 | } |
1009 | 1093 | ||
1010 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) | 1094 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) |
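
Note: the comment in the hunk above explains why xprt_reserve() now takes the transport write lock before allocating a slot: when the socket is congested or reconnecting, xprt_lock_write() fails, the task is queued with -EAGAIN, and no new slot is created, so the dynamic slot table does not grow while the transport cannot transmit anyway. A simplified sketch of how the RPC state machine is expected to consume the status set here (an illustration under that assumption, not the verbatim net/sunrpc/clnt.c code):

static void example_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	task->tk_status = 0;
	if (status >= 0 && task->tk_rqstp != NULL) {
		/* Got a slot: move on to credential refresh and encoding. */
		task->tk_action = call_refresh;
		return;
	}
	switch (status) {
	case -EAGAIN:
		/* Parked on xprt->backlog or on the write-lock queue; the
		 * wake-up from xprt_free_slot()/xprt_release_write() simply
		 * reruns the reserve step. */
		task->tk_action = call_reserve;
		break;
	default:
		rpc_exit(task, status);
	}
}
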
@@ -1021,6 +1105,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | |||
1021 | { | 1105 | { |
1022 | struct rpc_rqst *req = task->tk_rqstp; | 1106 | struct rpc_rqst *req = task->tk_rqstp; |
1023 | 1107 | ||
1108 | INIT_LIST_HEAD(&req->rq_list); | ||
1024 | req->rq_timeout = task->tk_client->cl_timeout->to_initval; | 1109 | req->rq_timeout = task->tk_client->cl_timeout->to_initval; |
1025 | req->rq_task = task; | 1110 | req->rq_task = task; |
1026 | req->rq_xprt = xprt; | 1111 | req->rq_xprt = xprt; |
@@ -1073,6 +1158,34 @@ void xprt_release(struct rpc_task *task) | |||
1073 | xprt_free_bc_request(req); | 1158 | xprt_free_bc_request(req); |
1074 | } | 1159 | } |
1075 | 1160 | ||
1161 | static void xprt_init(struct rpc_xprt *xprt, struct net *net) | ||
1162 | { | ||
1163 | atomic_set(&xprt->count, 1); | ||
1164 | |||
1165 | spin_lock_init(&xprt->transport_lock); | ||
1166 | spin_lock_init(&xprt->reserve_lock); | ||
1167 | |||
1168 | INIT_LIST_HEAD(&xprt->free); | ||
1169 | INIT_LIST_HEAD(&xprt->recv); | ||
1170 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | ||
1171 | spin_lock_init(&xprt->bc_pa_lock); | ||
1172 | INIT_LIST_HEAD(&xprt->bc_pa_list); | ||
1173 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ | ||
1174 | |||
1175 | xprt->last_used = jiffies; | ||
1176 | xprt->cwnd = RPC_INITCWND; | ||
1177 | xprt->bind_index = 0; | ||
1178 | |||
1179 | rpc_init_wait_queue(&xprt->binding, "xprt_binding"); | ||
1180 | rpc_init_wait_queue(&xprt->pending, "xprt_pending"); | ||
1181 | rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending"); | ||
1182 | rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); | ||
1183 | |||
1184 | xprt_init_xid(xprt); | ||
1185 | |||
1186 | xprt->xprt_net = get_net(net); | ||
1187 | } | ||
1188 | |||
1076 | /** | 1189 | /** |
1077 | * xprt_create_transport - create an RPC transport | 1190 | * xprt_create_transport - create an RPC transport |
1078 | * @args: rpc transport creation arguments | 1191 | * @args: rpc transport creation arguments |
@@ -1081,7 +1194,6 @@ void xprt_release(struct rpc_task *task) | |||
1081 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | 1194 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) |
1082 | { | 1195 | { |
1083 | struct rpc_xprt *xprt; | 1196 | struct rpc_xprt *xprt; |
1084 | struct rpc_rqst *req; | ||
1085 | struct xprt_class *t; | 1197 | struct xprt_class *t; |
1086 | 1198 | ||
1087 | spin_lock(&xprt_list_lock); | 1199 | spin_lock(&xprt_list_lock); |
@@ -1100,46 +1212,17 @@ found: | |||
1100 | if (IS_ERR(xprt)) { | 1212 | if (IS_ERR(xprt)) { |
1101 | dprintk("RPC: xprt_create_transport: failed, %ld\n", | 1213 | dprintk("RPC: xprt_create_transport: failed, %ld\n", |
1102 | -PTR_ERR(xprt)); | 1214 | -PTR_ERR(xprt)); |
1103 | return xprt; | 1215 | goto out; |
1104 | } | 1216 | } |
1105 | if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state)) | ||
1106 | /* ->setup returned a pre-initialized xprt: */ | ||
1107 | return xprt; | ||
1108 | |||
1109 | spin_lock_init(&xprt->transport_lock); | ||
1110 | spin_lock_init(&xprt->reserve_lock); | ||
1111 | |||
1112 | INIT_LIST_HEAD(&xprt->free); | ||
1113 | INIT_LIST_HEAD(&xprt->recv); | ||
1114 | #if defined(CONFIG_NFS_V4_1) | ||
1115 | spin_lock_init(&xprt->bc_pa_lock); | ||
1116 | INIT_LIST_HEAD(&xprt->bc_pa_list); | ||
1117 | #endif /* CONFIG_NFS_V4_1 */ | ||
1118 | |||
1119 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); | 1217 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
1120 | if (xprt_has_timer(xprt)) | 1218 | if (xprt_has_timer(xprt)) |
1121 | setup_timer(&xprt->timer, xprt_init_autodisconnect, | 1219 | setup_timer(&xprt->timer, xprt_init_autodisconnect, |
1122 | (unsigned long)xprt); | 1220 | (unsigned long)xprt); |
1123 | else | 1221 | else |
1124 | init_timer(&xprt->timer); | 1222 | init_timer(&xprt->timer); |
1125 | xprt->last_used = jiffies; | ||
1126 | xprt->cwnd = RPC_INITCWND; | ||
1127 | xprt->bind_index = 0; | ||
1128 | |||
1129 | rpc_init_wait_queue(&xprt->binding, "xprt_binding"); | ||
1130 | rpc_init_wait_queue(&xprt->pending, "xprt_pending"); | ||
1131 | rpc_init_wait_queue(&xprt->sending, "xprt_sending"); | ||
1132 | rpc_init_wait_queue(&xprt->resend, "xprt_resend"); | ||
1133 | rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); | ||
1134 | |||
1135 | /* initialize free list */ | ||
1136 | for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--) | ||
1137 | list_add(&req->rq_list, &xprt->free); | ||
1138 | |||
1139 | xprt_init_xid(xprt); | ||
1140 | |||
1141 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 1223 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
1142 | xprt->max_reqs); | 1224 | xprt->max_reqs); |
1225 | out: | ||
1143 | return xprt; | 1226 | return xprt; |
1144 | } | 1227 | } |
1145 | 1228 | ||
@@ -1157,7 +1240,6 @@ static void xprt_destroy(struct rpc_xprt *xprt) | |||
1157 | rpc_destroy_wait_queue(&xprt->binding); | 1240 | rpc_destroy_wait_queue(&xprt->binding); |
1158 | rpc_destroy_wait_queue(&xprt->pending); | 1241 | rpc_destroy_wait_queue(&xprt->pending); |
1159 | rpc_destroy_wait_queue(&xprt->sending); | 1242 | rpc_destroy_wait_queue(&xprt->sending); |
1160 | rpc_destroy_wait_queue(&xprt->resend); | ||
1161 | rpc_destroy_wait_queue(&xprt->backlog); | 1243 | rpc_destroy_wait_queue(&xprt->backlog); |
1162 | cancel_work_sync(&xprt->task_cleanup); | 1244 | cancel_work_sync(&xprt->task_cleanup); |
1163 | /* | 1245 | /* |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index c3c232a88d9..a385430c722 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/sunrpc/svc_xprt.h> | 42 | #include <linux/sunrpc/svc_xprt.h> |
43 | #include <linux/sunrpc/debug.h> | 43 | #include <linux/sunrpc/debug.h> |
44 | #include <linux/sunrpc/rpc_rdma.h> | 44 | #include <linux/sunrpc/rpc_rdma.h> |
45 | #include <linux/interrupt.h> | ||
45 | #include <linux/sched.h> | 46 | #include <linux/sched.h> |
46 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
47 | #include <linux/spinlock.h> | 48 | #include <linux/spinlock.h> |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 0867070bb5c..b446e100286 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -283,6 +283,7 @@ xprt_setup_rdma(struct xprt_create *args) | |||
283 | } | 283 | } |
284 | 284 | ||
285 | xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), | 285 | xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), |
286 | xprt_rdma_slot_table_entries, | ||
286 | xprt_rdma_slot_table_entries); | 287 | xprt_rdma_slot_table_entries); |
287 | if (xprt == NULL) { | 288 | if (xprt == NULL) { |
288 | dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", | 289 | dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", |
@@ -452,9 +453,8 @@ xprt_rdma_connect(struct rpc_task *task) | |||
452 | } | 453 | } |
453 | 454 | ||
454 | static int | 455 | static int |
455 | xprt_rdma_reserve_xprt(struct rpc_task *task) | 456 | xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) |
456 | { | 457 | { |
457 | struct rpc_xprt *xprt = task->tk_xprt; | ||
458 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 458 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
459 | int credits = atomic_read(&r_xprt->rx_buf.rb_credits); | 459 | int credits = atomic_read(&r_xprt->rx_buf.rb_credits); |
460 | 460 | ||
@@ -466,7 +466,7 @@ xprt_rdma_reserve_xprt(struct rpc_task *task) | |||
466 | BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0); | 466 | BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0); |
467 | } | 467 | } |
468 | xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale; | 468 | xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale; |
469 | return xprt_reserve_xprt_cong(task); | 469 | return xprt_reserve_xprt_cong(xprt, task); |
470 | } | 470 | } |
471 | 471 | ||
472 | /* | 472 | /* |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 80f8da344df..28236bab57f 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -47,6 +47,7 @@ | |||
47 | * o buffer memory | 47 | * o buffer memory |
48 | */ | 48 | */ |
49 | 49 | ||
50 | #include <linux/interrupt.h> | ||
50 | #include <linux/pci.h> /* for Tavor hack below */ | 51 | #include <linux/pci.h> /* for Tavor hack below */ |
51 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
52 | 53 | ||
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index cae761a8536..08c5d5a128f 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -42,7 +42,7 @@ | |||
42 | 42 | ||
43 | #include <linux/wait.h> /* wait_queue_head_t, etc */ | 43 | #include <linux/wait.h> /* wait_queue_head_t, etc */ |
44 | #include <linux/spinlock.h> /* spinlock_t, etc */ | 44 | #include <linux/spinlock.h> /* spinlock_t, etc */ |
45 | #include <asm/atomic.h> /* atomic_t, etc */ | 45 | #include <linux/atomic.h> /* atomic_t, etc */ |
46 | 46 | ||
47 | #include <rdma/rdma_cm.h> /* RDMA connection api */ | 47 | #include <rdma/rdma_cm.h> /* RDMA connection api */ |
48 | #include <rdma/ib_verbs.h> /* RDMA verbs api */ | 48 | #include <rdma/ib_verbs.h> /* RDMA verbs api */ |
@@ -109,7 +109,7 @@ struct rpcrdma_ep { | |||
109 | */ | 109 | */ |
110 | 110 | ||
111 | /* temporary static scatter/gather max */ | 111 | /* temporary static scatter/gather max */ |
112 | #define RPCRDMA_MAX_DATA_SEGS (8) /* max scatter/gather */ | 112 | #define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */ |
113 | #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ | 113 | #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ |
114 | #define MAX_RPCRDMAHDR (\ | 114 | #define MAX_RPCRDMAHDR (\ |
115 | /* max supported RPC/RDMA header */ \ | 115 | /* max supported RPC/RDMA header */ \ |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 72abb735893..b60b75082f3 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/sunrpc/svcsock.h> | 37 | #include <linux/sunrpc/svcsock.h> |
38 | #include <linux/sunrpc/xprtsock.h> | 38 | #include <linux/sunrpc/xprtsock.h> |
39 | #include <linux/file.h> | 39 | #include <linux/file.h> |
40 | #ifdef CONFIG_NFS_V4_1 | 40 | #ifdef CONFIG_SUNRPC_BACKCHANNEL |
41 | #include <linux/sunrpc/bc_xprt.h> | 41 | #include <linux/sunrpc/bc_xprt.h> |
42 | #endif | 42 | #endif |
43 | 43 | ||
@@ -54,7 +54,8 @@ static void xs_close(struct rpc_xprt *xprt); | |||
54 | * xprtsock tunables | 54 | * xprtsock tunables |
55 | */ | 55 | */ |
56 | unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; | 56 | unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; |
57 | unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; | 57 | unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE; |
58 | unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE; | ||
58 | 59 | ||
59 | unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; | 60 | unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; |
60 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; | 61 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; |
@@ -75,6 +76,7 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; | |||
75 | 76 | ||
76 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | 77 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; |
77 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | 78 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; |
79 | static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT; | ||
78 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; | 80 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; |
79 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; | 81 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; |
80 | 82 | ||
@@ -104,6 +106,15 @@ static ctl_table xs_tunables_table[] = { | |||
104 | .extra2 = &max_slot_table_size | 106 | .extra2 = &max_slot_table_size |
105 | }, | 107 | }, |
106 | { | 108 | { |
109 | .procname = "tcp_max_slot_table_entries", | ||
110 | .data = &xprt_max_tcp_slot_table_entries, | ||
111 | .maxlen = sizeof(unsigned int), | ||
112 | .mode = 0644, | ||
113 | .proc_handler = proc_dointvec_minmax, | ||
114 | .extra1 = &min_slot_table_size, | ||
115 | .extra2 = &max_tcp_slot_table_limit | ||
116 | }, | ||
117 | { | ||
107 | .procname = "min_resvport", | 118 | .procname = "min_resvport", |
108 | .data = &xprt_min_resvport, | 119 | .data = &xprt_min_resvport, |
109 | .maxlen = sizeof(unsigned int), | 120 | .maxlen = sizeof(unsigned int), |
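
Note: the new tcp_max_slot_table_entries entry extends the existing sunrpc sysctl table, so -- assuming the same registration path as the neighboring tcp_slot_table_entries knob -- it surfaces as /proc/sys/sunrpc/tcp_max_slot_table_entries, clamped by proc_dointvec_minmax to [min_slot_table_size, max_tcp_slot_table_limit] via extra1/extra2. A minimal user-space sketch for inspecting and raising the cap (the path and the example value 128 are illustrative):

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/sunrpc/tcp_max_slot_table_entries";
	unsigned int cap;
	FILE *f;

	f = fopen(path, "r");			/* read the current cap */
	if (f == NULL || fscanf(f, "%u", &cap) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("tcp_max_slot_table_entries = %u\n", cap);

	f = fopen(path, "w");			/* raising it needs root */
	if (f == NULL || fprintf(f, "128\n") < 0) {
		perror(path);
		return 1;
	}
	fclose(f);
	return 0;
}

The same limit is also exposed as the sunrpc module parameter tcp_max_slot_table_entries (see the module_param_named() hunk at the end of this file), which enforces the same bounds through param_set_uint_minmax().
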
@@ -485,7 +496,7 @@ static int xs_nospace(struct rpc_task *task) | |||
485 | struct rpc_rqst *req = task->tk_rqstp; | 496 | struct rpc_rqst *req = task->tk_rqstp; |
486 | struct rpc_xprt *xprt = req->rq_xprt; | 497 | struct rpc_xprt *xprt = req->rq_xprt; |
487 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 498 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
488 | int ret = 0; | 499 | int ret = -EAGAIN; |
489 | 500 | ||
490 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", | 501 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", |
491 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | 502 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, |
@@ -497,7 +508,6 @@ static int xs_nospace(struct rpc_task *task) | |||
497 | /* Don't race with disconnect */ | 508 | /* Don't race with disconnect */ |
498 | if (xprt_connected(xprt)) { | 509 | if (xprt_connected(xprt)) { |
499 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { | 510 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { |
500 | ret = -EAGAIN; | ||
501 | /* | 511 | /* |
502 | * Notify TCP that we're limited by the application | 512 | * Notify TCP that we're limited by the application |
503 | * window size | 513 | * window size |
@@ -755,6 +765,8 @@ static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) | |||
755 | if (task == NULL) | 765 | if (task == NULL) |
756 | goto out_release; | 766 | goto out_release; |
757 | req = task->tk_rqstp; | 767 | req = task->tk_rqstp; |
768 | if (req == NULL) | ||
769 | goto out_release; | ||
758 | if (req->rq_bytes_sent == 0) | 770 | if (req->rq_bytes_sent == 0) |
759 | goto out_release; | 771 | goto out_release; |
760 | if (req->rq_bytes_sent == req->rq_snd_buf.len) | 772 | if (req->rq_bytes_sent == req->rq_snd_buf.len) |
@@ -1236,7 +1248,7 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt, | |||
1236 | return 0; | 1248 | return 0; |
1237 | } | 1249 | } |
1238 | 1250 | ||
1239 | #if defined(CONFIG_NFS_V4_1) | 1251 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
1240 | /* | 1252 | /* |
1241 | * Obtains an rpc_rqst previously allocated and invokes the common | 1253 | * Obtains an rpc_rqst previously allocated and invokes the common |
1242 | * tcp read code to read the data. The result is placed in the callback | 1254 | * tcp read code to read the data. The result is placed in the callback |
@@ -1299,7 +1311,7 @@ static inline int _xs_tcp_read_data(struct rpc_xprt *xprt, | |||
1299 | { | 1311 | { |
1300 | return xs_tcp_read_reply(xprt, desc); | 1312 | return xs_tcp_read_reply(xprt, desc); |
1301 | } | 1313 | } |
1302 | #endif /* CONFIG_NFS_V4_1 */ | 1314 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
1303 | 1315 | ||
1304 | /* | 1316 | /* |
1305 | * Read data off the transport. This can be either an RPC_CALL or an | 1317 | * Read data off the transport. This can be either an RPC_CALL or an |
@@ -2489,7 +2501,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap) | |||
2489 | } | 2501 | } |
2490 | 2502 | ||
2491 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | 2503 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, |
2492 | unsigned int slot_table_size) | 2504 | unsigned int slot_table_size, |
2505 | unsigned int max_slot_table_size) | ||
2493 | { | 2506 | { |
2494 | struct rpc_xprt *xprt; | 2507 | struct rpc_xprt *xprt; |
2495 | struct sock_xprt *new; | 2508 | struct sock_xprt *new; |
@@ -2499,7 +2512,8 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | |||
2499 | return ERR_PTR(-EBADF); | 2512 | return ERR_PTR(-EBADF); |
2500 | } | 2513 | } |
2501 | 2514 | ||
2502 | xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size); | 2515 | xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, |
2516 | max_slot_table_size); | ||
2503 | if (xprt == NULL) { | 2517 | if (xprt == NULL) { |
2504 | dprintk("RPC: xs_setup_xprt: couldn't allocate " | 2518 | dprintk("RPC: xs_setup_xprt: couldn't allocate " |
2505 | "rpc_xprt\n"); | 2519 | "rpc_xprt\n"); |
@@ -2541,7 +2555,8 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args) | |||
2541 | struct rpc_xprt *xprt; | 2555 | struct rpc_xprt *xprt; |
2542 | struct rpc_xprt *ret; | 2556 | struct rpc_xprt *ret; |
2543 | 2557 | ||
2544 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2558 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, |
2559 | xprt_max_tcp_slot_table_entries); | ||
2545 | if (IS_ERR(xprt)) | 2560 | if (IS_ERR(xprt)) |
2546 | return xprt; | 2561 | return xprt; |
2547 | transport = container_of(xprt, struct sock_xprt, xprt); | 2562 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2605,7 +2620,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2605 | struct sock_xprt *transport; | 2620 | struct sock_xprt *transport; |
2606 | struct rpc_xprt *ret; | 2621 | struct rpc_xprt *ret; |
2607 | 2622 | ||
2608 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); | 2623 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries, |
2624 | xprt_udp_slot_table_entries); | ||
2609 | if (IS_ERR(xprt)) | 2625 | if (IS_ERR(xprt)) |
2610 | return xprt; | 2626 | return xprt; |
2611 | transport = container_of(xprt, struct sock_xprt, xprt); | 2627 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2681,7 +2697,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2681 | struct sock_xprt *transport; | 2697 | struct sock_xprt *transport; |
2682 | struct rpc_xprt *ret; | 2698 | struct rpc_xprt *ret; |
2683 | 2699 | ||
2684 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2700 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, |
2701 | xprt_max_tcp_slot_table_entries); | ||
2685 | if (IS_ERR(xprt)) | 2702 | if (IS_ERR(xprt)) |
2686 | return xprt; | 2703 | return xprt; |
2687 | transport = container_of(xprt, struct sock_xprt, xprt); | 2704 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2760,7 +2777,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2760 | */ | 2777 | */ |
2761 | return args->bc_xprt->xpt_bc_xprt; | 2778 | return args->bc_xprt->xpt_bc_xprt; |
2762 | } | 2779 | } |
2763 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2780 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, |
2781 | xprt_tcp_slot_table_entries); | ||
2764 | if (IS_ERR(xprt)) | 2782 | if (IS_ERR(xprt)) |
2765 | return xprt; | 2783 | return xprt; |
2766 | transport = container_of(xprt, struct sock_xprt, xprt); | 2784 | transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -2947,8 +2965,26 @@ static struct kernel_param_ops param_ops_slot_table_size = { | |||
2947 | #define param_check_slot_table_size(name, p) \ | 2965 | #define param_check_slot_table_size(name, p) \ |
2948 | __param_check(name, p, unsigned int); | 2966 | __param_check(name, p, unsigned int); |
2949 | 2967 | ||
2968 | static int param_set_max_slot_table_size(const char *val, | ||
2969 | const struct kernel_param *kp) | ||
2970 | { | ||
2971 | return param_set_uint_minmax(val, kp, | ||
2972 | RPC_MIN_SLOT_TABLE, | ||
2973 | RPC_MAX_SLOT_TABLE_LIMIT); | ||
2974 | } | ||
2975 | |||
2976 | static struct kernel_param_ops param_ops_max_slot_table_size = { | ||
2977 | .set = param_set_max_slot_table_size, | ||
2978 | .get = param_get_uint, | ||
2979 | }; | ||
2980 | |||
2981 | #define param_check_max_slot_table_size(name, p) \ | ||
2982 | __param_check(name, p, unsigned int); | ||
2983 | |||
2950 | module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, | 2984 | module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, |
2951 | slot_table_size, 0644); | 2985 | slot_table_size, 0644); |
2986 | module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries, | ||
2987 | max_slot_table_size, 0644); | ||
2952 | module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, | 2988 | module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, |
2953 | slot_table_size, 0644); | 2989 | slot_table_size, 0644); |
2954 | 2990 | ||