Diffstat (limited to 'net')
28 files changed, 788 insertions, 1402 deletions
diff --git a/net/socket.c b/net/socket.c
index abf3e2561521..7f67c072d496 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1145,7 +1145,7 @@ call_kill:
 }
 EXPORT_SYMBOL(sock_wake_async);
 
-static int __sock_create(struct net *net, int family, int type, int protocol,
+int __sock_create(struct net *net, int family, int type, int protocol,
 			 struct socket **res, int kern)
 {
 	int err;
@@ -1257,6 +1257,7 @@ out_release:
 	rcu_read_unlock();
 	goto out_sock_release;
 }
+EXPORT_SYMBOL(__sock_create);
 
 int sock_create(int family, int type, int protocol, struct socket **res)
 {
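Note: with __sock_create() exported above, in-kernel users (such as sunrpc) can create a socket in a chosen network namespace while passing the kern flag explicitly. A minimal sketch of such a caller, for illustration only; the helper name is made up and is not part of this patch:

#include <linux/net.h>
#include <linux/in.h>
#include <net/net_namespace.h>

/* Hypothetical example: open a kernel-internal TCP socket in @net. */
static int example_open_kernel_socket(struct net *net, struct socket **sockp)
{
	/* kern = 1 marks the socket as kernel-internal */
	return __sock_create(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, sockp, 1);
}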
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 3376d7657185..8873fd8ddacd 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -36,22 +36,3 @@ config RPCSEC_GSS_KRB5
 	  Kerberos support should be installed.
 
 	  If unsure, say Y.
-
-config RPCSEC_GSS_SPKM3
-	tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
-	depends on SUNRPC && EXPERIMENTAL
-	select SUNRPC_GSS
-	select CRYPTO
-	select CRYPTO_MD5
-	select CRYPTO_DES
-	select CRYPTO_CAST5
-	select CRYPTO_CBC
-	help
-	  Choose Y here to enable Secure RPC using the SPKM3 public key
-	  GSS-API mechanism (RFC 2025).
-
-	  Secure RPC calls with SPKM3 require an auxiliary userspace
-	  daemon which may be found in the Linux nfs-utils package
-	  available from http://linux-nfs.org/.
-
-	  If unsure, say N.
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 68192e562749..afe67849269f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -658,7 +658,7 @@ out1:
 	return err;
 }
 
-void __exit rpcauth_remove_module(void)
+void rpcauth_remove_module(void)
 {
 	rpc_destroy_authunix();
 	rpc_destroy_generic_auth();
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 43162bb3b78f..e010a015d996 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -158,7 +158,7 @@ int __init rpc_init_generic_auth(void)
 	return rpcauth_init_credcache(&generic_auth);
 }
 
-void __exit rpc_destroy_generic_auth(void)
+void rpc_destroy_generic_auth(void)
 {
 	rpcauth_destroy_credcache(&generic_auth);
 }
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 74a231735f67..7350d86a32ee 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -11,8 +11,3 @@ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
 
 rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
 	gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
-
-obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
-
-rpcsec_gss_spkm3-objs := gss_spkm3_mech.o gss_spkm3_seal.o gss_spkm3_unseal.o \
-	gss_spkm3_token.o
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 778e5dfc5144..f375decc024b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -427,7 +427,7 @@ static int
 context_derive_keys_rc4(struct krb5_ctx *ctx)
 {
 	struct crypto_hash *hmac;
-	char sigkeyconstant[] = "signaturekey";
+	static const char sigkeyconstant[] = "signaturekey";
 	int slen = strlen(sigkeyconstant) + 1;	/* include null terminator */
 	struct hash_desc desc;
 	struct scatterlist sg[1];
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
deleted file mode 100644
index adade3d313f2..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ /dev/null
| @@ -1,247 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * linux/net/sunrpc/gss_spkm3_mech.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003 The Regents of the University of Michigan. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Andy Adamson <andros@umich.edu> | ||
| 8 | * J. Bruce Fields <bfields@umich.edu> | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or without | ||
| 11 | * modification, are permitted provided that the following conditions | ||
| 12 | * are met: | ||
| 13 | * | ||
| 14 | * 1. Redistributions of source code must retain the above copyright | ||
| 15 | * notice, this list of conditions and the following disclaimer. | ||
| 16 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 17 | * notice, this list of conditions and the following disclaimer in the | ||
| 18 | * documentation and/or other materials provided with the distribution. | ||
| 19 | * 3. Neither the name of the University nor the names of its | ||
| 20 | * contributors may be used to endorse or promote products derived | ||
| 21 | * from this software without specific prior written permission. | ||
| 22 | * | ||
| 23 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
| 24 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 25 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 26 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
| 27 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 28 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 29 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 30 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
| 31 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
| 32 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 33 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 34 | * | ||
| 35 | */ | ||
| 36 | |||
| 37 | #include <linux/err.h> | ||
| 38 | #include <linux/module.h> | ||
| 39 | #include <linux/init.h> | ||
| 40 | #include <linux/types.h> | ||
| 41 | #include <linux/slab.h> | ||
| 42 | #include <linux/sunrpc/auth.h> | ||
| 43 | #include <linux/in.h> | ||
| 44 | #include <linux/sunrpc/svcauth_gss.h> | ||
| 45 | #include <linux/sunrpc/gss_spkm3.h> | ||
| 46 | #include <linux/sunrpc/xdr.h> | ||
| 47 | #include <linux/crypto.h> | ||
| 48 | |||
| 49 | #ifdef RPC_DEBUG | ||
| 50 | # define RPCDBG_FACILITY RPCDBG_AUTH | ||
| 51 | #endif | ||
| 52 | |||
| 53 | static const void * | ||
| 54 | simple_get_bytes(const void *p, const void *end, void *res, int len) | ||
| 55 | { | ||
| 56 | const void *q = (const void *)((const char *)p + len); | ||
| 57 | if (unlikely(q > end || q < p)) | ||
| 58 | return ERR_PTR(-EFAULT); | ||
| 59 | memcpy(res, p, len); | ||
| 60 | return q; | ||
| 61 | } | ||
| 62 | |||
| 63 | static const void * | ||
| 64 | simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) | ||
| 65 | { | ||
| 66 | const void *q; | ||
| 67 | unsigned int len; | ||
| 68 | p = simple_get_bytes(p, end, &len, sizeof(len)); | ||
| 69 | if (IS_ERR(p)) | ||
| 70 | return p; | ||
| 71 | res->len = len; | ||
| 72 | if (len == 0) { | ||
| 73 | res->data = NULL; | ||
| 74 | return p; | ||
| 75 | } | ||
| 76 | q = (const void *)((const char *)p + len); | ||
| 77 | if (unlikely(q > end || q < p)) | ||
| 78 | return ERR_PTR(-EFAULT); | ||
| 79 | res->data = kmemdup(p, len, GFP_NOFS); | ||
| 80 | if (unlikely(res->data == NULL)) | ||
| 81 | return ERR_PTR(-ENOMEM); | ||
| 82 | return q; | ||
| 83 | } | ||
| 84 | |||
| 85 | static int | ||
| 86 | gss_import_sec_context_spkm3(const void *p, size_t len, | ||
| 87 | struct gss_ctx *ctx_id, | ||
| 88 | gfp_t gfp_mask) | ||
| 89 | { | ||
| 90 | const void *end = (const void *)((const char *)p + len); | ||
| 91 | struct spkm3_ctx *ctx; | ||
| 92 | int version; | ||
| 93 | |||
| 94 | if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask))) | ||
| 95 | goto out_err; | ||
| 96 | |||
| 97 | p = simple_get_bytes(p, end, &version, sizeof(version)); | ||
| 98 | if (IS_ERR(p)) | ||
| 99 | goto out_err_free_ctx; | ||
| 100 | if (version != 1) { | ||
| 101 | dprintk("RPC: unknown spkm3 token format: " | ||
| 102 | "obsolete nfs-utils?\n"); | ||
| 103 | p = ERR_PTR(-EINVAL); | ||
| 104 | goto out_err_free_ctx; | ||
| 105 | } | ||
| 106 | |||
| 107 | p = simple_get_netobj(p, end, &ctx->ctx_id); | ||
| 108 | if (IS_ERR(p)) | ||
| 109 | goto out_err_free_ctx; | ||
| 110 | |||
| 111 | p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); | ||
| 112 | if (IS_ERR(p)) | ||
| 113 | goto out_err_free_ctx_id; | ||
| 114 | |||
| 115 | p = simple_get_netobj(p, end, &ctx->mech_used); | ||
| 116 | if (IS_ERR(p)) | ||
| 117 | goto out_err_free_ctx_id; | ||
| 118 | |||
| 119 | p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags)); | ||
| 120 | if (IS_ERR(p)) | ||
| 121 | goto out_err_free_mech; | ||
| 122 | |||
| 123 | p = simple_get_netobj(p, end, &ctx->conf_alg); | ||
| 124 | if (IS_ERR(p)) | ||
| 125 | goto out_err_free_mech; | ||
| 126 | |||
| 127 | p = simple_get_netobj(p, end, &ctx->derived_conf_key); | ||
| 128 | if (IS_ERR(p)) | ||
| 129 | goto out_err_free_conf_alg; | ||
| 130 | |||
| 131 | p = simple_get_netobj(p, end, &ctx->intg_alg); | ||
| 132 | if (IS_ERR(p)) | ||
| 133 | goto out_err_free_conf_key; | ||
| 134 | |||
| 135 | p = simple_get_netobj(p, end, &ctx->derived_integ_key); | ||
| 136 | if (IS_ERR(p)) | ||
| 137 | goto out_err_free_intg_alg; | ||
| 138 | |||
| 139 | if (p != end) { | ||
| 140 | p = ERR_PTR(-EFAULT); | ||
| 141 | goto out_err_free_intg_key; | ||
| 142 | } | ||
| 143 | |||
| 144 | ctx_id->internal_ctx_id = ctx; | ||
| 145 | |||
| 146 | dprintk("RPC: Successfully imported new spkm context.\n"); | ||
| 147 | return 0; | ||
| 148 | |||
| 149 | out_err_free_intg_key: | ||
| 150 | kfree(ctx->derived_integ_key.data); | ||
| 151 | out_err_free_intg_alg: | ||
| 152 | kfree(ctx->intg_alg.data); | ||
| 153 | out_err_free_conf_key: | ||
| 154 | kfree(ctx->derived_conf_key.data); | ||
| 155 | out_err_free_conf_alg: | ||
| 156 | kfree(ctx->conf_alg.data); | ||
| 157 | out_err_free_mech: | ||
| 158 | kfree(ctx->mech_used.data); | ||
| 159 | out_err_free_ctx_id: | ||
| 160 | kfree(ctx->ctx_id.data); | ||
| 161 | out_err_free_ctx: | ||
| 162 | kfree(ctx); | ||
| 163 | out_err: | ||
| 164 | return PTR_ERR(p); | ||
| 165 | } | ||
| 166 | |||
| 167 | static void | ||
| 168 | gss_delete_sec_context_spkm3(void *internal_ctx) | ||
| 169 | { | ||
| 170 | struct spkm3_ctx *sctx = internal_ctx; | ||
| 171 | |||
| 172 | kfree(sctx->derived_integ_key.data); | ||
| 173 | kfree(sctx->intg_alg.data); | ||
| 174 | kfree(sctx->derived_conf_key.data); | ||
| 175 | kfree(sctx->conf_alg.data); | ||
| 176 | kfree(sctx->mech_used.data); | ||
| 177 | kfree(sctx->ctx_id.data); | ||
| 178 | kfree(sctx); | ||
| 179 | } | ||
| 180 | |||
| 181 | static u32 | ||
| 182 | gss_verify_mic_spkm3(struct gss_ctx *ctx, | ||
| 183 | struct xdr_buf *signbuf, | ||
| 184 | struct xdr_netobj *checksum) | ||
| 185 | { | ||
| 186 | u32 maj_stat = 0; | ||
| 187 | struct spkm3_ctx *sctx = ctx->internal_ctx_id; | ||
| 188 | |||
| 189 | maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); | ||
| 190 | |||
| 191 | dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); | ||
| 192 | return maj_stat; | ||
| 193 | } | ||
| 194 | |||
| 195 | static u32 | ||
| 196 | gss_get_mic_spkm3(struct gss_ctx *ctx, | ||
| 197 | struct xdr_buf *message_buffer, | ||
| 198 | struct xdr_netobj *message_token) | ||
| 199 | { | ||
| 200 | u32 err = 0; | ||
| 201 | struct spkm3_ctx *sctx = ctx->internal_ctx_id; | ||
| 202 | |||
| 203 | err = spkm3_make_token(sctx, message_buffer, | ||
| 204 | message_token, SPKM_MIC_TOK); | ||
| 205 | dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err); | ||
| 206 | return err; | ||
| 207 | } | ||
| 208 | |||
| 209 | static const struct gss_api_ops gss_spkm3_ops = { | ||
| 210 | .gss_import_sec_context = gss_import_sec_context_spkm3, | ||
| 211 | .gss_get_mic = gss_get_mic_spkm3, | ||
| 212 | .gss_verify_mic = gss_verify_mic_spkm3, | ||
| 213 | .gss_delete_sec_context = gss_delete_sec_context_spkm3, | ||
| 214 | }; | ||
| 215 | |||
| 216 | static struct pf_desc gss_spkm3_pfs[] = { | ||
| 217 | {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"}, | ||
| 218 | {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, | ||
| 219 | }; | ||
| 220 | |||
| 221 | static struct gss_api_mech gss_spkm3_mech = { | ||
| 222 | .gm_name = "spkm3", | ||
| 223 | .gm_owner = THIS_MODULE, | ||
| 224 | .gm_oid = {7, "\053\006\001\005\005\001\003"}, | ||
| 225 | .gm_ops = &gss_spkm3_ops, | ||
| 226 | .gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs), | ||
| 227 | .gm_pfs = gss_spkm3_pfs, | ||
| 228 | }; | ||
| 229 | |||
| 230 | static int __init init_spkm3_module(void) | ||
| 231 | { | ||
| 232 | int status; | ||
| 233 | |||
| 234 | status = gss_mech_register(&gss_spkm3_mech); | ||
| 235 | if (status) | ||
| 236 | printk("Failed to register spkm3 gss mechanism!\n"); | ||
| 237 | return status; | ||
| 238 | } | ||
| 239 | |||
| 240 | static void __exit cleanup_spkm3_module(void) | ||
| 241 | { | ||
| 242 | gss_mech_unregister(&gss_spkm3_mech); | ||
| 243 | } | ||
| 244 | |||
| 245 | MODULE_LICENSE("GPL"); | ||
| 246 | module_init(init_spkm3_module); | ||
| 247 | module_exit(cleanup_spkm3_module); | ||
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
deleted file mode 100644
index 5a3a65a0e2b4..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ /dev/null
| @@ -1,186 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * linux/net/sunrpc/gss_spkm3_seal.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003 The Regents of the University of Michigan. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Andy Adamson <andros@umich.edu> | ||
| 8 | * | ||
| 9 | * Redistribution and use in source and binary forms, with or without | ||
| 10 | * modification, are permitted provided that the following conditions | ||
| 11 | * are met: | ||
| 12 | * | ||
| 13 | * 1. Redistributions of source code must retain the above copyright | ||
| 14 | * notice, this list of conditions and the following disclaimer. | ||
| 15 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 16 | * notice, this list of conditions and the following disclaimer in the | ||
| 17 | * documentation and/or other materials provided with the distribution. | ||
| 18 | * 3. Neither the name of the University nor the names of its | ||
| 19 | * contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 25 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
| 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
| 30 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
| 31 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | * | ||
| 34 | */ | ||
| 35 | |||
| 36 | #include <linux/types.h> | ||
| 37 | #include <linux/jiffies.h> | ||
| 38 | #include <linux/sunrpc/gss_spkm3.h> | ||
| 39 | #include <linux/random.h> | ||
| 40 | #include <linux/crypto.h> | ||
| 41 | #include <linux/pagemap.h> | ||
| 42 | #include <linux/scatterlist.h> | ||
| 43 | #include <linux/sunrpc/xdr.h> | ||
| 44 | |||
| 45 | #ifdef RPC_DEBUG | ||
| 46 | # define RPCDBG_FACILITY RPCDBG_AUTH | ||
| 47 | #endif | ||
| 48 | |||
| 49 | const struct xdr_netobj hmac_md5_oid = { 8, "\x2B\x06\x01\x05\x05\x08\x01\x01"}; | ||
| 50 | const struct xdr_netobj cast5_cbc_oid = {9, "\x2A\x86\x48\x86\xF6\x7D\x07\x42\x0A"}; | ||
| 51 | |||
| 52 | /* | ||
| 53 | * spkm3_make_token() | ||
| 54 | * | ||
| 55 | * Only SPKM_MIC_TOK with md5 intg-alg is supported | ||
| 56 | */ | ||
| 57 | |||
| 58 | u32 | ||
| 59 | spkm3_make_token(struct spkm3_ctx *ctx, | ||
| 60 | struct xdr_buf * text, struct xdr_netobj * token, | ||
| 61 | int toktype) | ||
| 62 | { | ||
| 63 | s32 checksum_type; | ||
| 64 | char tokhdrbuf[25]; | ||
| 65 | char cksumdata[16]; | ||
| 66 | struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; | ||
| 67 | struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf}; | ||
| 68 | int tokenlen = 0; | ||
| 69 | unsigned char *ptr; | ||
| 70 | s32 now; | ||
| 71 | int ctxelen = 0, ctxzbit = 0; | ||
| 72 | int md5elen = 0, md5zbit = 0; | ||
| 73 | |||
| 74 | now = jiffies; | ||
| 75 | |||
| 76 | if (ctx->ctx_id.len != 16) { | ||
| 77 | dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", | ||
| 78 | ctx->ctx_id.len); | ||
| 79 | goto out_err; | ||
| 80 | } | ||
| 81 | |||
| 82 | if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) { | ||
| 83 | dprintk("RPC: gss_spkm3_seal: unsupported I-ALG " | ||
| 84 | "algorithm. only support hmac-md5 I-ALG.\n"); | ||
| 85 | goto out_err; | ||
| 86 | } else | ||
| 87 | checksum_type = CKSUMTYPE_HMAC_MD5; | ||
| 88 | |||
| 89 | if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) { | ||
| 90 | dprintk("RPC: gss_spkm3_seal: unsupported C-ALG " | ||
| 91 | "algorithm\n"); | ||
| 92 | goto out_err; | ||
| 93 | } | ||
| 94 | |||
| 95 | if (toktype == SPKM_MIC_TOK) { | ||
| 96 | /* Calculate checksum over the mic-header */ | ||
| 97 | asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit); | ||
| 98 | spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data, | ||
| 99 | ctxelen, ctxzbit); | ||
| 100 | if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key, | ||
| 101 | (char *)mic_hdr.data, mic_hdr.len, | ||
| 102 | text, 0, &md5cksum)) | ||
| 103 | goto out_err; | ||
| 104 | |||
| 105 | asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit); | ||
| 106 | tokenlen = 10 + ctxelen + 1 + md5elen + 1; | ||
| 107 | |||
| 108 | /* Create token header using generic routines */ | ||
| 109 | token->len = g_token_size(&ctx->mech_used, tokenlen + 2); | ||
| 110 | |||
| 111 | ptr = token->data; | ||
| 112 | g_make_token_header(&ctx->mech_used, tokenlen + 2, &ptr); | ||
| 113 | |||
| 114 | spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit); | ||
| 115 | } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */ | ||
| 116 | dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK " | ||
| 117 | "not supported\n"); | ||
| 118 | goto out_err; | ||
| 119 | } | ||
| 120 | |||
| 121 | /* XXX need to implement sequence numbers, and ctx->expired */ | ||
| 122 | |||
| 123 | return GSS_S_COMPLETE; | ||
| 124 | out_err: | ||
| 125 | token->data = NULL; | ||
| 126 | token->len = 0; | ||
| 127 | return GSS_S_FAILURE; | ||
| 128 | } | ||
| 129 | |||
| 130 | static int | ||
| 131 | spkm3_checksummer(struct scatterlist *sg, void *data) | ||
| 132 | { | ||
| 133 | struct hash_desc *desc = data; | ||
| 134 | |||
| 135 | return crypto_hash_update(desc, sg, sg->length); | ||
| 136 | } | ||
| 137 | |||
| 138 | /* checksum the plaintext data and hdrlen bytes of the token header */ | ||
| 139 | s32 | ||
| 140 | make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header, | ||
| 141 | unsigned int hdrlen, struct xdr_buf *body, | ||
| 142 | unsigned int body_offset, struct xdr_netobj *cksum) | ||
| 143 | { | ||
| 144 | char *cksumname; | ||
| 145 | struct hash_desc desc; /* XXX add to ctx? */ | ||
| 146 | struct scatterlist sg[1]; | ||
| 147 | int err; | ||
| 148 | |||
| 149 | switch (cksumtype) { | ||
| 150 | case CKSUMTYPE_HMAC_MD5: | ||
| 151 | cksumname = "hmac(md5)"; | ||
| 152 | break; | ||
| 153 | default: | ||
| 154 | dprintk("RPC: spkm3_make_checksum:" | ||
| 155 | " unsupported checksum %d", cksumtype); | ||
| 156 | return GSS_S_FAILURE; | ||
| 157 | } | ||
| 158 | |||
| 159 | if (key->data == NULL || key->len <= 0) return GSS_S_FAILURE; | ||
| 160 | |||
| 161 | desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); | ||
| 162 | if (IS_ERR(desc.tfm)) | ||
| 163 | return GSS_S_FAILURE; | ||
| 164 | cksum->len = crypto_hash_digestsize(desc.tfm); | ||
| 165 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 166 | |||
| 167 | err = crypto_hash_setkey(desc.tfm, key->data, key->len); | ||
| 168 | if (err) | ||
| 169 | goto out; | ||
| 170 | |||
| 171 | err = crypto_hash_init(&desc); | ||
| 172 | if (err) | ||
| 173 | goto out; | ||
| 174 | |||
| 175 | sg_init_one(sg, header, hdrlen); | ||
| 176 | crypto_hash_update(&desc, sg, sg->length); | ||
| 177 | |||
| 178 | xdr_process_buf(body, body_offset, body->len - body_offset, | ||
| 179 | spkm3_checksummer, &desc); | ||
| 180 | crypto_hash_final(&desc, cksum->data); | ||
| 181 | |||
| 182 | out: | ||
| 183 | crypto_free_hash(desc.tfm); | ||
| 184 | |||
| 185 | return err ? GSS_S_FAILURE : 0; | ||
| 186 | } | ||
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
deleted file mode 100644
index a99825d7caa0..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ /dev/null
| @@ -1,267 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * linux/net/sunrpc/gss_spkm3_token.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003 The Regents of the University of Michigan. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Andy Adamson <andros@umich.edu> | ||
| 8 | * | ||
| 9 | * Redistribution and use in source and binary forms, with or without | ||
| 10 | * modification, are permitted provided that the following conditions | ||
| 11 | * are met: | ||
| 12 | * | ||
| 13 | * 1. Redistributions of source code must retain the above copyright | ||
| 14 | * notice, this list of conditions and the following disclaimer. | ||
| 15 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 16 | * notice, this list of conditions and the following disclaimer in the | ||
| 17 | * documentation and/or other materials provided with the distribution. | ||
| 18 | * 3. Neither the name of the University nor the names of its | ||
| 19 | * contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 25 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
| 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
| 30 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
| 31 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | * | ||
| 34 | */ | ||
| 35 | |||
| 36 | #include <linux/types.h> | ||
| 37 | #include <linux/slab.h> | ||
| 38 | #include <linux/jiffies.h> | ||
| 39 | #include <linux/sunrpc/gss_spkm3.h> | ||
| 40 | #include <linux/random.h> | ||
| 41 | #include <linux/crypto.h> | ||
| 42 | |||
| 43 | #ifdef RPC_DEBUG | ||
| 44 | # define RPCDBG_FACILITY RPCDBG_AUTH | ||
| 45 | #endif | ||
| 46 | |||
| 47 | /* | ||
| 48 | * asn1_bitstring_len() | ||
| 49 | * | ||
| 50 | * calculate the asn1 bitstring length of the xdr_netobject | ||
| 51 | */ | ||
| 52 | void | ||
| 53 | asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits) | ||
| 54 | { | ||
| 55 | int i, zbit = 0,elen = in->len; | ||
| 56 | char *ptr; | ||
| 57 | |||
| 58 | ptr = &in->data[in->len -1]; | ||
| 59 | |||
| 60 | /* count trailing 0's */ | ||
| 61 | for(i = in->len; i > 0; i--) { | ||
| 62 | if (*ptr == 0) { | ||
| 63 | ptr--; | ||
| 64 | elen--; | ||
| 65 | } else | ||
| 66 | break; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* count number of 0 bits in final octet */ | ||
| 70 | ptr = &in->data[elen - 1]; | ||
| 71 | for(i = 0; i < 8; i++) { | ||
| 72 | short mask = 0x01; | ||
| 73 | |||
| 74 | if (!((mask << i) & *ptr)) | ||
| 75 | zbit++; | ||
| 76 | else | ||
| 77 | break; | ||
| 78 | } | ||
| 79 | *enclen = elen; | ||
| 80 | *zerobits = zbit; | ||
| 81 | } | ||
| 82 | |||
| 83 | /* | ||
| 84 | * decode_asn1_bitstring() | ||
| 85 | * | ||
| 86 | * decode a bitstring into a buffer of the expected length. | ||
| 87 | * enclen = bit string length | ||
| 88 | * explen = expected length (define in rfc) | ||
| 89 | */ | ||
| 90 | int | ||
| 91 | decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen) | ||
| 92 | { | ||
| 93 | if (!(out->data = kzalloc(explen,GFP_NOFS))) | ||
| 94 | return 0; | ||
| 95 | out->len = explen; | ||
| 96 | memcpy(out->data, in, enclen); | ||
| 97 | return 1; | ||
| 98 | } | ||
| 99 | |||
| 100 | /* | ||
| 101 | * SPKMInnerContextToken choice SPKM_MIC asn1 token layout | ||
| 102 | * | ||
| 103 | * contextid is always 16 bytes plain data. max asn1 bitstring len = 17. | ||
| 104 | * | ||
| 105 | * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum) | ||
| 106 | * | ||
| 107 | * pos value | ||
| 108 | * ---------- | ||
| 109 | * [0] a4 SPKM-MIC tag | ||
| 110 | * [1] ?? innertoken length (max 44) | ||
| 111 | * | ||
| 112 | * | ||
| 113 | * tok_hdr piece of checksum data starts here | ||
| 114 | * | ||
| 115 | * the maximum mic-header len = 9 + 17 = 26 | ||
| 116 | * mic-header | ||
| 117 | * ---------- | ||
| 118 | * [2] 30 SEQUENCE tag | ||
| 119 | * [3] ?? mic-header length: (max 23) = TokenID + ContextID | ||
| 120 | * | ||
| 121 | * TokenID - all fields constant and can be hardcoded | ||
| 122 | * ------- | ||
| 123 | * [4] 02 Type 2 | ||
| 124 | * [5] 02 Length 2 | ||
| 125 | * [6][7] 01 01 TokenID (SPKM_MIC_TOK) | ||
| 126 | * | ||
| 127 | * ContextID - encoded length not constant, calculated | ||
| 128 | * --------- | ||
| 129 | * [8] 03 Type 3 | ||
| 130 | * [9] ?? encoded length | ||
| 131 | * [10] ?? ctxzbit | ||
| 132 | * [11] contextid | ||
| 133 | * | ||
| 134 | * mic_header piece of checksum data ends here. | ||
| 135 | * | ||
| 136 | * int-cksum - encoded length not constant, calculated | ||
| 137 | * --------- | ||
| 138 | * [??] 03 Type 3 | ||
| 139 | * [??] ?? encoded length | ||
| 140 | * [??] ?? md5zbit | ||
| 141 | * [??] int-cksum (NID_md5 = 16) | ||
| 142 | * | ||
| 143 | * maximum SPKM-MIC innercontext token length = | ||
| 144 | * 10 + encoded contextid_size(17 max) + 2 + encoded | ||
| 145 | * cksum_size (17 maxfor NID_md5) = 46 | ||
| 146 | */ | ||
| 147 | |||
| 148 | /* | ||
| 149 | * spkm3_mic_header() | ||
| 150 | * | ||
| 151 | * Prepare the SPKM_MIC_TOK mic-header for check-sum calculation | ||
| 152 | * elen: 16 byte context id asn1 bitstring encoded length | ||
| 153 | */ | ||
| 154 | void | ||
| 155 | spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ctxdata, int elen, int zbit) | ||
| 156 | { | ||
| 157 | char *hptr = *hdrbuf; | ||
| 158 | char *top = *hdrbuf; | ||
| 159 | |||
| 160 | *(u8 *)hptr++ = 0x30; | ||
| 161 | *(u8 *)hptr++ = elen + 7; /* on the wire header length */ | ||
| 162 | |||
| 163 | /* tokenid */ | ||
| 164 | *(u8 *)hptr++ = 0x02; | ||
| 165 | *(u8 *)hptr++ = 0x02; | ||
| 166 | *(u8 *)hptr++ = 0x01; | ||
| 167 | *(u8 *)hptr++ = 0x01; | ||
| 168 | |||
| 169 | /* coniextid */ | ||
| 170 | *(u8 *)hptr++ = 0x03; | ||
| 171 | *(u8 *)hptr++ = elen + 1; /* add 1 to include zbit */ | ||
| 172 | *(u8 *)hptr++ = zbit; | ||
| 173 | memcpy(hptr, ctxdata, elen); | ||
| 174 | hptr += elen; | ||
| 175 | *hdrlen = hptr - top; | ||
| 176 | } | ||
| 177 | |||
| 178 | /* | ||
| 179 | * spkm3_mic_innercontext_token() | ||
| 180 | * | ||
| 181 | * *tokp points to the beginning of the SPKM_MIC token described | ||
| 182 | * in rfc 2025, section 3.2.1: | ||
| 183 | * | ||
| 184 | * toklen is the inner token length | ||
| 185 | */ | ||
| 186 | void | ||
| 187 | spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit) | ||
| 188 | { | ||
| 189 | unsigned char *ict = *tokp; | ||
| 190 | |||
| 191 | *(u8 *)ict++ = 0xa4; | ||
| 192 | *(u8 *)ict++ = toklen; | ||
| 193 | memcpy(ict, mic_hdr->data, mic_hdr->len); | ||
| 194 | ict += mic_hdr->len; | ||
| 195 | |||
| 196 | *(u8 *)ict++ = 0x03; | ||
| 197 | *(u8 *)ict++ = md5elen + 1; /* add 1 to include zbit */ | ||
| 198 | *(u8 *)ict++ = md5zbit; | ||
| 199 | memcpy(ict, md5cksum->data, md5elen); | ||
| 200 | } | ||
| 201 | |||
| 202 | u32 | ||
| 203 | spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **cksum) | ||
| 204 | { | ||
| 205 | struct xdr_netobj spkm3_ctx_id = {.len =0, .data = NULL}; | ||
| 206 | unsigned char *ptr = *tokp; | ||
| 207 | int ctxelen; | ||
| 208 | u32 ret = GSS_S_DEFECTIVE_TOKEN; | ||
| 209 | |||
| 210 | /* spkm3 innercontext token preamble */ | ||
| 211 | if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) { | ||
| 212 | dprintk("RPC: BAD SPKM ictoken preamble\n"); | ||
| 213 | goto out; | ||
| 214 | } | ||
| 215 | |||
| 216 | *mic_hdrlen = ptr[3]; | ||
| 217 | |||
| 218 | /* token type */ | ||
| 219 | if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) { | ||
| 220 | dprintk("RPC: BAD asn1 SPKM3 token type\n"); | ||
| 221 | goto out; | ||
| 222 | } | ||
| 223 | |||
| 224 | /* only support SPKM_MIC_TOK */ | ||
| 225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { | ||
| 226 | dprintk("RPC: ERROR unsupported SPKM3 token\n"); | ||
| 227 | goto out; | ||
| 228 | } | ||
| 229 | |||
| 230 | /* contextid */ | ||
| 231 | if (ptr[8] != 0x03) { | ||
| 232 | dprintk("RPC: BAD SPKM3 asn1 context-id type\n"); | ||
| 233 | goto out; | ||
| 234 | } | ||
| 235 | |||
| 236 | ctxelen = ptr[9]; | ||
| 237 | if (ctxelen > 17) { /* length includes asn1 zbit octet */ | ||
| 238 | dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen); | ||
| 239 | goto out; | ||
| 240 | } | ||
| 241 | |||
| 242 | /* ignore ptr[10] */ | ||
| 243 | |||
| 244 | if(!decode_asn1_bitstring(&spkm3_ctx_id, &ptr[11], ctxelen - 1, 16)) | ||
| 245 | goto out; | ||
| 246 | |||
| 247 | /* | ||
| 248 | * in the current implementation: the optional int-alg is not present | ||
| 249 | * so the default int-alg (md5) is used the optional snd-seq field is | ||
| 250 | * also not present | ||
| 251 | */ | ||
| 252 | |||
| 253 | if (*mic_hdrlen != 6 + ctxelen) { | ||
| 254 | dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only " | ||
| 255 | "support default int-alg (should be absent) " | ||
| 256 | "and do not support snd-seq\n", *mic_hdrlen); | ||
| 257 | goto out; | ||
| 258 | } | ||
| 259 | /* checksum */ | ||
| 260 | *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */ | ||
| 261 | |||
| 262 | ret = GSS_S_COMPLETE; | ||
| 263 | out: | ||
| 264 | kfree(spkm3_ctx_id.data); | ||
| 265 | return ret; | ||
| 266 | } | ||
| 267 | |||
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
deleted file mode 100644
index cc21ee860bb6..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ /dev/null
| @@ -1,127 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * linux/net/sunrpc/gss_spkm3_unseal.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003 The Regents of the University of Michigan. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Andy Adamson <andros@umich.edu> | ||
| 8 | * | ||
| 9 | * Redistribution and use in source and binary forms, with or without | ||
| 10 | * modification, are permitted provided that the following conditions | ||
| 11 | * are met: | ||
| 12 | * | ||
| 13 | * 1. Redistributions of source code must retain the above copyright | ||
| 14 | * notice, this list of conditions and the following disclaimer. | ||
| 15 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 16 | * notice, this list of conditions and the following disclaimer in the | ||
| 17 | * documentation and/or other materials provided with the distribution. | ||
| 18 | * 3. Neither the name of the University nor the names of its | ||
| 19 | * contributors may be used to endorse or promote products derived | ||
| 20 | * from this software without specific prior written permission. | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 25 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
| 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
| 30 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
| 31 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | * | ||
| 34 | */ | ||
| 35 | |||
| 36 | #include <linux/types.h> | ||
| 37 | #include <linux/slab.h> | ||
| 38 | #include <linux/jiffies.h> | ||
| 39 | #include <linux/sunrpc/gss_spkm3.h> | ||
| 40 | #include <linux/crypto.h> | ||
| 41 | |||
| 42 | #ifdef RPC_DEBUG | ||
| 43 | # define RPCDBG_FACILITY RPCDBG_AUTH | ||
| 44 | #endif | ||
| 45 | |||
| 46 | /* | ||
| 47 | * spkm3_read_token() | ||
| 48 | * | ||
| 49 | * only SPKM_MIC_TOK with md5 intg-alg is supported | ||
| 50 | */ | ||
| 51 | u32 | ||
| 52 | spkm3_read_token(struct spkm3_ctx *ctx, | ||
| 53 | struct xdr_netobj *read_token, /* checksum */ | ||
| 54 | struct xdr_buf *message_buffer, /* signbuf */ | ||
| 55 | int toktype) | ||
| 56 | { | ||
| 57 | s32 checksum_type; | ||
| 58 | s32 code; | ||
| 59 | struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; | ||
| 60 | char cksumdata[16]; | ||
| 61 | struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; | ||
| 62 | unsigned char *ptr = (unsigned char *)read_token->data; | ||
| 63 | unsigned char *cksum; | ||
| 64 | int bodysize, md5elen; | ||
| 65 | int mic_hdrlen; | ||
| 66 | u32 ret = GSS_S_DEFECTIVE_TOKEN; | ||
| 67 | |||
| 68 | if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used, | ||
| 69 | &bodysize, &ptr, read_token->len)) | ||
| 70 | goto out; | ||
| 71 | |||
| 72 | /* decode the token */ | ||
| 73 | |||
| 74 | if (toktype != SPKM_MIC_TOK) { | ||
| 75 | dprintk("RPC: BAD SPKM3 token type: %d\n", toktype); | ||
| 76 | goto out; | ||
| 77 | } | ||
| 78 | |||
| 79 | if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum))) | ||
| 80 | goto out; | ||
| 81 | |||
| 82 | if (*cksum++ != 0x03) { | ||
| 83 | dprintk("RPC: spkm3_read_token BAD checksum type\n"); | ||
| 84 | goto out; | ||
| 85 | } | ||
| 86 | md5elen = *cksum++; | ||
| 87 | cksum++; /* move past the zbit */ | ||
| 88 | |||
| 89 | if (!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16)) | ||
| 90 | goto out; | ||
| 91 | |||
| 92 | /* HARD CODED FOR MD5 */ | ||
| 93 | |||
| 94 | /* compute the checksum of the message. | ||
| 95 | * ptr + 2 = start of header piece of checksum | ||
| 96 | * mic_hdrlen + 2 = length of header piece of checksum | ||
| 97 | */ | ||
| 98 | ret = GSS_S_DEFECTIVE_TOKEN; | ||
| 99 | if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) { | ||
| 100 | dprintk("RPC: gss_spkm3_seal: unsupported I-ALG " | ||
| 101 | "algorithm\n"); | ||
| 102 | goto out; | ||
| 103 | } | ||
| 104 | |||
| 105 | checksum_type = CKSUMTYPE_HMAC_MD5; | ||
| 106 | |||
| 107 | code = make_spkm3_checksum(checksum_type, | ||
| 108 | &ctx->derived_integ_key, ptr + 2, mic_hdrlen + 2, | ||
| 109 | message_buffer, 0, &md5cksum); | ||
| 110 | |||
| 111 | if (code) | ||
| 112 | goto out; | ||
| 113 | |||
| 114 | ret = GSS_S_BAD_SIG; | ||
| 115 | code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len); | ||
| 116 | if (code) { | ||
| 117 | dprintk("RPC: bad MIC checksum\n"); | ||
| 118 | goto out; | ||
| 119 | } | ||
| 120 | |||
| 121 | |||
| 122 | /* XXX: need to add expiration and sequencing */ | ||
| 123 | ret = GSS_S_COMPLETE; | ||
| 124 | out: | ||
| 125 | kfree(wire_cksum.data); | ||
| 126 | return ret; | ||
| 127 | } | ||
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index cc385b3a59c2..dec2a6fc7c12 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -964,7 +964,7 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
 	if (rqstp->rq_gssclient == NULL)
 		return SVC_DENIED;
 	stat = svcauth_unix_set_client(rqstp);
-	if (stat == SVC_DROP)
+	if (stat == SVC_DROP || stat == SVC_CLOSE)
 		return stat;
 	return SVC_OK;
 }
@@ -1018,7 +1018,7 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
 		return SVC_DENIED;
 	memset(&rsikey, 0, sizeof(rsikey));
 	if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
-		return SVC_DROP;
+		return SVC_CLOSE;
 	*authp = rpc_autherr_badverf;
 	if (svc_safe_getnetobj(argv, &tmpobj)) {
 		kfree(rsikey.in_handle.data);
@@ -1026,38 +1026,35 @@
 	}
 	if (dup_netobj(&rsikey.in_token, &tmpobj)) {
 		kfree(rsikey.in_handle.data);
-		return SVC_DROP;
+		return SVC_CLOSE;
 	}
 
 	/* Perform upcall, or find upcall result: */
 	rsip = rsi_lookup(&rsikey);
 	rsi_free(&rsikey);
 	if (!rsip)
-		return SVC_DROP;
-	switch (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
-	case -EAGAIN:
-	case -ETIMEDOUT:
-	case -ENOENT:
+		return SVC_CLOSE;
+	if (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
 		/* No upcall result: */
-		return SVC_DROP;
-	case 0:
-		ret = SVC_DROP;
+		return SVC_CLOSE;
+
+	ret = SVC_CLOSE;
 	/* Got an answer to the upcall; use it: */
 	if (gss_write_init_verf(rqstp, rsip))
 		goto out;
 	if (resv->iov_len + 4 > PAGE_SIZE)
 		goto out;
 	svc_putnl(resv, RPC_SUCCESS);
 	if (svc_safe_putnetobj(resv, &rsip->out_handle))
 		goto out;
 	if (resv->iov_len + 3 * 4 > PAGE_SIZE)
 		goto out;
 	svc_putnl(resv, rsip->major_status);
 	svc_putnl(resv, rsip->minor_status);
 	svc_putnl(resv, GSS_SEQ_WIN);
 	if (svc_safe_putnetobj(resv, &rsip->out_token))
 		goto out;
-	}
+
 	ret = SVC_COMPLETE;
 out:
 	cache_put(&rsip->h, &rsi_cache);
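Note: the conversions from SVC_DROP to SVC_CLOSE above mark requests that are irretrievably lost, so the server should not merely forget them but also close the transport they arrived on. A rough sketch of how a dispatcher could act on the two codes; the helper itself is hypothetical, while svc_close_xprt() and the SVC_* constants are existing sunrpc server API:

#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcauth.h>

/* Illustration only: map an authentication verdict onto an action. */
static int example_handle_auth_status(struct svc_rqst *rqstp, int stat)
{
	switch (stat) {
	case SVC_OK:
		return 1;			/* keep processing the request */
	case SVC_DROP:
		return 0;			/* quietly drop the request */
	case SVC_CLOSE:
		svc_close_xprt(rqstp->rq_xprt);	/* request is lost for good */
		return 0;
	default:
		return 0;			/* SVC_DENIED etc. handled elsewhere */
	}
}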
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7dce81a926c5..e433e7580e27 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -33,15 +33,16 @@
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
+#include "netns.h"
 
 #define RPCDBG_FACILITY RPCDBG_CACHE
 
-static int cache_defer_req(struct cache_req *req, struct cache_head *item);
+static void cache_defer_req(struct cache_req *req, struct cache_head *item);
 static void cache_revisit_request(struct cache_head *item);
 
 static void cache_init(struct cache_head *h)
 {
-	time_t now = get_seconds();
+	time_t now = seconds_since_boot();
 	h->next = NULL;
 	h->flags = 0;
 	kref_init(&h->ref);
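Note: the get_seconds() to seconds_since_boot() conversions in this file rely on helpers defined in the sunrpc cache header, outside the net/ subtree this diffstat is limited to, so they never show up in these hunks. Roughly, and assuming the header defines them along these lines, expiry times are kept relative to boot time so that a wall-clock step cannot expire or revive cache entries, and convert_to_wallclock() is used only when a time is reported back to userspace:

#include <linux/time.h>

/* Approximate shape of the helpers assumed by this file (illustrative). */
static inline time_t seconds_since_boot(void)
{
	struct timespec boot;
	getboottime(&boot);
	return get_seconds() - boot.tv_sec;
}

static inline time_t convert_to_wallclock(time_t sinceboot)
{
	struct timespec boot;
	getboottime(&boot);
	return boot.tv_sec + sinceboot;
}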
@@ -51,7 +52,7 @@ static void cache_init(struct cache_head *h)
 
 static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
 {
-	return (h->expiry_time < get_seconds()) ||
+	return (h->expiry_time < seconds_since_boot()) ||
 		(detail->flush_time > h->last_refresh);
 }
 
@@ -126,7 +127,7 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 static void cache_fresh_locked(struct cache_head *head, time_t expiry)
 {
 	head->expiry_time = expiry;
-	head->last_refresh = get_seconds();
+	head->last_refresh = seconds_since_boot();
 	set_bit(CACHE_VALID, &head->flags);
 }
 
@@ -237,7 +238,7 @@ int cache_check(struct cache_detail *detail,
 
 	/* now see if we want to start an upcall */
 	refresh_age = (h->expiry_time - h->last_refresh);
-	age = get_seconds() - h->last_refresh;
+	age = seconds_since_boot() - h->last_refresh;
 
 	if (rqstp == NULL) {
 		if (rv == -EAGAIN)
@@ -252,7 +253,7 @@ int cache_check(struct cache_detail *detail,
 				cache_revisit_request(h);
 				if (rv == -EAGAIN) {
 					set_bit(CACHE_NEGATIVE, &h->flags);
-					cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
+					cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
 					cache_fresh_unlocked(h, detail);
 					rv = -ENOENT;
 				}
@@ -267,7 +268,8 @@ int cache_check(struct cache_detail *detail,
 	}
 
 	if (rv == -EAGAIN) {
-		if (cache_defer_req(rqstp, h) < 0) {
+		cache_defer_req(rqstp, h);
+		if (!test_bit(CACHE_PENDING, &h->flags)) {
 			/* Request is not deferred */
 			rv = cache_is_valid(detail, h);
 			if (rv == -EAGAIN)
@@ -387,11 +389,11 @@ static int cache_clean(void)
 			return -1;
 		}
 		current_detail = list_entry(next, struct cache_detail, others);
-		if (current_detail->nextcheck > get_seconds())
+		if (current_detail->nextcheck > seconds_since_boot())
 			current_index = current_detail->hash_size;
 		else {
 			current_index = 0;
-			current_detail->nextcheck = get_seconds()+30*60;
+			current_detail->nextcheck = seconds_since_boot()+30*60;
 		}
 	}
 
@@ -476,7 +478,7 @@ EXPORT_SYMBOL_GPL(cache_flush);
 void cache_purge(struct cache_detail *detail)
 {
 	detail->flush_time = LONG_MAX;
-	detail->nextcheck = get_seconds();
+	detail->nextcheck = seconds_since_boot();
 	cache_flush();
 	detail->flush_time = 1;
 }
@@ -505,81 +507,155 @@ EXPORT_SYMBOL_GPL(cache_purge);
 
 static DEFINE_SPINLOCK(cache_defer_lock);
 static LIST_HEAD(cache_defer_list);
-static struct list_head cache_defer_hash[DFR_HASHSIZE];
+static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
-static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
+{
+	hlist_del_init(&dreq->hash);
+	if (!list_empty(&dreq->recent)) {
+		list_del_init(&dreq->recent);
+		cache_defer_cnt--;
+	}
+}
+
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 {
-	struct cache_deferred_req *dreq, *discard;
 	int hash = DFR_HASH(item);
 
-	if (cache_defer_cnt >= DFR_MAX) {
-		/* too much in the cache, randomly drop this one,
-		 * or continue and drop the oldest below
-		 */
-		if (net_random()&1)
-			return -ENOMEM;
-	}
-	dreq = req->defer(req);
-	if (dreq == NULL)
-		return -ENOMEM;
+	INIT_LIST_HEAD(&dreq->recent);
+	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
+}
+
+static void setup_deferral(struct cache_deferred_req *dreq,
+			   struct cache_head *item,
+			   int count_me)
+{
 
 	dreq->item = item;
 
 	spin_lock(&cache_defer_lock);
 
-	list_add(&dreq->recent, &cache_defer_list);
-
-	if (cache_defer_hash[hash].next == NULL)
-		INIT_LIST_HEAD(&cache_defer_hash[hash]);
-	list_add(&dreq->hash, &cache_defer_hash[hash]);
+	__hash_deferred_req(dreq, item);
 
-	/* it is in, now maybe clean up */
-	discard = NULL;
-	if (++cache_defer_cnt > DFR_MAX) {
-		discard = list_entry(cache_defer_list.prev,
-				     struct cache_deferred_req, recent);
-		list_del_init(&discard->recent);
-		list_del_init(&discard->hash);
-		cache_defer_cnt--;
+	if (count_me) {
+		cache_defer_cnt++;
+		list_add(&dreq->recent, &cache_defer_list);
 	}
+
 	spin_unlock(&cache_defer_lock);
 
+}
+
+struct thread_deferred_req {
+	struct cache_deferred_req handle;
+	struct completion completion;
+};
+
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+	struct thread_deferred_req *dr =
+		container_of(dreq, struct thread_deferred_req, handle);
+	complete(&dr->completion);
+}
+
+static void cache_wait_req(struct cache_req *req, struct cache_head *item)
+{
+	struct thread_deferred_req sleeper;
+	struct cache_deferred_req *dreq = &sleeper.handle;
+
+	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+	dreq->revisit = cache_restart_thread;
+
+	setup_deferral(dreq, item, 0);
+
+	if (!test_bit(CACHE_PENDING, &item->flags) ||
+	    wait_for_completion_interruptible_timeout(
+		    &sleeper.completion, req->thread_wait) <= 0) {
+		/* The completion wasn't completed, so we need
+		 * to clean up
+		 */
+		spin_lock(&cache_defer_lock);
+		if (!hlist_unhashed(&sleeper.handle.hash)) {
+			__unhash_deferred_req(&sleeper.handle);
+			spin_unlock(&cache_defer_lock);
+		} else {
+			/* cache_revisit_request already removed
+			 * this from the hash table, but hasn't
+			 * called ->revisit yet. It will very soon
+			 * and we need to wait for it.
+			 */
+			spin_unlock(&cache_defer_lock);
+			wait_for_completion(&sleeper.completion);
+		}
+	}
+}
+
+static void cache_limit_defers(void)
+{
+	/* Make sure we haven't exceed the limit of allowed deferred
+	 * requests.
+	 */
+	struct cache_deferred_req *discard = NULL;
+
+	if (cache_defer_cnt <= DFR_MAX)
+		return;
+
+	spin_lock(&cache_defer_lock);
+
+	/* Consider removing either the first or the last */
+	if (cache_defer_cnt > DFR_MAX) {
+		if (net_random() & 1)
+			discard = list_entry(cache_defer_list.next,
+					     struct cache_deferred_req, recent);
+		else
+			discard = list_entry(cache_defer_list.prev,
+					     struct cache_deferred_req, recent);
+		__unhash_deferred_req(discard);
+	}
+	spin_unlock(&cache_defer_lock);
 	if (discard)
-		/* there was one too many */
 		discard->revisit(discard, 1);
+}
 
-	if (!test_bit(CACHE_PENDING, &item->flags)) {
-		/* must have just been validated... */
-		cache_revisit_request(item);
-		return -EAGAIN;
+static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+{
+	struct cache_deferred_req *dreq;
+
+	if (req->thread_wait) {
+		cache_wait_req(req, item);
+		if (!test_bit(CACHE_PENDING, &item->flags))
+			return;
 	}
-	return 0;
+	dreq = req->defer(req);
+	if (dreq == NULL)
+		return;
+	setup_deferral(dreq, item, 1);
+	if (!test_bit(CACHE_PENDING, &item->flags))
+		/* Bit could have been cleared before we managed to
+		 * set up the deferral, so need to revisit just in case
+		 */
+		cache_revisit_request(item);
+
+	cache_limit_defers();
 }
 
 static void cache_revisit_request(struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
 	struct list_head pending;
-
-	struct list_head *lp;
+	struct hlist_node *lp, *tmp;
 	int hash = DFR_HASH(item);
 
 	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
-	lp = cache_defer_hash[hash].next;
-	if (lp) {
-		while (lp != &cache_defer_hash[hash]) {
-			dreq = list_entry(lp, struct cache_deferred_req, hash);
-			lp = lp->next;
-			if (dreq->item == item) {
-				list_del_init(&dreq->hash);
-				list_move(&dreq->recent, &pending);
-				cache_defer_cnt--;
-			}
+	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+		if (dreq->item == item) {
+			__unhash_deferred_req(dreq);
+			list_add(&dreq->recent, &pending);
 		}
-	}
+
 	spin_unlock(&cache_defer_lock);
 
 	while (!list_empty(&pending)) {
@@ -600,9 +676,8 @@ void cache_clean_deferred(void *owner)
 
 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 		if (dreq->owner == owner) {
-			list_del_init(&dreq->hash);
-			list_move(&dreq->recent, &pending);
-			cache_defer_cnt--;
+			__unhash_deferred_req(dreq);
+			list_add(&dreq->recent, &pending);
 		}
 	}
 	spin_unlock(&cache_defer_lock);
@@ -901,7 +976,7 @@ static int cache_release(struct inode *inode, struct file *filp,
 		filp->private_data = NULL;
 		kfree(rp);
 
-		cd->last_close = get_seconds();
+		cd->last_close = seconds_since_boot();
 		atomic_dec(&cd->readers);
 	}
 	module_put(cd->owner);
@@ -1014,6 +1089,23 @@ static void warn_no_listener(struct cache_detail *detail)
 	}
 }
 
+static bool cache_listeners_exist(struct cache_detail *detail)
+{
+	if (atomic_read(&detail->readers))
+		return true;
+	if (detail->last_close == 0)
+		/* This cache was never opened */
+		return false;
+	if (detail->last_close < seconds_since_boot() - 30)
+		/*
+		 * We allow for the possibility that someone might
+		 * restart a userspace daemon without restarting the
+		 * server; but after 30 seconds, we give up.
+		 */
+		return false;
+	return true;
+}
+
 /*
  * register an upcall request to user-space and queue it up for read() by the
  * upcall daemon.
| @@ -1032,10 +1124,9 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h, | |||
| 1032 | char *bp; | 1124 | char *bp; |
| 1033 | int len; | 1125 | int len; |
| 1034 | 1126 | ||
| 1035 | if (atomic_read(&detail->readers) == 0 && | 1127 | if (!cache_listeners_exist(detail)) { |
| 1036 | detail->last_close < get_seconds() - 30) { | 1128 | warn_no_listener(detail); |
| 1037 | warn_no_listener(detail); | 1129 | return -EINVAL; |
| 1038 | return -EINVAL; | ||
| 1039 | } | 1130 | } |
| 1040 | 1131 | ||
| 1041 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1132 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
| @@ -1094,13 +1185,19 @@ int qword_get(char **bpp, char *dest, int bufsize) | |||
| 1094 | if (bp[0] == '\\' && bp[1] == 'x') { | 1185 | if (bp[0] == '\\' && bp[1] == 'x') { |
| 1095 | /* HEX STRING */ | 1186 | /* HEX STRING */ |
| 1096 | bp += 2; | 1187 | bp += 2; |
| 1097 | while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) { | 1188 | while (len < bufsize) { |
| 1098 | int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10; | 1189 | int h, l; |
| 1099 | bp++; | 1190 | |
| 1100 | byte <<= 4; | 1191 | h = hex_to_bin(bp[0]); |
| 1101 | byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10; | 1192 | if (h < 0) |
| 1102 | *dest++ = byte; | 1193 | break; |
| 1103 | bp++; | 1194 | |
| 1195 | l = hex_to_bin(bp[1]); | ||
| 1196 | if (l < 0) | ||
| 1197 | break; | ||
| 1198 | |||
| 1199 | *dest++ = (h << 4) | l; | ||
| 1200 | bp += 2; | ||
| 1104 | len++; | 1201 | len++; |
| 1105 | } | 1202 | } |
| 1106 | } else { | 1203 | } else { |
| @@ -1218,7 +1315,8 @@ static int c_show(struct seq_file *m, void *p) | |||
| 1218 | 1315 | ||
| 1219 | ifdebug(CACHE) | 1316 | ifdebug(CACHE) |
| 1220 | seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", | 1317 | seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", |
| 1221 | cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags); | 1318 | convert_to_wallclock(cp->expiry_time), |
| 1319 | atomic_read(&cp->ref.refcount), cp->flags); | ||
| 1222 | cache_get(cp); | 1320 | cache_get(cp); |
| 1223 | if (cache_check(cd, cp, NULL)) | 1321 | if (cache_check(cd, cp, NULL)) |
| 1224 | /* cache_check does a cache_put on failure */ | 1322 | /* cache_check does a cache_put on failure */ |
| @@ -1284,7 +1382,7 @@ static ssize_t read_flush(struct file *file, char __user *buf, | |||
| 1284 | unsigned long p = *ppos; | 1382 | unsigned long p = *ppos; |
| 1285 | size_t len; | 1383 | size_t len; |
| 1286 | 1384 | ||
| 1287 | sprintf(tbuf, "%lu\n", cd->flush_time); | 1385 | sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time)); |
| 1288 | len = strlen(tbuf); | 1386 | len = strlen(tbuf); |
| 1289 | if (p >= len) | 1387 | if (p >= len) |
| 1290 | return 0; | 1388 | return 0; |
| @@ -1302,19 +1400,20 @@ static ssize_t write_flush(struct file *file, const char __user *buf, | |||
| 1302 | struct cache_detail *cd) | 1400 | struct cache_detail *cd) |
| 1303 | { | 1401 | { |
| 1304 | char tbuf[20]; | 1402 | char tbuf[20]; |
| 1305 | char *ep; | 1403 | char *bp, *ep; |
| 1306 | long flushtime; | 1404 | |
| 1307 | if (*ppos || count > sizeof(tbuf)-1) | 1405 | if (*ppos || count > sizeof(tbuf)-1) |
| 1308 | return -EINVAL; | 1406 | return -EINVAL; |
| 1309 | if (copy_from_user(tbuf, buf, count)) | 1407 | if (copy_from_user(tbuf, buf, count)) |
| 1310 | return -EFAULT; | 1408 | return -EFAULT; |
| 1311 | tbuf[count] = 0; | 1409 | tbuf[count] = 0; |
| 1312 | flushtime = simple_strtoul(tbuf, &ep, 0); | 1410 | simple_strtoul(tbuf, &ep, 0); |
| 1313 | if (*ep && *ep != '\n') | 1411 | if (*ep && *ep != '\n') |
| 1314 | return -EINVAL; | 1412 | return -EINVAL; |
| 1315 | 1413 | ||
| 1316 | cd->flush_time = flushtime; | 1414 | bp = tbuf; |
| 1317 | cd->nextcheck = get_seconds(); | 1415 | cd->flush_time = get_expiry(&bp); |
| 1416 | cd->nextcheck = seconds_since_boot(); | ||
| 1318 | cache_flush(); | 1417 | cache_flush(); |
| 1319 | 1418 | ||
| 1320 | *ppos += count; | 1419 | *ppos += count; |
| @@ -1438,8 +1537,10 @@ static const struct file_operations cache_flush_operations_procfs = { | |||
| 1438 | .llseek = no_llseek, | 1537 | .llseek = no_llseek, |
| 1439 | }; | 1538 | }; |
| 1440 | 1539 | ||
| 1441 | static void remove_cache_proc_entries(struct cache_detail *cd) | 1540 | static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net) |
| 1442 | { | 1541 | { |
| 1542 | struct sunrpc_net *sn; | ||
| 1543 | |||
| 1443 | if (cd->u.procfs.proc_ent == NULL) | 1544 | if (cd->u.procfs.proc_ent == NULL) |
| 1444 | return; | 1545 | return; |
| 1445 | if (cd->u.procfs.flush_ent) | 1546 | if (cd->u.procfs.flush_ent) |
| @@ -1449,15 +1550,18 @@ static void remove_cache_proc_entries(struct cache_detail *cd) | |||
| 1449 | if (cd->u.procfs.content_ent) | 1550 | if (cd->u.procfs.content_ent) |
| 1450 | remove_proc_entry("content", cd->u.procfs.proc_ent); | 1551 | remove_proc_entry("content", cd->u.procfs.proc_ent); |
| 1451 | cd->u.procfs.proc_ent = NULL; | 1552 | cd->u.procfs.proc_ent = NULL; |
| 1452 | remove_proc_entry(cd->name, proc_net_rpc); | 1553 | sn = net_generic(net, sunrpc_net_id); |
| 1554 | remove_proc_entry(cd->name, sn->proc_net_rpc); | ||
| 1453 | } | 1555 | } |
| 1454 | 1556 | ||
| 1455 | #ifdef CONFIG_PROC_FS | 1557 | #ifdef CONFIG_PROC_FS |
| 1456 | static int create_cache_proc_entries(struct cache_detail *cd) | 1558 | static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) |
| 1457 | { | 1559 | { |
| 1458 | struct proc_dir_entry *p; | 1560 | struct proc_dir_entry *p; |
| 1561 | struct sunrpc_net *sn; | ||
| 1459 | 1562 | ||
| 1460 | cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc); | 1563 | sn = net_generic(net, sunrpc_net_id); |
| 1564 | cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc); | ||
| 1461 | if (cd->u.procfs.proc_ent == NULL) | 1565 | if (cd->u.procfs.proc_ent == NULL) |
| 1462 | goto out_nomem; | 1566 | goto out_nomem; |
| 1463 | cd->u.procfs.channel_ent = NULL; | 1567 | cd->u.procfs.channel_ent = NULL; |
| @@ -1488,11 +1592,11 @@ static int create_cache_proc_entries(struct cache_detail *cd) | |||
| 1488 | } | 1592 | } |
| 1489 | return 0; | 1593 | return 0; |
| 1490 | out_nomem: | 1594 | out_nomem: |
| 1491 | remove_cache_proc_entries(cd); | 1595 | remove_cache_proc_entries(cd, net); |
| 1492 | return -ENOMEM; | 1596 | return -ENOMEM; |
| 1493 | } | 1597 | } |
| 1494 | #else /* CONFIG_PROC_FS */ | 1598 | #else /* CONFIG_PROC_FS */ |
| 1495 | static int create_cache_proc_entries(struct cache_detail *cd) | 1599 | static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) |
| 1496 | { | 1600 | { |
| 1497 | return 0; | 1601 | return 0; |
| 1498 | } | 1602 | } |
| @@ -1503,23 +1607,33 @@ void __init cache_initialize(void) | |||
| 1503 | INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean); | 1607 | INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean); |
| 1504 | } | 1608 | } |
| 1505 | 1609 | ||
| 1506 | int cache_register(struct cache_detail *cd) | 1610 | int cache_register_net(struct cache_detail *cd, struct net *net) |
| 1507 | { | 1611 | { |
| 1508 | int ret; | 1612 | int ret; |
| 1509 | 1613 | ||
| 1510 | sunrpc_init_cache_detail(cd); | 1614 | sunrpc_init_cache_detail(cd); |
| 1511 | ret = create_cache_proc_entries(cd); | 1615 | ret = create_cache_proc_entries(cd, net); |
| 1512 | if (ret) | 1616 | if (ret) |
| 1513 | sunrpc_destroy_cache_detail(cd); | 1617 | sunrpc_destroy_cache_detail(cd); |
| 1514 | return ret; | 1618 | return ret; |
| 1515 | } | 1619 | } |
| 1620 | |||
| 1621 | int cache_register(struct cache_detail *cd) | ||
| 1622 | { | ||
| 1623 | return cache_register_net(cd, &init_net); | ||
| 1624 | } | ||
| 1516 | EXPORT_SYMBOL_GPL(cache_register); | 1625 | EXPORT_SYMBOL_GPL(cache_register); |
| 1517 | 1626 | ||
| 1518 | void cache_unregister(struct cache_detail *cd) | 1627 | void cache_unregister_net(struct cache_detail *cd, struct net *net) |
| 1519 | { | 1628 | { |
| 1520 | remove_cache_proc_entries(cd); | 1629 | remove_cache_proc_entries(cd, net); |
| 1521 | sunrpc_destroy_cache_detail(cd); | 1630 | sunrpc_destroy_cache_detail(cd); |
| 1522 | } | 1631 | } |
| 1632 | |||
| 1633 | void cache_unregister(struct cache_detail *cd) | ||
| 1634 | { | ||
| 1635 | cache_unregister_net(cd, &init_net); | ||
| 1636 | } | ||
| 1523 | EXPORT_SYMBOL_GPL(cache_unregister); | 1637 | EXPORT_SYMBOL_GPL(cache_unregister); |
| 1524 | 1638 | ||
| 1525 | static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, | 1639 | static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, |
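The cache.c changes above split cache registration into namespace-aware and legacy entry points: cache_register_net()/cache_unregister_net() take an explicit struct net, while cache_register()/cache_unregister() keep the old behaviour by passing &init_net. A minimal sketch of a caller, assuming the new prototypes are exposed through include/linux/sunrpc/cache.h (a header change outside this net/ diffstat); the example_cache detail is invented for illustration:

#include <linux/module.h>
#include <linux/sunrpc/cache.h>
#include <net/net_namespace.h>

#define EXAMPLE_HASHMAX 16

static struct cache_head *example_table[EXAMPLE_HASHMAX];

static struct cache_detail example_cache = {
        .owner          = THIS_MODULE,
        .hash_size      = EXAMPLE_HASHMAX,
        .hash_table     = example_table,
        .name           = "example.cache",
};

static int example_register(struct net *net)
{
        /* Per-net callers pass their own namespace; the legacy
         * cache_register(&example_cache) is now just this call
         * with net == &init_net. */
        return cache_register_net(&example_cache, net);
}

static void example_unregister(struct net *net)
{
        cache_unregister_net(&example_cache, net);
}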
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cbc5b8ccc8be..9dab9573be41 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -284,6 +284,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
| 284 | struct rpc_xprt *xprt; | 284 | struct rpc_xprt *xprt; |
| 285 | struct rpc_clnt *clnt; | 285 | struct rpc_clnt *clnt; |
| 286 | struct xprt_create xprtargs = { | 286 | struct xprt_create xprtargs = { |
| 287 | .net = args->net, | ||
| 287 | .ident = args->protocol, | 288 | .ident = args->protocol, |
| 288 | .srcaddr = args->saddress, | 289 | .srcaddr = args->saddress, |
| 289 | .dstaddr = args->address, | 290 | .dstaddr = args->address, |
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h new file mode 100644 index 000000000000..d013bf211cae --- /dev/null +++ b/net/sunrpc/netns.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | #ifndef __SUNRPC_NETNS_H__ | ||
| 2 | #define __SUNRPC_NETNS_H__ | ||
| 3 | |||
| 4 | #include <net/net_namespace.h> | ||
| 5 | #include <net/netns/generic.h> | ||
| 6 | |||
| 7 | struct cache_detail; | ||
| 8 | |||
| 9 | struct sunrpc_net { | ||
| 10 | struct proc_dir_entry *proc_net_rpc; | ||
| 11 | struct cache_detail *ip_map_cache; | ||
| 12 | }; | ||
| 13 | |||
| 14 | extern int sunrpc_net_id; | ||
| 15 | |||
| 16 | int ip_map_cache_create(struct net *); | ||
| 17 | void ip_map_cache_destroy(struct net *); | ||
| 18 | |||
| 19 | #endif | ||
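The new netns.h above is the hub for the rest of the series: every per-namespace consumer resolves its sunrpc_net slot through net_generic(). A minimal sketch of that lookup pattern; some_helper is an invented name, not part of the commit:

#include <linux/proc_fs.h>
#include <net/netns/generic.h>

#include "netns.h"

static struct proc_dir_entry *some_helper(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        /* sn->proc_net_rpc is created by rpc_proc_init(net) and
         * sn->ip_map_cache by ip_map_cache_create(net). */
        return sn->proc_net_rpc;
}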
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 63ec116b4dd4..fa6d7ca2c851 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
| @@ -177,6 +177,7 @@ static DEFINE_MUTEX(rpcb_create_local_mutex); | |||
| 177 | static int rpcb_create_local(void) | 177 | static int rpcb_create_local(void) |
| 178 | { | 178 | { |
| 179 | struct rpc_create_args args = { | 179 | struct rpc_create_args args = { |
| 180 | .net = &init_net, | ||
| 180 | .protocol = XPRT_TRANSPORT_TCP, | 181 | .protocol = XPRT_TRANSPORT_TCP, |
| 181 | .address = (struct sockaddr *)&rpcb_inaddr_loopback, | 182 | .address = (struct sockaddr *)&rpcb_inaddr_loopback, |
| 182 | .addrsize = sizeof(rpcb_inaddr_loopback), | 183 | .addrsize = sizeof(rpcb_inaddr_loopback), |
| @@ -229,6 +230,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, | |||
| 229 | size_t salen, int proto, u32 version) | 230 | size_t salen, int proto, u32 version) |
| 230 | { | 231 | { |
| 231 | struct rpc_create_args args = { | 232 | struct rpc_create_args args = { |
| 233 | .net = &init_net, | ||
| 232 | .protocol = proto, | 234 | .protocol = proto, |
| 233 | .address = srvaddr, | 235 | .address = srvaddr, |
| 234 | .addrsize = salen, | 236 | .addrsize = salen, |
| @@ -248,7 +250,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, | |||
| 248 | ((struct sockaddr_in6 *)srvaddr)->sin6_port = htons(RPCBIND_PORT); | 250 | ((struct sockaddr_in6 *)srvaddr)->sin6_port = htons(RPCBIND_PORT); |
| 249 | break; | 251 | break; |
| 250 | default: | 252 | default: |
| 251 | return NULL; | 253 | return ERR_PTR(-EAFNOSUPPORT); |
| 252 | } | 254 | } |
| 253 | 255 | ||
| 254 | return rpc_create(&args); | 256 | return rpc_create(&args); |
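Besides the .net argument, rpcb_create() now reports an unsupported address family as ERR_PTR(-EAFNOSUPPORT) rather than NULL, so its callers inside rpcb_clnt.c must test with IS_ERR(). A hedged sketch of the adjusted calling convention; the helper name and body are assumptions, not taken from the commit:

#include <linux/err.h>
#include <linux/socket.h>
#include <linux/sunrpc/clnt.h>

static int example_rpcb_call(char *hostname, struct sockaddr *sap,
                             size_t salen, int proto, u32 version)
{
        struct rpc_clnt *rpcb_clnt = rpcb_create(hostname, sap, salen,
                                                 proto, version);

        if (IS_ERR(rpcb_clnt))
                return PTR_ERR(rpcb_clnt);      /* e.g. -EAFNOSUPPORT */

        /* ... issue the rpcbind query, then release the client ... */
        rpc_shutdown_client(rpcb_clnt);
        return 0;
}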
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index ea1046f3f9a3..f71a73107ae9 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
| @@ -22,11 +22,10 @@ | |||
| 22 | #include <linux/sunrpc/clnt.h> | 22 | #include <linux/sunrpc/clnt.h> |
| 23 | #include <linux/sunrpc/svcsock.h> | 23 | #include <linux/sunrpc/svcsock.h> |
| 24 | #include <linux/sunrpc/metrics.h> | 24 | #include <linux/sunrpc/metrics.h> |
| 25 | #include <net/net_namespace.h> | ||
| 26 | 25 | ||
| 27 | #define RPCDBG_FACILITY RPCDBG_MISC | 26 | #include "netns.h" |
| 28 | 27 | ||
| 29 | struct proc_dir_entry *proc_net_rpc = NULL; | 28 | #define RPCDBG_FACILITY RPCDBG_MISC |
| 30 | 29 | ||
| 31 | /* | 30 | /* |
| 32 | * Get RPC client stats | 31 | * Get RPC client stats |
| @@ -218,10 +217,11 @@ EXPORT_SYMBOL_GPL(rpc_print_iostats); | |||
| 218 | static inline struct proc_dir_entry * | 217 | static inline struct proc_dir_entry * |
| 219 | do_register(const char *name, void *data, const struct file_operations *fops) | 218 | do_register(const char *name, void *data, const struct file_operations *fops) |
| 220 | { | 219 | { |
| 221 | rpc_proc_init(); | 220 | struct sunrpc_net *sn; |
| 222 | dprintk("RPC: registering /proc/net/rpc/%s\n", name); | ||
| 223 | 221 | ||
| 224 | return proc_create_data(name, 0, proc_net_rpc, fops, data); | 222 | dprintk("RPC: registering /proc/net/rpc/%s\n", name); |
| 223 | sn = net_generic(&init_net, sunrpc_net_id); | ||
| 224 | return proc_create_data(name, 0, sn->proc_net_rpc, fops, data); | ||
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | struct proc_dir_entry * | 227 | struct proc_dir_entry * |
| @@ -234,7 +234,10 @@ EXPORT_SYMBOL_GPL(rpc_proc_register); | |||
| 234 | void | 234 | void |
| 235 | rpc_proc_unregister(const char *name) | 235 | rpc_proc_unregister(const char *name) |
| 236 | { | 236 | { |
| 237 | remove_proc_entry(name, proc_net_rpc); | 237 | struct sunrpc_net *sn; |
| 238 | |||
| 239 | sn = net_generic(&init_net, sunrpc_net_id); | ||
| 240 | remove_proc_entry(name, sn->proc_net_rpc); | ||
| 238 | } | 241 | } |
| 239 | EXPORT_SYMBOL_GPL(rpc_proc_unregister); | 242 | EXPORT_SYMBOL_GPL(rpc_proc_unregister); |
| 240 | 243 | ||
| @@ -248,25 +251,29 @@ EXPORT_SYMBOL_GPL(svc_proc_register); | |||
| 248 | void | 251 | void |
| 249 | svc_proc_unregister(const char *name) | 252 | svc_proc_unregister(const char *name) |
| 250 | { | 253 | { |
| 251 | remove_proc_entry(name, proc_net_rpc); | 254 | struct sunrpc_net *sn; |
| 255 | |||
| 256 | sn = net_generic(&init_net, sunrpc_net_id); | ||
| 257 | remove_proc_entry(name, sn->proc_net_rpc); | ||
| 252 | } | 258 | } |
| 253 | EXPORT_SYMBOL_GPL(svc_proc_unregister); | 259 | EXPORT_SYMBOL_GPL(svc_proc_unregister); |
| 254 | 260 | ||
| 255 | void | 261 | int rpc_proc_init(struct net *net) |
| 256 | rpc_proc_init(void) | ||
| 257 | { | 262 | { |
| 263 | struct sunrpc_net *sn; | ||
| 264 | |||
| 258 | dprintk("RPC: registering /proc/net/rpc\n"); | 265 | dprintk("RPC: registering /proc/net/rpc\n"); |
| 259 | if (!proc_net_rpc) | 266 | sn = net_generic(net, sunrpc_net_id); |
| 260 | proc_net_rpc = proc_mkdir("rpc", init_net.proc_net); | 267 | sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net); |
| 268 | if (sn->proc_net_rpc == NULL) | ||
| 269 | return -ENOMEM; | ||
| 270 | |||
| 271 | return 0; | ||
| 261 | } | 272 | } |
| 262 | 273 | ||
| 263 | void | 274 | void rpc_proc_exit(struct net *net) |
| 264 | rpc_proc_exit(void) | ||
| 265 | { | 275 | { |
| 266 | dprintk("RPC: unregistering /proc/net/rpc\n"); | 276 | dprintk("RPC: unregistering /proc/net/rpc\n"); |
| 267 | if (proc_net_rpc) { | 277 | remove_proc_entry("rpc", net->proc_net); |
| 268 | proc_net_rpc = NULL; | ||
| 269 | remove_proc_entry("rpc", init_net.proc_net); | ||
| 270 | } | ||
| 271 | } | 278 | } |
| 272 | 279 | ||
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index c0d085013a2b..9d0809160994 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
| @@ -22,7 +22,44 @@ | |||
| 22 | #include <linux/sunrpc/rpc_pipe_fs.h> | 22 | #include <linux/sunrpc/rpc_pipe_fs.h> |
| 23 | #include <linux/sunrpc/xprtsock.h> | 23 | #include <linux/sunrpc/xprtsock.h> |
| 24 | 24 | ||
| 25 | extern struct cache_detail ip_map_cache, unix_gid_cache; | 25 | #include "netns.h" |
| 26 | |||
| 27 | int sunrpc_net_id; | ||
| 28 | |||
| 29 | static __net_init int sunrpc_init_net(struct net *net) | ||
| 30 | { | ||
| 31 | int err; | ||
| 32 | |||
| 33 | err = rpc_proc_init(net); | ||
| 34 | if (err) | ||
| 35 | goto err_proc; | ||
| 36 | |||
| 37 | err = ip_map_cache_create(net); | ||
| 38 | if (err) | ||
| 39 | goto err_ipmap; | ||
| 40 | |||
| 41 | return 0; | ||
| 42 | |||
| 43 | err_ipmap: | ||
| 44 | rpc_proc_exit(net); | ||
| 45 | err_proc: | ||
| 46 | return err; | ||
| 47 | } | ||
| 48 | |||
| 49 | static __net_exit void sunrpc_exit_net(struct net *net) | ||
| 50 | { | ||
| 51 | ip_map_cache_destroy(net); | ||
| 52 | rpc_proc_exit(net); | ||
| 53 | } | ||
| 54 | |||
| 55 | static struct pernet_operations sunrpc_net_ops = { | ||
| 56 | .init = sunrpc_init_net, | ||
| 57 | .exit = sunrpc_exit_net, | ||
| 58 | .id = &sunrpc_net_id, | ||
| 59 | .size = sizeof(struct sunrpc_net), | ||
| 60 | }; | ||
| 61 | |||
| 62 | extern struct cache_detail unix_gid_cache; | ||
| 26 | 63 | ||
| 27 | extern void cleanup_rpcb_clnt(void); | 64 | extern void cleanup_rpcb_clnt(void); |
| 28 | 65 | ||
| @@ -38,18 +75,22 @@ init_sunrpc(void) | |||
| 38 | err = rpcauth_init_module(); | 75 | err = rpcauth_init_module(); |
| 39 | if (err) | 76 | if (err) |
| 40 | goto out3; | 77 | goto out3; |
| 78 | |||
| 79 | cache_initialize(); | ||
| 80 | |||
| 81 | err = register_pernet_subsys(&sunrpc_net_ops); | ||
| 82 | if (err) | ||
| 83 | goto out4; | ||
| 41 | #ifdef RPC_DEBUG | 84 | #ifdef RPC_DEBUG |
| 42 | rpc_register_sysctl(); | 85 | rpc_register_sysctl(); |
| 43 | #endif | 86 | #endif |
| 44 | #ifdef CONFIG_PROC_FS | ||
| 45 | rpc_proc_init(); | ||
| 46 | #endif | ||
| 47 | cache_initialize(); | ||
| 48 | cache_register(&ip_map_cache); | ||
| 49 | cache_register(&unix_gid_cache); | 87 | cache_register(&unix_gid_cache); |
| 50 | svc_init_xprt_sock(); /* svc sock transport */ | 88 | svc_init_xprt_sock(); /* svc sock transport */ |
| 51 | init_socket_xprt(); /* clnt sock transport */ | 89 | init_socket_xprt(); /* clnt sock transport */ |
| 52 | return 0; | 90 | return 0; |
| 91 | |||
| 92 | out4: | ||
| 93 | rpcauth_remove_module(); | ||
| 53 | out3: | 94 | out3: |
| 54 | rpc_destroy_mempool(); | 95 | rpc_destroy_mempool(); |
| 55 | out2: | 96 | out2: |
| @@ -67,14 +108,11 @@ cleanup_sunrpc(void) | |||
| 67 | svc_cleanup_xprt_sock(); | 108 | svc_cleanup_xprt_sock(); |
| 68 | unregister_rpc_pipefs(); | 109 | unregister_rpc_pipefs(); |
| 69 | rpc_destroy_mempool(); | 110 | rpc_destroy_mempool(); |
| 70 | cache_unregister(&ip_map_cache); | ||
| 71 | cache_unregister(&unix_gid_cache); | 111 | cache_unregister(&unix_gid_cache); |
| 112 | unregister_pernet_subsys(&sunrpc_net_ops); | ||
| 72 | #ifdef RPC_DEBUG | 113 | #ifdef RPC_DEBUG |
| 73 | rpc_unregister_sysctl(); | 114 | rpc_unregister_sysctl(); |
| 74 | #endif | 115 | #endif |
| 75 | #ifdef CONFIG_PROC_FS | ||
| 76 | rpc_proc_exit(); | ||
| 77 | #endif | ||
| 78 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | 116 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
| 79 | } | 117 | } |
| 80 | MODULE_LICENSE("GPL"); | 118 | MODULE_LICENSE("GPL"); |
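The sunrpc_syms.c hunk registers sunrpc_net_ops with both .id and .size set, which makes the pernet core allocate a zeroed struct sunrpc_net for every namespace and hand it back through net_generic(). A generic sketch of that pattern, with invented example_* names, for readers unfamiliar with pernet_operations:

#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_net {
        int counter;
};

static int example_net_id;

static __net_init int example_init_net(struct net *net)
{
        struct example_net *en = net_generic(net, example_net_id);

        en->counter = 0;        /* already zeroed by the core; shown for clarity */
        return 0;
}

static struct pernet_operations example_net_ops = {
        .init = example_init_net,
        .id   = &example_net_id,
        .size = sizeof(struct example_net),
};

static int __init example_module_init(void)
{
        return register_pernet_subsys(&example_net_ops);
}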
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index d9017d64597e..6359c42c4941 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
| @@ -1055,6 +1055,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) | |||
| 1055 | goto err_bad; | 1055 | goto err_bad; |
| 1056 | case SVC_DENIED: | 1056 | case SVC_DENIED: |
| 1057 | goto err_bad_auth; | 1057 | goto err_bad_auth; |
| 1058 | case SVC_CLOSE: | ||
| 1059 | if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) | ||
| 1060 | svc_close_xprt(rqstp->rq_xprt); | ||
| 1058 | case SVC_DROP: | 1061 | case SVC_DROP: |
| 1059 | goto dropit; | 1062 | goto dropit; |
| 1060 | case SVC_COMPLETE: | 1063 | case SVC_COMPLETE: |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index cbc084939dd8..c82fe739fbdc 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
| @@ -100,16 +100,14 @@ EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); | |||
| 100 | */ | 100 | */ |
| 101 | int svc_print_xprts(char *buf, int maxlen) | 101 | int svc_print_xprts(char *buf, int maxlen) |
| 102 | { | 102 | { |
| 103 | struct list_head *le; | 103 | struct svc_xprt_class *xcl; |
| 104 | char tmpstr[80]; | 104 | char tmpstr[80]; |
| 105 | int len = 0; | 105 | int len = 0; |
| 106 | buf[0] = '\0'; | 106 | buf[0] = '\0'; |
| 107 | 107 | ||
| 108 | spin_lock(&svc_xprt_class_lock); | 108 | spin_lock(&svc_xprt_class_lock); |
| 109 | list_for_each(le, &svc_xprt_class_list) { | 109 | list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { |
| 110 | int slen; | 110 | int slen; |
| 111 | struct svc_xprt_class *xcl = | ||
| 112 | list_entry(le, struct svc_xprt_class, xcl_list); | ||
| 113 | 111 | ||
| 114 | sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); | 112 | sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); |
| 115 | slen = strlen(tmpstr); | 113 | slen = strlen(tmpstr); |
| @@ -128,9 +126,9 @@ static void svc_xprt_free(struct kref *kref) | |||
| 128 | struct svc_xprt *xprt = | 126 | struct svc_xprt *xprt = |
| 129 | container_of(kref, struct svc_xprt, xpt_ref); | 127 | container_of(kref, struct svc_xprt, xpt_ref); |
| 130 | struct module *owner = xprt->xpt_class->xcl_owner; | 128 | struct module *owner = xprt->xpt_class->xcl_owner; |
| 131 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) && | 129 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) |
| 132 | xprt->xpt_auth_cache != NULL) | 130 | svcauth_unix_info_release(xprt); |
| 133 | svcauth_unix_info_release(xprt->xpt_auth_cache); | 131 | put_net(xprt->xpt_net); |
| 134 | xprt->xpt_ops->xpo_free(xprt); | 132 | xprt->xpt_ops->xpo_free(xprt); |
| 135 | module_put(owner); | 133 | module_put(owner); |
| 136 | } | 134 | } |
| @@ -156,15 +154,18 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt, | |||
| 156 | INIT_LIST_HEAD(&xprt->xpt_list); | 154 | INIT_LIST_HEAD(&xprt->xpt_list); |
| 157 | INIT_LIST_HEAD(&xprt->xpt_ready); | 155 | INIT_LIST_HEAD(&xprt->xpt_ready); |
| 158 | INIT_LIST_HEAD(&xprt->xpt_deferred); | 156 | INIT_LIST_HEAD(&xprt->xpt_deferred); |
| 157 | INIT_LIST_HEAD(&xprt->xpt_users); | ||
| 159 | mutex_init(&xprt->xpt_mutex); | 158 | mutex_init(&xprt->xpt_mutex); |
| 160 | spin_lock_init(&xprt->xpt_lock); | 159 | spin_lock_init(&xprt->xpt_lock); |
| 161 | set_bit(XPT_BUSY, &xprt->xpt_flags); | 160 | set_bit(XPT_BUSY, &xprt->xpt_flags); |
| 162 | rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); | 161 | rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); |
| 162 | xprt->xpt_net = get_net(&init_net); | ||
| 163 | } | 163 | } |
| 164 | EXPORT_SYMBOL_GPL(svc_xprt_init); | 164 | EXPORT_SYMBOL_GPL(svc_xprt_init); |
| 165 | 165 | ||
| 166 | static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, | 166 | static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, |
| 167 | struct svc_serv *serv, | 167 | struct svc_serv *serv, |
| 168 | struct net *net, | ||
| 168 | const int family, | 169 | const int family, |
| 169 | const unsigned short port, | 170 | const unsigned short port, |
| 170 | int flags) | 171 | int flags) |
| @@ -199,12 +200,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, | |||
| 199 | return ERR_PTR(-EAFNOSUPPORT); | 200 | return ERR_PTR(-EAFNOSUPPORT); |
| 200 | } | 201 | } |
| 201 | 202 | ||
| 202 | return xcl->xcl_ops->xpo_create(serv, sap, len, flags); | 203 | return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); |
| 203 | } | 204 | } |
| 204 | 205 | ||
| 205 | int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, | 206 | int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, |
| 206 | const int family, const unsigned short port, | 207 | struct net *net, const int family, |
| 207 | int flags) | 208 | const unsigned short port, int flags) |
| 208 | { | 209 | { |
| 209 | struct svc_xprt_class *xcl; | 210 | struct svc_xprt_class *xcl; |
| 210 | 211 | ||
| @@ -220,7 +221,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, | |||
| 220 | goto err; | 221 | goto err; |
| 221 | 222 | ||
| 222 | spin_unlock(&svc_xprt_class_lock); | 223 | spin_unlock(&svc_xprt_class_lock); |
| 223 | newxprt = __svc_xpo_create(xcl, serv, family, port, flags); | 224 | newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags); |
| 224 | if (IS_ERR(newxprt)) { | 225 | if (IS_ERR(newxprt)) { |
| 225 | module_put(xcl->xcl_owner); | 226 | module_put(xcl->xcl_owner); |
| 226 | return PTR_ERR(newxprt); | 227 | return PTR_ERR(newxprt); |
| @@ -329,12 +330,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
| 329 | "svc_xprt_enqueue: " | 330 | "svc_xprt_enqueue: " |
| 330 | "threads and transports both waiting??\n"); | 331 | "threads and transports both waiting??\n"); |
| 331 | 332 | ||
| 332 | if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { | ||
| 333 | /* Don't enqueue dead transports */ | ||
| 334 | dprintk("svc: transport %p is dead, not enqueued\n", xprt); | ||
| 335 | goto out_unlock; | ||
| 336 | } | ||
| 337 | |||
| 338 | pool->sp_stats.packets++; | 333 | pool->sp_stats.packets++; |
| 339 | 334 | ||
| 340 | /* Mark transport as busy. It will remain in this state until | 335 | /* Mark transport as busy. It will remain in this state until |
| @@ -651,6 +646,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 651 | if (signalled() || kthread_should_stop()) | 646 | if (signalled() || kthread_should_stop()) |
| 652 | return -EINTR; | 647 | return -EINTR; |
| 653 | 648 | ||
| 649 | /* Normally we will wait up to 5 seconds for any required | ||
| 650 | * cache information to be provided. | ||
| 651 | */ | ||
| 652 | rqstp->rq_chandle.thread_wait = 5*HZ; | ||
| 653 | |||
| 654 | spin_lock_bh(&pool->sp_lock); | 654 | spin_lock_bh(&pool->sp_lock); |
| 655 | xprt = svc_xprt_dequeue(pool); | 655 | xprt = svc_xprt_dequeue(pool); |
| 656 | if (xprt) { | 656 | if (xprt) { |
| @@ -658,6 +658,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 658 | svc_xprt_get(xprt); | 658 | svc_xprt_get(xprt); |
| 659 | rqstp->rq_reserved = serv->sv_max_mesg; | 659 | rqstp->rq_reserved = serv->sv_max_mesg; |
| 660 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | 660 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
| 661 | |||
| 662 | /* As there is a shortage of threads and this request | ||
| 663 | * had to be queued, don't allow the thread to wait so | ||
| 664 | * long for cache updates. | ||
| 665 | */ | ||
| 666 | rqstp->rq_chandle.thread_wait = 1*HZ; | ||
| 661 | } else { | 667 | } else { |
| 662 | /* No data pending. Go to sleep */ | 668 | /* No data pending. Go to sleep */ |
| 663 | svc_thread_enqueue(pool, rqstp); | 669 | svc_thread_enqueue(pool, rqstp); |
| @@ -868,6 +874,19 @@ static void svc_age_temp_xprts(unsigned long closure) | |||
| 868 | mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); | 874 | mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); |
| 869 | } | 875 | } |
| 870 | 876 | ||
| 877 | static void call_xpt_users(struct svc_xprt *xprt) | ||
| 878 | { | ||
| 879 | struct svc_xpt_user *u; | ||
| 880 | |||
| 881 | spin_lock(&xprt->xpt_lock); | ||
| 882 | while (!list_empty(&xprt->xpt_users)) { | ||
| 883 | u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list); | ||
| 884 | list_del(&u->list); | ||
| 885 | u->callback(u); | ||
| 886 | } | ||
| 887 | spin_unlock(&xprt->xpt_lock); | ||
| 888 | } | ||
| 889 | |||
| 871 | /* | 890 | /* |
| 872 | * Remove a dead transport | 891 | * Remove a dead transport |
| 873 | */ | 892 | */ |
| @@ -878,7 +897,7 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
| 878 | 897 | ||
| 879 | /* Only do this once */ | 898 | /* Only do this once */ |
| 880 | if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) | 899 | if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) |
| 881 | return; | 900 | BUG(); |
| 882 | 901 | ||
| 883 | dprintk("svc: svc_delete_xprt(%p)\n", xprt); | 902 | dprintk("svc: svc_delete_xprt(%p)\n", xprt); |
| 884 | xprt->xpt_ops->xpo_detach(xprt); | 903 | xprt->xpt_ops->xpo_detach(xprt); |
| @@ -900,6 +919,7 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
| 900 | while ((dr = svc_deferred_dequeue(xprt)) != NULL) | 919 | while ((dr = svc_deferred_dequeue(xprt)) != NULL) |
| 901 | kfree(dr); | 920 | kfree(dr); |
| 902 | 921 | ||
| 922 | call_xpt_users(xprt); | ||
| 903 | svc_xprt_put(xprt); | 923 | svc_xprt_put(xprt); |
| 904 | } | 924 | } |
| 905 | 925 | ||
| @@ -910,10 +930,7 @@ void svc_close_xprt(struct svc_xprt *xprt) | |||
| 910 | /* someone else will have to effect the close */ | 930 | /* someone else will have to effect the close */ |
| 911 | return; | 931 | return; |
| 912 | 932 | ||
| 913 | svc_xprt_get(xprt); | ||
| 914 | svc_delete_xprt(xprt); | 933 | svc_delete_xprt(xprt); |
| 915 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
| 916 | svc_xprt_put(xprt); | ||
| 917 | } | 934 | } |
| 918 | EXPORT_SYMBOL_GPL(svc_close_xprt); | 935 | EXPORT_SYMBOL_GPL(svc_close_xprt); |
| 919 | 936 | ||
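svc_xprt_init() now also initialises an xpt_users list, and svc_delete_xprt() drains it through call_xpt_users(), so other code can ask to be notified when a transport dies. A hedged sketch of a consumer, assuming struct svc_xpt_user carries the list and callback members used above (its definition lives in a header outside this net/ diffstat); the registration helper is an illustration, not part of the patch:

#include <linux/slab.h>
#include <linux/sunrpc/svc_xprt.h>

struct example_user {
        struct svc_xpt_user base;
        /* ... per-watcher state ... */
};

static void example_xprt_closed(struct svc_xpt_user *u)
{
        struct example_user *eu = container_of(u, struct example_user, base);

        /* Called from call_xpt_users() under xpt_lock, so do not sleep. */
        kfree(eu);
}

static void example_watch_xprt(struct svc_xprt *xprt, struct example_user *eu)
{
        eu->base.callback = example_xprt_closed;
        spin_lock(&xprt->xpt_lock);
        list_add(&eu->base.list, &xprt->xpt_users);
        spin_unlock(&xprt->xpt_lock);
}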
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 207311610988..560677d187f1 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | 18 | ||
| 19 | #include <linux/sunrpc/clnt.h> | 19 | #include <linux/sunrpc/clnt.h> |
| 20 | 20 | ||
| 21 | #include "netns.h" | ||
| 22 | |||
| 21 | /* | 23 | /* |
| 22 | * AUTHUNIX and AUTHNULL credentials are both handled here. | 24 | * AUTHUNIX and AUTHNULL credentials are both handled here. |
| 23 | * AUTHNULL is treated just like AUTHUNIX except that the uid/gid | 25 | * AUTHNULL is treated just like AUTHUNIX except that the uid/gid |
| @@ -92,7 +94,6 @@ struct ip_map { | |||
| 92 | struct unix_domain *m_client; | 94 | struct unix_domain *m_client; |
| 93 | int m_add_change; | 95 | int m_add_change; |
| 94 | }; | 96 | }; |
| 95 | static struct cache_head *ip_table[IP_HASHMAX]; | ||
| 96 | 97 | ||
| 97 | static void ip_map_put(struct kref *kref) | 98 | static void ip_map_put(struct kref *kref) |
| 98 | { | 99 | { |
| @@ -178,8 +179,8 @@ static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h) | |||
| 178 | return sunrpc_cache_pipe_upcall(cd, h, ip_map_request); | 179 | return sunrpc_cache_pipe_upcall(cd, h, ip_map_request); |
| 179 | } | 180 | } |
| 180 | 181 | ||
| 181 | static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr); | 182 | static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr); |
| 182 | static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry); | 183 | static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry); |
| 183 | 184 | ||
| 184 | static int ip_map_parse(struct cache_detail *cd, | 185 | static int ip_map_parse(struct cache_detail *cd, |
| 185 | char *mesg, int mlen) | 186 | char *mesg, int mlen) |
| @@ -219,10 +220,9 @@ static int ip_map_parse(struct cache_detail *cd, | |||
| 219 | switch (address.sa.sa_family) { | 220 | switch (address.sa.sa_family) { |
| 220 | case AF_INET: | 221 | case AF_INET: |
| 221 | /* Form a mapped IPv4 address in sin6 */ | 222 | /* Form a mapped IPv4 address in sin6 */ |
| 222 | memset(&sin6, 0, sizeof(sin6)); | ||
| 223 | sin6.sin6_family = AF_INET6; | 223 | sin6.sin6_family = AF_INET6; |
| 224 | sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); | 224 | ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr, |
| 225 | sin6.sin6_addr.s6_addr32[3] = address.s4.sin_addr.s_addr; | 225 | &sin6.sin6_addr); |
| 226 | break; | 226 | break; |
| 227 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 227 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
| 228 | case AF_INET6: | 228 | case AF_INET6: |
| @@ -249,9 +249,9 @@ static int ip_map_parse(struct cache_detail *cd, | |||
| 249 | dom = NULL; | 249 | dom = NULL; |
| 250 | 250 | ||
| 251 | /* IPv6 scope IDs are ignored for now */ | 251 | /* IPv6 scope IDs are ignored for now */ |
| 252 | ipmp = ip_map_lookup(class, &sin6.sin6_addr); | 252 | ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr); |
| 253 | if (ipmp) { | 253 | if (ipmp) { |
| 254 | err = ip_map_update(ipmp, | 254 | err = __ip_map_update(cd, ipmp, |
| 255 | container_of(dom, struct unix_domain, h), | 255 | container_of(dom, struct unix_domain, h), |
| 256 | expiry); | 256 | expiry); |
| 257 | } else | 257 | } else |
| @@ -294,29 +294,15 @@ static int ip_map_show(struct seq_file *m, | |||
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | 296 | ||
| 297 | struct cache_detail ip_map_cache = { | 297 | static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, |
| 298 | .owner = THIS_MODULE, | 298 | struct in6_addr *addr) |
| 299 | .hash_size = IP_HASHMAX, | ||
| 300 | .hash_table = ip_table, | ||
| 301 | .name = "auth.unix.ip", | ||
| 302 | .cache_put = ip_map_put, | ||
| 303 | .cache_upcall = ip_map_upcall, | ||
| 304 | .cache_parse = ip_map_parse, | ||
| 305 | .cache_show = ip_map_show, | ||
| 306 | .match = ip_map_match, | ||
| 307 | .init = ip_map_init, | ||
| 308 | .update = update, | ||
| 309 | .alloc = ip_map_alloc, | ||
| 310 | }; | ||
| 311 | |||
| 312 | static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr) | ||
| 313 | { | 299 | { |
| 314 | struct ip_map ip; | 300 | struct ip_map ip; |
| 315 | struct cache_head *ch; | 301 | struct cache_head *ch; |
| 316 | 302 | ||
| 317 | strcpy(ip.m_class, class); | 303 | strcpy(ip.m_class, class); |
| 318 | ipv6_addr_copy(&ip.m_addr, addr); | 304 | ipv6_addr_copy(&ip.m_addr, addr); |
| 319 | ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h, | 305 | ch = sunrpc_cache_lookup(cd, &ip.h, |
| 320 | hash_str(class, IP_HASHBITS) ^ | 306 | hash_str(class, IP_HASHBITS) ^ |
| 321 | hash_ip6(*addr)); | 307 | hash_ip6(*addr)); |
| 322 | 308 | ||
| @@ -326,7 +312,17 @@ static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr) | |||
| 326 | return NULL; | 312 | return NULL; |
| 327 | } | 313 | } |
| 328 | 314 | ||
| 329 | static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry) | 315 | static inline struct ip_map *ip_map_lookup(struct net *net, char *class, |
| 316 | struct in6_addr *addr) | ||
| 317 | { | ||
| 318 | struct sunrpc_net *sn; | ||
| 319 | |||
| 320 | sn = net_generic(net, sunrpc_net_id); | ||
| 321 | return __ip_map_lookup(sn->ip_map_cache, class, addr); | ||
| 322 | } | ||
| 323 | |||
| 324 | static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, | ||
| 325 | struct unix_domain *udom, time_t expiry) | ||
| 330 | { | 326 | { |
| 331 | struct ip_map ip; | 327 | struct ip_map ip; |
| 332 | struct cache_head *ch; | 328 | struct cache_head *ch; |
| @@ -344,17 +340,25 @@ static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t ex | |||
| 344 | ip.m_add_change++; | 340 | ip.m_add_change++; |
| 345 | } | 341 | } |
| 346 | ip.h.expiry_time = expiry; | 342 | ip.h.expiry_time = expiry; |
| 347 | ch = sunrpc_cache_update(&ip_map_cache, | 343 | ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, |
| 348 | &ip.h, &ipm->h, | ||
| 349 | hash_str(ipm->m_class, IP_HASHBITS) ^ | 344 | hash_str(ipm->m_class, IP_HASHBITS) ^ |
| 350 | hash_ip6(ipm->m_addr)); | 345 | hash_ip6(ipm->m_addr)); |
| 351 | if (!ch) | 346 | if (!ch) |
| 352 | return -ENOMEM; | 347 | return -ENOMEM; |
| 353 | cache_put(ch, &ip_map_cache); | 348 | cache_put(ch, cd); |
| 354 | return 0; | 349 | return 0; |
| 355 | } | 350 | } |
| 356 | 351 | ||
| 357 | int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom) | 352 | static inline int ip_map_update(struct net *net, struct ip_map *ipm, |
| 353 | struct unix_domain *udom, time_t expiry) | ||
| 354 | { | ||
| 355 | struct sunrpc_net *sn; | ||
| 356 | |||
| 357 | sn = net_generic(net, sunrpc_net_id); | ||
| 358 | return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); | ||
| 359 | } | ||
| 360 | |||
| 361 | int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom) | ||
| 358 | { | 362 | { |
| 359 | struct unix_domain *udom; | 363 | struct unix_domain *udom; |
| 360 | struct ip_map *ipmp; | 364 | struct ip_map *ipmp; |
| @@ -362,10 +366,10 @@ int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom) | |||
| 362 | if (dom->flavour != &svcauth_unix) | 366 | if (dom->flavour != &svcauth_unix) |
| 363 | return -EINVAL; | 367 | return -EINVAL; |
| 364 | udom = container_of(dom, struct unix_domain, h); | 368 | udom = container_of(dom, struct unix_domain, h); |
| 365 | ipmp = ip_map_lookup("nfsd", addr); | 369 | ipmp = ip_map_lookup(net, "nfsd", addr); |
| 366 | 370 | ||
| 367 | if (ipmp) | 371 | if (ipmp) |
| 368 | return ip_map_update(ipmp, udom, NEVER); | 372 | return ip_map_update(net, ipmp, udom, NEVER); |
| 369 | else | 373 | else |
| 370 | return -ENOMEM; | 374 | return -ENOMEM; |
| 371 | } | 375 | } |
| @@ -383,16 +387,18 @@ int auth_unix_forget_old(struct auth_domain *dom) | |||
| 383 | } | 387 | } |
| 384 | EXPORT_SYMBOL_GPL(auth_unix_forget_old); | 388 | EXPORT_SYMBOL_GPL(auth_unix_forget_old); |
| 385 | 389 | ||
| 386 | struct auth_domain *auth_unix_lookup(struct in6_addr *addr) | 390 | struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) |
| 387 | { | 391 | { |
| 388 | struct ip_map *ipm; | 392 | struct ip_map *ipm; |
| 389 | struct auth_domain *rv; | 393 | struct auth_domain *rv; |
| 394 | struct sunrpc_net *sn; | ||
| 390 | 395 | ||
| 391 | ipm = ip_map_lookup("nfsd", addr); | 396 | sn = net_generic(net, sunrpc_net_id); |
| 397 | ipm = ip_map_lookup(net, "nfsd", addr); | ||
| 392 | 398 | ||
| 393 | if (!ipm) | 399 | if (!ipm) |
| 394 | return NULL; | 400 | return NULL; |
| 395 | if (cache_check(&ip_map_cache, &ipm->h, NULL)) | 401 | if (cache_check(sn->ip_map_cache, &ipm->h, NULL)) |
| 396 | return NULL; | 402 | return NULL; |
| 397 | 403 | ||
| 398 | if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { | 404 | if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { |
| @@ -403,22 +409,29 @@ struct auth_domain *auth_unix_lookup(struct in6_addr *addr) | |||
| 403 | rv = &ipm->m_client->h; | 409 | rv = &ipm->m_client->h; |
| 404 | kref_get(&rv->ref); | 410 | kref_get(&rv->ref); |
| 405 | } | 411 | } |
| 406 | cache_put(&ipm->h, &ip_map_cache); | 412 | cache_put(&ipm->h, sn->ip_map_cache); |
| 407 | return rv; | 413 | return rv; |
| 408 | } | 414 | } |
| 409 | EXPORT_SYMBOL_GPL(auth_unix_lookup); | 415 | EXPORT_SYMBOL_GPL(auth_unix_lookup); |
| 410 | 416 | ||
| 411 | void svcauth_unix_purge(void) | 417 | void svcauth_unix_purge(void) |
| 412 | { | 418 | { |
| 413 | cache_purge(&ip_map_cache); | 419 | struct net *net; |
| 420 | |||
| 421 | for_each_net(net) { | ||
| 422 | struct sunrpc_net *sn; | ||
| 423 | |||
| 424 | sn = net_generic(net, sunrpc_net_id); | ||
| 425 | cache_purge(sn->ip_map_cache); | ||
| 426 | } | ||
| 414 | } | 427 | } |
| 415 | EXPORT_SYMBOL_GPL(svcauth_unix_purge); | 428 | EXPORT_SYMBOL_GPL(svcauth_unix_purge); |
| 416 | 429 | ||
| 417 | static inline struct ip_map * | 430 | static inline struct ip_map * |
| 418 | ip_map_cached_get(struct svc_rqst *rqstp) | 431 | ip_map_cached_get(struct svc_xprt *xprt) |
| 419 | { | 432 | { |
| 420 | struct ip_map *ipm = NULL; | 433 | struct ip_map *ipm = NULL; |
| 421 | struct svc_xprt *xprt = rqstp->rq_xprt; | 434 | struct sunrpc_net *sn; |
| 422 | 435 | ||
| 423 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { | 436 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { |
| 424 | spin_lock(&xprt->xpt_lock); | 437 | spin_lock(&xprt->xpt_lock); |
| @@ -430,9 +443,10 @@ ip_map_cached_get(struct svc_rqst *rqstp) | |||
| 430 | * remembered, e.g. by a second mount from the | 443 | * remembered, e.g. by a second mount from the |
| 431 | * same IP address. | 444 | * same IP address. |
| 432 | */ | 445 | */ |
| 446 | sn = net_generic(xprt->xpt_net, sunrpc_net_id); | ||
| 433 | xprt->xpt_auth_cache = NULL; | 447 | xprt->xpt_auth_cache = NULL; |
| 434 | spin_unlock(&xprt->xpt_lock); | 448 | spin_unlock(&xprt->xpt_lock); |
| 435 | cache_put(&ipm->h, &ip_map_cache); | 449 | cache_put(&ipm->h, sn->ip_map_cache); |
| 436 | return NULL; | 450 | return NULL; |
| 437 | } | 451 | } |
| 438 | cache_get(&ipm->h); | 452 | cache_get(&ipm->h); |
| @@ -443,10 +457,8 @@ ip_map_cached_get(struct svc_rqst *rqstp) | |||
| 443 | } | 457 | } |
| 444 | 458 | ||
| 445 | static inline void | 459 | static inline void |
| 446 | ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm) | 460 | ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm) |
| 447 | { | 461 | { |
| 448 | struct svc_xprt *xprt = rqstp->rq_xprt; | ||
| 449 | |||
| 450 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { | 462 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { |
| 451 | spin_lock(&xprt->xpt_lock); | 463 | spin_lock(&xprt->xpt_lock); |
| 452 | if (xprt->xpt_auth_cache == NULL) { | 464 | if (xprt->xpt_auth_cache == NULL) { |
| @@ -456,15 +468,26 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm) | |||
| 456 | } | 468 | } |
| 457 | spin_unlock(&xprt->xpt_lock); | 469 | spin_unlock(&xprt->xpt_lock); |
| 458 | } | 470 | } |
| 459 | if (ipm) | 471 | if (ipm) { |
| 460 | cache_put(&ipm->h, &ip_map_cache); | 472 | struct sunrpc_net *sn; |
| 473 | |||
| 474 | sn = net_generic(xprt->xpt_net, sunrpc_net_id); | ||
| 475 | cache_put(&ipm->h, sn->ip_map_cache); | ||
| 476 | } | ||
| 461 | } | 477 | } |
| 462 | 478 | ||
| 463 | void | 479 | void |
| 464 | svcauth_unix_info_release(void *info) | 480 | svcauth_unix_info_release(struct svc_xprt *xpt) |
| 465 | { | 481 | { |
| 466 | struct ip_map *ipm = info; | 482 | struct ip_map *ipm; |
| 467 | cache_put(&ipm->h, &ip_map_cache); | 483 | |
| 484 | ipm = xpt->xpt_auth_cache; | ||
| 485 | if (ipm != NULL) { | ||
| 486 | struct sunrpc_net *sn; | ||
| 487 | |||
| 488 | sn = net_generic(xpt->xpt_net, sunrpc_net_id); | ||
| 489 | cache_put(&ipm->h, sn->ip_map_cache); | ||
| 490 | } | ||
| 468 | } | 491 | } |
| 469 | 492 | ||
| 470 | /**************************************************************************** | 493 | /**************************************************************************** |
| @@ -674,6 +697,8 @@ static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp) | |||
| 674 | switch (ret) { | 697 | switch (ret) { |
| 675 | case -ENOENT: | 698 | case -ENOENT: |
| 676 | return ERR_PTR(-ENOENT); | 699 | return ERR_PTR(-ENOENT); |
| 700 | case -ETIMEDOUT: | ||
| 701 | return ERR_PTR(-ESHUTDOWN); | ||
| 677 | case 0: | 702 | case 0: |
| 678 | gi = get_group_info(ug->gi); | 703 | gi = get_group_info(ug->gi); |
| 679 | cache_put(&ug->h, &unix_gid_cache); | 704 | cache_put(&ug->h, &unix_gid_cache); |
| @@ -691,6 +716,9 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
| 691 | struct ip_map *ipm; | 716 | struct ip_map *ipm; |
| 692 | struct group_info *gi; | 717 | struct group_info *gi; |
| 693 | struct svc_cred *cred = &rqstp->rq_cred; | 718 | struct svc_cred *cred = &rqstp->rq_cred; |
| 719 | struct svc_xprt *xprt = rqstp->rq_xprt; | ||
| 720 | struct net *net = xprt->xpt_net; | ||
| 721 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
| 694 | 722 | ||
| 695 | switch (rqstp->rq_addr.ss_family) { | 723 | switch (rqstp->rq_addr.ss_family) { |
| 696 | case AF_INET: | 724 | case AF_INET: |
| @@ -709,26 +737,27 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
| 709 | if (rqstp->rq_proc == 0) | 737 | if (rqstp->rq_proc == 0) |
| 710 | return SVC_OK; | 738 | return SVC_OK; |
| 711 | 739 | ||
| 712 | ipm = ip_map_cached_get(rqstp); | 740 | ipm = ip_map_cached_get(xprt); |
| 713 | if (ipm == NULL) | 741 | if (ipm == NULL) |
| 714 | ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, | 742 | ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class, |
| 715 | &sin6->sin6_addr); | 743 | &sin6->sin6_addr); |
| 716 | 744 | ||
| 717 | if (ipm == NULL) | 745 | if (ipm == NULL) |
| 718 | return SVC_DENIED; | 746 | return SVC_DENIED; |
| 719 | 747 | ||
| 720 | switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) { | 748 | switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) { |
| 721 | default: | 749 | default: |
| 722 | BUG(); | 750 | BUG(); |
| 723 | case -EAGAIN: | ||
| 724 | case -ETIMEDOUT: | 751 | case -ETIMEDOUT: |
| 752 | return SVC_CLOSE; | ||
| 753 | case -EAGAIN: | ||
| 725 | return SVC_DROP; | 754 | return SVC_DROP; |
| 726 | case -ENOENT: | 755 | case -ENOENT: |
| 727 | return SVC_DENIED; | 756 | return SVC_DENIED; |
| 728 | case 0: | 757 | case 0: |
| 729 | rqstp->rq_client = &ipm->m_client->h; | 758 | rqstp->rq_client = &ipm->m_client->h; |
| 730 | kref_get(&rqstp->rq_client->ref); | 759 | kref_get(&rqstp->rq_client->ref); |
| 731 | ip_map_cached_put(rqstp, ipm); | 760 | ip_map_cached_put(xprt, ipm); |
| 732 | break; | 761 | break; |
| 733 | } | 762 | } |
| 734 | 763 | ||
| @@ -736,6 +765,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
| 736 | switch (PTR_ERR(gi)) { | 765 | switch (PTR_ERR(gi)) { |
| 737 | case -EAGAIN: | 766 | case -EAGAIN: |
| 738 | return SVC_DROP; | 767 | return SVC_DROP; |
| 768 | case -ESHUTDOWN: | ||
| 769 | return SVC_CLOSE; | ||
| 739 | case -ENOENT: | 770 | case -ENOENT: |
| 740 | break; | 771 | break; |
| 741 | default: | 772 | default: |
| @@ -776,7 +807,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 776 | cred->cr_gid = (gid_t) -1; | 807 | cred->cr_gid = (gid_t) -1; |
| 777 | cred->cr_group_info = groups_alloc(0); | 808 | cred->cr_group_info = groups_alloc(0); |
| 778 | if (cred->cr_group_info == NULL) | 809 | if (cred->cr_group_info == NULL) |
| 779 | return SVC_DROP; /* kmalloc failure - client must retry */ | 810 | return SVC_CLOSE; /* kmalloc failure - client must retry */ |
| 780 | 811 | ||
| 781 | /* Put NULL verifier */ | 812 | /* Put NULL verifier */ |
| 782 | svc_putnl(resv, RPC_AUTH_NULL); | 813 | svc_putnl(resv, RPC_AUTH_NULL); |
| @@ -840,7 +871,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 840 | goto badcred; | 871 | goto badcred; |
| 841 | cred->cr_group_info = groups_alloc(slen); | 872 | cred->cr_group_info = groups_alloc(slen); |
| 842 | if (cred->cr_group_info == NULL) | 873 | if (cred->cr_group_info == NULL) |
| 843 | return SVC_DROP; | 874 | return SVC_CLOSE; |
| 844 | for (i = 0; i < slen; i++) | 875 | for (i = 0; i < slen; i++) |
| 845 | GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); | 876 | GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); |
| 846 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { | 877 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { |
| @@ -886,3 +917,56 @@ struct auth_ops svcauth_unix = { | |||
| 886 | .set_client = svcauth_unix_set_client, | 917 | .set_client = svcauth_unix_set_client, |
| 887 | }; | 918 | }; |
| 888 | 919 | ||
| 920 | int ip_map_cache_create(struct net *net) | ||
| 921 | { | ||
| 922 | int err = -ENOMEM; | ||
| 923 | struct cache_detail *cd; | ||
| 924 | struct cache_head **tbl; | ||
| 925 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
| 926 | |||
| 927 | cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL); | ||
| 928 | if (cd == NULL) | ||
| 929 | goto err_cd; | ||
| 930 | |||
| 931 | tbl = kzalloc(IP_HASHMAX * sizeof(struct cache_head *), GFP_KERNEL); | ||
| 932 | if (tbl == NULL) | ||
| 933 | goto err_tbl; | ||
| 934 | |||
| 935 | cd->owner = THIS_MODULE, | ||
| 936 | cd->hash_size = IP_HASHMAX, | ||
| 937 | cd->hash_table = tbl, | ||
| 938 | cd->name = "auth.unix.ip", | ||
| 939 | cd->cache_put = ip_map_put, | ||
| 940 | cd->cache_upcall = ip_map_upcall, | ||
| 941 | cd->cache_parse = ip_map_parse, | ||
| 942 | cd->cache_show = ip_map_show, | ||
| 943 | cd->match = ip_map_match, | ||
| 944 | cd->init = ip_map_init, | ||
| 945 | cd->update = update, | ||
| 946 | cd->alloc = ip_map_alloc, | ||
| 947 | |||
| 948 | err = cache_register_net(cd, net); | ||
| 949 | if (err) | ||
| 950 | goto err_reg; | ||
| 951 | |||
| 952 | sn->ip_map_cache = cd; | ||
| 953 | return 0; | ||
| 954 | |||
| 955 | err_reg: | ||
| 956 | kfree(tbl); | ||
| 957 | err_tbl: | ||
| 958 | kfree(cd); | ||
| 959 | err_cd: | ||
| 960 | return err; | ||
| 961 | } | ||
| 962 | |||
| 963 | void ip_map_cache_destroy(struct net *net) | ||
| 964 | { | ||
| 965 | struct sunrpc_net *sn; | ||
| 966 | |||
| 967 | sn = net_generic(net, sunrpc_net_id); | ||
| 968 | cache_purge(sn->ip_map_cache); | ||
| 969 | cache_unregister_net(sn->ip_map_cache, net); | ||
| 970 | kfree(sn->ip_map_cache->hash_table); | ||
| 971 | kfree(sn->ip_map_cache); | ||
| 972 | } | ||
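With ip_map_cache now allocated per namespace, the exported auth_unix_lookup() and auth_unix_add_addr() grow a struct net parameter. A sketch of how a caller might supply it, deriving the namespace from the transport the request arrived on; the helper is an assumption about typical callers (e.g. in nfsd), not code from this diff:

#include <linux/in6.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcauth.h>

static struct auth_domain *example_lookup_client(struct svc_rqst *rqstp,
                                                 struct in6_addr *addr)
{
        struct net *net = rqstp->rq_xprt->xpt_net;

        /* Returns the cached auth_domain for this address, or NULL. */
        return auth_unix_lookup(net, addr);
}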
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 7e534dd09077..07919e16be3e 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
| @@ -64,7 +64,8 @@ static void svc_tcp_sock_detach(struct svc_xprt *); | |||
| 64 | static void svc_sock_free(struct svc_xprt *); | 64 | static void svc_sock_free(struct svc_xprt *); |
| 65 | 65 | ||
| 66 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, | 66 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, |
| 67 | struct sockaddr *, int, int); | 67 | struct net *, struct sockaddr *, |
| 68 | int, int); | ||
| 68 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 69 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 69 | static struct lock_class_key svc_key[2]; | 70 | static struct lock_class_key svc_key[2]; |
| 70 | static struct lock_class_key svc_slock_key[2]; | 71 | static struct lock_class_key svc_slock_key[2]; |
| @@ -657,10 +658,11 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt) | |||
| 657 | } | 658 | } |
| 658 | 659 | ||
| 659 | static struct svc_xprt *svc_udp_create(struct svc_serv *serv, | 660 | static struct svc_xprt *svc_udp_create(struct svc_serv *serv, |
| 661 | struct net *net, | ||
| 660 | struct sockaddr *sa, int salen, | 662 | struct sockaddr *sa, int salen, |
| 661 | int flags) | 663 | int flags) |
| 662 | { | 664 | { |
| 663 | return svc_create_socket(serv, IPPROTO_UDP, sa, salen, flags); | 665 | return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags); |
| 664 | } | 666 | } |
| 665 | 667 | ||
| 666 | static struct svc_xprt_ops svc_udp_ops = { | 668 | static struct svc_xprt_ops svc_udp_ops = { |
| @@ -1133,9 +1135,6 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) | |||
| 1133 | reclen = htonl(0x80000000|((xbufp->len ) - 4)); | 1135 | reclen = htonl(0x80000000|((xbufp->len ) - 4)); |
| 1134 | memcpy(xbufp->head[0].iov_base, &reclen, 4); | 1136 | memcpy(xbufp->head[0].iov_base, &reclen, 4); |
| 1135 | 1137 | ||
| 1136 | if (test_bit(XPT_DEAD, &rqstp->rq_xprt->xpt_flags)) | ||
| 1137 | return -ENOTCONN; | ||
| 1138 | |||
| 1139 | sent = svc_sendto(rqstp, &rqstp->rq_res); | 1138 | sent = svc_sendto(rqstp, &rqstp->rq_res); |
| 1140 | if (sent != xbufp->len) { | 1139 | if (sent != xbufp->len) { |
| 1141 | printk(KERN_NOTICE | 1140 | printk(KERN_NOTICE |
| @@ -1178,10 +1177,11 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt) | |||
| 1178 | } | 1177 | } |
| 1179 | 1178 | ||
| 1180 | static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, | 1179 | static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, |
| 1180 | struct net *net, | ||
| 1181 | struct sockaddr *sa, int salen, | 1181 | struct sockaddr *sa, int salen, |
| 1182 | int flags) | 1182 | int flags) |
| 1183 | { | 1183 | { |
| 1184 | return svc_create_socket(serv, IPPROTO_TCP, sa, salen, flags); | 1184 | return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); |
| 1185 | } | 1185 | } |
| 1186 | 1186 | ||
| 1187 | static struct svc_xprt_ops svc_tcp_ops = { | 1187 | static struct svc_xprt_ops svc_tcp_ops = { |
| @@ -1258,19 +1258,13 @@ void svc_sock_update_bufs(struct svc_serv *serv) | |||
| 1258 | * The number of server threads has changed. Update | 1258 | * The number of server threads has changed. Update |
| 1259 | * rcvbuf and sndbuf accordingly on all sockets | 1259 | * rcvbuf and sndbuf accordingly on all sockets |
| 1260 | */ | 1260 | */ |
| 1261 | struct list_head *le; | 1261 | struct svc_sock *svsk; |
| 1262 | 1262 | ||
| 1263 | spin_lock_bh(&serv->sv_lock); | 1263 | spin_lock_bh(&serv->sv_lock); |
| 1264 | list_for_each(le, &serv->sv_permsocks) { | 1264 | list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) |
| 1265 | struct svc_sock *svsk = | ||
| 1266 | list_entry(le, struct svc_sock, sk_xprt.xpt_list); | ||
| 1267 | set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); | 1265 | set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); |
| 1268 | } | 1266 | list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list) |
| 1269 | list_for_each(le, &serv->sv_tempsocks) { | ||
| 1270 | struct svc_sock *svsk = | ||
| 1271 | list_entry(le, struct svc_sock, sk_xprt.xpt_list); | ||
| 1272 | set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); | 1267 | set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); |
| 1273 | } | ||
| 1274 | spin_unlock_bh(&serv->sv_lock); | 1268 | spin_unlock_bh(&serv->sv_lock); |
| 1275 | } | 1269 | } |
| 1276 | EXPORT_SYMBOL_GPL(svc_sock_update_bufs); | 1270 | EXPORT_SYMBOL_GPL(svc_sock_update_bufs); |
| @@ -1385,6 +1379,7 @@ EXPORT_SYMBOL_GPL(svc_addsock); | |||
| 1385 | */ | 1379 | */ |
| 1386 | static struct svc_xprt *svc_create_socket(struct svc_serv *serv, | 1380 | static struct svc_xprt *svc_create_socket(struct svc_serv *serv, |
| 1387 | int protocol, | 1381 | int protocol, |
| 1382 | struct net *net, | ||
| 1388 | struct sockaddr *sin, int len, | 1383 | struct sockaddr *sin, int len, |
| 1389 | int flags) | 1384 | int flags) |
| 1390 | { | 1385 | { |
| @@ -1421,7 +1416,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, | |||
| 1421 | return ERR_PTR(-EINVAL); | 1416 | return ERR_PTR(-EINVAL); |
| 1422 | } | 1417 | } |
| 1423 | 1418 | ||
| 1424 | error = sock_create_kern(family, type, protocol, &sock); | 1419 | error = __sock_create(net, family, type, protocol, &sock, 1); |
| 1425 | if (error < 0) | 1420 | if (error < 0) |
| 1426 | return ERR_PTR(error); | 1421 | return ERR_PTR(error); |
| 1427 | 1422 | ||
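svc_create_socket() switches from sock_create_kern() to the newly exported __sock_create() so the listening socket is created in the caller's namespace; the trailing 1 marks it as a kernel-internal socket. A minimal sketch of that call, assuming the prototype is made visible via <linux/net.h> elsewhere in the series:

#include <linux/net.h>
#include <linux/in.h>

static int example_make_udp_socket(struct net *net, struct socket **sockp)
{
        /* family/type/protocol as for sock_create_kern(); the extra
         * net and kern=1 arguments pin the socket to a namespace and
         * flag it as kernel-owned. */
        return __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, sockp, 1);
}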
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 970fb00f388c..4c8f18aff7c3 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -199,8 +199,6 @@ int xprt_reserve_xprt(struct rpc_task *task) | |||
| 199 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | 199 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
| 200 | if (task == xprt->snd_task) | 200 | if (task == xprt->snd_task) |
| 201 | return 1; | 201 | return 1; |
| 202 | if (task == NULL) | ||
| 203 | return 0; | ||
| 204 | goto out_sleep; | 202 | goto out_sleep; |
| 205 | } | 203 | } |
| 206 | xprt->snd_task = task; | 204 | xprt->snd_task = task; |
| @@ -757,13 +755,11 @@ static void xprt_connect_status(struct rpc_task *task) | |||
| 757 | */ | 755 | */ |
| 758 | struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) | 756 | struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) |
| 759 | { | 757 | { |
| 760 | struct list_head *pos; | 758 | struct rpc_rqst *entry; |
| 761 | 759 | ||
| 762 | list_for_each(pos, &xprt->recv) { | 760 | list_for_each_entry(entry, &xprt->recv, rq_list) |
| 763 | struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list); | ||
| 764 | if (entry->rq_xid == xid) | 761 | if (entry->rq_xid == xid) |
| 765 | return entry; | 762 | return entry; |
| 766 | } | ||
| 767 | 763 | ||
| 768 | dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", | 764 | dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", |
| 769 | ntohl(xid)); | 765 | ntohl(xid)); |
| @@ -962,6 +958,37 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | |||
| 962 | spin_unlock(&xprt->reserve_lock); | 958 | spin_unlock(&xprt->reserve_lock); |
| 963 | } | 959 | } |
| 964 | 960 | ||
| 961 | struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req) | ||
| 962 | { | ||
| 963 | struct rpc_xprt *xprt; | ||
| 964 | |||
| 965 | xprt = kzalloc(size, GFP_KERNEL); | ||
| 966 | if (xprt == NULL) | ||
| 967 | goto out; | ||
| 968 | |||
| 969 | xprt->max_reqs = max_req; | ||
| 970 | xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); | ||
| 971 | if (xprt->slot == NULL) | ||
| 972 | goto out_free; | ||
| 973 | |||
| 974 | xprt->xprt_net = get_net(net); | ||
| 975 | return xprt; | ||
| 976 | |||
| 977 | out_free: | ||
| 978 | kfree(xprt); | ||
| 979 | out: | ||
| 980 | return NULL; | ||
| 981 | } | ||
| 982 | EXPORT_SYMBOL_GPL(xprt_alloc); | ||
| 983 | |||
| 984 | void xprt_free(struct rpc_xprt *xprt) | ||
| 985 | { | ||
| 986 | put_net(xprt->xprt_net); | ||
| 987 | kfree(xprt->slot); | ||
| 988 | kfree(xprt); | ||
| 989 | } | ||
| 990 | EXPORT_SYMBOL_GPL(xprt_free); | ||
| 991 | |||
| 965 | /** | 992 | /** |
| 966 | * xprt_reserve - allocate an RPC request slot | 993 | * xprt_reserve - allocate an RPC request slot |
| 967 | * @task: RPC task requesting a slot allocation | 994 | * @task: RPC task requesting a slot allocation |
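xprt_alloc() and xprt_free() above bundle the transport allocation, the request-slot table, and the network-namespace reference into one pair of helpers. A hedged sketch of how a transport setup path might use them; struct example_xprt, example_setup(), example_destroy() and the slot count of 16 are illustrative only:

#include <linux/err.h>
#include <linux/sunrpc/xprt.h>

struct example_xprt {
	struct rpc_xprt xprt;	/* at offset 0, so the rpc_xprt returned by
				 * xprt_alloc() maps onto the larger struct */
	/* transport-private state would follow */
};

static struct rpc_xprt *example_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct example_xprt), 16);
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
	/* ... fill in ops, timeouts and the destination address ... */
	return xprt;
}

static void example_destroy(struct rpc_xprt *xprt)
{
	/* drops the netns reference, then frees the slots and the xprt */
	xprt_free(xprt);
}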
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index d718b8fa9525..09af4fab1a45 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
| 44 | #include <linux/fs.h> | 44 | #include <linux/fs.h> |
| 45 | #include <linux/sysctl.h> | 45 | #include <linux/sysctl.h> |
| 46 | #include <linux/workqueue.h> | ||
| 46 | #include <linux/sunrpc/clnt.h> | 47 | #include <linux/sunrpc/clnt.h> |
| 47 | #include <linux/sunrpc/sched.h> | 48 | #include <linux/sunrpc/sched.h> |
| 48 | #include <linux/sunrpc/svc_rdma.h> | 49 | #include <linux/sunrpc/svc_rdma.h> |
| @@ -74,6 +75,8 @@ atomic_t rdma_stat_sq_prod; | |||
| 74 | struct kmem_cache *svc_rdma_map_cachep; | 75 | struct kmem_cache *svc_rdma_map_cachep; |
| 75 | struct kmem_cache *svc_rdma_ctxt_cachep; | 76 | struct kmem_cache *svc_rdma_ctxt_cachep; |
| 76 | 77 | ||
| 78 | struct workqueue_struct *svc_rdma_wq; | ||
| 79 | |||
| 77 | /* | 80 | /* |
| 78 | * This function implements reading and resetting an atomic_t stat | 81 | * This function implements reading and resetting an atomic_t stat |
| 79 | * variable through read/write to a proc file. Any write to the file | 82 | * variable through read/write to a proc file. Any write to the file |
| @@ -231,7 +234,7 @@ static ctl_table svcrdma_root_table[] = { | |||
| 231 | void svc_rdma_cleanup(void) | 234 | void svc_rdma_cleanup(void) |
| 232 | { | 235 | { |
| 233 | dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n"); | 236 | dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n"); |
| 234 | flush_scheduled_work(); | 237 | destroy_workqueue(svc_rdma_wq); |
| 235 | if (svcrdma_table_header) { | 238 | if (svcrdma_table_header) { |
| 236 | unregister_sysctl_table(svcrdma_table_header); | 239 | unregister_sysctl_table(svcrdma_table_header); |
| 237 | svcrdma_table_header = NULL; | 240 | svcrdma_table_header = NULL; |
| @@ -249,6 +252,11 @@ int svc_rdma_init(void) | |||
| 249 | dprintk("\tsq_depth : %d\n", | 252 | dprintk("\tsq_depth : %d\n", |
| 250 | svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT); | 253 | svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT); |
| 251 | dprintk("\tmax_inline : %d\n", svcrdma_max_req_size); | 254 | dprintk("\tmax_inline : %d\n", svcrdma_max_req_size); |
| 255 | |||
| 256 | svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0); | ||
| 257 | if (!svc_rdma_wq) | ||
| 258 | return -ENOMEM; | ||
| 259 | |||
| 252 | if (!svcrdma_table_header) | 260 | if (!svcrdma_table_header) |
| 253 | svcrdma_table_header = | 261 | svcrdma_table_header = |
| 254 | register_sysctl_table(svcrdma_root_table); | 262 | register_sysctl_table(svcrdma_root_table); |
| @@ -283,6 +291,7 @@ int svc_rdma_init(void) | |||
| 283 | kmem_cache_destroy(svc_rdma_map_cachep); | 291 | kmem_cache_destroy(svc_rdma_map_cachep); |
| 284 | err0: | 292 | err0: |
| 285 | unregister_sysctl_table(svcrdma_table_header); | 293 | unregister_sysctl_table(svcrdma_table_header); |
| 294 | destroy_workqueue(svc_rdma_wq); | ||
| 286 | return -ENOMEM; | 295 | return -ENOMEM; |
| 287 | } | 296 | } |
| 288 | MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); | 297 | MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); |
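The svc_rdma changes above replace work on the shared system workqueue with a private svc_rdma_wq, so module teardown only waits for its own deferred work instead of calling flush_scheduled_work(). A minimal sketch of that lifecycle under hypothetical names (example_wq, example_fn):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_fn(struct work_struct *work)
{
	/* deferred cleanup runs here */
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_fn);
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* waits for queued work, then frees the queue */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");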
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 0194de814933..df67211c4baf 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
| @@ -263,9 +263,9 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt, | |||
| 263 | frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT; | 263 | frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT; |
| 264 | for (page_no = 0; page_no < frmr->page_list_len; page_no++) { | 264 | for (page_no = 0; page_no < frmr->page_list_len; page_no++) { |
| 265 | frmr->page_list->page_list[page_no] = | 265 | frmr->page_list->page_list[page_no] = |
| 266 | ib_dma_map_single(xprt->sc_cm_id->device, | 266 | ib_dma_map_page(xprt->sc_cm_id->device, |
| 267 | page_address(rqstp->rq_arg.pages[page_no]), | 267 | rqstp->rq_arg.pages[page_no], 0, |
| 268 | PAGE_SIZE, DMA_FROM_DEVICE); | 268 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 269 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, | 269 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
| 270 | frmr->page_list->page_list[page_no])) | 270 | frmr->page_list->page_list[page_no])) |
| 271 | goto fatal_err; | 271 | goto fatal_err; |
| @@ -309,17 +309,21 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt, | |||
| 309 | int count) | 309 | int count) |
| 310 | { | 310 | { |
| 311 | int i; | 311 | int i; |
| 312 | unsigned long off; | ||
| 312 | 313 | ||
| 313 | ctxt->count = count; | 314 | ctxt->count = count; |
| 314 | ctxt->direction = DMA_FROM_DEVICE; | 315 | ctxt->direction = DMA_FROM_DEVICE; |
| 315 | for (i = 0; i < count; i++) { | 316 | for (i = 0; i < count; i++) { |
| 316 | ctxt->sge[i].length = 0; /* in case map fails */ | 317 | ctxt->sge[i].length = 0; /* in case map fails */ |
| 317 | if (!frmr) { | 318 | if (!frmr) { |
| 319 | BUG_ON(0 == virt_to_page(vec[i].iov_base)); | ||
| 320 | off = (unsigned long)vec[i].iov_base & ~PAGE_MASK; | ||
| 318 | ctxt->sge[i].addr = | 321 | ctxt->sge[i].addr = |
| 319 | ib_dma_map_single(xprt->sc_cm_id->device, | 322 | ib_dma_map_page(xprt->sc_cm_id->device, |
| 320 | vec[i].iov_base, | 323 | virt_to_page(vec[i].iov_base), |
| 321 | vec[i].iov_len, | 324 | off, |
| 322 | DMA_FROM_DEVICE); | 325 | vec[i].iov_len, |
| 326 | DMA_FROM_DEVICE); | ||
| 323 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, | 327 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
| 324 | ctxt->sge[i].addr)) | 328 | ctxt->sge[i].addr)) |
| 325 | return -EINVAL; | 329 | return -EINVAL; |
| @@ -491,6 +495,7 @@ next_sge: | |||
| 491 | printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n", | 495 | printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n", |
| 492 | err); | 496 | err); |
| 493 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); | 497 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); |
| 498 | svc_rdma_unmap_dma(ctxt); | ||
| 494 | svc_rdma_put_context(ctxt, 0); | 499 | svc_rdma_put_context(ctxt, 0); |
| 495 | goto out; | 500 | goto out; |
| 496 | } | 501 | } |
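Throughout this series each ib_dma_map_single() on a kernel virtual address becomes an ib_dma_map_page() of the backing page plus the offset within it, with the matching unmap switching to ib_dma_unmap_page(). A sketch of the conversion for one receive buffer; example_map_for_recv() and its parameters are illustrative:

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static u64 example_map_for_recv(struct ib_device *dev, void *base, size_t len)
{
	unsigned long off = (unsigned long)base & ~PAGE_MASK;

	/* page + in-page offset replaces the old virtual-address mapping */
	return ib_dma_map_page(dev, virt_to_page(base), off, len,
			       DMA_FROM_DEVICE);
}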
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index b15e1ebb2bfa..249a835b703f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
| @@ -70,8 +70,8 @@ | |||
| 70 | * on extra page for the RPCRMDA header. | 70 | * on extra page for the RPCRMDA header. |
| 71 | */ | 71 | */ |
| 72 | static int fast_reg_xdr(struct svcxprt_rdma *xprt, | 72 | static int fast_reg_xdr(struct svcxprt_rdma *xprt, |
| 73 | struct xdr_buf *xdr, | 73 | struct xdr_buf *xdr, |
| 74 | struct svc_rdma_req_map *vec) | 74 | struct svc_rdma_req_map *vec) |
| 75 | { | 75 | { |
| 76 | int sge_no; | 76 | int sge_no; |
| 77 | u32 sge_bytes; | 77 | u32 sge_bytes; |
| @@ -96,21 +96,25 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt, | |||
| 96 | vec->count = 2; | 96 | vec->count = 2; |
| 97 | sge_no++; | 97 | sge_no++; |
| 98 | 98 | ||
| 99 | /* Build the FRMR */ | 99 | /* Map the XDR head */ |
| 100 | frmr->kva = frva; | 100 | frmr->kva = frva; |
| 101 | frmr->direction = DMA_TO_DEVICE; | 101 | frmr->direction = DMA_TO_DEVICE; |
| 102 | frmr->access_flags = 0; | 102 | frmr->access_flags = 0; |
| 103 | frmr->map_len = PAGE_SIZE; | 103 | frmr->map_len = PAGE_SIZE; |
| 104 | frmr->page_list_len = 1; | 104 | frmr->page_list_len = 1; |
| 105 | page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK; | ||
| 105 | frmr->page_list->page_list[page_no] = | 106 | frmr->page_list->page_list[page_no] = |
| 106 | ib_dma_map_single(xprt->sc_cm_id->device, | 107 | ib_dma_map_page(xprt->sc_cm_id->device, |
| 107 | (void *)xdr->head[0].iov_base, | 108 | virt_to_page(xdr->head[0].iov_base), |
| 108 | PAGE_SIZE, DMA_TO_DEVICE); | 109 | page_off, |
| 110 | PAGE_SIZE - page_off, | ||
| 111 | DMA_TO_DEVICE); | ||
| 109 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, | 112 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
| 110 | frmr->page_list->page_list[page_no])) | 113 | frmr->page_list->page_list[page_no])) |
| 111 | goto fatal_err; | 114 | goto fatal_err; |
| 112 | atomic_inc(&xprt->sc_dma_used); | 115 | atomic_inc(&xprt->sc_dma_used); |
| 113 | 116 | ||
| 117 | /* Map the XDR page list */ | ||
| 114 | page_off = xdr->page_base; | 118 | page_off = xdr->page_base; |
| 115 | page_bytes = xdr->page_len + page_off; | 119 | page_bytes = xdr->page_len + page_off; |
| 116 | if (!page_bytes) | 120 | if (!page_bytes) |
| @@ -128,9 +132,9 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt, | |||
| 128 | page_bytes -= sge_bytes; | 132 | page_bytes -= sge_bytes; |
| 129 | 133 | ||
| 130 | frmr->page_list->page_list[page_no] = | 134 | frmr->page_list->page_list[page_no] = |
| 131 | ib_dma_map_single(xprt->sc_cm_id->device, | 135 | ib_dma_map_page(xprt->sc_cm_id->device, |
| 132 | page_address(page), | 136 | page, page_off, |
| 133 | PAGE_SIZE, DMA_TO_DEVICE); | 137 | sge_bytes, DMA_TO_DEVICE); |
| 134 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, | 138 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
| 135 | frmr->page_list->page_list[page_no])) | 139 | frmr->page_list->page_list[page_no])) |
| 136 | goto fatal_err; | 140 | goto fatal_err; |
| @@ -166,8 +170,10 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt, | |||
| 166 | vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off; | 170 | vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off; |
| 167 | 171 | ||
| 168 | frmr->page_list->page_list[page_no] = | 172 | frmr->page_list->page_list[page_no] = |
| 169 | ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE, | 173 | ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va), |
| 170 | DMA_TO_DEVICE); | 174 | page_off, |
| 175 | PAGE_SIZE, | ||
| 176 | DMA_TO_DEVICE); | ||
| 171 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, | 177 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
| 172 | frmr->page_list->page_list[page_no])) | 178 | frmr->page_list->page_list[page_no])) |
| 173 | goto fatal_err; | 179 | goto fatal_err; |
| @@ -245,6 +251,35 @@ static int map_xdr(struct svcxprt_rdma *xprt, | |||
| 245 | return 0; | 251 | return 0; |
| 246 | } | 252 | } |
| 247 | 253 | ||
| 254 | static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt, | ||
| 255 | struct xdr_buf *xdr, | ||
| 256 | u32 xdr_off, size_t len, int dir) | ||
| 257 | { | ||
| 258 | struct page *page; | ||
| 259 | dma_addr_t dma_addr; | ||
| 260 | if (xdr_off < xdr->head[0].iov_len) { | ||
| 261 | /* This offset is in the head */ | ||
| 262 | xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK; | ||
| 263 | page = virt_to_page(xdr->head[0].iov_base); | ||
| 264 | } else { | ||
| 265 | xdr_off -= xdr->head[0].iov_len; | ||
| 266 | if (xdr_off < xdr->page_len) { | ||
| 267 | /* This offset is in the page list */ | ||
| 268 | page = xdr->pages[xdr_off >> PAGE_SHIFT]; | ||
| 269 | xdr_off &= ~PAGE_MASK; | ||
| 270 | } else { | ||
| 271 | /* This offset is in the tail */ | ||
| 272 | xdr_off -= xdr->page_len; | ||
| 273 | xdr_off += (unsigned long) | ||
| 274 | xdr->tail[0].iov_base & ~PAGE_MASK; | ||
| 275 | page = virt_to_page(xdr->tail[0].iov_base); | ||
| 276 | } | ||
| 277 | } | ||
| 278 | dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off, | ||
| 279 | min_t(size_t, PAGE_SIZE, len), dir); | ||
| 280 | return dma_addr; | ||
| 281 | } | ||
| 282 | |||
| 248 | /* Assumptions: | 283 | /* Assumptions: |
| 249 | * - We are using FRMR | 284 | * - We are using FRMR |
| 250 | * - or - | 285 | * - or - |
| @@ -293,10 +328,9 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, | |||
| 293 | sge[sge_no].length = sge_bytes; | 328 | sge[sge_no].length = sge_bytes; |
| 294 | if (!vec->frmr) { | 329 | if (!vec->frmr) { |
| 295 | sge[sge_no].addr = | 330 | sge[sge_no].addr = |
| 296 | ib_dma_map_single(xprt->sc_cm_id->device, | 331 | dma_map_xdr(xprt, &rqstp->rq_res, xdr_off, |
| 297 | (void *) | 332 | sge_bytes, DMA_TO_DEVICE); |
| 298 | vec->sge[xdr_sge_no].iov_base + sge_off, | 333 | xdr_off += sge_bytes; |
| 299 | sge_bytes, DMA_TO_DEVICE); | ||
| 300 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, | 334 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
| 301 | sge[sge_no].addr)) | 335 | sge[sge_no].addr)) |
| 302 | goto err; | 336 | goto err; |
| @@ -333,6 +367,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, | |||
| 333 | goto err; | 367 | goto err; |
| 334 | return 0; | 368 | return 0; |
| 335 | err: | 369 | err: |
| 370 | svc_rdma_unmap_dma(ctxt); | ||
| 371 | svc_rdma_put_frmr(xprt, vec->frmr); | ||
| 336 | svc_rdma_put_context(ctxt, 0); | 372 | svc_rdma_put_context(ctxt, 0); |
| 337 | /* Fatal error, close transport */ | 373 | /* Fatal error, close transport */ |
| 338 | return -EIO; | 374 | return -EIO; |
| @@ -494,7 +530,8 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt, | |||
| 494 | * In all three cases, this function prepares the RPCRDMA header in | 530 | * In all three cases, this function prepares the RPCRDMA header in |
| 495 | * sge[0], the 'type' parameter indicates the type to place in the | 531 | * sge[0], the 'type' parameter indicates the type to place in the |
| 496 | * RPCRDMA header, and the 'byte_count' field indicates how much of | 532 | * RPCRDMA header, and the 'byte_count' field indicates how much of |
| 497 | * the XDR to include in this RDMA_SEND. | 533 | * the XDR to include in this RDMA_SEND. NB: The offset of the payload |
| 534 | * to send is zero in the XDR. | ||
| 498 | */ | 535 | */ |
| 499 | static int send_reply(struct svcxprt_rdma *rdma, | 536 | static int send_reply(struct svcxprt_rdma *rdma, |
| 500 | struct svc_rqst *rqstp, | 537 | struct svc_rqst *rqstp, |
| @@ -536,23 +573,24 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
| 536 | ctxt->sge[0].lkey = rdma->sc_dma_lkey; | 573 | ctxt->sge[0].lkey = rdma->sc_dma_lkey; |
| 537 | ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp); | 574 | ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp); |
| 538 | ctxt->sge[0].addr = | 575 | ctxt->sge[0].addr = |
| 539 | ib_dma_map_single(rdma->sc_cm_id->device, page_address(page), | 576 | ib_dma_map_page(rdma->sc_cm_id->device, page, 0, |
| 540 | ctxt->sge[0].length, DMA_TO_DEVICE); | 577 | ctxt->sge[0].length, DMA_TO_DEVICE); |
| 541 | if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) | 578 | if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) |
| 542 | goto err; | 579 | goto err; |
| 543 | atomic_inc(&rdma->sc_dma_used); | 580 | atomic_inc(&rdma->sc_dma_used); |
| 544 | 581 | ||
| 545 | ctxt->direction = DMA_TO_DEVICE; | 582 | ctxt->direction = DMA_TO_DEVICE; |
| 546 | 583 | ||
| 547 | /* Determine how many of our SGE are to be transmitted */ | 584 | /* Map the payload indicated by 'byte_count' */ |
| 548 | for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) { | 585 | for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) { |
| 586 | int xdr_off = 0; | ||
| 549 | sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count); | 587 | sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count); |
| 550 | byte_count -= sge_bytes; | 588 | byte_count -= sge_bytes; |
| 551 | if (!vec->frmr) { | 589 | if (!vec->frmr) { |
| 552 | ctxt->sge[sge_no].addr = | 590 | ctxt->sge[sge_no].addr = |
| 553 | ib_dma_map_single(rdma->sc_cm_id->device, | 591 | dma_map_xdr(rdma, &rqstp->rq_res, xdr_off, |
| 554 | vec->sge[sge_no].iov_base, | 592 | sge_bytes, DMA_TO_DEVICE); |
| 555 | sge_bytes, DMA_TO_DEVICE); | 593 | xdr_off += sge_bytes; |
| 556 | if (ib_dma_mapping_error(rdma->sc_cm_id->device, | 594 | if (ib_dma_mapping_error(rdma->sc_cm_id->device, |
| 557 | ctxt->sge[sge_no].addr)) | 595 | ctxt->sge[sge_no].addr)) |
| 558 | goto err; | 596 | goto err; |
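As a worked example of the dma_map_xdr() offset logic added above (assuming 4 KiB pages and illustrative lengths): with head[0].iov_len = 100 and page_len = 8192, an xdr_off of 4196 is past the head, so it is reduced to 4196 - 100 = 4096; that still falls within the page list, so the backing page is pages[4096 >> PAGE_SHIFT] = pages[1] and the in-page offset is 4096 & ~PAGE_MASK = 0. Only when the remaining offset also exceeds page_len does the lookup fall through to the tail iovec.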
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index edea15a54e51..9df1eadc912a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
| 46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
| 47 | #include <linux/spinlock.h> | 47 | #include <linux/spinlock.h> |
| 48 | #include <linux/workqueue.h> | ||
| 48 | #include <rdma/ib_verbs.h> | 49 | #include <rdma/ib_verbs.h> |
| 49 | #include <rdma/rdma_cm.h> | 50 | #include <rdma/rdma_cm.h> |
| 50 | #include <linux/sunrpc/svc_rdma.h> | 51 | #include <linux/sunrpc/svc_rdma.h> |
| @@ -52,6 +53,7 @@ | |||
| 52 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT | 53 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT |
| 53 | 54 | ||
| 54 | static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, | 55 | static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, |
| 56 | struct net *net, | ||
| 55 | struct sockaddr *sa, int salen, | 57 | struct sockaddr *sa, int salen, |
| 56 | int flags); | 58 | int flags); |
| 57 | static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt); | 59 | static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt); |
| @@ -89,6 +91,9 @@ struct svc_xprt_class svc_rdma_class = { | |||
| 89 | /* WR context cache. Created in svc_rdma.c */ | 91 | /* WR context cache. Created in svc_rdma.c */ |
| 90 | extern struct kmem_cache *svc_rdma_ctxt_cachep; | 92 | extern struct kmem_cache *svc_rdma_ctxt_cachep; |
| 91 | 93 | ||
| 94 | /* Workqueue created in svc_rdma.c */ | ||
| 95 | extern struct workqueue_struct *svc_rdma_wq; | ||
| 96 | |||
| 92 | struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) | 97 | struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) |
| 93 | { | 98 | { |
| 94 | struct svc_rdma_op_ctxt *ctxt; | 99 | struct svc_rdma_op_ctxt *ctxt; |
| @@ -120,7 +125,7 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt) | |||
| 120 | */ | 125 | */ |
| 121 | if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) { | 126 | if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) { |
| 122 | atomic_dec(&xprt->sc_dma_used); | 127 | atomic_dec(&xprt->sc_dma_used); |
| 123 | ib_dma_unmap_single(xprt->sc_cm_id->device, | 128 | ib_dma_unmap_page(xprt->sc_cm_id->device, |
| 124 | ctxt->sge[i].addr, | 129 | ctxt->sge[i].addr, |
| 125 | ctxt->sge[i].length, | 130 | ctxt->sge[i].length, |
| 126 | ctxt->direction); | 131 | ctxt->direction); |
| @@ -502,8 +507,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt) | |||
| 502 | BUG_ON(sge_no >= xprt->sc_max_sge); | 507 | BUG_ON(sge_no >= xprt->sc_max_sge); |
| 503 | page = svc_rdma_get_page(); | 508 | page = svc_rdma_get_page(); |
| 504 | ctxt->pages[sge_no] = page; | 509 | ctxt->pages[sge_no] = page; |
| 505 | pa = ib_dma_map_single(xprt->sc_cm_id->device, | 510 | pa = ib_dma_map_page(xprt->sc_cm_id->device, |
| 506 | page_address(page), PAGE_SIZE, | 511 | page, 0, PAGE_SIZE, |
| 507 | DMA_FROM_DEVICE); | 512 | DMA_FROM_DEVICE); |
| 508 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) | 513 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) |
| 509 | goto err_put_ctxt; | 514 | goto err_put_ctxt; |
| @@ -511,9 +516,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt) | |||
| 511 | ctxt->sge[sge_no].addr = pa; | 516 | ctxt->sge[sge_no].addr = pa; |
| 512 | ctxt->sge[sge_no].length = PAGE_SIZE; | 517 | ctxt->sge[sge_no].length = PAGE_SIZE; |
| 513 | ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey; | 518 | ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey; |
| 519 | ctxt->count = sge_no + 1; | ||
| 514 | buflen += PAGE_SIZE; | 520 | buflen += PAGE_SIZE; |
| 515 | } | 521 | } |
| 516 | ctxt->count = sge_no; | ||
| 517 | recv_wr.next = NULL; | 522 | recv_wr.next = NULL; |
| 518 | recv_wr.sg_list = &ctxt->sge[0]; | 523 | recv_wr.sg_list = &ctxt->sge[0]; |
| 519 | recv_wr.num_sge = ctxt->count; | 524 | recv_wr.num_sge = ctxt->count; |
| @@ -529,6 +534,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt) | |||
| 529 | return ret; | 534 | return ret; |
| 530 | 535 | ||
| 531 | err_put_ctxt: | 536 | err_put_ctxt: |
| 537 | svc_rdma_unmap_dma(ctxt); | ||
| 532 | svc_rdma_put_context(ctxt, 1); | 538 | svc_rdma_put_context(ctxt, 1); |
| 533 | return -ENOMEM; | 539 | return -ENOMEM; |
| 534 | } | 540 | } |
| @@ -670,6 +676,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id, | |||
| 670 | * Create a listening RDMA service endpoint. | 676 | * Create a listening RDMA service endpoint. |
| 671 | */ | 677 | */ |
| 672 | static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, | 678 | static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, |
| 679 | struct net *net, | ||
| 673 | struct sockaddr *sa, int salen, | 680 | struct sockaddr *sa, int salen, |
| 674 | int flags) | 681 | int flags) |
| 675 | { | 682 | { |
| @@ -798,8 +805,8 @@ static void frmr_unmap_dma(struct svcxprt_rdma *xprt, | |||
| 798 | if (ib_dma_mapping_error(frmr->mr->device, addr)) | 805 | if (ib_dma_mapping_error(frmr->mr->device, addr)) |
| 799 | continue; | 806 | continue; |
| 800 | atomic_dec(&xprt->sc_dma_used); | 807 | atomic_dec(&xprt->sc_dma_used); |
| 801 | ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE, | 808 | ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE, |
| 802 | frmr->direction); | 809 | frmr->direction); |
| 803 | } | 810 | } |
| 804 | } | 811 | } |
| 805 | 812 | ||
| @@ -1184,7 +1191,7 @@ static void svc_rdma_free(struct svc_xprt *xprt) | |||
| 1184 | struct svcxprt_rdma *rdma = | 1191 | struct svcxprt_rdma *rdma = |
| 1185 | container_of(xprt, struct svcxprt_rdma, sc_xprt); | 1192 | container_of(xprt, struct svcxprt_rdma, sc_xprt); |
| 1186 | INIT_WORK(&rdma->sc_work, __svc_rdma_free); | 1193 | INIT_WORK(&rdma->sc_work, __svc_rdma_free); |
| 1187 | schedule_work(&rdma->sc_work); | 1194 | queue_work(svc_rdma_wq, &rdma->sc_work); |
| 1188 | } | 1195 | } |
| 1189 | 1196 | ||
| 1190 | static int svc_rdma_has_wspace(struct svc_xprt *xprt) | 1197 | static int svc_rdma_has_wspace(struct svc_xprt *xprt) |
| @@ -1274,7 +1281,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) | |||
| 1274 | atomic_read(&xprt->sc_sq_count) < | 1281 | atomic_read(&xprt->sc_sq_count) < |
| 1275 | xprt->sc_sq_depth); | 1282 | xprt->sc_sq_depth); |
| 1276 | if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) | 1283 | if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) |
| 1277 | return 0; | 1284 | return -ENOTCONN; |
| 1278 | continue; | 1285 | continue; |
| 1279 | } | 1286 | } |
| 1280 | /* Take a transport ref for each WR posted */ | 1287 | /* Take a transport ref for each WR posted */ |
| @@ -1306,7 +1313,6 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, | |||
| 1306 | enum rpcrdma_errcode err) | 1313 | enum rpcrdma_errcode err) |
| 1307 | { | 1314 | { |
| 1308 | struct ib_send_wr err_wr; | 1315 | struct ib_send_wr err_wr; |
| 1309 | struct ib_sge sge; | ||
| 1310 | struct page *p; | 1316 | struct page *p; |
| 1311 | struct svc_rdma_op_ctxt *ctxt; | 1317 | struct svc_rdma_op_ctxt *ctxt; |
| 1312 | u32 *va; | 1318 | u32 *va; |
| @@ -1319,26 +1325,27 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, | |||
| 1319 | /* XDR encode error */ | 1325 | /* XDR encode error */ |
| 1320 | length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); | 1326 | length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); |
| 1321 | 1327 | ||
| 1328 | ctxt = svc_rdma_get_context(xprt); | ||
| 1329 | ctxt->direction = DMA_FROM_DEVICE; | ||
| 1330 | ctxt->count = 1; | ||
| 1331 | ctxt->pages[0] = p; | ||
| 1332 | |||
| 1322 | /* Prepare SGE for local address */ | 1333 | /* Prepare SGE for local address */ |
| 1323 | sge.addr = ib_dma_map_single(xprt->sc_cm_id->device, | 1334 | ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device, |
| 1324 | page_address(p), PAGE_SIZE, DMA_FROM_DEVICE); | 1335 | p, 0, length, DMA_FROM_DEVICE); |
| 1325 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) { | 1336 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { |
| 1326 | put_page(p); | 1337 | put_page(p); |
| 1327 | return; | 1338 | return; |
| 1328 | } | 1339 | } |
| 1329 | atomic_inc(&xprt->sc_dma_used); | 1340 | atomic_inc(&xprt->sc_dma_used); |
| 1330 | sge.lkey = xprt->sc_dma_lkey; | 1341 | ctxt->sge[0].lkey = xprt->sc_dma_lkey; |
| 1331 | sge.length = length; | 1342 | ctxt->sge[0].length = length; |
| 1332 | |||
| 1333 | ctxt = svc_rdma_get_context(xprt); | ||
| 1334 | ctxt->count = 1; | ||
| 1335 | ctxt->pages[0] = p; | ||
| 1336 | 1343 | ||
| 1337 | /* Prepare SEND WR */ | 1344 | /* Prepare SEND WR */ |
| 1338 | memset(&err_wr, 0, sizeof err_wr); | 1345 | memset(&err_wr, 0, sizeof err_wr); |
| 1339 | ctxt->wr_op = IB_WR_SEND; | 1346 | ctxt->wr_op = IB_WR_SEND; |
| 1340 | err_wr.wr_id = (unsigned long)ctxt; | 1347 | err_wr.wr_id = (unsigned long)ctxt; |
| 1341 | err_wr.sg_list = &sge; | 1348 | err_wr.sg_list = ctxt->sge; |
| 1342 | err_wr.num_sge = 1; | 1349 | err_wr.num_sge = 1; |
| 1343 | err_wr.opcode = IB_WR_SEND; | 1350 | err_wr.opcode = IB_WR_SEND; |
| 1344 | err_wr.send_flags = IB_SEND_SIGNALED; | 1351 | err_wr.send_flags = IB_SEND_SIGNALED; |
| @@ -1348,9 +1355,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, | |||
| 1348 | if (ret) { | 1355 | if (ret) { |
| 1349 | dprintk("svcrdma: Error %d posting send for protocol error\n", | 1356 | dprintk("svcrdma: Error %d posting send for protocol error\n", |
| 1350 | ret); | 1357 | ret); |
| 1351 | ib_dma_unmap_single(xprt->sc_cm_id->device, | 1358 | svc_rdma_unmap_dma(ctxt); |
| 1352 | sge.addr, PAGE_SIZE, | ||
| 1353 | DMA_FROM_DEVICE); | ||
| 1354 | svc_rdma_put_context(ctxt, 1); | 1359 | svc_rdma_put_context(ctxt, 1); |
| 1355 | } | 1360 | } |
| 1356 | } | 1361 | } |
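The send_error hunk above moves the SGE off the sender's stack and into the per-WR svc_rdma_op_ctxt, so a failed post can be cleaned up with svc_rdma_unmap_dma() on the same context. A sketch of the resulting post-and-cleanup pattern; example_post_error() is an illustrative wrapper around calls taken from the patch:

#include <linux/string.h>
#include <linux/sunrpc/svc_rdma.h>
#include <rdma/ib_verbs.h>

static void example_post_error(struct svcxprt_rdma *xprt,
			       struct svc_rdma_op_ctxt *ctxt)
{
	struct ib_send_wr err_wr;

	memset(&err_wr, 0, sizeof(err_wr));
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;	/* owned by the ctxt, not the stack */
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	if (svc_rdma_send(xprt, &err_wr)) {
		/* undo the DMA mapping recorded in the ctxt, then release it */
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}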
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index a85e866a77f7..0867070bb5ca 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
| @@ -237,8 +237,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt) | |||
| 237 | 237 | ||
| 238 | dprintk("RPC: %s: called\n", __func__); | 238 | dprintk("RPC: %s: called\n", __func__); |
| 239 | 239 | ||
| 240 | cancel_delayed_work(&r_xprt->rdma_connect); | 240 | cancel_delayed_work_sync(&r_xprt->rdma_connect); |
| 241 | flush_scheduled_work(); | ||
| 242 | 241 | ||
| 243 | xprt_clear_connected(xprt); | 242 | xprt_clear_connected(xprt); |
| 244 | 243 | ||
| @@ -251,9 +250,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt) | |||
| 251 | 250 | ||
| 252 | xprt_rdma_free_addresses(xprt); | 251 | xprt_rdma_free_addresses(xprt); |
| 253 | 252 | ||
| 254 | kfree(xprt->slot); | 253 | xprt_free(xprt); |
| 255 | xprt->slot = NULL; | ||
| 256 | kfree(xprt); | ||
| 257 | 254 | ||
| 258 | dprintk("RPC: %s: returning\n", __func__); | 255 | dprintk("RPC: %s: returning\n", __func__); |
| 259 | 256 | ||
| @@ -285,23 +282,14 @@ xprt_setup_rdma(struct xprt_create *args) | |||
| 285 | return ERR_PTR(-EBADF); | 282 | return ERR_PTR(-EBADF); |
| 286 | } | 283 | } |
| 287 | 284 | ||
| 288 | xprt = kzalloc(sizeof(struct rpcrdma_xprt), GFP_KERNEL); | 285 | xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), |
| 286 | xprt_rdma_slot_table_entries); | ||
| 289 | if (xprt == NULL) { | 287 | if (xprt == NULL) { |
| 290 | dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", | 288 | dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", |
| 291 | __func__); | 289 | __func__); |
| 292 | return ERR_PTR(-ENOMEM); | 290 | return ERR_PTR(-ENOMEM); |
| 293 | } | 291 | } |
| 294 | 292 | ||
| 295 | xprt->max_reqs = xprt_rdma_slot_table_entries; | ||
| 296 | xprt->slot = kcalloc(xprt->max_reqs, | ||
| 297 | sizeof(struct rpc_rqst), GFP_KERNEL); | ||
| 298 | if (xprt->slot == NULL) { | ||
| 299 | dprintk("RPC: %s: couldn't allocate %d slots\n", | ||
| 300 | __func__, xprt->max_reqs); | ||
| 301 | kfree(xprt); | ||
| 302 | return ERR_PTR(-ENOMEM); | ||
| 303 | } | ||
| 304 | |||
| 305 | /* 60 second timeout, no retries */ | 293 | /* 60 second timeout, no retries */ |
| 306 | xprt->timeout = &xprt_rdma_default_timeout; | 294 | xprt->timeout = &xprt_rdma_default_timeout; |
| 307 | xprt->bind_timeout = (60U * HZ); | 295 | xprt->bind_timeout = (60U * HZ); |
| @@ -410,8 +398,7 @@ out3: | |||
| 410 | out2: | 398 | out2: |
| 411 | rpcrdma_ia_close(&new_xprt->rx_ia); | 399 | rpcrdma_ia_close(&new_xprt->rx_ia); |
| 412 | out1: | 400 | out1: |
| 413 | kfree(xprt->slot); | 401 | xprt_free(xprt); |
| 414 | kfree(xprt); | ||
| 415 | return ERR_PTR(rc); | 402 | return ERR_PTR(rc); |
| 416 | } | 403 | } |
| 417 | 404 | ||
| @@ -460,7 +447,7 @@ xprt_rdma_connect(struct rpc_task *task) | |||
| 460 | } else { | 447 | } else { |
| 461 | schedule_delayed_work(&r_xprt->rdma_connect, 0); | 448 | schedule_delayed_work(&r_xprt->rdma_connect, 0); |
| 462 | if (!RPC_IS_ASYNC(task)) | 449 | if (!RPC_IS_ASYNC(task)) |
| 463 | flush_scheduled_work(); | 450 | flush_delayed_work(&r_xprt->rdma_connect); |
| 464 | } | 451 | } |
| 465 | } | 452 | } |
| 466 | 453 | ||
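xprt_rdma_destroy() and xprt_rdma_connect() above stop flushing the entire system workqueue and instead cancel or flush only their own delayed work item. A minimal sketch of the pattern with hypothetical names; the delayed_work is assumed to have been set up with INIT_DELAYED_WORK() elsewhere:

#include <linux/types.h>
#include <linux/workqueue.h>

static struct delayed_work example_connect;

static void example_connect_and_wait(bool async)
{
	schedule_delayed_work(&example_connect, 0);
	if (!async)
		flush_delayed_work(&example_connect);	/* wait for this item only */
}

static void example_teardown(void)
{
	/* cancel, and wait if the handler happens to be running */
	cancel_delayed_work_sync(&example_connect);
}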
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index fe9306bf10cc..dfcab5ac65af 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -774,8 +774,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
| 774 | 774 | ||
| 775 | xs_close(xprt); | 775 | xs_close(xprt); |
| 776 | xs_free_peer_addresses(xprt); | 776 | xs_free_peer_addresses(xprt); |
| 777 | kfree(xprt->slot); | 777 | xprt_free(xprt); |
| 778 | kfree(xprt); | ||
| 779 | module_put(THIS_MODULE); | 778 | module_put(THIS_MODULE); |
| 780 | } | 779 | } |
| 781 | 780 | ||
| @@ -1516,7 +1515,7 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
| 1516 | xs_update_peer_port(xprt); | 1515 | xs_update_peer_port(xprt); |
| 1517 | } | 1516 | } |
| 1518 | 1517 | ||
| 1519 | static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock) | 1518 | static unsigned short xs_get_srcport(struct sock_xprt *transport) |
| 1520 | { | 1519 | { |
| 1521 | unsigned short port = transport->srcport; | 1520 | unsigned short port = transport->srcport; |
| 1522 | 1521 | ||
| @@ -1525,7 +1524,7 @@ static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket | |||
| 1525 | return port; | 1524 | return port; |
| 1526 | } | 1525 | } |
| 1527 | 1526 | ||
| 1528 | static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port) | 1527 | static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port) |
| 1529 | { | 1528 | { |
| 1530 | if (transport->srcport != 0) | 1529 | if (transport->srcport != 0) |
| 1531 | transport->srcport = 0; | 1530 | transport->srcport = 0; |
| @@ -1535,23 +1534,18 @@ static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket | |||
| 1535 | return xprt_max_resvport; | 1534 | return xprt_max_resvport; |
| 1536 | return --port; | 1535 | return --port; |
| 1537 | } | 1536 | } |
| 1538 | 1537 | static int xs_bind(struct sock_xprt *transport, struct socket *sock) | |
| 1539 | static int xs_bind4(struct sock_xprt *transport, struct socket *sock) | ||
| 1540 | { | 1538 | { |
| 1541 | struct sockaddr_in myaddr = { | 1539 | struct sockaddr_storage myaddr; |
| 1542 | .sin_family = AF_INET, | ||
| 1543 | }; | ||
| 1544 | struct sockaddr_in *sa; | ||
| 1545 | int err, nloop = 0; | 1540 | int err, nloop = 0; |
| 1546 | unsigned short port = xs_get_srcport(transport, sock); | 1541 | unsigned short port = xs_get_srcport(transport); |
| 1547 | unsigned short last; | 1542 | unsigned short last; |
| 1548 | 1543 | ||
| 1549 | sa = (struct sockaddr_in *)&transport->srcaddr; | 1544 | memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); |
| 1550 | myaddr.sin_addr = sa->sin_addr; | ||
| 1551 | do { | 1545 | do { |
| 1552 | myaddr.sin_port = htons(port); | 1546 | rpc_set_port((struct sockaddr *)&myaddr, port); |
| 1553 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, | 1547 | err = kernel_bind(sock, (struct sockaddr *)&myaddr, |
| 1554 | sizeof(myaddr)); | 1548 | transport->xprt.addrlen); |
| 1555 | if (port == 0) | 1549 | if (port == 0) |
| 1556 | break; | 1550 | break; |
| 1557 | if (err == 0) { | 1551 | if (err == 0) { |
| @@ -1559,48 +1553,23 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock) | |||
| 1559 | break; | 1553 | break; |
| 1560 | } | 1554 | } |
| 1561 | last = port; | 1555 | last = port; |
| 1562 | port = xs_next_srcport(transport, sock, port); | 1556 | port = xs_next_srcport(transport, port); |
| 1563 | if (port > last) | 1557 | if (port > last) |
| 1564 | nloop++; | 1558 | nloop++; |
| 1565 | } while (err == -EADDRINUSE && nloop != 2); | 1559 | } while (err == -EADDRINUSE && nloop != 2); |
| 1566 | dprintk("RPC: %s %pI4:%u: %s (%d)\n", | ||
| 1567 | __func__, &myaddr.sin_addr, | ||
| 1568 | port, err ? "failed" : "ok", err); | ||
| 1569 | return err; | ||
| 1570 | } | ||
| 1571 | |||
| 1572 | static int xs_bind6(struct sock_xprt *transport, struct socket *sock) | ||
| 1573 | { | ||
| 1574 | struct sockaddr_in6 myaddr = { | ||
| 1575 | .sin6_family = AF_INET6, | ||
| 1576 | }; | ||
| 1577 | struct sockaddr_in6 *sa; | ||
| 1578 | int err, nloop = 0; | ||
| 1579 | unsigned short port = xs_get_srcport(transport, sock); | ||
| 1580 | unsigned short last; | ||
| 1581 | 1560 | ||
| 1582 | sa = (struct sockaddr_in6 *)&transport->srcaddr; | 1561 | if (myaddr.ss_family == AF_INET) |
| 1583 | myaddr.sin6_addr = sa->sin6_addr; | 1562 | dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__, |
| 1584 | do { | 1563 | &((struct sockaddr_in *)&myaddr)->sin_addr, |
| 1585 | myaddr.sin6_port = htons(port); | 1564 | port, err ? "failed" : "ok", err); |
| 1586 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, | 1565 | else |
| 1587 | sizeof(myaddr)); | 1566 | dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__, |
| 1588 | if (port == 0) | 1567 | &((struct sockaddr_in6 *)&myaddr)->sin6_addr, |
| 1589 | break; | 1568 | port, err ? "failed" : "ok", err); |
| 1590 | if (err == 0) { | ||
| 1591 | transport->srcport = port; | ||
| 1592 | break; | ||
| 1593 | } | ||
| 1594 | last = port; | ||
| 1595 | port = xs_next_srcport(transport, sock, port); | ||
| 1596 | if (port > last) | ||
| 1597 | nloop++; | ||
| 1598 | } while (err == -EADDRINUSE && nloop != 2); | ||
| 1599 | dprintk("RPC: xs_bind6 %pI6:%u: %s (%d)\n", | ||
| 1600 | &myaddr.sin6_addr, port, err ? "failed" : "ok", err); | ||
| 1601 | return err; | 1569 | return err; |
| 1602 | } | 1570 | } |
| 1603 | 1571 | ||
| 1572 | |||
| 1604 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 1573 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 1605 | static struct lock_class_key xs_key[2]; | 1574 | static struct lock_class_key xs_key[2]; |
| 1606 | static struct lock_class_key xs_slock_key[2]; | 1575 | static struct lock_class_key xs_slock_key[2]; |
| @@ -1622,6 +1591,18 @@ static inline void xs_reclassify_socket6(struct socket *sock) | |||
| 1622 | sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", | 1591 | sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", |
| 1623 | &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); | 1592 | &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); |
| 1624 | } | 1593 | } |
| 1594 | |||
| 1595 | static inline void xs_reclassify_socket(int family, struct socket *sock) | ||
| 1596 | { | ||
| 1597 | switch (family) { | ||
| 1598 | case AF_INET: | ||
| 1599 | xs_reclassify_socket4(sock); | ||
| 1600 | break; | ||
| 1601 | case AF_INET6: | ||
| 1602 | xs_reclassify_socket6(sock); | ||
| 1603 | break; | ||
| 1604 | } | ||
| 1605 | } | ||
| 1625 | #else | 1606 | #else |
| 1626 | static inline void xs_reclassify_socket4(struct socket *sock) | 1607 | static inline void xs_reclassify_socket4(struct socket *sock) |
| 1627 | { | 1608 | { |
| @@ -1630,8 +1611,36 @@ static inline void xs_reclassify_socket4(struct socket *sock) | |||
| 1630 | static inline void xs_reclassify_socket6(struct socket *sock) | 1611 | static inline void xs_reclassify_socket6(struct socket *sock) |
| 1631 | { | 1612 | { |
| 1632 | } | 1613 | } |
| 1614 | |||
| 1615 | static inline void xs_reclassify_socket(int family, struct socket *sock) | ||
| 1616 | { | ||
| 1617 | } | ||
| 1633 | #endif | 1618 | #endif |
| 1634 | 1619 | ||
| 1620 | static struct socket *xs_create_sock(struct rpc_xprt *xprt, | ||
| 1621 | struct sock_xprt *transport, int family, int type, int protocol) | ||
| 1622 | { | ||
| 1623 | struct socket *sock; | ||
| 1624 | int err; | ||
| 1625 | |||
| 1626 | err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); | ||
| 1627 | if (err < 0) { | ||
| 1628 | dprintk("RPC: can't create %d transport socket (%d).\n", | ||
| 1629 | protocol, -err); | ||
| 1630 | goto out; | ||
| 1631 | } | ||
| 1632 | xs_reclassify_socket(family, sock); | ||
| 1633 | |||
| 1634 | if (xs_bind(transport, sock)) { | ||
| 1635 | sock_release(sock); | ||
| 1636 | goto out; | ||
| 1637 | } | ||
| 1638 | |||
| 1639 | return sock; | ||
| 1640 | out: | ||
| 1641 | return ERR_PTR(err); | ||
| 1642 | } | ||
| 1643 | |||
| 1635 | static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | 1644 | static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) |
| 1636 | { | 1645 | { |
| 1637 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 1646 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
| @@ -1661,82 +1670,23 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
| 1661 | xs_udp_do_set_buffer_size(xprt); | 1670 | xs_udp_do_set_buffer_size(xprt); |
| 1662 | } | 1671 | } |
| 1663 | 1672 | ||
| 1664 | /** | 1673 | static void xs_udp_setup_socket(struct work_struct *work) |
| 1665 | * xs_udp_connect_worker4 - set up a UDP socket | ||
| 1666 | * @work: RPC transport to connect | ||
| 1667 | * | ||
| 1668 | * Invoked by a work queue tasklet. | ||
| 1669 | */ | ||
| 1670 | static void xs_udp_connect_worker4(struct work_struct *work) | ||
| 1671 | { | 1674 | { |
| 1672 | struct sock_xprt *transport = | 1675 | struct sock_xprt *transport = |
| 1673 | container_of(work, struct sock_xprt, connect_worker.work); | 1676 | container_of(work, struct sock_xprt, connect_worker.work); |
| 1674 | struct rpc_xprt *xprt = &transport->xprt; | 1677 | struct rpc_xprt *xprt = &transport->xprt; |
| 1675 | struct socket *sock = transport->sock; | 1678 | struct socket *sock = transport->sock; |
| 1676 | int err, status = -EIO; | 1679 | int status = -EIO; |
| 1677 | 1680 | ||
| 1678 | if (xprt->shutdown) | 1681 | if (xprt->shutdown) |
| 1679 | goto out; | 1682 | goto out; |
| 1680 | 1683 | ||
| 1681 | /* Start by resetting any existing state */ | 1684 | /* Start by resetting any existing state */ |
| 1682 | xs_reset_transport(transport); | 1685 | xs_reset_transport(transport); |
| 1683 | 1686 | sock = xs_create_sock(xprt, transport, | |
| 1684 | err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); | 1687 | xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP); |
| 1685 | if (err < 0) { | 1688 | if (IS_ERR(sock)) |
| 1686 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | ||
| 1687 | goto out; | 1689 | goto out; |
| 1688 | } | ||
| 1689 | xs_reclassify_socket4(sock); | ||
| 1690 | |||
| 1691 | if (xs_bind4(transport, sock)) { | ||
| 1692 | sock_release(sock); | ||
| 1693 | goto out; | ||
| 1694 | } | ||
| 1695 | |||
| 1696 | dprintk("RPC: worker connecting xprt %p via %s to " | ||
| 1697 | "%s (port %s)\n", xprt, | ||
| 1698 | xprt->address_strings[RPC_DISPLAY_PROTO], | ||
| 1699 | xprt->address_strings[RPC_DISPLAY_ADDR], | ||
| 1700 | xprt->address_strings[RPC_DISPLAY_PORT]); | ||
| 1701 | |||
| 1702 | xs_udp_finish_connecting(xprt, sock); | ||
| 1703 | status = 0; | ||
| 1704 | out: | ||
| 1705 | xprt_clear_connecting(xprt); | ||
| 1706 | xprt_wake_pending_tasks(xprt, status); | ||
| 1707 | } | ||
| 1708 | |||
| 1709 | /** | ||
| 1710 | * xs_udp_connect_worker6 - set up a UDP socket | ||
| 1711 | * @work: RPC transport to connect | ||
| 1712 | * | ||
| 1713 | * Invoked by a work queue tasklet. | ||
| 1714 | */ | ||
| 1715 | static void xs_udp_connect_worker6(struct work_struct *work) | ||
| 1716 | { | ||
| 1717 | struct sock_xprt *transport = | ||
| 1718 | container_of(work, struct sock_xprt, connect_worker.work); | ||
| 1719 | struct rpc_xprt *xprt = &transport->xprt; | ||
| 1720 | struct socket *sock = transport->sock; | ||
| 1721 | int err, status = -EIO; | ||
| 1722 | |||
| 1723 | if (xprt->shutdown) | ||
| 1724 | goto out; | ||
| 1725 | |||
| 1726 | /* Start by resetting any existing state */ | ||
| 1727 | xs_reset_transport(transport); | ||
| 1728 | |||
| 1729 | err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock); | ||
| 1730 | if (err < 0) { | ||
| 1731 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | ||
| 1732 | goto out; | ||
| 1733 | } | ||
| 1734 | xs_reclassify_socket6(sock); | ||
| 1735 | |||
| 1736 | if (xs_bind6(transport, sock) < 0) { | ||
| 1737 | sock_release(sock); | ||
| 1738 | goto out; | ||
| 1739 | } | ||
| 1740 | 1690 | ||
| 1741 | dprintk("RPC: worker connecting xprt %p via %s to " | 1691 | dprintk("RPC: worker connecting xprt %p via %s to " |
| 1742 | "%s (port %s)\n", xprt, | 1692 | "%s (port %s)\n", xprt, |
| @@ -1755,12 +1705,12 @@ out: | |||
| 1755 | * We need to preserve the port number so the reply cache on the server can | 1705 | * We need to preserve the port number so the reply cache on the server can |
| 1756 | * find our cached RPC replies when we get around to reconnecting. | 1706 | * find our cached RPC replies when we get around to reconnecting. |
| 1757 | */ | 1707 | */ |
| 1758 | static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport) | 1708 | static void xs_abort_connection(struct sock_xprt *transport) |
| 1759 | { | 1709 | { |
| 1760 | int result; | 1710 | int result; |
| 1761 | struct sockaddr any; | 1711 | struct sockaddr any; |
| 1762 | 1712 | ||
| 1763 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); | 1713 | dprintk("RPC: disconnecting xprt %p to reuse port\n", transport); |
| 1764 | 1714 | ||
| 1765 | /* | 1715 | /* |
| 1766 | * Disconnect the transport socket by doing a connect operation | 1716 | * Disconnect the transport socket by doing a connect operation |
| @@ -1770,13 +1720,13 @@ static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transpo | |||
| 1770 | any.sa_family = AF_UNSPEC; | 1720 | any.sa_family = AF_UNSPEC; |
| 1771 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); | 1721 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); |
| 1772 | if (!result) | 1722 | if (!result) |
| 1773 | xs_sock_mark_closed(xprt); | 1723 | xs_sock_mark_closed(&transport->xprt); |
| 1774 | else | 1724 | else |
| 1775 | dprintk("RPC: AF_UNSPEC connect return code %d\n", | 1725 | dprintk("RPC: AF_UNSPEC connect return code %d\n", |
| 1776 | result); | 1726 | result); |
| 1777 | } | 1727 | } |
| 1778 | 1728 | ||
| 1779 | static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport) | 1729 | static void xs_tcp_reuse_connection(struct sock_xprt *transport) |
| 1780 | { | 1730 | { |
| 1781 | unsigned int state = transport->inet->sk_state; | 1731 | unsigned int state = transport->inet->sk_state; |
| 1782 | 1732 | ||
| @@ -1799,7 +1749,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra | |||
| 1799 | "sk_shutdown set to %d\n", | 1749 | "sk_shutdown set to %d\n", |
| 1800 | __func__, transport->inet->sk_shutdown); | 1750 | __func__, transport->inet->sk_shutdown); |
| 1801 | } | 1751 | } |
| 1802 | xs_abort_connection(xprt, transport); | 1752 | xs_abort_connection(transport); |
| 1803 | } | 1753 | } |
| 1804 | 1754 | ||
| 1805 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | 1755 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) |
| @@ -1852,12 +1802,12 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
| 1852 | * | 1802 | * |
| 1853 | * Invoked by a work queue tasklet. | 1803 | * Invoked by a work queue tasklet. |
| 1854 | */ | 1804 | */ |
| 1855 | static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | 1805 | static void xs_tcp_setup_socket(struct work_struct *work) |
| 1856 | struct sock_xprt *transport, | ||
| 1857 | struct socket *(*create_sock)(struct rpc_xprt *, | ||
| 1858 | struct sock_xprt *)) | ||
| 1859 | { | 1806 | { |
| 1807 | struct sock_xprt *transport = | ||
| 1808 | container_of(work, struct sock_xprt, connect_worker.work); | ||
| 1860 | struct socket *sock = transport->sock; | 1809 | struct socket *sock = transport->sock; |
| 1810 | struct rpc_xprt *xprt = &transport->xprt; | ||
| 1861 | int status = -EIO; | 1811 | int status = -EIO; |
| 1862 | 1812 | ||
| 1863 | if (xprt->shutdown) | 1813 | if (xprt->shutdown) |
| @@ -1865,7 +1815,8 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | |||
| 1865 | 1815 | ||
| 1866 | if (!sock) { | 1816 | if (!sock) { |
| 1867 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | 1817 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); |
| 1868 | sock = create_sock(xprt, transport); | 1818 | sock = xs_create_sock(xprt, transport, |
| 1819 | xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP); | ||
| 1869 | if (IS_ERR(sock)) { | 1820 | if (IS_ERR(sock)) { |
| 1870 | status = PTR_ERR(sock); | 1821 | status = PTR_ERR(sock); |
| 1871 | goto out; | 1822 | goto out; |
| @@ -1876,7 +1827,7 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | |||
| 1876 | abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT, | 1827 | abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT, |
| 1877 | &xprt->state); | 1828 | &xprt->state); |
| 1878 | /* "close" the socket, preserving the local port */ | 1829 | /* "close" the socket, preserving the local port */ |
| 1879 | xs_tcp_reuse_connection(xprt, transport); | 1830 | xs_tcp_reuse_connection(transport); |
| 1880 | 1831 | ||
| 1881 | if (abort_and_exit) | 1832 | if (abort_and_exit) |
| 1882 | goto out_eagain; | 1833 | goto out_eagain; |
| @@ -1925,84 +1876,6 @@ out: | |||
| 1925 | xprt_wake_pending_tasks(xprt, status); | 1876 | xprt_wake_pending_tasks(xprt, status); |
| 1926 | } | 1877 | } |
| 1927 | 1878 | ||
| 1928 | static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt, | ||
| 1929 | struct sock_xprt *transport) | ||
| 1930 | { | ||
| 1931 | struct socket *sock; | ||
| 1932 | int err; | ||
| 1933 | |||
| 1934 | /* start from scratch */ | ||
| 1935 | err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); | ||
| 1936 | if (err < 0) { | ||
| 1937 | dprintk("RPC: can't create TCP transport socket (%d).\n", | ||
| 1938 | -err); | ||
| 1939 | goto out_err; | ||
| 1940 | } | ||
| 1941 | xs_reclassify_socket4(sock); | ||
| 1942 | |||
| 1943 | if (xs_bind4(transport, sock) < 0) { | ||
| 1944 | sock_release(sock); | ||
| 1945 | goto out_err; | ||
| 1946 | } | ||
| 1947 | return sock; | ||
| 1948 | out_err: | ||
| 1949 | return ERR_PTR(-EIO); | ||
| 1950 | } | ||
| 1951 | |||
| 1952 | /** | ||
| 1953 | * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint | ||
| 1954 | * @work: RPC transport to connect | ||
| 1955 | * | ||
| 1956 | * Invoked by a work queue tasklet. | ||
| 1957 | */ | ||
| 1958 | static void xs_tcp_connect_worker4(struct work_struct *work) | ||
| 1959 | { | ||
| 1960 | struct sock_xprt *transport = | ||
| 1961 | container_of(work, struct sock_xprt, connect_worker.work); | ||
| 1962 | struct rpc_xprt *xprt = &transport->xprt; | ||
| 1963 | |||
| 1964 | xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4); | ||
| 1965 | } | ||
| 1966 | |||
| 1967 | static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt, | ||
| 1968 | struct sock_xprt *transport) | ||
| 1969 | { | ||
| 1970 | struct socket *sock; | ||
| 1971 | int err; | ||
| 1972 | |||
| 1973 | /* start from scratch */ | ||
| 1974 | err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock); | ||
| 1975 | if (err < 0) { | ||
| 1976 | dprintk("RPC: can't create TCP transport socket (%d).\n", | ||
| 1977 | -err); | ||
| 1978 | goto out_err; | ||
| 1979 | } | ||
| 1980 | xs_reclassify_socket6(sock); | ||
| 1981 | |||
| 1982 | if (xs_bind6(transport, sock) < 0) { | ||
| 1983 | sock_release(sock); | ||
| 1984 | goto out_err; | ||
| 1985 | } | ||
| 1986 | return sock; | ||
| 1987 | out_err: | ||
| 1988 | return ERR_PTR(-EIO); | ||
| 1989 | } | ||
| 1990 | |||
| 1991 | /** | ||
| 1992 | * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint | ||
| 1993 | * @work: RPC transport to connect | ||
| 1994 | * | ||
| 1995 | * Invoked by a work queue tasklet. | ||
| 1996 | */ | ||
| 1997 | static void xs_tcp_connect_worker6(struct work_struct *work) | ||
| 1998 | { | ||
| 1999 | struct sock_xprt *transport = | ||
| 2000 | container_of(work, struct sock_xprt, connect_worker.work); | ||
| 2001 | struct rpc_xprt *xprt = &transport->xprt; | ||
| 2002 | |||
| 2003 | xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6); | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | /** | 1879 | /** |
| 2007 | * xs_connect - connect a socket to a remote endpoint | 1880 | * xs_connect - connect a socket to a remote endpoint |
| 2008 | * @task: address of RPC task that manages state of connect request | 1881 | * @task: address of RPC task that manages state of connect request |
| @@ -2262,6 +2135,31 @@ static struct rpc_xprt_ops bc_tcp_ops = { | |||
| 2262 | .print_stats = xs_tcp_print_stats, | 2135 | .print_stats = xs_tcp_print_stats, |
| 2263 | }; | 2136 | }; |
| 2264 | 2137 | ||
| 2138 | static int xs_init_anyaddr(const int family, struct sockaddr *sap) | ||
| 2139 | { | ||
| 2140 | static const struct sockaddr_in sin = { | ||
| 2141 | .sin_family = AF_INET, | ||
| 2142 | .sin_addr.s_addr = htonl(INADDR_ANY), | ||
| 2143 | }; | ||
| 2144 | static const struct sockaddr_in6 sin6 = { | ||
| 2145 | .sin6_family = AF_INET6, | ||
| 2146 | .sin6_addr = IN6ADDR_ANY_INIT, | ||
| 2147 | }; | ||
| 2148 | |||
| 2149 | switch (family) { | ||
| 2150 | case AF_INET: | ||
| 2151 | memcpy(sap, &sin, sizeof(sin)); | ||
| 2152 | break; | ||
| 2153 | case AF_INET6: | ||
| 2154 | memcpy(sap, &sin6, sizeof(sin6)); | ||
| 2155 | break; | ||
| 2156 | default: | ||
| 2157 | dprintk("RPC: %s: Bad address family\n", __func__); | ||
| 2158 | return -EAFNOSUPPORT; | ||
| 2159 | } | ||
| 2160 | return 0; | ||
| 2161 | } | ||
| 2162 | |||
| 2265 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | 2163 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, |
| 2266 | unsigned int slot_table_size) | 2164 | unsigned int slot_table_size) |
| 2267 | { | 2165 | { |
| @@ -2273,27 +2171,25 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | |||
| 2273 | return ERR_PTR(-EBADF); | 2171 | return ERR_PTR(-EBADF); |
| 2274 | } | 2172 | } |
| 2275 | 2173 | ||
| 2276 | new = kzalloc(sizeof(*new), GFP_KERNEL); | 2174 | xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size); |
| 2277 | if (new == NULL) { | 2175 | if (xprt == NULL) { |
| 2278 | dprintk("RPC: xs_setup_xprt: couldn't allocate " | 2176 | dprintk("RPC: xs_setup_xprt: couldn't allocate " |
| 2279 | "rpc_xprt\n"); | 2177 | "rpc_xprt\n"); |
| 2280 | return ERR_PTR(-ENOMEM); | 2178 | return ERR_PTR(-ENOMEM); |
| 2281 | } | 2179 | } |
| 2282 | xprt = &new->xprt; | ||
| 2283 | |||
| 2284 | xprt->max_reqs = slot_table_size; | ||
| 2285 | xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); | ||
| 2286 | if (xprt->slot == NULL) { | ||
| 2287 | kfree(xprt); | ||
| 2288 | dprintk("RPC: xs_setup_xprt: couldn't allocate slot " | ||
| 2289 | "table\n"); | ||
| 2290 | return ERR_PTR(-ENOMEM); | ||
| 2291 | } | ||
| 2292 | 2180 | ||
| 2181 | new = container_of(xprt, struct sock_xprt, xprt); | ||
| 2293 | memcpy(&xprt->addr, args->dstaddr, args->addrlen); | 2182 | memcpy(&xprt->addr, args->dstaddr, args->addrlen); |
| 2294 | xprt->addrlen = args->addrlen; | 2183 | xprt->addrlen = args->addrlen; |
| 2295 | if (args->srcaddr) | 2184 | if (args->srcaddr) |
| 2296 | memcpy(&new->srcaddr, args->srcaddr, args->addrlen); | 2185 | memcpy(&new->srcaddr, args->srcaddr, args->addrlen); |
| 2186 | else { | ||
| 2187 | int err; | ||
| 2188 | err = xs_init_anyaddr(args->dstaddr->sa_family, | ||
| 2189 | (struct sockaddr *)&new->srcaddr); | ||
| 2190 | if (err != 0) | ||
| 2191 | return ERR_PTR(err); | ||
| 2192 | } | ||
| 2297 | 2193 | ||
| 2298 | return xprt; | 2194 | return xprt; |
| 2299 | } | 2195 | } |
| @@ -2341,7 +2237,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
| 2341 | xprt_set_bound(xprt); | 2237 | xprt_set_bound(xprt); |
| 2342 | 2238 | ||
| 2343 | INIT_DELAYED_WORK(&transport->connect_worker, | 2239 | INIT_DELAYED_WORK(&transport->connect_worker, |
| 2344 | xs_udp_connect_worker4); | 2240 | xs_udp_setup_socket); |
| 2345 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); | 2241 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); |
| 2346 | break; | 2242 | break; |
| 2347 | case AF_INET6: | 2243 | case AF_INET6: |
| @@ -2349,7 +2245,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
| 2349 | xprt_set_bound(xprt); | 2245 | xprt_set_bound(xprt); |
| 2350 | 2246 | ||
| 2351 | INIT_DELAYED_WORK(&transport->connect_worker, | 2247 | INIT_DELAYED_WORK(&transport->connect_worker, |
| 2352 | xs_udp_connect_worker6); | 2248 | xs_udp_setup_socket); |
| 2353 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); | 2249 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); |
| 2354 | break; | 2250 | break; |
| 2355 | default: | 2251 | default: |
| @@ -2371,8 +2267,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
| 2371 | return xprt; | 2267 | return xprt; |
| 2372 | ret = ERR_PTR(-EINVAL); | 2268 | ret = ERR_PTR(-EINVAL); |
| 2373 | out_err: | 2269 | out_err: |
| 2374 | kfree(xprt->slot); | 2270 | xprt_free(xprt); |
| 2375 | kfree(xprt); | ||
| 2376 | return ret; | 2271 | return ret; |
| 2377 | } | 2272 | } |
| 2378 | 2273 | ||
| @@ -2416,7 +2311,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
| 2416 | xprt_set_bound(xprt); | 2311 | xprt_set_bound(xprt); |
| 2417 | 2312 | ||
| 2418 | INIT_DELAYED_WORK(&transport->connect_worker, | 2313 | INIT_DELAYED_WORK(&transport->connect_worker, |
| 2419 | xs_tcp_connect_worker4); | 2314 | xs_tcp_setup_socket); |
| 2420 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); | 2315 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); |
| 2421 | break; | 2316 | break; |
| 2422 | case AF_INET6: | 2317 | case AF_INET6: |
| @@ -2424,7 +2319,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
| 2424 | xprt_set_bound(xprt); | 2319 | xprt_set_bound(xprt); |
| 2425 | 2320 | ||
| 2426 | INIT_DELAYED_WORK(&transport->connect_worker, | 2321 | INIT_DELAYED_WORK(&transport->connect_worker, |
| 2427 | xs_tcp_connect_worker6); | 2322 | xs_tcp_setup_socket); |
| 2428 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); | 2323 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); |
| 2429 | break; | 2324 | break; |
| 2430 | default: | 2325 | default: |
| @@ -2447,8 +2342,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
| 2447 | return xprt; | 2342 | return xprt; |
| 2448 | ret = ERR_PTR(-EINVAL); | 2343 | ret = ERR_PTR(-EINVAL); |
| 2449 | out_err: | 2344 | out_err: |
| 2450 | kfree(xprt->slot); | 2345 | xprt_free(xprt); |
| 2451 | kfree(xprt); | ||
| 2452 | return ret; | 2346 | return ret; |
| 2453 | } | 2347 | } |
| 2454 | 2348 | ||
| @@ -2507,15 +2401,10 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2507 | goto out_err; | 2401 | goto out_err; |
| 2508 | } | 2402 | } |
| 2509 | 2403 | ||
| 2510 | if (xprt_bound(xprt)) | 2404 | dprintk("RPC: set up xprt to %s (port %s) via %s\n", |
| 2511 | dprintk("RPC: set up xprt to %s (port %s) via %s\n", | 2405 | xprt->address_strings[RPC_DISPLAY_ADDR], |
| 2512 | xprt->address_strings[RPC_DISPLAY_ADDR], | 2406 | xprt->address_strings[RPC_DISPLAY_PORT], |
| 2513 | xprt->address_strings[RPC_DISPLAY_PORT], | 2407 | xprt->address_strings[RPC_DISPLAY_PROTO]); |
| 2514 | xprt->address_strings[RPC_DISPLAY_PROTO]); | ||
| 2515 | else | ||
| 2516 | dprintk("RPC: set up xprt to %s (autobind) via %s\n", | ||
| 2517 | xprt->address_strings[RPC_DISPLAY_ADDR], | ||
| 2518 | xprt->address_strings[RPC_DISPLAY_PROTO]); | ||
| 2519 | 2408 | ||
| 2520 | /* | 2409 | /* |
| 2521 | * Since we don't want connections for the backchannel, we set | 2410 | * Since we don't want connections for the backchannel, we set |
| @@ -2528,8 +2417,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2528 | return xprt; | 2417 | return xprt; |
| 2529 | ret = ERR_PTR(-EINVAL); | 2418 | ret = ERR_PTR(-EINVAL); |
| 2530 | out_err: | 2419 | out_err: |
| 2531 | kfree(xprt->slot); | 2420 | xprt_free(xprt); |
| 2532 | kfree(xprt); | ||
| 2533 | return ret; | 2421 | return ret; |
| 2534 | } | 2422 | } |
| 2535 | 2423 | ||
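The xs_bind() rewrite above folds the IPv4 and IPv6 bind helpers into one routine by keeping the source address in a sockaddr_storage and stamping the candidate port with rpc_set_port(). A hedged sketch of a single family-agnostic bind attempt; example_try_bind() and its arguments are illustrative, and the sunrpc header providing rpc_set_port() is assumed:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sunrpc/clnt.h>

static int example_try_bind(struct socket *sock, struct sockaddr_storage *ss,
			    int addrlen, unsigned short port)
{
	/* works for AF_INET and AF_INET6 alike */
	rpc_set_port((struct sockaddr *)ss, port);
	return kernel_bind(sock, (struct sockaddr *)ss, addrlen);
}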
