Diffstat (limited to 'net/sunrpc')
 -rw-r--r--  net/sunrpc/auth.c                     |  12
 -rw-r--r--  net/sunrpc/auth_gss/auth_gss.c        |  52
 -rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c |  95
 -rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c   |  29
 -rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c   |   4
 -rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c |   4
 -rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c   |   8
 -rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c |   9
 -rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c  |  34
 -rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_token.c |   3
 -rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c     | 337
 -rw-r--r--  net/sunrpc/auth_null.c                |  10
 -rw-r--r--  net/sunrpc/auth_unix.c                |  11
 -rw-r--r--  net/sunrpc/cache.c                    |   6
 -rw-r--r--  net/sunrpc/clnt.c                     | 300
 -rw-r--r--  net/sunrpc/pmap_clnt.c                | 267
 -rw-r--r--  net/sunrpc/rpc_pipe.c                 | 122
 -rw-r--r--  net/sunrpc/sched.c                    | 107
 -rw-r--r--  net/sunrpc/socklib.c                  |   2
 -rw-r--r--  net/sunrpc/stats.c                    |   7
 -rw-r--r--  net/sunrpc/sunrpc_syms.c              |   4
 -rw-r--r--  net/sunrpc/svc.c                      |  61
 -rw-r--r--  net/sunrpc/svcauth.c                  |   4
 -rw-r--r--  net/sunrpc/svcauth_unix.c             |  38
 -rw-r--r--  net/sunrpc/svcsock.c                  |  55
 -rw-r--r--  net/sunrpc/sysctl.c                   |   1
 -rw-r--r--  net/sunrpc/timer.c                    |   2
 -rw-r--r--  net/sunrpc/xdr.c                      |  81
 -rw-r--r--  net/sunrpc/xprt.c                     | 116
 -rw-r--r--  net/sunrpc/xprtsock.c                 | 166
 30 files changed, 1120 insertions(+), 827 deletions(-)
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 55163af3dcaf..993ff1a5d945 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -331,8 +331,8 @@ rpcauth_unbindcred(struct rpc_task *task)
 	task->tk_msg.rpc_cred = NULL;
 }
 
-u32 *
-rpcauth_marshcred(struct rpc_task *task, u32 *p)
+__be32 *
+rpcauth_marshcred(struct rpc_task *task, __be32 *p)
 {
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 
@@ -342,8 +342,8 @@ rpcauth_marshcred(struct rpc_task *task, u32 *p)
 	return cred->cr_ops->crmarshal(task, p);
 }
 
-u32 *
-rpcauth_checkverf(struct rpc_task *task, u32 *p)
+__be32 *
+rpcauth_checkverf(struct rpc_task *task, __be32 *p)
 {
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 
@@ -355,7 +355,7 @@ rpcauth_checkverf(struct rpc_task *task, u32 *p)
 
 int
 rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
-		u32 *data, void *obj)
+		__be32 *data, void *obj)
 {
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 
@@ -369,7 +369,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
 
 int
 rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
-		u32 *data, void *obj)
+		__be32 *data, void *obj)
 {
 	struct rpc_cred	*cred = task->tk_msg.rpc_cred;
 
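Background sketch (illustration only, not part of this patch): the u32 -> __be32 conversions above annotate values that are always big-endian on the wire. __be32 is a sparse-checked bitwise type from <linux/types.h>, so mixing it with a host-order u32 without an explicit htonl()/ntohl() conversion draws a sparse warning. The helper names below are made up for illustration and are not part of the SUNRPC API:

static __be32 *demo_encode_word(__be32 *p, u32 val)
{
	*p++ = htonl(val);	/* host order -> big-endian wire order */
	return p;
}

static u32 demo_decode_word(const __be32 *p)
{
	return ntohl(*p);	/* big-endian wire order -> host order */
}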
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 519ebc17c028..a6ed2d22a6e6 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -88,7 +88,6 @@ struct gss_auth {
 	struct list_head upcalls;
 	struct rpc_clnt *client;
 	struct dentry *dentry;
-	char path[48];
 	spinlock_t lock;
 };
 
@@ -225,9 +224,8 @@ gss_alloc_context(void)
 {
 	struct gss_cl_ctx *ctx;
 
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx != NULL) {
-		memset(ctx, 0, sizeof(*ctx));
 		ctx->gc_proc = RPC_GSS_PROC_DATA;
 		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
 		spin_lock_init(&ctx->gc_seq_lock);
@@ -391,9 +389,8 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
 {
 	struct gss_upcall_msg *gss_msg;
 
-	gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL);
+	gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
 	if (gss_msg != NULL) {
-		memset(gss_msg, 0, sizeof(*gss_msg));
 		INIT_LIST_HEAD(&gss_msg->list);
 		rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
 		init_waitqueue_head(&gss_msg->waitqueue);
@@ -692,10 +689,8 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
 	if (err)
 		goto err_put_mech;
 
-	snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s",
-			clnt->cl_pathname,
-			gss_auth->mech->gm_name);
-	gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
+	gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
+			clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
 	if (IS_ERR(gss_auth->dentry)) {
 		err = PTR_ERR(gss_auth->dentry);
 		goto err_put_mech;
@@ -720,8 +715,7 @@ gss_destroy(struct rpc_auth *auth)
 		auth, auth->au_flavor);
 
 	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
-	rpc_unlink(gss_auth->path);
-	dput(gss_auth->dentry);
+	rpc_unlink(gss_auth->dentry);
 	gss_auth->dentry = NULL;
 	gss_mech_put(gss_auth->mech);
 
@@ -776,10 +770,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
 	dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
 		acred->uid, auth->au_flavor);
 
-	if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL)))
+	if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
 		goto out_err;
 
-	memset(cred, 0, sizeof(*cred));
 	atomic_set(&cred->gc_count, 1);
 	cred->gc_uid = acred->uid;
 	/*
@@ -833,14 +826,14 @@ out:
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
-static u32 *
-gss_marshal(struct rpc_task *task, u32 *p)
+static __be32 *
+gss_marshal(struct rpc_task *task, __be32 *p)
 {
 	struct rpc_cred *cred = task->tk_msg.rpc_cred;
 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
 						 gc_base);
 	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
-	u32		*cred_len;
+	__be32		*cred_len;
 	struct rpc_rqst *req = task->tk_rqstp;
 	u32             maj_stat = 0;
 	struct xdr_netobj mic;
@@ -901,12 +894,12 @@ gss_refresh(struct rpc_task *task)
 	return 0;
 }
 
-static u32 *
-gss_validate(struct rpc_task *task, u32 *p)
+static __be32 *
+gss_validate(struct rpc_task *task, __be32 *p)
 {
 	struct rpc_cred *cred = task->tk_msg.rpc_cred;
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
-	u32		seq;
+	__be32		seq;
 	struct kvec	iov;
 	struct xdr_buf	verf_buf;
 	struct xdr_netobj mic;
@@ -947,13 +940,14 @@ out_bad:
 
 static inline int
 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj)
+		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	struct xdr_buf	integ_buf;
-	u32             *integ_len = NULL;
+	__be32          *integ_len = NULL;
 	struct xdr_netobj mic;
-	u32		offset, *q;
+	u32		offset;
+	__be32		*q;
 	struct kvec	*iov;
 	u32             maj_stat = 0;
 	int		status = -EIO;
@@ -1039,13 +1033,13 @@ out:
 
 static inline int
 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj)
+		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	u32		offset;
 	u32             maj_stat;
 	int		status;
-	u32             *opaque_len;
+	__be32          *opaque_len;
 	struct page	**inpages;
 	int		first;
 	int		pad;
@@ -1102,7 +1096,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 
 static int
 gss_wrap_req(struct rpc_task *task,
-		kxdrproc_t encode, void *rqstp, u32 *p, void *obj)
+		kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_msg.rpc_cred;
 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1139,7 +1133,7 @@ out:
 
 static inline int
 gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		struct rpc_rqst *rqstp, u32 **p)
+		struct rpc_rqst *rqstp, __be32 **p)
 {
 	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
 	struct xdr_buf integ_buf;
@@ -1176,7 +1170,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 
 static inline int
 gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		struct rpc_rqst *rqstp, u32 **p)
+		struct rpc_rqst *rqstp, __be32 **p)
 {
 	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
 	u32 offset;
@@ -1205,13 +1199,13 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 
 static int
 gss_unwrap_resp(struct rpc_task *task,
-		kxdrproc_t decode, void *rqstp, u32 *p, void *obj)
+		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_msg.rpc_cred;
 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
 			gc_base);
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
-	u32		*savedp = p;
+	__be32		*savedp = p;
 	struct kvec	*head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
 	int		savedlen = head->iov_len;
 	int		status = -EIO;
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 76b969e6904f..e11a40b25cce 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -34,6 +34,7 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <linux/err.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
@@ -49,7 +50,7 @@
 
 u32
 krb5_encrypt(
-	struct crypto_tfm *tfm,
+	struct crypto_blkcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -58,26 +59,27 @@ krb5_encrypt(
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[16] = {0};
+	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
 	dprintk("RPC: krb5_encrypt: input data:\n");
 	print_hexl((u32 *)in, length, 0);
 
-	if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;
 
-	if (crypto_tfm_alg_ivsize(tfm) > 16) {
+	if (crypto_blkcipher_ivsize(tfm) > 16) {
 		dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
-			crypto_tfm_alg_ivsize(tfm));
+			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
 
 	if (iv)
-		memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
 	sg_set_buf(sg, out, length);
 
-	ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv);
+	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
 
 	dprintk("RPC: krb5_encrypt: output data:\n");
 	print_hexl((u32 *)out, length, 0);
@@ -90,7 +92,7 @@ EXPORT_SYMBOL(krb5_encrypt);
 
 u32
 krb5_decrypt(
-	struct crypto_tfm *tfm,
+	struct crypto_blkcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -99,25 +101,26 @@ krb5_decrypt(
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[16] = {0};
+	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
 	dprintk("RPC: krb5_decrypt: input data:\n");
 	print_hexl((u32 *)in, length, 0);
 
-	if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;
 
-	if (crypto_tfm_alg_ivsize(tfm) > 16) {
+	if (crypto_blkcipher_ivsize(tfm) > 16) {
 		dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
-			crypto_tfm_alg_ivsize(tfm));
+			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
 	if (iv)
-		memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm));
+		memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
 	sg_set_buf(sg, out, length);
 
-	ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv);
+	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
 
 	dprintk("RPC: krb5_decrypt: output_data:\n");
 	print_hexl((u32 *)out, length, 0);
@@ -197,11 +200,9 @@ out:
 static int
 checksummer(struct scatterlist *sg, void *data)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *)data;
+	struct hash_desc *desc = data;
 
-	crypto_digest_update(tfm, sg, 1);
-
-	return 0;
+	return crypto_hash_update(desc, sg, sg->length);
 }
 
 /* checksum the plaintext data and hdrlen bytes of the token header */
@@ -210,8 +211,9 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
 		   int body_offset, struct xdr_netobj *cksum)
 {
 	char				*cksumname;
-	struct crypto_tfm		*tfm = NULL; /* XXX add to ctx? */
+	struct hash_desc		desc; /* XXX add to ctx? */
 	struct scatterlist		sg[1];
+	int err;
 
 	switch (cksumtype) {
 		case CKSUMTYPE_RSA_MD5:
@@ -222,25 +224,35 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
 				" unsupported checksum %d", cksumtype);
 			return GSS_S_FAILURE;
 	}
-	if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
+	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(desc.tfm))
 		return GSS_S_FAILURE;
-	cksum->len = crypto_tfm_alg_digestsize(tfm);
+	cksum->len = crypto_hash_digestsize(desc.tfm);
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	crypto_digest_init(tfm);
+	err = crypto_hash_init(&desc);
+	if (err)
+		goto out;
 	sg_set_buf(sg, header, hdrlen);
-	crypto_digest_update(tfm, sg, 1);
-	process_xdr_buf(body, body_offset, body->len - body_offset,
-			checksummer, tfm);
-	crypto_digest_final(tfm, cksum->data);
-	crypto_free_tfm(tfm);
-	return 0;
+	err = crypto_hash_update(&desc, sg, hdrlen);
+	if (err)
+		goto out;
+	err = process_xdr_buf(body, body_offset, body->len - body_offset,
+			      checksummer, &desc);
+	if (err)
+		goto out;
+	err = crypto_hash_final(&desc, cksum->data);
+
+out:
+	crypto_free_hash(desc.tfm);
+	return err ? GSS_S_FAILURE : 0;
 }
 
 EXPORT_SYMBOL(make_checksum);
 
 struct encryptor_desc {
 	u8 iv[8]; /* XXX hard-coded blocksize */
-	struct crypto_tfm *tfm;
+	struct blkcipher_desc desc;
 	int pos;
 	struct xdr_buf *outbuf;
 	struct page **pages;
@@ -285,8 +297,8 @@ encryptor(struct scatterlist *sg, void *data)
 	if (thislen == 0)
 		return 0;
 
-	ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
-					thislen, desc->iv);
+	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
+					  desc->infrags, thislen);
 	if (ret)
 		return ret;
 	if (fraglen) {
@@ -305,16 +317,18 @@ encryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
-		struct page **pages)
+gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+		    int offset, struct page **pages)
 {
 	int ret;
 	struct encryptor_desc desc;
 
-	BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
 
 	memset(desc.iv, 0, sizeof(desc.iv));
-	desc.tfm = tfm;
+	desc.desc.tfm = tfm;
+	desc.desc.info = desc.iv;
+	desc.desc.flags = 0;
 	desc.pos = offset;
 	desc.outbuf = buf;
 	desc.pages = pages;
@@ -329,7 +343,7 @@ EXPORT_SYMBOL(gss_encrypt_xdr_buf);
 
 struct decryptor_desc {
 	u8 iv[8]; /* XXX hard-coded blocksize */
-	struct crypto_tfm *tfm;
+	struct blkcipher_desc desc;
 	struct scatterlist frags[4];
 	int fragno;
 	int fraglen;
@@ -355,8 +369,8 @@ decryptor(struct scatterlist *sg, void *data)
 	if (thislen == 0)
 		return 0;
 
-	ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
-					thislen, desc->iv);
+	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
+					  desc->frags, thislen);
 	if (ret)
 		return ret;
 	if (fraglen) {
@@ -373,15 +387,18 @@ decryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
+gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+		    int offset)
 {
 	struct decryptor_desc desc;
 
 	/* XXXJBF: */
-	BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
 
 	memset(desc.iv, 0, sizeof(desc.iv));
-	desc.tfm = tfm;
+	desc.desc.tfm = tfm;
+	desc.desc.info = desc.iv;
+	desc.desc.flags = 0;
 	desc.fragno = 0;
 	desc.fraglen = 0;
 	return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
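Background sketch (illustration only, not part of this patch): the conversions above move from the old crypto_tfm cipher and digest calls to the blkcipher/hash interfaces, where the IV and flags travel inside a struct blkcipher_desc rather than as call arguments. A minimal usage sketch using only the calls visible in the hunks above; the function name and the key/data parameters are hypothetical:

/* Encrypt len bytes of data in place with CBC-DES; len must be a
 * multiple of the 8-byte DES block size.  Sketch only. */
static int demo_cbc_des_encrypt(void *data, unsigned int len,
				const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg[1];
	u8 iv[8] = { 0 };
	int err;

	tfm = crypto_alloc_blkcipher("cbc(des)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (!err) {
		desc.tfm = tfm;
		desc.info = iv;		/* IV now lives in the desc */
		desc.flags = 0;
		sg_set_buf(sg, data, len);
		err = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
	}
	crypto_free_blkcipher(tfm);
	return err;
}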
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 129e2bd36aff..325e72e4fd31 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -34,6 +34,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -78,10 +79,10 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 }
 
 static inline const void *
-get_key(const void *p, const void *end, struct crypto_tfm **res)
+get_key(const void *p, const void *end, struct crypto_blkcipher **res)
 {
 	struct xdr_netobj	key;
-	int			alg, alg_mode;
+	int			alg;
 	char			*alg_name;
 
 	p = simple_get_bytes(p, end, &alg, sizeof(alg));
@@ -93,18 +94,19 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
 
 	switch (alg) {
 		case ENCTYPE_DES_CBC_RAW:
-			alg_name = "des";
-			alg_mode = CRYPTO_TFM_MODE_CBC;
+			alg_name = "cbc(des)";
 			break;
 		default:
 			printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
 			goto out_err_free_key;
 	}
-	if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
+	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(*res)) {
 		printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
+		*res = NULL;
 		goto out_err_free_key;
 	}
-	if (crypto_cipher_setkey(*res, key.data, key.len)) {
+	if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
 		printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
 		goto out_err_free_tfm;
 	}
@@ -113,7 +115,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
 	return p;
 
 out_err_free_tfm:
-	crypto_free_tfm(*res);
+	crypto_free_blkcipher(*res);
 out_err_free_key:
 	kfree(key.data);
 	p = ERR_PTR(-EINVAL);
@@ -129,9 +131,8 @@ gss_import_sec_context_kerberos(const void *p,
 	const void *end = (const void *)((const char *)p + len);
 	struct	krb5_ctx *ctx;
 
-	if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL)))
+	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
-	memset(ctx, 0, sizeof(*ctx));
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
@@ -169,13 +170,13 @@ gss_import_sec_context_kerberos(const void *p,
 	}
 
 	ctx_id->internal_ctx_id = ctx;
-	dprintk("RPC: Succesfully imported new context.\n");
+	dprintk("RPC: Successfully imported new context.\n");
 	return 0;
 
 out_err_free_key2:
-	crypto_free_tfm(ctx->seq);
+	crypto_free_blkcipher(ctx->seq);
 out_err_free_key1:
-	crypto_free_tfm(ctx->enc);
+	crypto_free_blkcipher(ctx->enc);
 out_err_free_mech:
 	kfree(ctx->mech_used.data);
 out_err_free_ctx:
@@ -188,8 +189,8 @@ static void
 gss_delete_sec_context_kerberos(void *internal_ctx) {
 	struct krb5_ctx *kctx = internal_ctx;
 
-	crypto_free_tfm(kctx->seq);
-	crypto_free_tfm(kctx->enc);
+	crypto_free_blkcipher(kctx->seq);
+	crypto_free_blkcipher(kctx->enc);
 	kfree(kctx->mech_used.data);
 	kfree(kctx);
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index f43311221a72..08601ee4cd73 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,7 +70,7 @@
 # define RPCDBG_FACILITY	RPCDBG_AUTH
 #endif
 
-spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(krb5_seq_lock);
 
 u32
 gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
@@ -115,7 +115,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 	krb5_hdr = ptr - 2;
 	msg_start = krb5_hdr + 24;
 
-	*(u16 *)(krb5_hdr + 2) = htons(ctx->signalg);
+	*(__be16 *)(krb5_hdr + 2) = htons(ctx->signalg);
 	memset(krb5_hdr + 4, 0xff, 4);
 
 	if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c53ead39118d..c604baf3a5f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -41,7 +41,7 @@
 #endif
 
 s32
-krb5_make_seq_num(struct crypto_tfm *key,
+krb5_make_seq_num(struct crypto_blkcipher *key,
 		int direction,
 		s32 seqnum,
 		unsigned char *cksum, unsigned char *buf)
@@ -62,7 +62,7 @@ krb5_make_seq_num(struct crypto_tfm *key,
 }
 
 s32
-krb5_get_seq_num(struct crypto_tfm *key,
+krb5_get_seq_num(struct crypto_blkcipher *key,
 	       unsigned char *cksum,
 	       unsigned char *buf,
 	       int *direction, s32 * seqnum)
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 89d1f3e14128..cc45c1605f80 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 			goto out_err;
 	}
 
-	blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+	blocksize = crypto_blkcipher_blocksize(kctx->enc);
 	gss_krb5_add_padding(buf, offset, blocksize);
 	BUG_ON((buf->len - offset) % blocksize);
 	plainlen = blocksize + buf->len - offset;
@@ -177,9 +177,9 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	msg_start = krb5_hdr + 24;
 	/* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
 
-	*(u16 *)(krb5_hdr + 2) = htons(kctx->signalg);
+	*(__be16 *)(krb5_hdr + 2) = htons(kctx->signalg);
 	memset(krb5_hdr + 4, 0xff, 4);
-	*(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
+	*(__be16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
 
 	make_confounder(msg_start, blocksize);
 
@@ -346,7 +346,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 	/* Copy the data back to the right position.  XXX: Would probably be
 	 * better to copy and encrypt at the same time. */
 
-	blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+	blocksize = crypto_blkcipher_blocksize(kctx->enc);
 	data_start = ptr + 22 + blocksize;
 	orig_start = buf->head[0].iov_base + offset;
 	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index f8bac6ccd524..3db745379d06 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -224,7 +224,8 @@ EXPORT_SYMBOL(gss_service_to_auth_domain_name);
 void
 gss_mech_put(struct gss_api_mech * gm)
 {
-	module_put(gm->gm_owner);
+	if (gm)
+		module_put(gm->gm_owner);
 }
 
 EXPORT_SYMBOL(gss_mech_put);
@@ -236,9 +237,8 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
 		       struct gss_api_mech *mech,
 		       struct gss_ctx **ctx_id)
 {
-	if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL)))
+	if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
 		return GSS_S_FAILURE;
-	memset(*ctx_id, 0, sizeof(**ctx_id));
 	(*ctx_id)->mech_type = gss_mech_get(mech);
 
 	return mech->gm_ops
@@ -307,8 +307,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
 		(*context_handle)->mech_type->gm_ops
 			->gss_delete_sec_context((*context_handle)
 						->internal_ctx_id);
-	if ((*context_handle)->mech_type)
-		gss_mech_put((*context_handle)->mech_type);
+	gss_mech_put((*context_handle)->mech_type);
 	kfree(*context_handle);
 	*context_handle=NULL;
 	return GSS_S_COMPLETE;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 5bf11ccba7cd..bdedf456bc17 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -34,6 +34,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -83,10 +84,11 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 }
 
 static inline const void *
-get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
+get_key(const void *p, const void *end, struct crypto_blkcipher **res,
+	int *resalg)
 {
 	struct xdr_netobj	key = { 0 };
-	int			alg_mode,setkey = 0;
+	int			setkey = 0;
 	char			*alg_name;
 
 	p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
@@ -98,14 +100,12 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
 
 	switch (*resalg) {
 		case NID_des_cbc:
-			alg_name = "des";
-			alg_mode = CRYPTO_TFM_MODE_CBC;
+			alg_name = "cbc(des)";
 			setkey = 1;
 			break;
 		case NID_cast5_cbc:
 			/* XXXX here in name only, not used */
-			alg_name = "cast5";
-			alg_mode = CRYPTO_TFM_MODE_CBC;
+			alg_name = "cbc(cast5)";
 			setkey = 0; /* XXX will need to set to 1 */
 			break;
 		case NID_md5:
@@ -113,19 +113,20 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
 				dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
 			}
 			alg_name = "md5";
-			alg_mode = 0;
 			setkey = 0;
 			break;
 		default:
 			dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
 			goto out_err_free_key;
 	}
-	if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
+	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(*res)) {
 		printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
+		*res = NULL;
 		goto out_err_free_key;
 	}
 	if (setkey) {
-		if (crypto_cipher_setkey(*res, key.data, key.len)) {
+		if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
 			printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
 			goto out_err_free_tfm;
 		}
@@ -136,7 +137,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
 	return p;
 
 out_err_free_tfm:
-	crypto_free_tfm(*res);
+	crypto_free_blkcipher(*res);
 out_err_free_key:
 	if(key.len > 0)
 		kfree(key.data);
@@ -152,9 +153,8 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
 	const void *end = (const void *)((const char *)p + len);
 	struct	spkm3_ctx *ctx;
 
-	if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL)))
+	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
-	memset(ctx, 0, sizeof(*ctx));
 
 	p = simple_get_netobj(p, end, &ctx->ctx_id);
 	if (IS_ERR(p))
@@ -201,13 +201,13 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
 
 	ctx_id->internal_ctx_id = ctx;
 
-	dprintk("Succesfully imported new spkm context.\n");
+	dprintk("Successfully imported new spkm context.\n");
 	return 0;
 
 out_err_free_key2:
-	crypto_free_tfm(ctx->derived_integ_key);
+	crypto_free_blkcipher(ctx->derived_integ_key);
 out_err_free_key1:
-	crypto_free_tfm(ctx->derived_conf_key);
+	crypto_free_blkcipher(ctx->derived_conf_key);
 out_err_free_s_key:
 	kfree(ctx->share_key.data);
 out_err_free_mech:
@@ -224,8 +224,8 @@ static void
 gss_delete_sec_context_spkm3(void *internal_ctx) {
 	struct spkm3_ctx *sctx = internal_ctx;
 
-	crypto_free_tfm(sctx->derived_integ_key);
-	crypto_free_tfm(sctx->derived_conf_key);
+	crypto_free_blkcipher(sctx->derived_integ_key);
+	crypto_free_blkcipher(sctx->derived_conf_key);
 	kfree(sctx->share_key.data);
 	kfree(sctx->mech_used.data);
 	kfree(sctx);
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index af0d7ce74686..854a983ccf26 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -90,10 +90,9 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
 int
 decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
 {
-	if (!(out->data = kmalloc(explen,GFP_KERNEL)))
+	if (!(out->data = kzalloc(explen,GFP_KERNEL)))
 		return 0;
 	out->len = explen;
-	memset(out->data, 0, explen);
 	memcpy(out->data, in, enclen);
 	return 1;
 }
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d51e316c5821..638c0b576203 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -425,6 +425,7 @@ static int rsc_parse(struct cache_detail *cd,
 	struct rsc rsci, *rscp = NULL;
 	time_t expiry;
 	int status = -EINVAL;
+	struct gss_api_mech *gm = NULL;
 
 	memset(&rsci, 0, sizeof(rsci));
 	/* context handle */
@@ -453,7 +454,6 @@ static int rsc_parse(struct cache_detail *cd,
 		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
 	else {
 		int N, i;
-		struct gss_api_mech *gm;
 
 		/* gid */
 		if (get_int(&mesg, &rsci.cred.cr_gid))
@@ -488,21 +488,17 @@ static int rsc_parse(struct cache_detail *cd,
 		status = -EINVAL;
 		/* mech-specific data: */
 		len = qword_get(&mesg, buf, mlen);
-		if (len < 0) {
-			gss_mech_put(gm);
+		if (len < 0)
 			goto out;
-		}
 		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
-		if (status) {
-			gss_mech_put(gm);
+		if (status)
 			goto out;
-		}
-		gss_mech_put(gm);
 	}
 	rsci.h.expiry_time = expiry;
 	rscp = rsc_update(&rsci, rscp);
 	status = 0;
 out:
+	gss_mech_put(gm);
 	rsc_free(&rsci);
 	if (rscp)
 		cache_put(&rscp->h, &rsc_cache);
@@ -611,7 +607,7 @@ svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
 
 	if (argv->iov_len < 4)
 		return -1;
-	o->len = ntohl(svc_getu32(argv));
+	o->len = svc_getnl(argv);
 	l = round_up_to_quad(o->len);
 	if (argv->iov_len < l)
 		return -1;
@@ -624,17 +620,17 @@ svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
 static inline int
 svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
 {
-	u32 *p;
+	u8 *p;
 
 	if (resv->iov_len + 4 > PAGE_SIZE)
 		return -1;
-	svc_putu32(resv, htonl(o->len));
+	svc_putnl(resv, o->len);
 	p = resv->iov_base + resv->iov_len;
 	resv->iov_len += round_up_to_quad(o->len);
 	if (resv->iov_len > PAGE_SIZE)
 		return -1;
 	memcpy(p, o->data, o->len);
-	memset((u8 *)p + o->len, 0, round_up_to_quad(o->len) - o->len);
+	memset(p + o->len, 0, round_up_to_quad(o->len) - o->len);
 	return 0;
 }
 
@@ -644,7 +640,7 @@ svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
 */
 static int
 gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
-		  u32 *rpcstart, struct rpc_gss_wire_cred *gc, u32 *authp)
+		  __be32 *rpcstart, struct rpc_gss_wire_cred *gc, __be32 *authp)
 {
 	struct gss_ctx		*ctx_id = rsci->mechctx;
 	struct xdr_buf		rpchdr;
@@ -661,7 +657,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
 	*authp = rpc_autherr_badverf;
 	if (argv->iov_len < 4)
 		return SVC_DENIED;
-	flavor = ntohl(svc_getu32(argv));
+	flavor = svc_getnl(argv);
 	if (flavor != RPC_AUTH_GSS)
 		return SVC_DENIED;
 	if (svc_safe_getnetobj(argv, &checksum))
@@ -691,9 +687,9 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
 static int
 gss_write_null_verf(struct svc_rqst *rqstp)
 {
-	u32     *p;
+	__be32     *p;
 
-	svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_NULL));
+	svc_putnl(rqstp->rq_res.head, RPC_AUTH_NULL);
 	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
 	/* don't really need to check if head->iov_len > PAGE_SIZE ... */
 	*p++ = 0;
@@ -705,14 +701,14 @@ gss_write_null_verf(struct svc_rqst *rqstp)
 static int
 gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
 {
-	u32			xdr_seq;
+	__be32			xdr_seq;
 	u32			maj_stat;
 	struct xdr_buf		verf_data;
 	struct xdr_netobj	mic;
-	u32			*p;
+	__be32			*p;
 	struct kvec		iov;
 
-	svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_GSS));
+	svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
 	xdr_seq = htonl(seq);
 
 	iov.iov_base = &xdr_seq;
@@ -786,7 +782,7 @@ EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);
 static inline int
 read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
 {
-	u32     raw;
+	__be32  raw;
 	int     status;
 
 	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
@@ -809,7 +805,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 	struct xdr_netobj mic;
 	struct xdr_buf integ_buf;
 
-	integ_len = ntohl(svc_getu32(&buf->head[0]));
+	integ_len = svc_getnl(&buf->head[0]);
 	if (integ_len & 3)
 		goto out;
 	if (integ_len > buf->len)
@@ -829,19 +825,87 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 	maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
 	if (maj_stat != GSS_S_COMPLETE)
 		goto out;
-	if (ntohl(svc_getu32(&buf->head[0])) != seq)
+	if (svc_getnl(&buf->head[0]) != seq)
 		goto out;
 	stat = 0;
 out:
 	return stat;
 }
 
+static inline int
+total_buf_len(struct xdr_buf *buf)
+{
+	return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
+}
+
+static void
+fix_priv_head(struct xdr_buf *buf, int pad)
+{
+	if (buf->page_len == 0) {
+		/* We need to adjust head and buf->len in tandem in this
+		 * case to make svc_defer() work--it finds the original
+		 * buffer start using buf->len - buf->head[0].iov_len. */
+		buf->head[0].iov_len -= pad;
+	}
+}
+
+static int
+unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+{
+	u32 priv_len, maj_stat;
+	int pad, saved_len, remaining_len, offset;
+
+	rqstp->rq_sendfile_ok = 0;
+
+	priv_len = svc_getnl(&buf->head[0]);
+	if (rqstp->rq_deferred) {
+		/* Already decrypted last time through! The sequence number
+		 * check at out_seq is unnecessary but harmless: */
+		goto out_seq;
+	}
+	/* buf->len is the number of bytes from the original start of the
+	 * request to the end, where head[0].iov_len is just the bytes
+	 * not yet read from the head, so these two values are different: */
+	remaining_len = total_buf_len(buf);
+	if (priv_len > remaining_len)
+		return -EINVAL;
+	pad = remaining_len - priv_len;
+	buf->len -= pad;
+	fix_priv_head(buf, pad);
+
+	/* Maybe it would be better to give gss_unwrap a length parameter: */
+	saved_len = buf->len;
+	buf->len = priv_len;
+	maj_stat = gss_unwrap(ctx, 0, buf);
+	pad = priv_len - buf->len;
+	buf->len = saved_len;
+	buf->len -= pad;
+	/* The upper layers assume the buffer is aligned on 4-byte boundaries.
+	 * In the krb5p case, at least, the data ends up offset, so we need to
+	 * move it around. */
+	/* XXX: This is very inefficient. It would be better to either do
+	 * this while we encrypt, or maybe in the receive code, if we can peak
+	 * ahead and work out the service and mechanism there. */
+	offset = buf->head[0].iov_len % 4;
+	if (offset) {
+		buf->buflen = RPCSVC_MAXPAYLOAD;
+		xdr_shift_buf(buf, offset);
+		fix_priv_head(buf, pad);
+	}
+	if (maj_stat != GSS_S_COMPLETE)
+		return -EINVAL;
+out_seq:
+	if (svc_getnl(&buf->head[0]) != seq)
+		return -EINVAL;
+	return 0;
+}
+
 struct gss_svc_data {
 	/* decoded gss client cred: */
 	struct rpc_gss_wire_cred	clcred;
 	/* pointer to the beginning of the procedure-specific results,
 	 * which may be encrypted/checksummed in svcauth_gss_release: */
-	u32				*body_start;
+	__be32				*body_start;
 	struct rsc			*rsci;
 };
 
@@ -882,7 +946,7 @@ gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
 * response here and return SVC_COMPLETE.
 */
static int
-svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
+svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 {
 	struct kvec	*argv = &rqstp->rq_arg.head[0];
 	struct kvec	*resv = &rqstp->rq_res.head[0];
@@ -892,8 +956,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 	struct rpc_gss_wire_cred *gc;
 	struct rsc	*rsci = NULL;
 	struct rsi	*rsip, rsikey;
-	u32		*rpcstart;
-	u32		*reject_stat = resv->iov_base + resv->iov_len;
+	__be32		*rpcstart;
+	__be32		*reject_stat = resv->iov_base + resv->iov_len;
 	int		ret;
 
 	dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",argv->iov_len);
@@ -921,12 +985,12 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 
 	if (argv->iov_len < 5 * 4)
 		goto auth_err;
-	crlen = ntohl(svc_getu32(argv));
-	if (ntohl(svc_getu32(argv)) != RPC_GSS_VERSION)
+	crlen = svc_getnl(argv);
+	if (svc_getnl(argv) != RPC_GSS_VERSION)
 		goto auth_err;
-	gc->gc_proc = ntohl(svc_getu32(argv));
-	gc->gc_seq = ntohl(svc_getu32(argv));
-	gc->gc_svc = ntohl(svc_getu32(argv));
+	gc->gc_proc = svc_getnl(argv);
+	gc->gc_seq = svc_getnl(argv);
+	gc->gc_svc = svc_getnl(argv);
 	if (svc_safe_getnetobj(argv, &gc->gc_ctx))
 		goto auth_err;
 	if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4)
@@ -952,9 +1016,9 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 	case RPC_GSS_PROC_CONTINUE_INIT:
 		if (argv->iov_len < 2 * 4)
 			goto auth_err;
-		if (ntohl(svc_getu32(argv)) != RPC_AUTH_NULL)
+		if (svc_getnl(argv) != RPC_AUTH_NULL)
 			goto auth_err;
-		if (ntohl(svc_getu32(argv)) != 0)
+		if (svc_getnl(argv) != 0)
 			goto auth_err;
 		break;
 	case RPC_GSS_PROC_DATA:
@@ -1012,14 +1076,14 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 			goto drop;
 		if (resv->iov_len + 4 > PAGE_SIZE)
 			goto drop;
-		svc_putu32(resv, rpc_success);
+		svc_putnl(resv, RPC_SUCCESS);
 		if (svc_safe_putnetobj(resv, &rsip->out_handle))
 			goto drop;
 		if (resv->iov_len + 3 * 4 > PAGE_SIZE)
 			goto drop;
-		svc_putu32(resv, htonl(rsip->major_status));
-		svc_putu32(resv, htonl(rsip->minor_status));
-		svc_putu32(resv, htonl(GSS_SEQ_WIN));
+		svc_putnl(resv, rsip->major_status);
+		svc_putnl(resv, rsip->minor_status);
+		svc_putnl(resv, GSS_SEQ_WIN);
		if (svc_safe_putnetobj(resv, &rsip->out_token))
 			goto drop;
 		rqstp->rq_client = NULL;
@@ -1029,7 +1093,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 		set_bit(CACHE_NEGATIVE, &rsci->h.flags);
 		if (resv->iov_len + 4 > PAGE_SIZE)
 			goto drop;
-		svc_putu32(resv, rpc_success);
+		svc_putnl(resv, RPC_SUCCESS);
 		goto complete;
 	case RPC_GSS_PROC_DATA:
 		*authp = rpcsec_gsserr_ctxproblem;
@@ -1047,11 +1111,18 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 			goto auth_err;
 		/* placeholders for length and seq. number: */
 		svcdata->body_start = resv->iov_base + resv->iov_len;
-		svc_putu32(resv, 0);
-		svc_putu32(resv, 0);
+		svc_putnl(resv, 0);
+		svc_putnl(resv, 0);
 		break;
 	case RPC_GSS_SVC_PRIVACY:
-		/* currently unsupported */
+		if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
+				gc->gc_seq, rsci->mechctx))
+			goto auth_err;
+		/* placeholders for length and seq. number: */
+		svcdata->body_start = resv->iov_base + resv->iov_len;
+		svc_putnl(resv, 0);
+		svc_putnl(resv, 0);
+		break;
 	default:
 		goto auth_err;
 	}
@@ -1076,8 +1147,8 @@ out:
 	return ret;
 }
 
-static int
-svcauth_gss_release(struct svc_rqst *rqstp)
+static inline int
+svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 {
 	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
@@ -1085,73 +1156,151 @@ svcauth_gss_release(struct svc_rqst *rqstp)
 	struct xdr_buf integ_buf;
 	struct xdr_netobj mic;
 	struct kvec *resv;
-	u32 *p;
+	__be32 *p;
 	int integ_offset, integ_len;
 	int stat = -EINVAL;
 
+	p = gsd->body_start;
+	gsd->body_start = NULL;
+	/* move accept_stat to right place: */
+	memcpy(p, p + 2, 4);
+	/* Don't wrap in failure case: */
+	/* Counting on not getting here if call was not even accepted! */
+	if (*p != rpc_success) {
+		resbuf->head[0].iov_len -= 2 * 4;
+		goto out;
+	}
+	p++;
+	integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
+	integ_len = resbuf->len - integ_offset;
+	BUG_ON(integ_len % 4);
+	*p++ = htonl(integ_len);
+	*p++ = htonl(gc->gc_seq);
+	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
+				integ_len))
+		BUG();
+	if (resbuf->page_len == 0
+	    && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
+	    < PAGE_SIZE) {
+		BUG_ON(resbuf->tail[0].iov_len);
+		/* Use head for everything */
+		resv = &resbuf->head[0];
+	} else if (resbuf->tail[0].iov_base == NULL) {
+		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+			goto out_err;
+		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+						+ resbuf->head[0].iov_len;
+		resbuf->tail[0].iov_len = 0;
+		rqstp->rq_restailpage = 0;
+		resv = &resbuf->tail[0];
+	} else {
+		resv = &resbuf->tail[0];
+	}
+	mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
+	if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
+		goto out_err;
+	svc_putnl(resv, mic.len);
+	memset(mic.data + mic.len, 0,
+			round_up_to_quad(mic.len) - mic.len);
+	resv->iov_len += XDR_QUADLEN(mic.len) << 2;
+	/* not strictly required: */
+	resbuf->len += XDR_QUADLEN(mic.len) << 2;
+	BUG_ON(resv->iov_len > PAGE_SIZE);
+out:
+	stat = 0;
+out_err:
+	return stat;
+}
+
+static inline int
+svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
+{
+	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+	struct rpc_gss_wire_cred *gc = &gsd->clcred;
+	struct xdr_buf *resbuf = &rqstp->rq_res;
+	struct page **inpages = NULL;
+	__be32 *p, *len;
+	int offset;
+	int pad;
+
+	p = gsd->body_start;
+	gsd->body_start = NULL;
+	/* move accept_stat to right place: */
+	memcpy(p, p + 2, 4);
+	/* Don't wrap in failure case: */
+	/* Counting on not getting here if call was not even accepted! */
+	if (*p != rpc_success) {
+		resbuf->head[0].iov_len -= 2 * 4;
+		return 0;
+	}
+	p++;
+	len = p++;
+	offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
+	*p++ = htonl(gc->gc_seq);
+	inpages = resbuf->pages;
+	/* XXX: Would be better to write some xdr helper functions for
+	 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
+	if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) {
+		BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
+							+ PAGE_SIZE);
+		BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
+		if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
+				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+			return -ENOMEM;
+		memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
+			resbuf->tail[0].iov_base,
+			resbuf->tail[0].iov_len);
+		resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
+	}
+	if (resbuf->tail[0].iov_base == NULL) {
+		if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+			return -ENOMEM;
+		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+			+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
+		resbuf->tail[0].iov_len = 0;
+		rqstp->rq_restailpage = 0;
+	}
+	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
+		return -ENOMEM;
+	*len = htonl(resbuf->len - offset);
+	pad = 3 - ((resbuf->len - offset - 1)&3);
+	p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
+	memset(p, 0, pad);
+	resbuf->tail[0].iov_len += pad;
+	resbuf->len += pad;
+	return 0;
+}
+
+static int
+svcauth_gss_release(struct svc_rqst *rqstp)
+{
+	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+	struct rpc_gss_wire_cred *gc = &gsd->clcred;
+	struct xdr_buf *resbuf = &rqstp->rq_res;
+	int stat = -EINVAL;
+
 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
 		goto out;
 	/* Release can be called twice, but we only wrap once. */
 	if (gsd->body_start == NULL)
 		goto out;
 	/* normally not set till svc_send, but we need it here: */
-	resbuf->len = resbuf->head[0].iov_len
-		+ resbuf->page_len + resbuf->tail[0].iov_len;
+	/* XXX: what for?  Do we mess it up the moment we call svc_putu32
+	 * or whatever? */
+	resbuf->len = total_buf_len(resbuf);
 	switch (gc->gc_svc) {
 	case RPC_GSS_SVC_NONE:
 		break;
 	case RPC_GSS_SVC_INTEGRITY:
-		p = gsd->body_start;
-		gsd->body_start = NULL;
+		stat = svcauth_gss_wrap_resp_integ(rqstp);
+		if (stat)
-		/* move accept_stat to right place: */
-		memcpy(p, p + 2, 4);
-		/* don't wrap in failure case: */
-		/* Note: counting on not getting here if call was not even
-		 * accepted! */
-		if (*p != rpc_success) {
-			resbuf->head[0].iov_len -= 2 * 4;
-			goto out;
-		}
-		p++;
-		integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
-		integ_len = resbuf->len - integ_offset;
-		BUG_ON(integ_len % 4);
-		*p++ = htonl(integ_len);
-		*p++ = htonl(gc->gc_seq);
-		if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
-					integ_len))
-			BUG();
-		if (resbuf->page_len == 0
1125 && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1126 < PAGE_SIZE) {
1127 BUG_ON(resbuf->tail[0].iov_len);
1128 /* Use head for everything */
1129 resv = &resbuf->head[0];
1130 } else if (resbuf->tail[0].iov_base == NULL) {
1131 if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1132 > PAGE_SIZE)
1133 goto out_err;
1134 resbuf->tail[0].iov_base =
1135 resbuf->head[0].iov_base
1136 + resbuf->head[0].iov_len;
1137 resbuf->tail[0].iov_len = 0;
1138 rqstp->rq_restailpage = 0;
1139 resv = &resbuf->tail[0];
1140 } else {
1141 resv = &resbuf->tail[0];
1142 }
1143 mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
1144 if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
1145 goto out_err; 1297 goto out_err;
1146 svc_putu32(resv, htonl(mic.len));
1147 memset(mic.data + mic.len, 0,
1148 round_up_to_quad(mic.len) - mic.len);
1149 resv->iov_len += XDR_QUADLEN(mic.len) << 2;
1150 /* not strictly required: */
1151 resbuf->len += XDR_QUADLEN(mic.len) << 2;
1152 BUG_ON(resv->iov_len > PAGE_SIZE);
1153 break; 1298 break;
1154 case RPC_GSS_SVC_PRIVACY: 1299 case RPC_GSS_SVC_PRIVACY:
1300 stat = svcauth_gss_wrap_resp_priv(rqstp);
1301 if (stat)
1302 goto out_err;
1303 break;
1155 default: 1304 default:
1156 goto out_err; 1305 goto out_err;
1157 } 1306 }
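
Both wrapping helpers above depend on XDR's four-byte alignment rule: the integrity path zero-pads the MIC out to round_up_to_quad(mic.len), and the privacy path computes the trailing pad as 3 - ((len - 1) & 3). A minimal, self-contained user-space sketch of that arithmetic (the macros are redefined locally to mirror the kernel ones, and the loop values are only illustrative):

/*
 * Demo of the XDR quad-alignment arithmetic used by the response-wrapping
 * helpers. Compiles and runs stand-alone.
 */
#include <stdio.h>

#define XDR_QUADLEN(l)        (((l) + 3) >> 2)          /* length in 4-byte words */
#define round_up_to_quad(l)   (XDR_QUADLEN(l) << 2)     /* length rounded up to 4 */

int main(void)
{
	unsigned int len;

	for (len = 0; len <= 8; len++) {
		/* pad expression from svcauth_gss_wrap_resp_priv() */
		unsigned int pad = 3 - ((len - 1) & 3);

		printf("len=%u quads=%u rounded=%u pad=%u\n",
		       len, XDR_QUADLEN(len), round_up_to_quad(len), pad);
	}
	return 0;
}
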
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index f56767aaa927..3be257dc32b2 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -60,8 +60,8 @@ nul_match(struct auth_cred *acred, struct rpc_cred *cred, int taskflags)
60/* 60/*
61 * Marshal credential. 61 * Marshal credential.
62 */ 62 */
63static u32 * 63static __be32 *
64nul_marshal(struct rpc_task *task, u32 *p) 64nul_marshal(struct rpc_task *task, __be32 *p)
65{ 65{
66 *p++ = htonl(RPC_AUTH_NULL); 66 *p++ = htonl(RPC_AUTH_NULL);
67 *p++ = 0; 67 *p++ = 0;
@@ -81,8 +81,8 @@ nul_refresh(struct rpc_task *task)
81 return 0; 81 return 0;
82} 82}
83 83
84static u32 * 84static __be32 *
85nul_validate(struct rpc_task *task, u32 *p) 85nul_validate(struct rpc_task *task, __be32 *p)
86{ 86{
87 rpc_authflavor_t flavor; 87 rpc_authflavor_t flavor;
88 u32 size; 88 u32 size;
@@ -118,6 +118,8 @@ struct rpc_auth null_auth = {
118 .au_cslack = 4, 118 .au_cslack = 4,
119 .au_rslack = 2, 119 .au_rslack = 2,
120 .au_ops = &authnull_ops, 120 .au_ops = &authnull_ops,
121 .au_flavor = RPC_AUTH_NULL,
122 .au_count = ATOMIC_INIT(0),
121}; 123};
122 124
123static 125static
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index df14b6bfbf10..f7f990c9afe2 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -137,12 +137,12 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
137 * Marshal credentials. 137 * Marshal credentials.
138 * Maybe we should keep a cached credential for performance reasons. 138 * Maybe we should keep a cached credential for performance reasons.
139 */ 139 */
140static u32 * 140static __be32 *
141unx_marshal(struct rpc_task *task, u32 *p) 141unx_marshal(struct rpc_task *task, __be32 *p)
142{ 142{
143 struct rpc_clnt *clnt = task->tk_client; 143 struct rpc_clnt *clnt = task->tk_client;
144 struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred; 144 struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred;
145 u32 *base, *hold; 145 __be32 *base, *hold;
146 int i; 146 int i;
147 147
148 *p++ = htonl(RPC_AUTH_UNIX); 148 *p++ = htonl(RPC_AUTH_UNIX);
@@ -178,8 +178,8 @@ unx_refresh(struct rpc_task *task)
178 return 0; 178 return 0;
179} 179}
180 180
181static u32 * 181static __be32 *
182unx_validate(struct rpc_task *task, u32 *p) 182unx_validate(struct rpc_task *task, __be32 *p)
183{ 183{
184 rpc_authflavor_t flavor; 184 rpc_authflavor_t flavor;
185 u32 size; 185 u32 size;
@@ -225,6 +225,7 @@ struct rpc_auth unix_auth = {
225 .au_cslack = UNX_WRITESLACK, 225 .au_cslack = UNX_WRITESLACK,
226 .au_rslack = 2, /* assume AUTH_NULL verf */ 226 .au_rslack = 2, /* assume AUTH_NULL verf */
227 .au_ops = &authunix_ops, 227 .au_ops = &authunix_ops,
228 .au_flavor = RPC_AUTH_UNIX,
228 .au_count = ATOMIC_INIT(0), 229 .au_count = ATOMIC_INIT(0),
229 .au_credcache = &unix_cred_cache, 230 .au_credcache = &unix_cred_cache,
230}; 231};
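
The u32 to __be32 conversions running through auth_null.c and auth_unix.c mark words that are big-endian on the wire, so every store goes through htonl() and every load through ntohl() (and sparse can flag the ones that do not). A self-contained user-space sketch of the same marshalling pattern nul_marshal() uses, with a local typedef standing in for the kernel's __be32 and RPC_AUTH_NULL assumed to be 0 as in RFC 1831:

/*
 * Marshals an AUTH_NULL credential the way nul_marshal() does:
 * a flavor word followed by a zero body length, both in network order.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef uint32_t be32;			/* stand-in for the kernel's __be32 */
#define RPC_AUTH_NULL 0

static be32 *nul_marshal_demo(be32 *p)
{
	*p++ = htonl(RPC_AUTH_NULL);	/* credential flavor */
	*p++ = htonl(0);		/* credential body length: none */
	return p;
}

int main(void)
{
	be32 buf[2];
	be32 *end = nul_marshal_demo(buf);

	/* reading back always converts from network byte order */
	printf("flavor=%u len=%u (%zu words)\n",
	       ntohl(buf[0]), ntohl(buf[1]), (size_t)(end - buf));
	return 0;
}
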
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7026b0866b7b..00cb388ece03 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -71,7 +71,12 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
71 new = detail->alloc(); 71 new = detail->alloc();
72 if (!new) 72 if (!new)
73 return NULL; 73 return NULL;
74 /* must fully initialise 'new', else
75 * we might lose it if we need to
76 * cache_put it soon.
77 */
74 cache_init(new); 78 cache_init(new);
79 detail->init(new, key);
75 80
76 write_lock(&detail->hash_lock); 81 write_lock(&detail->hash_lock);
77 82
@@ -85,7 +90,6 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
85 return tmp; 90 return tmp;
86 } 91 }
87 } 92 }
88 detail->init(new, key);
89 new->next = *head; 93 new->next = *head;
90 *head = new; 94 *head = new;
91 detail->entries++; 95 detail->entries++;
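
The sunrpc_cache_lookup() hunk moves detail->init(new, key) ahead of the hash lock, so a candidate entry is fully initialised before it can either be published or immediately dropped when it loses the insertion race. A hypothetical user-space sketch of that "initialise first, publish under the lock" pattern (names and locking here are illustrative, not the kernel API):

/*
 * Lookup-or-insert into a linked table: the new entry is completely set up
 * before the lock is taken, so freeing it on a lost race is always safe.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	struct entry *next;
	char key[32];
};

static struct entry *head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup_or_insert(const char *key)
{
	struct entry *e, *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	/* initialise fully before publishing, as the added comment requires */
	new->next = NULL;
	strncpy(new->key, key, sizeof(new->key) - 1);
	new->key[sizeof(new->key) - 1] = '\0';

	pthread_mutex_lock(&table_lock);
	for (e = head; e != NULL; e = e->next) {
		if (strcmp(e->key, key) == 0) {
			pthread_mutex_unlock(&table_lock);
			free(new);	/* lost the race; new was never visible */
			return e;
		}
	}
	new->next = head;
	head = new;
	pthread_mutex_unlock(&table_lock);
	return new;
}

int main(void)
{
	return lookup_or_insert("example") ? 0 : 1;
}
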
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index aa8965e9d307..124ff0ceb55b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -60,8 +60,8 @@ static void call_refreshresult(struct rpc_task *task);
60static void call_timeout(struct rpc_task *task); 60static void call_timeout(struct rpc_task *task);
61static void call_connect(struct rpc_task *task); 61static void call_connect(struct rpc_task *task);
62static void call_connect_status(struct rpc_task *task); 62static void call_connect_status(struct rpc_task *task);
63static u32 * call_header(struct rpc_task *task); 63static __be32 * call_header(struct rpc_task *task);
64static u32 * call_verify(struct rpc_task *task); 64static __be32 * call_verify(struct rpc_task *task);
65 65
66 66
67static int 67static int
@@ -97,17 +97,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
97 } 97 }
98} 98}
99 99
100/* 100static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *program, u32 vers, rpc_authflavor_t flavor)
101 * Create an RPC client
102 * FIXME: This should also take a flags argument (as in task->tk_flags).
103 * It's called (among others) from pmap_create_client, which may in
104 * turn be called by an async task. In this case, rpciod should not be
105 * made to sleep too long.
106 */
107struct rpc_clnt *
108rpc_new_client(struct rpc_xprt *xprt, char *servname,
109 struct rpc_program *program, u32 vers,
110 rpc_authflavor_t flavor)
111{ 101{
112 struct rpc_version *version; 102 struct rpc_version *version;
113 struct rpc_clnt *clnt = NULL; 103 struct rpc_clnt *clnt = NULL;
@@ -125,10 +115,9 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
125 goto out_err; 115 goto out_err;
126 116
127 err = -ENOMEM; 117 err = -ENOMEM;
128 clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); 118 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
129 if (!clnt) 119 if (!clnt)
130 goto out_err; 120 goto out_err;
131 memset(clnt, 0, sizeof(*clnt));
132 atomic_set(&clnt->cl_users, 0); 121 atomic_set(&clnt->cl_users, 0);
133 atomic_set(&clnt->cl_count, 1); 122 atomic_set(&clnt->cl_count, 1);
134 clnt->cl_parent = clnt; 123 clnt->cl_parent = clnt;
@@ -148,16 +137,12 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
148 clnt->cl_procinfo = version->procs; 137 clnt->cl_procinfo = version->procs;
149 clnt->cl_maxproc = version->nrprocs; 138 clnt->cl_maxproc = version->nrprocs;
150 clnt->cl_protname = program->name; 139 clnt->cl_protname = program->name;
151 clnt->cl_pmap = &clnt->cl_pmap_default;
152 clnt->cl_port = xprt->addr.sin_port;
153 clnt->cl_prog = program->number; 140 clnt->cl_prog = program->number;
154 clnt->cl_vers = version->number; 141 clnt->cl_vers = version->number;
155 clnt->cl_prot = xprt->prot;
156 clnt->cl_stats = program->stats; 142 clnt->cl_stats = program->stats;
157 clnt->cl_metrics = rpc_alloc_iostats(clnt); 143 clnt->cl_metrics = rpc_alloc_iostats(clnt);
158 rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");
159 144
160 if (!clnt->cl_port) 145 if (!xprt_bound(clnt->cl_xprt))
161 clnt->cl_autobind = 1; 146 clnt->cl_autobind = 1;
162 147
163 clnt->cl_rtt = &clnt->cl_rtt_default; 148 clnt->cl_rtt = &clnt->cl_rtt_default;
@@ -184,8 +169,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
184 169
185out_no_auth: 170out_no_auth:
186 if (!IS_ERR(clnt->cl_dentry)) { 171 if (!IS_ERR(clnt->cl_dentry)) {
187 rpc_rmdir(clnt->cl_pathname); 172 rpc_rmdir(clnt->cl_dentry);
188 dput(clnt->cl_dentry);
189 rpc_put_mount(); 173 rpc_put_mount();
190 } 174 }
191out_no_path: 175out_no_path:
@@ -193,40 +177,71 @@ out_no_path:
193 kfree(clnt->cl_server); 177 kfree(clnt->cl_server);
194 kfree(clnt); 178 kfree(clnt);
195out_err: 179out_err:
196 xprt_destroy(xprt); 180 xprt_put(xprt);
197out_no_xprt: 181out_no_xprt:
198 return ERR_PTR(err); 182 return ERR_PTR(err);
199} 183}
200 184
201/** 185/*
202 * Create an RPC client 186 * rpc_create - create an RPC client and transport with one call
203 * @xprt - pointer to xprt struct 187 * @args: rpc_clnt create argument structure
204 * @servname - name of server
205 * @info - rpc_program
206 * @version - rpc_program version
207 * @authflavor - rpc_auth flavour to use
208 * 188 *
209 * Creates an RPC client structure, then pings the server in order to 189 * Creates and initializes an RPC transport and an RPC client.
210 * determine if it is up, and if it supports this program and version.
211 * 190 *
212 * This function should never be called by asynchronous tasks such as 191 * It can ping the server in order to determine if it is up, and to see if
213 * the portmapper. 192 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
193 * this behavior so asynchronous tasks can also use rpc_create.
214 */ 194 */
215struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname, 195struct rpc_clnt *rpc_create(struct rpc_create_args *args)
216 struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
217{ 196{
197 struct rpc_xprt *xprt;
218 struct rpc_clnt *clnt; 198 struct rpc_clnt *clnt;
219 int err; 199
220 200 xprt = xprt_create_transport(args->protocol, args->address,
221 clnt = rpc_new_client(xprt, servname, info, version, authflavor); 201 args->addrsize, args->timeout);
202 if (IS_ERR(xprt))
203 return (struct rpc_clnt *)xprt;
204
205 /*
206 * By default, kernel RPC client connects from a reserved port.
207 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
208 * but it is always enabled for rpciod, which handles the connect
209 * operation.
210 */
211 xprt->resvport = 1;
212 if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
213 xprt->resvport = 0;
214
215 dprintk("RPC: creating %s client for %s (xprt %p)\n",
216 args->program->name, args->servername, xprt);
217
218 clnt = rpc_new_client(xprt, args->servername, args->program,
219 args->version, args->authflavor);
222 if (IS_ERR(clnt)) 220 if (IS_ERR(clnt))
223 return clnt; 221 return clnt;
224 err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 222
225 if (err == 0) 223 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
226 return clnt; 224 int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
227 rpc_shutdown_client(clnt); 225 if (err != 0) {
228 return ERR_PTR(err); 226 rpc_shutdown_client(clnt);
227 return ERR_PTR(err);
228 }
229 }
230
231 clnt->cl_softrtry = 1;
232 if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
233 clnt->cl_softrtry = 0;
234
235 if (args->flags & RPC_CLNT_CREATE_INTR)
236 clnt->cl_intr = 1;
237 if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
238 clnt->cl_autobind = 1;
239 if (args->flags & RPC_CLNT_CREATE_ONESHOT)
240 clnt->cl_oneshot = 1;
241
242 return clnt;
229} 243}
244EXPORT_SYMBOL_GPL(rpc_create);
230 245
231/* 246/*
232 * This function clones the RPC client structure. It allows us to share the 247 * This function clones the RPC client structure. It allows us to share the
@@ -246,21 +261,17 @@ rpc_clone_client(struct rpc_clnt *clnt)
246 atomic_set(&new->cl_users, 0); 261 atomic_set(&new->cl_users, 0);
247 new->cl_parent = clnt; 262 new->cl_parent = clnt;
248 atomic_inc(&clnt->cl_count); 263 atomic_inc(&clnt->cl_count);
249 /* Duplicate portmapper */ 264 new->cl_xprt = xprt_get(clnt->cl_xprt);
250 rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
251 /* Turn off autobind on clones */ 265 /* Turn off autobind on clones */
252 new->cl_autobind = 0; 266 new->cl_autobind = 0;
253 new->cl_oneshot = 0; 267 new->cl_oneshot = 0;
254 new->cl_dead = 0; 268 new->cl_dead = 0;
255 if (!IS_ERR(new->cl_dentry)) { 269 if (!IS_ERR(new->cl_dentry))
256 dget(new->cl_dentry); 270 dget(new->cl_dentry);
257 rpc_get_mount();
258 }
259 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 271 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
260 if (new->cl_auth) 272 if (new->cl_auth)
261 atomic_inc(&new->cl_auth->au_count); 273 atomic_inc(&new->cl_auth->au_count);
262 new->cl_pmap = &new->cl_pmap_default; 274 new->cl_metrics = rpc_alloc_iostats(clnt);
263 new->cl_metrics = rpc_alloc_iostats(clnt);
264 return new; 275 return new;
265out_no_clnt: 276out_no_clnt:
266 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__); 277 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
@@ -318,24 +329,21 @@ rpc_destroy_client(struct rpc_clnt *clnt)
318 clnt->cl_auth = NULL; 329 clnt->cl_auth = NULL;
319 } 330 }
320 if (clnt->cl_parent != clnt) { 331 if (clnt->cl_parent != clnt) {
332 if (!IS_ERR(clnt->cl_dentry))
333 dput(clnt->cl_dentry);
321 rpc_destroy_client(clnt->cl_parent); 334 rpc_destroy_client(clnt->cl_parent);
322 goto out_free; 335 goto out_free;
323 } 336 }
324 if (clnt->cl_pathname[0]) 337 if (!IS_ERR(clnt->cl_dentry)) {
325 rpc_rmdir(clnt->cl_pathname); 338 rpc_rmdir(clnt->cl_dentry);
326 if (clnt->cl_xprt) { 339 rpc_put_mount();
327 xprt_destroy(clnt->cl_xprt);
328 clnt->cl_xprt = NULL;
329 } 340 }
330 if (clnt->cl_server != clnt->cl_inline_name) 341 if (clnt->cl_server != clnt->cl_inline_name)
331 kfree(clnt->cl_server); 342 kfree(clnt->cl_server);
332out_free: 343out_free:
333 rpc_free_iostats(clnt->cl_metrics); 344 rpc_free_iostats(clnt->cl_metrics);
334 clnt->cl_metrics = NULL; 345 clnt->cl_metrics = NULL;
335 if (!IS_ERR(clnt->cl_dentry)) { 346 xprt_put(clnt->cl_xprt);
336 dput(clnt->cl_dentry);
337 rpc_put_mount();
338 }
339 kfree(clnt); 347 kfree(clnt);
340 return 0; 348 return 0;
341} 349}
@@ -544,6 +552,40 @@ rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
544 task->tk_action = rpc_exit_task; 552 task->tk_action = rpc_exit_task;
545} 553}
546 554
555/**
556 * rpc_peeraddr - extract remote peer address from clnt's xprt
557 * @clnt: RPC client structure
558 * @buf: target buffer
559 * @size: length of target buffer
560 *
561 * Returns the number of bytes that are actually in the stored address.
562 */
563size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
564{
565 size_t bytes;
566 struct rpc_xprt *xprt = clnt->cl_xprt;
567
568 bytes = sizeof(xprt->addr);
569 if (bytes > bufsize)
570 bytes = bufsize;
571 memcpy(buf, &clnt->cl_xprt->addr, bytes);
572 return xprt->addrlen;
573}
574EXPORT_SYMBOL_GPL(rpc_peeraddr);
575
576/**
577 * rpc_peeraddr2str - return remote peer address in printable format
578 * @clnt: RPC client structure
579 * @format: address format
580 *
581 */
582char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
583{
584 struct rpc_xprt *xprt = clnt->cl_xprt;
585 return xprt->ops->print_addr(xprt, format);
586}
587EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
588
547void 589void
548rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) 590rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
549{ 591{
@@ -564,7 +606,7 @@ size_t rpc_max_payload(struct rpc_clnt *clnt)
564{ 606{
565 return clnt->cl_xprt->max_payload; 607 return clnt->cl_xprt->max_payload;
566} 608}
567EXPORT_SYMBOL(rpc_max_payload); 609EXPORT_SYMBOL_GPL(rpc_max_payload);
568 610
569/** 611/**
570 * rpc_force_rebind - force transport to check that remote port is unchanged 612 * rpc_force_rebind - force transport to check that remote port is unchanged
@@ -574,9 +616,9 @@ EXPORT_SYMBOL(rpc_max_payload);
574void rpc_force_rebind(struct rpc_clnt *clnt) 616void rpc_force_rebind(struct rpc_clnt *clnt)
575{ 617{
576 if (clnt->cl_autobind) 618 if (clnt->cl_autobind)
577 clnt->cl_port = 0; 619 xprt_clear_bound(clnt->cl_xprt);
578} 620}
579EXPORT_SYMBOL(rpc_force_rebind); 621EXPORT_SYMBOL_GPL(rpc_force_rebind);
580 622
581/* 623/*
582 * Restart an (async) RPC call. Usually called from within the 624 * Restart an (async) RPC call. Usually called from within the
@@ -740,7 +782,7 @@ call_encode(struct rpc_task *task)
740 struct xdr_buf *rcvbuf = &req->rq_rcv_buf; 782 struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
741 unsigned int bufsiz; 783 unsigned int bufsiz;
742 kxdrproc_t encode; 784 kxdrproc_t encode;
743 u32 *p; 785 __be32 *p;
744 786
745 dprintk("RPC: %4d call_encode (status %d)\n", 787 dprintk("RPC: %4d call_encode (status %d)\n",
746 task->tk_pid, task->tk_status); 788 task->tk_pid, task->tk_status);
@@ -785,16 +827,16 @@ call_encode(struct rpc_task *task)
785static void 827static void
786call_bind(struct rpc_task *task) 828call_bind(struct rpc_task *task)
787{ 829{
788 struct rpc_clnt *clnt = task->tk_client; 830 struct rpc_xprt *xprt = task->tk_xprt;
789 831
790 dprintk("RPC: %4d call_bind (status %d)\n", 832 dprintk("RPC: %4d call_bind (status %d)\n",
791 task->tk_pid, task->tk_status); 833 task->tk_pid, task->tk_status);
792 834
793 task->tk_action = call_connect; 835 task->tk_action = call_connect;
794 if (!clnt->cl_port) { 836 if (!xprt_bound(xprt)) {
795 task->tk_action = call_bind_status; 837 task->tk_action = call_bind_status;
796 task->tk_timeout = task->tk_xprt->bind_timeout; 838 task->tk_timeout = xprt->bind_timeout;
797 rpc_getport(task, clnt); 839 xprt->ops->rpcbind(task);
798 } 840 }
799} 841}
800 842
@@ -819,15 +861,11 @@ call_bind_status(struct rpc_task *task)
819 dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", 861 dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
820 task->tk_pid); 862 task->tk_pid);
821 rpc_delay(task, 3*HZ); 863 rpc_delay(task, 3*HZ);
822 goto retry_bind; 864 goto retry_timeout;
823 case -ETIMEDOUT: 865 case -ETIMEDOUT:
824 dprintk("RPC: %4d rpcbind request timed out\n", 866 dprintk("RPC: %4d rpcbind request timed out\n",
825 task->tk_pid); 867 task->tk_pid);
826 if (RPC_IS_SOFT(task)) { 868 goto retry_timeout;
827 status = -EIO;
828 break;
829 }
830 goto retry_bind;
831 case -EPFNOSUPPORT: 869 case -EPFNOSUPPORT:
832 dprintk("RPC: %4d remote rpcbind service unavailable\n", 870 dprintk("RPC: %4d remote rpcbind service unavailable\n",
833 task->tk_pid); 871 task->tk_pid);
@@ -840,16 +878,13 @@ call_bind_status(struct rpc_task *task)
840 dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", 878 dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
841 task->tk_pid, -task->tk_status); 879 task->tk_pid, -task->tk_status);
842 status = -EIO; 880 status = -EIO;
843 break;
844 } 881 }
845 882
846 rpc_exit(task, status); 883 rpc_exit(task, status);
847 return; 884 return;
848 885
849retry_bind: 886retry_timeout:
850 task->tk_status = 0; 887 task->tk_action = call_timeout;
851 task->tk_action = call_bind;
852 return;
853} 888}
854 889
855/* 890/*
@@ -897,14 +932,16 @@ call_connect_status(struct rpc_task *task)
897 932
898 switch (status) { 933 switch (status) {
899 case -ENOTCONN: 934 case -ENOTCONN:
900 case -ETIMEDOUT:
901 case -EAGAIN: 935 case -EAGAIN:
902 task->tk_action = call_bind; 936 task->tk_action = call_bind;
903 break; 937 if (!RPC_IS_SOFT(task))
904 default: 938 return;
905 rpc_exit(task, -EIO); 939 /* if soft mounted, test if we've timed out */
906 break; 940 case -ETIMEDOUT:
941 task->tk_action = call_timeout;
942 return;
907 } 943 }
944 rpc_exit(task, -EIO);
908} 945}
909 946
910/* 947/*
@@ -922,26 +959,43 @@ call_transmit(struct rpc_task *task)
922 task->tk_status = xprt_prepare_transmit(task); 959 task->tk_status = xprt_prepare_transmit(task);
923 if (task->tk_status != 0) 960 if (task->tk_status != 0)
924 return; 961 return;
962 task->tk_action = call_transmit_status;
925 /* Encode here so that rpcsec_gss can use correct sequence number. */ 963 /* Encode here so that rpcsec_gss can use correct sequence number. */
926 if (rpc_task_need_encode(task)) { 964 if (rpc_task_need_encode(task)) {
927 task->tk_rqstp->rq_bytes_sent = 0; 965 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
928 call_encode(task); 966 call_encode(task);
929 /* Did the encode result in an error condition? */ 967 /* Did the encode result in an error condition? */
930 if (task->tk_status != 0) 968 if (task->tk_status != 0)
931 goto out_nosend; 969 return;
932 } 970 }
933 task->tk_action = call_transmit_status;
934 xprt_transmit(task); 971 xprt_transmit(task);
935 if (task->tk_status < 0) 972 if (task->tk_status < 0)
936 return; 973 return;
937 if (!task->tk_msg.rpc_proc->p_decode) { 974 /*
938 task->tk_action = rpc_exit_task; 975 * On success, ensure that we call xprt_end_transmit() before sleeping
939 rpc_wake_up_task(task); 976 * in order to allow access to the socket to other RPC requests.
940 } 977 */
941 return; 978 call_transmit_status(task);
942out_nosend: 979 if (task->tk_msg.rpc_proc->p_decode != NULL)
943 /* release socket write lock before attempting to handle error */ 980 return;
944 xprt_abort_transmit(task); 981 task->tk_action = rpc_exit_task;
982 rpc_wake_up_task(task);
983}
984
985/*
986 * 5a. Handle cleanup after a transmission
987 */
988static void
989call_transmit_status(struct rpc_task *task)
990{
991 task->tk_action = call_status;
992 /*
993 * Special case: if we've been waiting on the socket's write_space()
994 * callback, then don't call xprt_end_transmit().
995 */
996 if (task->tk_status == -EAGAIN)
997 return;
998 xprt_end_transmit(task);
945 rpc_task_force_reencode(task); 999 rpc_task_force_reencode(task);
946} 1000}
947 1001
@@ -969,6 +1023,14 @@ call_status(struct rpc_task *task)
969 1023
970 task->tk_status = 0; 1024 task->tk_status = 0;
971 switch(status) { 1025 switch(status) {
1026 case -EHOSTDOWN:
1027 case -EHOSTUNREACH:
1028 case -ENETUNREACH:
1029 /*
1030 * Delay any retries for 3 seconds, then handle as if it
1031 * were a timeout.
1032 */
1033 rpc_delay(task, 3*HZ);
972 case -ETIMEDOUT: 1034 case -ETIMEDOUT:
973 task->tk_action = call_timeout; 1035 task->tk_action = call_timeout;
974 break; 1036 break;
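
The new call_status() cases rely on a deliberate switch fall-through: host- and network-unreachable errors are delayed for three seconds and then handled exactly like -ETIMEDOUT. A small self-contained illustration of that control flow, with the delay stubbed out and the actions reduced to strings:

#include <stdio.h>
#include <errno.h>

static const char *next_action(int status)
{
	switch (status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/* the kernel code calls rpc_delay(task, 3*HZ) here,
		 * then falls through to the timeout handling */
	case -ETIMEDOUT:
		return "call_timeout";
	default:
		return "other handling";
	}
}

int main(void)
{
	printf("%s\n", next_action(-EHOSTUNREACH));	/* -> call_timeout */
	printf("%s\n", next_action(-ETIMEDOUT));	/* -> call_timeout */
	return 0;
}
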
@@ -988,23 +1050,11 @@ call_status(struct rpc_task *task)
988 printk("%s: RPC call returned error %d\n", 1050 printk("%s: RPC call returned error %d\n",
989 clnt->cl_protname, -status); 1051 clnt->cl_protname, -status);
990 rpc_exit(task, status); 1052 rpc_exit(task, status);
991 break;
992 } 1053 }
993} 1054}
994 1055
995/* 1056/*
996 * 6a. Handle transmission errors. 1057 * 6a. Handle RPC timeout
997 */
998static void
999call_transmit_status(struct rpc_task *task)
1000{
1001 if (task->tk_status != -EAGAIN)
1002 rpc_task_force_reencode(task);
1003 call_status(task);
1004}
1005
1006/*
1007 * 6b. Handle RPC timeout
1008 * We do not release the request slot, so we keep using the 1058 * We do not release the request slot, so we keep using the
1009 * same XID for all retransmits. 1059 * same XID for all retransmits.
1010 */ 1060 */
@@ -1050,7 +1100,7 @@ call_decode(struct rpc_task *task)
1050 struct rpc_clnt *clnt = task->tk_client; 1100 struct rpc_clnt *clnt = task->tk_client;
1051 struct rpc_rqst *req = task->tk_rqstp; 1101 struct rpc_rqst *req = task->tk_rqstp;
1052 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; 1102 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
1053 u32 *p; 1103 __be32 *p;
1054 1104
1055 dprintk("RPC: %4d call_decode (status %d)\n", 1105 dprintk("RPC: %4d call_decode (status %d)\n",
1056 task->tk_pid, task->tk_status); 1106 task->tk_pid, task->tk_status);
@@ -1067,10 +1117,10 @@ call_decode(struct rpc_task *task)
1067 clnt->cl_stats->rpcretrans++; 1117 clnt->cl_stats->rpcretrans++;
1068 goto out_retry; 1118 goto out_retry;
1069 } 1119 }
1070 printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n", 1120 dprintk("%s: too small RPC reply size (%d bytes)\n",
1071 clnt->cl_protname, task->tk_status); 1121 clnt->cl_protname, task->tk_status);
1072 rpc_exit(task, -EIO); 1122 task->tk_action = call_timeout;
1073 return; 1123 goto out_retry;
1074 } 1124 }
1075 1125
1076 /* 1126 /*
@@ -1147,12 +1197,12 @@ call_refreshresult(struct rpc_task *task)
1147/* 1197/*
1148 * Call header serialization 1198 * Call header serialization
1149 */ 1199 */
1150static u32 * 1200static __be32 *
1151call_header(struct rpc_task *task) 1201call_header(struct rpc_task *task)
1152{ 1202{
1153 struct rpc_clnt *clnt = task->tk_client; 1203 struct rpc_clnt *clnt = task->tk_client;
1154 struct rpc_rqst *req = task->tk_rqstp; 1204 struct rpc_rqst *req = task->tk_rqstp;
1155 u32 *p = req->rq_svec[0].iov_base; 1205 __be32 *p = req->rq_svec[0].iov_base;
1156 1206
1157 /* FIXME: check buffer size? */ 1207 /* FIXME: check buffer size? */
1158 1208
@@ -1171,14 +1221,26 @@ call_header(struct rpc_task *task)
1171/* 1221/*
1172 * Reply header verification 1222 * Reply header verification
1173 */ 1223 */
1174static u32 * 1224static __be32 *
1175call_verify(struct rpc_task *task) 1225call_verify(struct rpc_task *task)
1176{ 1226{
1177 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; 1227 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
1178 int len = task->tk_rqstp->rq_rcv_buf.len >> 2; 1228 int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
1179 u32 *p = iov->iov_base, n; 1229 __be32 *p = iov->iov_base;
1230 u32 n;
1180 int error = -EACCES; 1231 int error = -EACCES;
1181 1232
1233 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1234 /* RFC-1014 says that the representation of XDR data must be a
1235 * multiple of four bytes
1236 * - if it isn't pointer subtraction in the NFS client may give
1237 * undefined results
1238 */
1239 printk(KERN_WARNING
1240 "call_verify: XDR representation not a multiple of"
1241 " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len);
1242 goto out_eio;
1243 }
1182 if ((len -= 3) < 0) 1244 if ((len -= 3) < 0)
1183 goto out_overflow; 1245 goto out_overflow;
1184 p += 1; /* skip XID */ 1246 p += 1; /* skip XID */
@@ -1242,7 +1304,7 @@ call_verify(struct rpc_task *task)
1242 printk(KERN_WARNING "call_verify: auth check failed\n"); 1304 printk(KERN_WARNING "call_verify: auth check failed\n");
1243 goto out_garbage; /* bad verifier, retry */ 1305 goto out_garbage; /* bad verifier, retry */
1244 } 1306 }
1245 len = p - (u32 *)iov->iov_base - 1; 1307 len = p - (__be32 *)iov->iov_base - 1;
1246 if (len < 0) 1308 if (len < 0)
1247 goto out_overflow; 1309 goto out_overflow;
1248 switch ((n = ntohl(*p++))) { 1310 switch ((n = ntohl(*p++))) {
@@ -1297,12 +1359,12 @@ out_overflow:
1297 goto out_garbage; 1359 goto out_garbage;
1298} 1360}
1299 1361
1300static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) 1362static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
1301{ 1363{
1302 return 0; 1364 return 0;
1303} 1365}
1304 1366
1305static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj) 1367static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
1306{ 1368{
1307 return 0; 1369 return 0;
1308} 1370}
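
The net effect of the clnt.c changes is that callers no longer build an xprt themselves and then call rpc_new_client() or rpc_create_client(); they fill a struct rpc_create_args and call rpc_create(), which owns transport creation, the optional ping, and the flag-driven behaviour. A hedged kernel-side sketch of such a caller, using only the field and flag names visible in this patch; the address, servername, program and version values are placeholders, and passing NULL for .timeout to get transport defaults is an assumption:

static struct rpc_clnt *example_create_client(struct sockaddr_in *sin,
					      struct rpc_program *prog)
{
	struct rpc_create_args args = {
		.protocol	= IPPROTO_TCP,
		.address	= (struct sockaddr *)sin,
		.addrsize	= sizeof(*sin),
		.timeout	= NULL,			/* assumed: use defaults */
		.servername	= "example-server",	/* placeholder */
		.program	= prog,
		.version	= 1,			/* placeholder */
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};

	return rpc_create(&args);
}
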
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index d25b054ec921..919d5ba7ca0a 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -1,12 +1,13 @@
1/* 1/*
2 * linux/net/sunrpc/pmap.c 2 * linux/net/sunrpc/pmap_clnt.c
3 * 3 *
4 * Portmapper client. 4 * In-kernel RPC portmapper client.
5 *
6 * Portmapper supports version 2 of the rpcbind protocol (RFC 1833).
5 * 7 *
6 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> 8 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
7 */ 9 */
8 10
9#include <linux/config.h>
10#include <linux/types.h> 11#include <linux/types.h>
11#include <linux/socket.h> 12#include <linux/socket.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -14,7 +15,6 @@
14#include <linux/uio.h> 15#include <linux/uio.h>
15#include <linux/in.h> 16#include <linux/in.h>
16#include <linux/sunrpc/clnt.h> 17#include <linux/sunrpc/clnt.h>
17#include <linux/sunrpc/xprt.h>
18#include <linux/sunrpc/sched.h> 18#include <linux/sunrpc/sched.h>
19 19
20#ifdef RPC_DEBUG 20#ifdef RPC_DEBUG
@@ -25,80 +25,141 @@
25#define PMAP_UNSET 2 25#define PMAP_UNSET 2
26#define PMAP_GETPORT 3 26#define PMAP_GETPORT 3
27 27
28struct portmap_args {
29 u32 pm_prog;
30 u32 pm_vers;
31 u32 pm_prot;
32 unsigned short pm_port;
33 struct rpc_xprt * pm_xprt;
34};
35
28static struct rpc_procinfo pmap_procedures[]; 36static struct rpc_procinfo pmap_procedures[];
29static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int); 37static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int);
30static void pmap_getport_done(struct rpc_task *); 38static void pmap_getport_done(struct rpc_task *, void *);
31static struct rpc_program pmap_program; 39static struct rpc_program pmap_program;
32static DEFINE_SPINLOCK(pmap_lock);
33 40
34/* 41static void pmap_getport_prepare(struct rpc_task *task, void *calldata)
35 * Obtain the port for a given RPC service on a given host. This one can
36 * be called for an ongoing RPC request.
37 */
38void
39rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
40{ 42{
41 struct rpc_portmap *map = clnt->cl_pmap; 43 struct portmap_args *map = calldata;
42 struct sockaddr_in *sap = &clnt->cl_xprt->addr;
43 struct rpc_message msg = { 44 struct rpc_message msg = {
44 .rpc_proc = &pmap_procedures[PMAP_GETPORT], 45 .rpc_proc = &pmap_procedures[PMAP_GETPORT],
45 .rpc_argp = map, 46 .rpc_argp = map,
46 .rpc_resp = &clnt->cl_port, 47 .rpc_resp = &map->pm_port,
47 .rpc_cred = NULL
48 }; 48 };
49
50 rpc_call_setup(task, &msg, 0);
51}
52
53static inline struct portmap_args *pmap_map_alloc(void)
54{
55 return kmalloc(sizeof(struct portmap_args), GFP_NOFS);
56}
57
58static inline void pmap_map_free(struct portmap_args *map)
59{
60 kfree(map);
61}
62
63static void pmap_map_release(void *data)
64{
65 pmap_map_free(data);
66}
67
68static const struct rpc_call_ops pmap_getport_ops = {
69 .rpc_call_prepare = pmap_getport_prepare,
70 .rpc_call_done = pmap_getport_done,
71 .rpc_release = pmap_map_release,
72};
73
74static inline void pmap_wake_portmap_waiters(struct rpc_xprt *xprt, int status)
75{
76 xprt_clear_binding(xprt);
77 rpc_wake_up_status(&xprt->binding, status);
78}
79
80/**
81 * rpc_getport - obtain the port for a given RPC service on a given host
82 * @task: task that is waiting for portmapper request
83 *
84 * This one can be called for an ongoing RPC request, and can be used in
85 * an async (rpciod) context.
86 */
87void rpc_getport(struct rpc_task *task)
88{
89 struct rpc_clnt *clnt = task->tk_client;
90 struct rpc_xprt *xprt = task->tk_xprt;
91 struct sockaddr_in addr;
92 struct portmap_args *map;
49 struct rpc_clnt *pmap_clnt; 93 struct rpc_clnt *pmap_clnt;
50 struct rpc_task *child; 94 struct rpc_task *child;
95 int status;
51 96
52 dprintk("RPC: %4d rpc_getport(%s, %d, %d, %d)\n", 97 dprintk("RPC: %4d rpc_getport(%s, %u, %u, %d)\n",
53 task->tk_pid, clnt->cl_server, 98 task->tk_pid, clnt->cl_server,
54 map->pm_prog, map->pm_vers, map->pm_prot); 99 clnt->cl_prog, clnt->cl_vers, xprt->prot);
55 100
56 /* Autobind on cloned rpc clients is discouraged */ 101 /* Autobind on cloned rpc clients is discouraged */
57 BUG_ON(clnt->cl_parent != clnt); 102 BUG_ON(clnt->cl_parent != clnt);
58 103
59 spin_lock(&pmap_lock); 104 if (xprt_test_and_set_binding(xprt)) {
60 if (map->pm_binding) { 105 task->tk_status = -EACCES; /* tell caller to check again */
61 rpc_sleep_on(&map->pm_bindwait, task, NULL, NULL); 106 rpc_sleep_on(&xprt->binding, task, NULL, NULL);
62 spin_unlock(&pmap_lock);
63 return; 107 return;
64 } 108 }
65 map->pm_binding = 1;
66 spin_unlock(&pmap_lock);
67 109
68 pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0); 110 /* Someone else may have bound if we slept */
69 if (IS_ERR(pmap_clnt)) { 111 status = 0;
70 task->tk_status = PTR_ERR(pmap_clnt); 112 if (xprt_bound(xprt))
113 goto bailout_nofree;
114
115 status = -ENOMEM;
116 map = pmap_map_alloc();
117 if (!map)
118 goto bailout_nofree;
119 map->pm_prog = clnt->cl_prog;
120 map->pm_vers = clnt->cl_vers;
121 map->pm_prot = xprt->prot;
122 map->pm_port = 0;
123 map->pm_xprt = xprt_get(xprt);
124
125 rpc_peeraddr(clnt, (struct sockaddr *) &addr, sizeof(addr));
126 pmap_clnt = pmap_create(clnt->cl_server, &addr, map->pm_prot, 0);
127 status = PTR_ERR(pmap_clnt);
128 if (IS_ERR(pmap_clnt))
71 goto bailout; 129 goto bailout;
72 }
73 task->tk_status = 0;
74 130
75 /* 131 status = -EIO;
76 * Note: rpc_new_child will release client after a failure. 132 child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map);
77 */ 133 if (IS_ERR(child))
78 if (!(child = rpc_new_child(pmap_clnt, task)))
79 goto bailout; 134 goto bailout;
135 rpc_release_task(child);
80 136
81 /* Setup the call info struct */ 137 rpc_sleep_on(&xprt->binding, task, NULL, NULL);
82 rpc_call_setup(child, &msg, 0);
83 138
84 /* ... and run the child task */
85 task->tk_xprt->stat.bind_count++; 139 task->tk_xprt->stat.bind_count++;
86 rpc_run_child(task, child, pmap_getport_done);
87 return; 140 return;
88 141
89bailout: 142bailout:
90 spin_lock(&pmap_lock); 143 pmap_map_free(map);
91 map->pm_binding = 0; 144 xprt_put(xprt);
92 rpc_wake_up(&map->pm_bindwait); 145bailout_nofree:
93 spin_unlock(&pmap_lock); 146 task->tk_status = status;
94 rpc_exit(task, -EIO); 147 pmap_wake_portmap_waiters(xprt, status);
95} 148}
96 149
97#ifdef CONFIG_ROOT_NFS 150#ifdef CONFIG_ROOT_NFS
98int 151/**
99rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) 152 * rpc_getport_external - obtain the port for a given RPC service on a given host
153 * @sin: address of remote peer
154 * @prog: RPC program number to bind
155 * @vers: RPC version number to bind
156 * @prot: transport protocol to use to make this request
157 *
158 * This one is called from outside the RPC client in a synchronous task context.
159 */
160int rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
100{ 161{
101 struct rpc_portmap map = { 162 struct portmap_args map = {
102 .pm_prog = prog, 163 .pm_prog = prog,
103 .pm_vers = vers, 164 .pm_vers = vers,
104 .pm_prot = prot, 165 .pm_prot = prot,
@@ -113,7 +174,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
113 char hostname[32]; 174 char hostname[32];
114 int status; 175 int status;
115 176
116 dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %d, %d, %d)\n", 177 dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n",
117 NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); 178 NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
118 179
119 sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); 180 sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
@@ -133,45 +194,53 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
133} 194}
134#endif 195#endif
135 196
136static void 197/*
137pmap_getport_done(struct rpc_task *task) 198 * Portmapper child task invokes this callback via tk_exit.
199 */
200static void pmap_getport_done(struct rpc_task *child, void *data)
138{ 201{
139 struct rpc_clnt *clnt = task->tk_client; 202 struct portmap_args *map = data;
140 struct rpc_xprt *xprt = task->tk_xprt; 203 struct rpc_xprt *xprt = map->pm_xprt;
141 struct rpc_portmap *map = clnt->cl_pmap; 204 int status = child->tk_status;
142 205
143 dprintk("RPC: %4d pmap_getport_done(status %d, port %d)\n", 206 if (status < 0) {
144 task->tk_pid, task->tk_status, clnt->cl_port); 207 /* Portmapper not available */
145 208 xprt->ops->set_port(xprt, 0);
146 xprt->ops->set_port(xprt, 0); 209 } else if (map->pm_port == 0) {
147 if (task->tk_status < 0) { 210 /* Requested RPC service wasn't registered */
148 /* Make the calling task exit with an error */ 211 xprt->ops->set_port(xprt, 0);
149 task->tk_action = rpc_exit_task; 212 status = -EACCES;
150 } else if (clnt->cl_port == 0) {
151 /* Program not registered */
152 rpc_exit(task, -EACCES);
153 } else { 213 } else {
154 xprt->ops->set_port(xprt, clnt->cl_port); 214 /* Succeeded */
155 clnt->cl_port = htons(clnt->cl_port); 215 xprt->ops->set_port(xprt, map->pm_port);
216 xprt_set_bound(xprt);
217 status = 0;
156 } 218 }
157 spin_lock(&pmap_lock); 219
158 map->pm_binding = 0; 220 dprintk("RPC: %4d pmap_getport_done(status %d, port %u)\n",
159 rpc_wake_up(&map->pm_bindwait); 221 child->tk_pid, status, map->pm_port);
160 spin_unlock(&pmap_lock); 222
223 pmap_wake_portmap_waiters(xprt, status);
224 xprt_put(xprt);
161} 225}
162 226
163/* 227/**
164 * Set or unset a port registration with the local portmapper. 228 * rpc_register - set or unset a port registration with the local portmapper
229 * @prog: RPC program number to bind
230 * @vers: RPC version number to bind
231 * @prot: transport protocol to use to make this request
232 * @port: port value to register
233 * @okay: result code
234 *
165 * port == 0 means unregister, port != 0 means register. 235 * port == 0 means unregister, port != 0 means register.
166 */ 236 */
167int 237int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
168rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
169{ 238{
170 struct sockaddr_in sin = { 239 struct sockaddr_in sin = {
171 .sin_family = AF_INET, 240 .sin_family = AF_INET,
172 .sin_addr.s_addr = htonl(INADDR_LOOPBACK), 241 .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
173 }; 242 };
174 struct rpc_portmap map = { 243 struct portmap_args map = {
175 .pm_prog = prog, 244 .pm_prog = prog,
176 .pm_vers = vers, 245 .pm_vers = vers,
177 .pm_prot = prot, 246 .pm_prot = prot,
@@ -185,7 +254,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
185 struct rpc_clnt *pmap_clnt; 254 struct rpc_clnt *pmap_clnt;
186 int error = 0; 255 int error = 0;
187 256
188 dprintk("RPC: registering (%d, %d, %d, %d) with portmapper.\n", 257 dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n",
189 prog, vers, prot, port); 258 prog, vers, prot, port);
190 259
191 pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); 260 pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
@@ -208,38 +277,32 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
208 return error; 277 return error;
209} 278}
210 279
211static struct rpc_clnt * 280static struct rpc_clnt *pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
212pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
213{ 281{
214 struct rpc_xprt *xprt; 282 struct rpc_create_args args = {
215 struct rpc_clnt *clnt; 283 .protocol = proto,
216 284 .address = (struct sockaddr *)srvaddr,
217 /* printk("pmap: create xprt\n"); */ 285 .addrsize = sizeof(*srvaddr),
218 xprt = xprt_create_proto(proto, srvaddr, NULL); 286 .servername = hostname,
219 if (IS_ERR(xprt)) 287 .program = &pmap_program,
220 return (struct rpc_clnt *)xprt; 288 .version = RPC_PMAP_VERSION,
221 xprt->ops->set_port(xprt, RPC_PMAP_PORT); 289 .authflavor = RPC_AUTH_UNIX,
290 .flags = (RPC_CLNT_CREATE_ONESHOT |
291 RPC_CLNT_CREATE_NOPING),
292 };
293
294 srvaddr->sin_port = htons(RPC_PMAP_PORT);
222 if (!privileged) 295 if (!privileged)
223 xprt->resvport = 0; 296 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
224 297 return rpc_create(&args);
225 /* printk("pmap: create clnt\n"); */
226 clnt = rpc_new_client(xprt, hostname,
227 &pmap_program, RPC_PMAP_VERSION,
228 RPC_AUTH_UNIX);
229 if (!IS_ERR(clnt)) {
230 clnt->cl_softrtry = 1;
231 clnt->cl_oneshot = 1;
232 }
233 return clnt;
234} 298}
235 299
236/* 300/*
237 * XDR encode/decode functions for PMAP 301 * XDR encode/decode functions for PMAP
238 */ 302 */
239static int 303static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map)
240xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct rpc_portmap *map)
241{ 304{
242 dprintk("RPC: xdr_encode_mapping(%d, %d, %d, %d)\n", 305 dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n",
243 map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port); 306 map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port);
244 *p++ = htonl(map->pm_prog); 307 *p++ = htonl(map->pm_prog);
245 *p++ = htonl(map->pm_vers); 308 *p++ = htonl(map->pm_vers);
@@ -250,15 +313,13 @@ xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct rpc_portmap *map)
250 return 0; 313 return 0;
251} 314}
252 315
253static int 316static int xdr_decode_port(struct rpc_rqst *req, __be32 *p, unsigned short *portp)
254xdr_decode_port(struct rpc_rqst *req, u32 *p, unsigned short *portp)
255{ 317{
256 *portp = (unsigned short) ntohl(*p++); 318 *portp = (unsigned short) ntohl(*p++);
257 return 0; 319 return 0;
258} 320}
259 321
260static int 322static int xdr_decode_bool(struct rpc_rqst *req, __be32 *p, unsigned int *boolp)
261xdr_decode_bool(struct rpc_rqst *req, u32 *p, unsigned int *boolp)
262{ 323{
263 *boolp = (unsigned int) ntohl(*p++); 324 *boolp = (unsigned int) ntohl(*p++);
264 return 0; 325 return 0;
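
On the wire the portmapper exchange stays trivial: xdr_encode_mapping() packs prog, vers, prot and port as four big-endian words, and the GETPORT reply decoded by xdr_decode_port() is a single word. A self-contained user-space demo of that layout (the NFS program, version and protocol values are just example inputs):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t *encode_mapping(uint32_t *p, uint32_t prog, uint32_t vers,
				uint32_t prot, uint32_t port)
{
	*p++ = htonl(prog);
	*p++ = htonl(vers);
	*p++ = htonl(prot);
	*p++ = htonl(port);
	return p;
}

int main(void)
{
	uint32_t buf[4];

	encode_mapping(buf, 100003, 3, 6 /* IPPROTO_TCP */, 0);
	printf("prog=%u vers=%u prot=%u port=%u\n",
	       ntohl(buf[0]), ntohl(buf[1]), ntohl(buf[2]), ntohl(buf[3]));
	return 0;
}
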
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 8241fa726803..700c6e061a04 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -8,7 +8,6 @@
8 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no> 8 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
9 * 9 *
10 */ 10 */
11#include <linux/config.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/slab.h> 12#include <linux/slab.h>
14#include <linux/string.h> 13#include <linux/string.h>
@@ -328,10 +327,8 @@ rpc_show_info(struct seq_file *m, void *v)
328 seq_printf(m, "RPC server: %s\n", clnt->cl_server); 327 seq_printf(m, "RPC server: %s\n", clnt->cl_server);
329 seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname, 328 seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
330 clnt->cl_prog, clnt->cl_vers); 329 clnt->cl_prog, clnt->cl_vers);
331 seq_printf(m, "address: %u.%u.%u.%u\n", 330 seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
332 NIPQUAD(clnt->cl_xprt->addr.sin_addr.s_addr)); 331 seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
333 seq_printf(m, "protocol: %s\n",
334 clnt->cl_xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
335 return 0; 332 return 0;
336} 333}
337 334
@@ -439,7 +436,7 @@ struct vfsmount *rpc_get_mount(void)
439{ 436{
440 int err; 437 int err;
441 438
442 err = simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count); 439 err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
443 if (err != 0) 440 if (err != 0)
444 return ERR_PTR(err); 441 return ERR_PTR(err);
445 return rpc_mount; 442 return rpc_mount;
@@ -491,7 +488,6 @@ rpc_get_inode(struct super_block *sb, int mode)
491 return NULL; 488 return NULL;
492 inode->i_mode = mode; 489 inode->i_mode = mode;
493 inode->i_uid = inode->i_gid = 0; 490 inode->i_uid = inode->i_gid = 0;
494 inode->i_blksize = PAGE_CACHE_SIZE;
495 inode->i_blocks = 0; 491 inode->i_blocks = 0;
496 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 492 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
497 switch(mode & S_IFMT) { 493 switch(mode & S_IFMT) {
@@ -516,7 +512,7 @@ rpc_depopulate(struct dentry *parent)
516 struct dentry *dentry, *dvec[10]; 512 struct dentry *dentry, *dvec[10];
517 int n = 0; 513 int n = 0;
518 514
519 mutex_lock(&dir->i_mutex); 515 mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
520repeat: 516repeat:
521 spin_lock(&dcache_lock); 517 spin_lock(&dcache_lock);
522 list_for_each_safe(pos, next, &parent->d_subdirs) { 518 list_for_each_safe(pos, next, &parent->d_subdirs) {
@@ -540,6 +536,7 @@ repeat:
540 rpc_close_pipes(dentry->d_inode); 536 rpc_close_pipes(dentry->d_inode);
541 simple_unlink(dir, dentry); 537 simple_unlink(dir, dentry);
542 } 538 }
539 inode_dir_notify(dir, DN_DELETE);
543 dput(dentry); 540 dput(dentry);
544 } while (n); 541 } while (n);
545 goto repeat; 542 goto repeat;
@@ -611,8 +608,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
611 int error; 608 int error;
612 609
613 shrink_dcache_parent(dentry); 610 shrink_dcache_parent(dentry);
614 if (dentry->d_inode) 611 if (d_unhashed(dentry))
615 rpc_close_pipes(dentry->d_inode); 612 return 0;
616 if ((error = simple_rmdir(dir, dentry)) != 0) 613 if ((error = simple_rmdir(dir, dentry)) != 0)
617 return error; 614 return error;
618 if (!error) { 615 if (!error) {
@@ -623,17 +620,13 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
623} 620}
624 621
625static struct dentry * 622static struct dentry *
626rpc_lookup_negative(char *path, struct nameidata *nd) 623rpc_lookup_create(struct dentry *parent, const char *name, int len)
627{ 624{
625 struct inode *dir = parent->d_inode;
628 struct dentry *dentry; 626 struct dentry *dentry;
629 struct inode *dir;
630 int error;
631 627
632 if ((error = rpc_lookup_parent(path, nd)) != 0) 628 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
633 return ERR_PTR(error); 629 dentry = lookup_one_len(name, parent, len);
634 dir = nd->dentry->d_inode;
635 mutex_lock(&dir->i_mutex);
636 dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
637 if (IS_ERR(dentry)) 630 if (IS_ERR(dentry))
638 goto out_err; 631 goto out_err;
639 if (dentry->d_inode) { 632 if (dentry->d_inode) {
@@ -644,7 +637,20 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
644 return dentry; 637 return dentry;
645out_err: 638out_err:
646 mutex_unlock(&dir->i_mutex); 639 mutex_unlock(&dir->i_mutex);
647 rpc_release_path(nd); 640 return dentry;
641}
642
643static struct dentry *
644rpc_lookup_negative(char *path, struct nameidata *nd)
645{
646 struct dentry *dentry;
647 int error;
648
649 if ((error = rpc_lookup_parent(path, nd)) != 0)
650 return ERR_PTR(error);
651 dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len);
652 if (IS_ERR(dentry))
653 rpc_release_path(nd);
648 return dentry; 654 return dentry;
649} 655}
650 656
@@ -668,10 +674,11 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
668 RPCAUTH_info, RPCAUTH_EOF); 674 RPCAUTH_info, RPCAUTH_EOF);
669 if (error) 675 if (error)
670 goto err_depopulate; 676 goto err_depopulate;
677 dget(dentry);
671out: 678out:
672 mutex_unlock(&dir->i_mutex); 679 mutex_unlock(&dir->i_mutex);
673 rpc_release_path(&nd); 680 rpc_release_path(&nd);
674 return dget(dentry); 681 return dentry;
675err_depopulate: 682err_depopulate:
676 rpc_depopulate(dentry); 683 rpc_depopulate(dentry);
677 __rpc_rmdir(dir, dentry); 684 __rpc_rmdir(dir, dentry);
@@ -684,44 +691,35 @@ err_dput:
684} 691}
685 692
686int 693int
687rpc_rmdir(char *path) 694rpc_rmdir(struct dentry *dentry)
688{ 695{
689 struct nameidata nd; 696 struct dentry *parent;
690 struct dentry *dentry;
691 struct inode *dir; 697 struct inode *dir;
692 int error; 698 int error;
693 699
694 if ((error = rpc_lookup_parent(path, &nd)) != 0) 700 parent = dget_parent(dentry);
695 return error; 701 dir = parent->d_inode;
696 dir = nd.dentry->d_inode; 702 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
697 mutex_lock(&dir->i_mutex);
698 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
699 if (IS_ERR(dentry)) {
700 error = PTR_ERR(dentry);
701 goto out_release;
702 }
703 rpc_depopulate(dentry); 703 rpc_depopulate(dentry);
704 error = __rpc_rmdir(dir, dentry); 704 error = __rpc_rmdir(dir, dentry);
705 dput(dentry); 705 dput(dentry);
706out_release:
707 mutex_unlock(&dir->i_mutex); 706 mutex_unlock(&dir->i_mutex);
708 rpc_release_path(&nd); 707 dput(parent);
709 return error; 708 return error;
710} 709}
711 710
712struct dentry * 711struct dentry *
713rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) 712rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
714{ 713{
715 struct nameidata nd;
716 struct dentry *dentry; 714 struct dentry *dentry;
717 struct inode *dir, *inode; 715 struct inode *dir, *inode;
718 struct rpc_inode *rpci; 716 struct rpc_inode *rpci;
719 717
720 dentry = rpc_lookup_negative(path, &nd); 718 dentry = rpc_lookup_create(parent, name, strlen(name));
721 if (IS_ERR(dentry)) 719 if (IS_ERR(dentry))
722 return dentry; 720 return dentry;
723 dir = nd.dentry->d_inode; 721 dir = parent->d_inode;
724 inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR); 722 inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
725 if (!inode) 723 if (!inode)
726 goto err_dput; 724 goto err_dput;
727 inode->i_ino = iunique(dir->i_sb, 100); 725 inode->i_ino = iunique(dir->i_sb, 100);
@@ -732,45 +730,40 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
732 rpci->flags = flags; 730 rpci->flags = flags;
733 rpci->ops = ops; 731 rpci->ops = ops;
734 inode_dir_notify(dir, DN_CREATE); 732 inode_dir_notify(dir, DN_CREATE);
733 dget(dentry);
735out: 734out:
736 mutex_unlock(&dir->i_mutex); 735 mutex_unlock(&dir->i_mutex);
737 rpc_release_path(&nd); 736 return dentry;
738 return dget(dentry);
739err_dput: 737err_dput:
740 dput(dentry); 738 dput(dentry);
741 dentry = ERR_PTR(-ENOMEM); 739 dentry = ERR_PTR(-ENOMEM);
742 printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n", 740 printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
743 __FILE__, __FUNCTION__, path, -ENOMEM); 741 __FILE__, __FUNCTION__, parent->d_name.name, name,
742 -ENOMEM);
744 goto out; 743 goto out;
745} 744}
746 745
747int 746int
748rpc_unlink(char *path) 747rpc_unlink(struct dentry *dentry)
749{ 748{
750 struct nameidata nd; 749 struct dentry *parent;
751 struct dentry *dentry;
752 struct inode *dir; 750 struct inode *dir;
753 int error; 751 int error = 0;
754 752
755 if ((error = rpc_lookup_parent(path, &nd)) != 0) 753 parent = dget_parent(dentry);
756 return error; 754 dir = parent->d_inode;
757 dir = nd.dentry->d_inode; 755 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
758 mutex_lock(&dir->i_mutex); 756 if (!d_unhashed(dentry)) {
759 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); 757 d_drop(dentry);
760 if (IS_ERR(dentry)) { 758 if (dentry->d_inode) {
761 error = PTR_ERR(dentry); 759 rpc_close_pipes(dentry->d_inode);
762 goto out_release; 760 error = simple_unlink(dir, dentry);
763 } 761 }
764 d_drop(dentry); 762 inode_dir_notify(dir, DN_DELETE);
765 if (dentry->d_inode) {
766 rpc_close_pipes(dentry->d_inode);
767 error = simple_unlink(dir, dentry);
768 } 763 }
769 dput(dentry); 764 dput(dentry);
770 inode_dir_notify(dir, DN_DELETE);
771out_release:
772 mutex_unlock(&dir->i_mutex); 765 mutex_unlock(&dir->i_mutex);
773 rpc_release_path(&nd); 766 dput(parent);
774 return error; 767 return error;
775} 768}
776 769
@@ -864,7 +857,6 @@ int register_rpc_pipefs(void)
864 857
865void unregister_rpc_pipefs(void) 858void unregister_rpc_pipefs(void)
866{ 859{
867 if (kmem_cache_destroy(rpc_inode_cachep)) 860 kmem_cache_destroy(rpc_inode_cachep);
868 printk(KERN_WARNING "RPC: unable to free inode cache\n");
869 unregister_filesystem(&rpc_pipe_fs_type); 861 unregister_filesystem(&rpc_pipe_fs_type);
870} 862}
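
After this change the rpc_pipefs entry points are dentry-based: rpc_mkpipe() takes a parent dentry plus a name instead of a full path string, and rpc_unlink()/rpc_rmdir() take the dentry that creation returned. A hedged kernel-side sketch of the new calling convention; the pipe name, the example_pipe_ops table and the helper names are hypothetical:

static struct rpc_pipe_ops example_pipe_ops;	/* upcall/downcall handlers */

static struct dentry *example_pipe_create(struct dentry *parent, void *private)
{
	/* parent is a directory dentry, e.g. one returned by rpc_mkdir() */
	return rpc_mkpipe(parent, "example-pipe", private, &example_pipe_ops, 0);
}

static void example_pipe_destroy(struct dentry *pipe)
{
	rpc_unlink(pipe);	/* now takes the dentry, not a path string */
}
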
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5c3eee768504..a1ab4eed41f4 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -21,7 +21,6 @@
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22 22
23#include <linux/sunrpc/clnt.h> 23#include <linux/sunrpc/clnt.h>
24#include <linux/sunrpc/xprt.h>
25 24
26#ifdef RPC_DEBUG 25#ifdef RPC_DEBUG
27#define RPCDBG_FACILITY RPCDBG_SCHED 26#define RPCDBG_FACILITY RPCDBG_SCHED
@@ -45,12 +44,6 @@ static void rpciod_killall(void);
45static void rpc_async_schedule(void *); 44static void rpc_async_schedule(void *);
46 45
47/* 46/*
48 * RPC tasks that create another task (e.g. for contacting the portmapper)
49 * will wait on this queue for their child's completion
50 */
51static RPC_WAITQ(childq, "childq");
52
53/*
54 * RPC tasks sit here while waiting for conditions to improve. 47 * RPC tasks sit here while waiting for conditions to improve.
55 */ 48 */
56static RPC_WAITQ(delay_queue, "delayq"); 49static RPC_WAITQ(delay_queue, "delayq");
@@ -324,16 +317,6 @@ static void rpc_make_runnable(struct rpc_task *task)
324} 317}
325 318
326/* 319/*
327 * Place a newly initialized task on the workqueue.
328 */
329static inline void
330rpc_schedule_run(struct rpc_task *task)
331{
332 rpc_set_active(task);
333 rpc_make_runnable(task);
334}
335
336/*
337 * Prepare for sleeping on a wait queue. 320 * Prepare for sleeping on a wait queue.
338 * By always appending tasks to the list we ensure FIFO behavior. 321 * By always appending tasks to the list we ensure FIFO behavior.
339 * NB: An RPC task will only receive interrupt-driven events as long 322 * NB: An RPC task will only receive interrupt-driven events as long
@@ -559,24 +542,20 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
559 spin_unlock_bh(&queue->lock); 542 spin_unlock_bh(&queue->lock);
560} 543}
561 544
545static void __rpc_atrun(struct rpc_task *task)
546{
547 rpc_wake_up_task(task);
548}
549
562/* 550/*
563 * Run a task at a later time 551 * Run a task at a later time
564 */ 552 */
565static void __rpc_atrun(struct rpc_task *); 553void rpc_delay(struct rpc_task *task, unsigned long delay)
566void
567rpc_delay(struct rpc_task *task, unsigned long delay)
568{ 554{
569 task->tk_timeout = delay; 555 task->tk_timeout = delay;
570 rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun); 556 rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
571} 557}
572 558
573static void
574__rpc_atrun(struct rpc_task *task)
575{
576 task->tk_status = 0;
577 rpc_wake_up_task(task);
578}
579
580/* 559/*
581 * Helper to call task->tk_ops->rpc_call_prepare 560 * Helper to call task->tk_ops->rpc_call_prepare
582 */ 561 */
@@ -933,72 +912,6 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
933} 912}
934EXPORT_SYMBOL(rpc_run_task); 913EXPORT_SYMBOL(rpc_run_task);
935 914
936/**
937 * rpc_find_parent - find the parent of a child task.
938 * @child: child task
939 * @parent: parent task
940 *
941 * Checks that the parent task is still sleeping on the
942 * queue 'childq'. If so returns a pointer to the parent.
943 * Upon failure returns NULL.
944 *
945 * Caller must hold childq.lock
946 */
947static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
948{
949 struct rpc_task *task;
950 struct list_head *le;
951
952 task_for_each(task, le, &childq.tasks[0])
953 if (task == parent)
954 return parent;
955
956 return NULL;
957}
958
959static void rpc_child_exit(struct rpc_task *child, void *calldata)
960{
961 struct rpc_task *parent;
962
963 spin_lock_bh(&childq.lock);
964 if ((parent = rpc_find_parent(child, calldata)) != NULL) {
965 parent->tk_status = child->tk_status;
966 __rpc_wake_up_task(parent);
967 }
968 spin_unlock_bh(&childq.lock);
969}
970
971static const struct rpc_call_ops rpc_child_ops = {
972 .rpc_call_done = rpc_child_exit,
973};
974
975/*
976 * Note: rpc_new_task releases the client after a failure.
977 */
978struct rpc_task *
979rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
980{
981 struct rpc_task *task;
982
983 task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
984 if (!task)
985 goto fail;
986 return task;
987
988fail:
989 parent->tk_status = -ENOMEM;
990 return NULL;
991}
992
993void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
994{
995 spin_lock_bh(&childq.lock);
996 /* N.B. Is it possible for the child to have already finished? */
997 __rpc_sleep_on(&childq, task, func, NULL);
998 rpc_schedule_run(child);
999 spin_unlock_bh(&childq.lock);
1000}
1001
1002/* 915/*
1003 * Kill all tasks for the given client. 916 * Kill all tasks for the given client.
1004 * XXX: kill their descendants as well? 917 * XXX: kill their descendants as well?
@@ -1146,10 +1059,10 @@ rpc_destroy_mempool(void)
1146 mempool_destroy(rpc_buffer_mempool); 1059 mempool_destroy(rpc_buffer_mempool);
1147 if (rpc_task_mempool) 1060 if (rpc_task_mempool)
1148 mempool_destroy(rpc_task_mempool); 1061 mempool_destroy(rpc_task_mempool);
1149 if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp)) 1062 if (rpc_task_slabp)
1150 printk(KERN_INFO "rpc_task: not all structures were freed\n"); 1063 kmem_cache_destroy(rpc_task_slabp);
1151 if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp)) 1064 if (rpc_buffer_slabp)
1152 printk(KERN_INFO "rpc_buffers: not all structures were freed\n"); 1065 kmem_cache_destroy(rpc_buffer_slabp);
1153} 1066}
1154 1067
1155int 1068int
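For context on the rpc_delay() cleanup above: the helper just sets tk_timeout and parks the task on the private delay_queue, and __rpc_atrun() wakes it when the timer fires. Note that the reordered __rpc_atrun() no longer clears tk_status, so a caller that cares resets it itself. A hedged usage sketch, assuming a hypothetical call_retry_example() state function (not part of this patch):

#include <linux/sunrpc/sched.h>

/* Sketch only: back a task off for roughly a quarter second before retrying. */
static void call_retry_example(struct rpc_task *task)
{
	if (task->tk_status == -EAGAIN) {
		task->tk_status = 0;		/* clear the soft error ourselves */
		rpc_delay(task, HZ >> 2);	/* requeue on delay_queue for ~250ms */
		return;
	}
	/* ... normal processing continues here ... */
}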
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index eb330d4f66d6..6f17527b9e69 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -168,7 +168,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
168 return -1; 168 return -1;
169 if ((unsigned short)csum_fold(desc.csum)) 169 if ((unsigned short)csum_fold(desc.csum))
170 return -1; 170 return -1;
171 if (unlikely(skb->ip_summed == CHECKSUM_HW)) 171 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
172 netdev_rx_csum_fault(skb->dev); 172 netdev_rx_csum_fault(skb->dev);
173 return 0; 173 return 0;
174no_checksum: 174no_checksum:
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 15c2db26767b..bd98124c3a64 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -114,13 +114,8 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
114 */ 114 */
115struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) 115struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
116{ 116{
117 unsigned int ops = clnt->cl_maxproc;
118 size_t size = ops * sizeof(struct rpc_iostats);
119 struct rpc_iostats *new; 117 struct rpc_iostats *new;
120 118 new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
121 new = kmalloc(size, GFP_KERNEL);
122 if (new)
123 memset(new, 0 , size);
124 return new; 119 return new;
125} 120}
126EXPORT_SYMBOL(rpc_alloc_iostats); 121EXPORT_SYMBOL(rpc_alloc_iostats);
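The rpc_alloc_iostats() change above is the standard kmalloc()+memset() to kcalloc() conversion: kcalloc(n, size, flags) returns an n-element array already zeroed, or NULL on failure (including multiplication overflow). A minimal before/after sketch using a hypothetical struct, not taken from this patch:

#include <linux/slab.h>
#include <linux/string.h>

struct example_stat { unsigned long ops, bytes; };

/* Before: open-coded zeroed array allocation. */
static struct example_stat *alloc_stats_old(unsigned int nr)
{
	size_t size = nr * sizeof(struct example_stat);
	struct example_stat *p = kmalloc(size, GFP_KERNEL);

	if (p)
		memset(p, 0, size);
	return p;
}

/* After: kcalloc() zeroes the array and guards the multiplication. */
static struct example_stat *alloc_stats_new(unsigned int nr)
{
	return kcalloc(nr, sizeof(struct example_stat), GFP_KERNEL);
}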
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 769114f0f886..26c0531d7e25 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -6,7 +6,6 @@
6 * Copyright (C) 1997 Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1997 Olaf Kirch <okir@monad.swb.de>
7 */ 7 */
8 8
9#include <linux/config.h>
10#include <linux/module.h> 9#include <linux/module.h>
11 10
12#include <linux/types.h> 11#include <linux/types.h>
@@ -37,8 +36,6 @@ EXPORT_SYMBOL(rpc_wake_up_status);
37EXPORT_SYMBOL(rpc_release_task); 36EXPORT_SYMBOL(rpc_release_task);
38 37
39/* RPC client functions */ 38/* RPC client functions */
40EXPORT_SYMBOL(rpc_create_client);
41EXPORT_SYMBOL(rpc_new_client);
42EXPORT_SYMBOL(rpc_clone_client); 39EXPORT_SYMBOL(rpc_clone_client);
43EXPORT_SYMBOL(rpc_bind_new_program); 40EXPORT_SYMBOL(rpc_bind_new_program);
44EXPORT_SYMBOL(rpc_destroy_client); 41EXPORT_SYMBOL(rpc_destroy_client);
@@ -58,7 +55,6 @@ EXPORT_SYMBOL(rpc_queue_upcall);
58EXPORT_SYMBOL(rpc_mkpipe); 55EXPORT_SYMBOL(rpc_mkpipe);
59 56
60/* Client transport */ 57/* Client transport */
61EXPORT_SYMBOL(xprt_create_proto);
62EXPORT_SYMBOL(xprt_set_timeout); 58EXPORT_SYMBOL(xprt_set_timeout);
63 59
64/* Client credential cache */ 60/* Client credential cache */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b08419e1fc68..44b8d9d4c18a 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,9 +32,8 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
32 int vers; 32 int vers;
33 unsigned int xdrsize; 33 unsigned int xdrsize;
34 34
35 if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL))) 35 if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
36 return NULL; 36 return NULL;
37 memset(serv, 0, sizeof(*serv));
38 serv->sv_name = prog->pg_name; 37 serv->sv_name = prog->pg_name;
39 serv->sv_program = prog; 38 serv->sv_program = prog;
40 serv->sv_nrthreads = 1; 39 serv->sv_nrthreads = 1;
@@ -159,11 +158,10 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
159 struct svc_rqst *rqstp; 158 struct svc_rqst *rqstp;
160 int error = -ENOMEM; 159 int error = -ENOMEM;
161 160
162 rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL); 161 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
163 if (!rqstp) 162 if (!rqstp)
164 goto out; 163 goto out;
165 164
166 memset(rqstp, 0, sizeof(*rqstp));
167 init_waitqueue_head(&rqstp->rq_wait); 165 init_waitqueue_head(&rqstp->rq_wait);
168 166
169 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) 167 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
@@ -258,11 +256,11 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
258 struct kvec * argv = &rqstp->rq_arg.head[0]; 256 struct kvec * argv = &rqstp->rq_arg.head[0];
259 struct kvec * resv = &rqstp->rq_res.head[0]; 257 struct kvec * resv = &rqstp->rq_res.head[0];
260 kxdrproc_t xdr; 258 kxdrproc_t xdr;
261 u32 *statp; 259 __be32 *statp;
262 u32 dir, prog, vers, proc, 260 u32 dir, prog, vers, proc;
263 auth_stat, rpc_stat; 261 __be32 auth_stat, rpc_stat;
264 int auth_res; 262 int auth_res;
265 u32 *accept_statp; 263 __be32 *accept_statp;
266 264
267 rpc_stat = rpc_success; 265 rpc_stat = rpc_success;
268 266
@@ -280,19 +278,22 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
280 rqstp->rq_res.page_base = 0; 278 rqstp->rq_res.page_base = 0;
281 rqstp->rq_res.page_len = 0; 279 rqstp->rq_res.page_len = 0;
282 rqstp->rq_res.buflen = PAGE_SIZE; 280 rqstp->rq_res.buflen = PAGE_SIZE;
281 rqstp->rq_res.tail[0].iov_base = NULL;
283 rqstp->rq_res.tail[0].iov_len = 0; 282 rqstp->rq_res.tail[0].iov_len = 0;
283 /* Will be turned off only in gss privacy case: */
284 rqstp->rq_sendfile_ok = 1;
284 /* tcp needs a space for the record length... */ 285 /* tcp needs a space for the record length... */
285 if (rqstp->rq_prot == IPPROTO_TCP) 286 if (rqstp->rq_prot == IPPROTO_TCP)
286 svc_putu32(resv, 0); 287 svc_putnl(resv, 0);
287 288
288 rqstp->rq_xid = svc_getu32(argv); 289 rqstp->rq_xid = svc_getu32(argv);
289 svc_putu32(resv, rqstp->rq_xid); 290 svc_putu32(resv, rqstp->rq_xid);
290 291
291 dir = ntohl(svc_getu32(argv)); 292 dir = svc_getnl(argv);
292 vers = ntohl(svc_getu32(argv)); 293 vers = svc_getnl(argv);
293 294
294 /* First words of reply: */ 295 /* First words of reply: */
295 svc_putu32(resv, xdr_one); /* REPLY */ 296 svc_putnl(resv, 1); /* REPLY */
296 297
297 if (dir != 0) /* direction != CALL */ 298 if (dir != 0) /* direction != CALL */
298 goto err_bad_dir; 299 goto err_bad_dir;
@@ -302,11 +303,11 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
302 /* Save position in case we later decide to reject: */ 303 /* Save position in case we later decide to reject: */
303 accept_statp = resv->iov_base + resv->iov_len; 304 accept_statp = resv->iov_base + resv->iov_len;
304 305
305 svc_putu32(resv, xdr_zero); /* ACCEPT */ 306 svc_putnl(resv, 0); /* ACCEPT */
306 307
307 rqstp->rq_prog = prog = ntohl(svc_getu32(argv)); /* program number */ 308 rqstp->rq_prog = prog = svc_getnl(argv); /* program number */
308 rqstp->rq_vers = vers = ntohl(svc_getu32(argv)); /* version number */ 309 rqstp->rq_vers = vers = svc_getnl(argv); /* version number */
309 rqstp->rq_proc = proc = ntohl(svc_getu32(argv)); /* procedure number */ 310 rqstp->rq_proc = proc = svc_getnl(argv); /* procedure number */
310 311
311 progp = serv->sv_program; 312 progp = serv->sv_program;
312 313
@@ -360,7 +361,7 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
360 361
361 /* Build the reply header. */ 362 /* Build the reply header. */
362 statp = resv->iov_base +resv->iov_len; 363 statp = resv->iov_base +resv->iov_len;
363 svc_putu32(resv, rpc_success); /* RPC_SUCCESS */ 364 svc_putnl(resv, RPC_SUCCESS);
364 365
365 /* Bump per-procedure stats counter */ 366 /* Bump per-procedure stats counter */
366 procp->pc_count++; 367 procp->pc_count++;
@@ -438,10 +439,10 @@ err_bad_dir:
438 439
439err_bad_rpc: 440err_bad_rpc:
440 serv->sv_stats->rpcbadfmt++; 441 serv->sv_stats->rpcbadfmt++;
441 svc_putu32(resv, xdr_one); /* REJECT */ 442 svc_putnl(resv, 1); /* REJECT */
442 svc_putu32(resv, xdr_zero); /* RPC_MISMATCH */ 443 svc_putnl(resv, 0); /* RPC_MISMATCH */
443 svc_putu32(resv, xdr_two); /* Only RPCv2 supported */ 444 svc_putnl(resv, 2); /* Only RPCv2 supported */
444 svc_putu32(resv, xdr_two); 445 svc_putnl(resv, 2);
445 goto sendit; 446 goto sendit;
446 447
447err_bad_auth: 448err_bad_auth:
@@ -449,15 +450,15 @@ err_bad_auth:
449 serv->sv_stats->rpcbadauth++; 450 serv->sv_stats->rpcbadauth++;
450 /* Restore write pointer to location of accept status: */ 451 /* Restore write pointer to location of accept status: */
451 xdr_ressize_check(rqstp, accept_statp); 452 xdr_ressize_check(rqstp, accept_statp);
452 svc_putu32(resv, xdr_one); /* REJECT */ 453 svc_putnl(resv, 1); /* REJECT */
453 svc_putu32(resv, xdr_one); /* AUTH_ERROR */ 454 svc_putnl(resv, 1); /* AUTH_ERROR */
454 svc_putu32(resv, auth_stat); /* status */ 455 svc_putnl(resv, ntohl(auth_stat)); /* status */
455 goto sendit; 456 goto sendit;
456 457
457err_bad_prog: 458err_bad_prog:
458 dprintk("svc: unknown program %d\n", prog); 459 dprintk("svc: unknown program %d\n", prog);
459 serv->sv_stats->rpcbadfmt++; 460 serv->sv_stats->rpcbadfmt++;
460 svc_putu32(resv, rpc_prog_unavail); 461 svc_putnl(resv, RPC_PROG_UNAVAIL);
461 goto sendit; 462 goto sendit;
462 463
463err_bad_vers: 464err_bad_vers:
@@ -465,9 +466,9 @@ err_bad_vers:
465 printk("svc: unknown version (%d)\n", vers); 466 printk("svc: unknown version (%d)\n", vers);
466#endif 467#endif
467 serv->sv_stats->rpcbadfmt++; 468 serv->sv_stats->rpcbadfmt++;
468 svc_putu32(resv, rpc_prog_mismatch); 469 svc_putnl(resv, RPC_PROG_MISMATCH);
469 svc_putu32(resv, htonl(progp->pg_lovers)); 470 svc_putnl(resv, progp->pg_lovers);
470 svc_putu32(resv, htonl(progp->pg_hivers)); 471 svc_putnl(resv, progp->pg_hivers);
471 goto sendit; 472 goto sendit;
472 473
473err_bad_proc: 474err_bad_proc:
@@ -475,7 +476,7 @@ err_bad_proc:
475 printk("svc: unknown procedure (%d)\n", proc); 476 printk("svc: unknown procedure (%d)\n", proc);
476#endif 477#endif
477 serv->sv_stats->rpcbadfmt++; 478 serv->sv_stats->rpcbadfmt++;
478 svc_putu32(resv, rpc_proc_unavail); 479 svc_putnl(resv, RPC_PROC_UNAVAIL);
479 goto sendit; 480 goto sendit;
480 481
481err_garbage: 482err_garbage:
@@ -485,6 +486,6 @@ err_garbage:
485 rpc_stat = rpc_garbage_args; 486 rpc_stat = rpc_garbage_args;
486err_bad: 487err_bad:
487 serv->sv_stats->rpcbadfmt++; 488 serv->sv_stats->rpcbadfmt++;
488 svc_putu32(resv, rpc_stat); 489 svc_putnl(resv, ntohl(rpc_stat));
489 goto sendit; 490 goto sendit;
490} 491}
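The svc.c conversion above swaps open-coded ntohl(svc_getu32(...)) and svc_putu32(..., htonl(...)) pairs for the svc_getnl()/svc_putnl() helpers, so the words on the wire stay __be32 and only host-order u32 values cross the call boundary. Roughly, the helpers behave like the following sketch (an approximation for illustration, not the actual header; the real ones live in include/linux/sunrpc/svc.h):

#include <linux/sunrpc/svc.h>

/* Sketch: pull one XDR word from a kvec and return it in host order. */
static inline u32 example_getnl(struct kvec *iov)
{
	__be32 *p = iov->iov_base;
	__be32 val = *p++;

	iov->iov_base = p;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

/* Sketch: append one host-order value to a kvec as a network-order XDR word. */
static inline void example_putnl(struct kvec *iov, u32 val)
{
	__be32 *p = iov->iov_base + iov->iov_len;

	*p = htonl(val);
	iov->iov_len += sizeof(__be32);
}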
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 5b28c6176806..8f2320aded5c 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -35,14 +35,14 @@ static struct auth_ops *authtab[RPC_AUTH_MAXFLAVOR] = {
35}; 35};
36 36
37int 37int
38svc_authenticate(struct svc_rqst *rqstp, u32 *authp) 38svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
39{ 39{
40 rpc_authflavor_t flavor; 40 rpc_authflavor_t flavor;
41 struct auth_ops *aops; 41 struct auth_ops *aops;
42 42
43 *authp = rpc_auth_ok; 43 *authp = rpc_auth_ok;
44 44
45 flavor = ntohl(svc_getu32(&rqstp->rq_arg.head[0])); 45 flavor = svc_getnl(&rqstp->rq_arg.head[0]);
46 46
47 dprintk("svc: svc_authenticate (%d)\n", flavor); 47 dprintk("svc: svc_authenticate (%d)\n", flavor);
48 48
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 7e5707e2d6b6..1020d54b01d0 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -145,7 +145,7 @@ static void ip_map_request(struct cache_detail *cd,
145{ 145{
146 char text_addr[20]; 146 char text_addr[20];
147 struct ip_map *im = container_of(h, struct ip_map, h); 147 struct ip_map *im = container_of(h, struct ip_map, h);
148 __u32 addr = im->m_addr.s_addr; 148 __be32 addr = im->m_addr.s_addr;
149 149
150 snprintf(text_addr, 20, "%u.%u.%u.%u", 150 snprintf(text_addr, 20, "%u.%u.%u.%u",
151 ntohl(addr) >> 24 & 0xff, 151 ntohl(addr) >> 24 & 0xff,
@@ -249,10 +249,10 @@ static int ip_map_show(struct seq_file *m,
249 249
250 seq_printf(m, "%s %d.%d.%d.%d %s\n", 250 seq_printf(m, "%s %d.%d.%d.%d %s\n",
251 im->m_class, 251 im->m_class,
252 htonl(addr.s_addr) >> 24 & 0xff, 252 ntohl(addr.s_addr) >> 24 & 0xff,
253 htonl(addr.s_addr) >> 16 & 0xff, 253 ntohl(addr.s_addr) >> 16 & 0xff,
254 htonl(addr.s_addr) >> 8 & 0xff, 254 ntohl(addr.s_addr) >> 8 & 0xff,
255 htonl(addr.s_addr) >> 0 & 0xff, 255 ntohl(addr.s_addr) >> 0 & 0xff,
256 dom 256 dom
257 ); 257 );
258 return 0; 258 return 0;
@@ -410,7 +410,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
410} 410}
411 411
412static int 412static int
413svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp) 413svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
414{ 414{
415 struct kvec *argv = &rqstp->rq_arg.head[0]; 415 struct kvec *argv = &rqstp->rq_arg.head[0];
416 struct kvec *resv = &rqstp->rq_res.head[0]; 416 struct kvec *resv = &rqstp->rq_res.head[0];
@@ -427,7 +427,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp)
427 *authp = rpc_autherr_badcred; 427 *authp = rpc_autherr_badcred;
428 return SVC_DENIED; 428 return SVC_DENIED;
429 } 429 }
430 if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) { 430 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
431 dprintk("svc: bad null verf\n"); 431 dprintk("svc: bad null verf\n");
432 *authp = rpc_autherr_badverf; 432 *authp = rpc_autherr_badverf;
433 return SVC_DENIED; 433 return SVC_DENIED;
@@ -441,8 +441,8 @@ svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp)
441 return SVC_DROP; /* kmalloc failure - client must retry */ 441 return SVC_DROP; /* kmalloc failure - client must retry */
442 442
443 /* Put NULL verifier */ 443 /* Put NULL verifier */
444 svc_putu32(resv, RPC_AUTH_NULL); 444 svc_putnl(resv, RPC_AUTH_NULL);
445 svc_putu32(resv, 0); 445 svc_putnl(resv, 0);
446 446
447 return SVC_OK; 447 return SVC_OK;
448} 448}
@@ -472,7 +472,7 @@ struct auth_ops svcauth_null = {
472 472
473 473
474static int 474static int
475svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp) 475svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
476{ 476{
477 struct kvec *argv = &rqstp->rq_arg.head[0]; 477 struct kvec *argv = &rqstp->rq_arg.head[0];
478 struct kvec *resv = &rqstp->rq_res.head[0]; 478 struct kvec *resv = &rqstp->rq_res.head[0];
@@ -488,31 +488,31 @@ svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp)
488 488
489 svc_getu32(argv); /* length */ 489 svc_getu32(argv); /* length */
490 svc_getu32(argv); /* time stamp */ 490 svc_getu32(argv); /* time stamp */
491 slen = XDR_QUADLEN(ntohl(svc_getu32(argv))); /* machname length */ 491 slen = XDR_QUADLEN(svc_getnl(argv)); /* machname length */
492 if (slen > 64 || (len -= (slen + 3)*4) < 0) 492 if (slen > 64 || (len -= (slen + 3)*4) < 0)
493 goto badcred; 493 goto badcred;
494 argv->iov_base = (void*)((u32*)argv->iov_base + slen); /* skip machname */ 494 argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
495 argv->iov_len -= slen*4; 495 argv->iov_len -= slen*4;
496 496
497 cred->cr_uid = ntohl(svc_getu32(argv)); /* uid */ 497 cred->cr_uid = svc_getnl(argv); /* uid */
498 cred->cr_gid = ntohl(svc_getu32(argv)); /* gid */ 498 cred->cr_gid = svc_getnl(argv); /* gid */
499 slen = ntohl(svc_getu32(argv)); /* gids length */ 499 slen = svc_getnl(argv); /* gids length */
500 if (slen > 16 || (len -= (slen + 2)*4) < 0) 500 if (slen > 16 || (len -= (slen + 2)*4) < 0)
501 goto badcred; 501 goto badcred;
502 cred->cr_group_info = groups_alloc(slen); 502 cred->cr_group_info = groups_alloc(slen);
503 if (cred->cr_group_info == NULL) 503 if (cred->cr_group_info == NULL)
504 return SVC_DROP; 504 return SVC_DROP;
505 for (i = 0; i < slen; i++) 505 for (i = 0; i < slen; i++)
506 GROUP_AT(cred->cr_group_info, i) = ntohl(svc_getu32(argv)); 506 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
507 507
508 if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) { 508 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
509 *authp = rpc_autherr_badverf; 509 *authp = rpc_autherr_badverf;
510 return SVC_DENIED; 510 return SVC_DENIED;
511 } 511 }
512 512
513 /* Put NULL verifier */ 513 /* Put NULL verifier */
514 svc_putu32(resv, RPC_AUTH_NULL); 514 svc_putnl(resv, RPC_AUTH_NULL);
515 svc_putu32(resv, 0); 515 svc_putnl(resv, 0);
516 516
517 return SVC_OK; 517 return SVC_OK;
518 518
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a27905a0ad27..5b0fe1b66a23 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -388,7 +388,7 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
388 /* send head */ 388 /* send head */
389 if (slen == xdr->head[0].iov_len) 389 if (slen == xdr->head[0].iov_len)
390 flags = 0; 390 flags = 0;
391 len = sock->ops->sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags); 391 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
392 if (len != xdr->head[0].iov_len) 392 if (len != xdr->head[0].iov_len)
393 goto out; 393 goto out;
394 slen -= xdr->head[0].iov_len; 394 slen -= xdr->head[0].iov_len;
@@ -400,7 +400,7 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
400 while (pglen > 0) { 400 while (pglen > 0) {
401 if (slen == size) 401 if (slen == size)
402 flags = 0; 402 flags = 0;
403 result = sock->ops->sendpage(sock, *ppage, base, size, flags); 403 result = kernel_sendpage(sock, *ppage, base, size, flags);
404 if (result > 0) 404 if (result > 0)
405 len += result; 405 len += result;
406 if (result != size) 406 if (result != size)
@@ -413,7 +413,7 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
413 } 413 }
414 /* send tail */ 414 /* send tail */
415 if (xdr->tail[0].iov_len) { 415 if (xdr->tail[0].iov_len) {
416 result = sock->ops->sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage], 416 result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
417 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1), 417 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
418 xdr->tail[0].iov_len, 0); 418 xdr->tail[0].iov_len, 0);
419 419
@@ -434,13 +434,10 @@ out:
434static int 434static int
435svc_recv_available(struct svc_sock *svsk) 435svc_recv_available(struct svc_sock *svsk)
436{ 436{
437 mm_segment_t oldfs;
438 struct socket *sock = svsk->sk_sock; 437 struct socket *sock = svsk->sk_sock;
439 int avail, err; 438 int avail, err;
440 439
441 oldfs = get_fs(); set_fs(KERNEL_DS); 440 err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);
442 err = sock->ops->ioctl(sock, TIOCINQ, (unsigned long) &avail);
443 set_fs(oldfs);
444 441
445 return (err >= 0)? avail : err; 442 return (err >= 0)? avail : err;
446} 443}
@@ -472,7 +469,7 @@ svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
472 * at accept time. FIXME 469 * at accept time. FIXME
473 */ 470 */
474 alen = sizeof(rqstp->rq_addr); 471 alen = sizeof(rqstp->rq_addr);
475 sock->ops->getname(sock, (struct sockaddr *)&rqstp->rq_addr, &alen, 1); 472 kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen);
476 473
477 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", 474 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
478 rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len); 475 rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);
@@ -758,7 +755,6 @@ svc_tcp_accept(struct svc_sock *svsk)
758 struct svc_serv *serv = svsk->sk_server; 755 struct svc_serv *serv = svsk->sk_server;
759 struct socket *sock = svsk->sk_sock; 756 struct socket *sock = svsk->sk_sock;
760 struct socket *newsock; 757 struct socket *newsock;
761 const struct proto_ops *ops;
762 struct svc_sock *newsvsk; 758 struct svc_sock *newsvsk;
763 int err, slen; 759 int err, slen;
764 760
@@ -766,29 +762,23 @@ svc_tcp_accept(struct svc_sock *svsk)
766 if (!sock) 762 if (!sock)
767 return; 763 return;
768 764
769 err = sock_create_lite(PF_INET, SOCK_STREAM, IPPROTO_TCP, &newsock); 765 clear_bit(SK_CONN, &svsk->sk_flags);
770 if (err) { 766 err = kernel_accept(sock, &newsock, O_NONBLOCK);
767 if (err < 0) {
771 if (err == -ENOMEM) 768 if (err == -ENOMEM)
772 printk(KERN_WARNING "%s: no more sockets!\n", 769 printk(KERN_WARNING "%s: no more sockets!\n",
773 serv->sv_name); 770 serv->sv_name);
774 return; 771 else if (err != -EAGAIN && net_ratelimit())
775 }
776
777 dprintk("svc: tcp_accept %p allocated\n", newsock);
778 newsock->ops = ops = sock->ops;
779
780 clear_bit(SK_CONN, &svsk->sk_flags);
781 if ((err = ops->accept(sock, newsock, O_NONBLOCK)) < 0) {
782 if (err != -EAGAIN && net_ratelimit())
783 printk(KERN_WARNING "%s: accept failed (err %d)!\n", 772 printk(KERN_WARNING "%s: accept failed (err %d)!\n",
784 serv->sv_name, -err); 773 serv->sv_name, -err);
785 goto failed; /* aborted connection or whatever */ 774 return;
786 } 775 }
776
787 set_bit(SK_CONN, &svsk->sk_flags); 777 set_bit(SK_CONN, &svsk->sk_flags);
788 svc_sock_enqueue(svsk); 778 svc_sock_enqueue(svsk);
789 779
790 slen = sizeof(sin); 780 slen = sizeof(sin);
791 err = ops->getname(newsock, (struct sockaddr *) &sin, &slen, 1); 781 err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
792 if (err < 0) { 782 if (err < 0) {
793 if (net_ratelimit()) 783 if (net_ratelimit())
794 printk(KERN_WARNING "%s: peername failed (err %d)!\n", 784 printk(KERN_WARNING "%s: peername failed (err %d)!\n",
@@ -1040,7 +1030,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
1040{ 1030{
1041 struct xdr_buf *xbufp = &rqstp->rq_res; 1031 struct xdr_buf *xbufp = &rqstp->rq_res;
1042 int sent; 1032 int sent;
1043 u32 reclen; 1033 __be32 reclen;
1044 1034
1045 /* Set up the first element of the reply kvec. 1035 /* Set up the first element of the reply kvec.
1046 * Any other kvecs that may be in use have been taken 1036 * Any other kvecs that may be in use have been taken
@@ -1322,11 +1312,10 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1322 struct sock *inet; 1312 struct sock *inet;
1323 1313
1324 dprintk("svc: svc_setup_socket %p\n", sock); 1314 dprintk("svc: svc_setup_socket %p\n", sock);
1325 if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) { 1315 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
1326 *errp = -ENOMEM; 1316 *errp = -ENOMEM;
1327 return NULL; 1317 return NULL;
1328 } 1318 }
1329 memset(svsk, 0, sizeof(*svsk));
1330 1319
1331 inet = sock->sk; 1320 inet = sock->sk;
1332 1321
@@ -1404,17 +1393,15 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
1404 if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) 1393 if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
1405 return error; 1394 return error;
1406 1395
1407 if (sin != NULL) { 1396 if (type == SOCK_STREAM)
1408 if (type == SOCK_STREAM) 1397 sock->sk->sk_reuse = 1; /* allow address reuse */
1409 sock->sk->sk_reuse = 1; /* allow address reuse */ 1398 error = kernel_bind(sock, (struct sockaddr *) sin,
1410 error = sock->ops->bind(sock, (struct sockaddr *) sin, 1399 sizeof(*sin));
1411 sizeof(*sin)); 1400 if (error < 0)
1412 if (error < 0) 1401 goto bummer;
1413 goto bummer;
1414 }
1415 1402
1416 if (protocol == IPPROTO_TCP) { 1403 if (protocol == IPPROTO_TCP) {
1417 if ((error = sock->ops->listen(sock, 64)) < 0) 1404 if ((error = kernel_listen(sock, 64)) < 0)
1418 goto bummer; 1405 goto bummer;
1419 } 1406 }
1420 1407
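The svcsock.c changes above replace direct sock->ops->... calls (and the get_fs()/set_fs() dance around the ioctl) with the kernel_bind()/kernel_listen()/kernel_accept()/kernel_getpeername()/kernel_sendpage()/kernel_sock_ioctl() wrappers. A hedged sketch of a listener built from those wrappers, assuming a hypothetical example_listen() helper that is not part of this patch:

#include <linux/net.h>
#include <linux/in.h>
#include <linux/socket.h>

/* Sketch only: create, bind, and listen on a kernel-space TCP socket. */
static int example_listen(unsigned short port, struct socket **res)
{
	struct sockaddr_in sin = {
		.sin_family      = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port        = htons(port),
	};
	struct socket *sock;
	int err;

	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0)
		return err;

	err = kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
	if (err == 0)
		err = kernel_listen(sock, 64);
	if (err < 0) {
		sock_release(sock);
		return err;
	}
	*res = sock;
	return 0;
}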
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 1065904841fd..d89b048ad6bb 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -7,7 +7,6 @@
7 * impossible at the moment. 7 * impossible at the moment.
8 */ 8 */
9 9
10#include <linux/config.h>
11#include <linux/types.h> 10#include <linux/types.h>
12#include <linux/linkage.h> 11#include <linux/linkage.h>
13#include <linux/ctype.h> 12#include <linux/ctype.h>
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index bcbdf6430d5c..8142fdb8a930 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -19,8 +19,6 @@
19#include <linux/unistd.h> 19#include <linux/unistd.h>
20 20
21#include <linux/sunrpc/clnt.h> 21#include <linux/sunrpc/clnt.h>
22#include <linux/sunrpc/xprt.h>
23#include <linux/sunrpc/timer.h>
24 22
25#define RPC_RTO_MAX (60*HZ) 23#define RPC_RTO_MAX (60*HZ)
26#define RPC_RTO_INIT (HZ/5) 24#define RPC_RTO_INIT (HZ/5)
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index ca4bfa57e116..9022eb8b37ed 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -18,8 +18,8 @@
18/* 18/*
19 * XDR functions for basic NFS types 19 * XDR functions for basic NFS types
20 */ 20 */
21u32 * 21__be32 *
22xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj) 22xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
23{ 23{
24 unsigned int quadlen = XDR_QUADLEN(obj->len); 24 unsigned int quadlen = XDR_QUADLEN(obj->len);
25 25
@@ -29,8 +29,8 @@ xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
29 return p + XDR_QUADLEN(obj->len); 29 return p + XDR_QUADLEN(obj->len);
30} 30}
31 31
32u32 * 32__be32 *
33xdr_decode_netobj(u32 *p, struct xdr_netobj *obj) 33xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
34{ 34{
35 unsigned int len; 35 unsigned int len;
36 36
@@ -55,7 +55,7 @@ xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
55 * Returns the updated current XDR buffer position 55 * Returns the updated current XDR buffer position
56 * 56 *
57 */ 57 */
58u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes) 58__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
59{ 59{
60 if (likely(nbytes != 0)) { 60 if (likely(nbytes != 0)) {
61 unsigned int quadlen = XDR_QUADLEN(nbytes); 61 unsigned int quadlen = XDR_QUADLEN(nbytes);
@@ -79,21 +79,21 @@ EXPORT_SYMBOL(xdr_encode_opaque_fixed);
79 * 79 *
80 * Returns the updated current XDR buffer position 80 * Returns the updated current XDR buffer position
81 */ 81 */
82u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes) 82__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
83{ 83{
84 *p++ = htonl(nbytes); 84 *p++ = htonl(nbytes);
85 return xdr_encode_opaque_fixed(p, ptr, nbytes); 85 return xdr_encode_opaque_fixed(p, ptr, nbytes);
86} 86}
87EXPORT_SYMBOL(xdr_encode_opaque); 87EXPORT_SYMBOL(xdr_encode_opaque);
88 88
89u32 * 89__be32 *
90xdr_encode_string(u32 *p, const char *string) 90xdr_encode_string(__be32 *p, const char *string)
91{ 91{
92 return xdr_encode_array(p, string, strlen(string)); 92 return xdr_encode_array(p, string, strlen(string));
93} 93}
94 94
95u32 * 95__be32 *
96xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen) 96xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
97{ 97{
98 unsigned int len; 98 unsigned int len;
99 99
@@ -191,7 +191,6 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
191 do { 191 do {
192 /* Are any pointers crossing a page boundary? */ 192 /* Are any pointers crossing a page boundary? */
193 if (pgto_base == 0) { 193 if (pgto_base == 0) {
194 flush_dcache_page(*pgto);
195 pgto_base = PAGE_CACHE_SIZE; 194 pgto_base = PAGE_CACHE_SIZE;
196 pgto--; 195 pgto--;
197 } 196 }
@@ -211,11 +210,11 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
211 vto = kmap_atomic(*pgto, KM_USER0); 210 vto = kmap_atomic(*pgto, KM_USER0);
212 vfrom = kmap_atomic(*pgfrom, KM_USER1); 211 vfrom = kmap_atomic(*pgfrom, KM_USER1);
213 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 212 memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
213 flush_dcache_page(*pgto);
214 kunmap_atomic(vfrom, KM_USER1); 214 kunmap_atomic(vfrom, KM_USER1);
215 kunmap_atomic(vto, KM_USER0); 215 kunmap_atomic(vto, KM_USER0);
216 216
217 } while ((len -= copy) != 0); 217 } while ((len -= copy) != 0);
218 flush_dcache_page(*pgto);
219} 218}
220 219
221/* 220/*
@@ -433,7 +432,7 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
433 * of the buffer length, and takes care of adjusting the kvec 432 * of the buffer length, and takes care of adjusting the kvec
434 * length for us. 433 * length for us.
435 */ 434 */
436void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p) 435void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
437{ 436{
438 struct kvec *iov = buf->head; 437 struct kvec *iov = buf->head;
439 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len; 438 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
@@ -441,8 +440,8 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
441 BUG_ON(scratch_len < 0); 440 BUG_ON(scratch_len < 0);
442 xdr->buf = buf; 441 xdr->buf = buf;
443 xdr->iov = iov; 442 xdr->iov = iov;
444 xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len); 443 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
445 xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len); 444 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
446 BUG_ON(iov->iov_len > scratch_len); 445 BUG_ON(iov->iov_len > scratch_len);
447 446
448 if (p != xdr->p && p != NULL) { 447 if (p != xdr->p && p != NULL) {
@@ -466,10 +465,10 @@ EXPORT_SYMBOL(xdr_init_encode);
466 * bytes of data. If so, update the total xdr_buf length, and 465 * bytes of data. If so, update the total xdr_buf length, and
467 * adjust the length of the current kvec. 466 * adjust the length of the current kvec.
468 */ 467 */
469uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes) 468__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
470{ 469{
471 uint32_t *p = xdr->p; 470 __be32 *p = xdr->p;
472 uint32_t *q; 471 __be32 *q;
473 472
474 /* align nbytes on the next 32-bit boundary */ 473 /* align nbytes on the next 32-bit boundary */
475 nbytes += 3; 474 nbytes += 3;
@@ -525,7 +524,7 @@ EXPORT_SYMBOL(xdr_write_pages);
525 * @buf: pointer to XDR buffer from which to decode data 524 * @buf: pointer to XDR buffer from which to decode data
526 * @p: current pointer inside XDR buffer 525 * @p: current pointer inside XDR buffer
527 */ 526 */
528void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p) 527void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
529{ 528{
530 struct kvec *iov = buf->head; 529 struct kvec *iov = buf->head;
531 unsigned int len = iov->iov_len; 530 unsigned int len = iov->iov_len;
@@ -535,7 +534,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
535 xdr->buf = buf; 534 xdr->buf = buf;
536 xdr->iov = iov; 535 xdr->iov = iov;
537 xdr->p = p; 536 xdr->p = p;
538 xdr->end = (uint32_t *)((char *)iov->iov_base + len); 537 xdr->end = (__be32 *)((char *)iov->iov_base + len);
539} 538}
540EXPORT_SYMBOL(xdr_init_decode); 539EXPORT_SYMBOL(xdr_init_decode);
541 540
@@ -549,10 +548,10 @@ EXPORT_SYMBOL(xdr_init_decode);
549 * If so return the current pointer, then update the current 548 * If so return the current pointer, then update the current
550 * pointer position. 549 * pointer position.
551 */ 550 */
552uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) 551__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
553{ 552{
554 uint32_t *p = xdr->p; 553 __be32 *p = xdr->p;
555 uint32_t *q = p + XDR_QUADLEN(nbytes); 554 __be32 *q = p + XDR_QUADLEN(nbytes);
556 555
557 if (unlikely(q > xdr->end || q < p)) 556 if (unlikely(q > xdr->end || q < p))
558 return NULL; 557 return NULL;
@@ -568,8 +567,7 @@ EXPORT_SYMBOL(xdr_inline_decode);
568 * 567 *
569 * Moves data beyond the current pointer position from the XDR head[] buffer 568 * Moves data beyond the current pointer position from the XDR head[] buffer
570 * into the page list. Any data that lies beyond current position + "len" 569 * into the page list. Any data that lies beyond current position + "len"
571 * bytes is moved into the XDR tail[]. The current pointer is then 570 * bytes is moved into the XDR tail[].
572 * repositioned at the beginning of the XDR tail.
573 */ 571 */
574void xdr_read_pages(struct xdr_stream *xdr, unsigned int len) 572void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
575{ 573{
@@ -601,11 +599,36 @@ void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
601 * Position current pointer at beginning of tail, and 599 * Position current pointer at beginning of tail, and
602 * set remaining message length. 600 * set remaining message length.
603 */ 601 */
604 xdr->p = (uint32_t *)((char *)iov->iov_base + padding); 602 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
605 xdr->end = (uint32_t *)((char *)iov->iov_base + end); 603 xdr->end = (__be32 *)((char *)iov->iov_base + end);
606} 604}
607EXPORT_SYMBOL(xdr_read_pages); 605EXPORT_SYMBOL(xdr_read_pages);
608 606
607/**
608 * xdr_enter_page - decode data from the XDR page
609 * @xdr: pointer to xdr_stream struct
610 * @len: number of bytes of page data
611 *
612 * Moves data beyond the current pointer position from the XDR head[] buffer
613 * into the page list. Any data that lies beyond current position + "len"
614 * bytes is moved into the XDR tail[]. The current pointer is then
615 * repositioned at the beginning of the first XDR page.
616 */
617void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
618{
619 char * kaddr = page_address(xdr->buf->pages[0]);
620 xdr_read_pages(xdr, len);
621 /*
 622 * Position current pointer at beginning of the first page, and
 623 * set remaining message length.
624 */
625 if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
626 len = PAGE_CACHE_SIZE - xdr->buf->page_base;
627 xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
628 xdr->end = (__be32 *)((char *)xdr->p + len);
629}
630EXPORT_SYMBOL(xdr_enter_page);
631
609static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; 632static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
610 633
611void 634void
@@ -720,7 +743,7 @@ out:
720int 743int
721xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) 744xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
722{ 745{
723 u32 raw; 746 __be32 raw;
724 int status; 747 int status;
725 748
726 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj)); 749 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
@@ -733,7 +756,7 @@ xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
733int 756int
734xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) 757xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
735{ 758{
736 u32 raw = htonl(obj); 759 __be32 raw = htonl(obj);
737 760
738 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); 761 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
739} 762}
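The u32 to __be32 conversion in xdr.c (and throughout this patch set) is about sparse endianness checking: __be32 marks a 32-bit big-endian wire value, and mixing it with a host-order u32 without htonl()/ntohl() draws a warning when sparse runs with endian checking enabled (typically make C=2 CF=-D__CHECK_ENDIAN__). A small illustrative sketch, not taken from this patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Encode a host-order length as an XDR word; returns the next position. */
static __be32 *example_encode_len(__be32 *p, u32 len)
{
	*p++ = htonl(len);	/* ok: u32 converted to __be32 via htonl() */
	return p;
}

/* Decode an XDR word back into a host-order value. */
static u32 example_decode_len(const __be32 *p)
{
	return ntohl(*p);	/* ok: __be32 converted to u32 via ntohl() */
	/* "return *p;" here would earn a sparse warning about restricted __be32 */
}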
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4dd5b3cfe754..80857470dc11 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -41,7 +41,7 @@
41#include <linux/types.h> 41#include <linux/types.h>
42#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/workqueue.h> 43#include <linux/workqueue.h>
44#include <linux/random.h> 44#include <linux/net.h>
45 45
46#include <linux/sunrpc/clnt.h> 46#include <linux/sunrpc/clnt.h>
47#include <linux/sunrpc/metrics.h> 47#include <linux/sunrpc/metrics.h>
@@ -534,7 +534,7 @@ void xprt_connect(struct rpc_task *task)
534 dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid, 534 dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
535 xprt, (xprt_connected(xprt) ? "is" : "is not")); 535 xprt, (xprt_connected(xprt) ? "is" : "is not"));
536 536
537 if (!xprt->addr.sin_port) { 537 if (!xprt_bound(xprt)) {
538 task->tk_status = -EIO; 538 task->tk_status = -EIO;
539 return; 539 return;
540 } 540 }
@@ -585,13 +585,6 @@ static void xprt_connect_status(struct rpc_task *task)
585 task->tk_pid, -task->tk_status, task->tk_client->cl_server); 585 task->tk_pid, -task->tk_status, task->tk_client->cl_server);
586 xprt_release_write(xprt, task); 586 xprt_release_write(xprt, task);
587 task->tk_status = -EIO; 587 task->tk_status = -EIO;
588 return;
589 }
590
591 /* if soft mounted, just cause this RPC to fail */
592 if (RPC_IS_SOFT(task)) {
593 xprt_release_write(xprt, task);
594 task->tk_status = -EIO;
595 } 588 }
596} 589}
597 590
@@ -601,7 +594,7 @@ static void xprt_connect_status(struct rpc_task *task)
601 * @xid: RPC XID of incoming reply 594 * @xid: RPC XID of incoming reply
602 * 595 *
603 */ 596 */
604struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) 597struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
605{ 598{
606 struct list_head *pos; 599 struct list_head *pos;
607 600
@@ -707,12 +700,9 @@ out_unlock:
707 return err; 700 return err;
708} 701}
709 702
710void 703void xprt_end_transmit(struct rpc_task *task)
711xprt_abort_transmit(struct rpc_task *task)
712{ 704{
713 struct rpc_xprt *xprt = task->tk_xprt; 705 xprt_release_write(task->tk_xprt, task);
714
715 xprt_release_write(xprt, task);
716} 706}
717 707
718/** 708/**
@@ -761,8 +751,6 @@ void xprt_transmit(struct rpc_task *task)
761 task->tk_status = -ENOTCONN; 751 task->tk_status = -ENOTCONN;
762 else if (!req->rq_received) 752 else if (!req->rq_received)
763 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); 753 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
764
765 xprt->ops->release_xprt(xprt, task);
766 spin_unlock_bh(&xprt->transport_lock); 754 spin_unlock_bh(&xprt->transport_lock);
767 return; 755 return;
768 } 756 }
@@ -772,18 +760,8 @@ void xprt_transmit(struct rpc_task *task)
772 * schedq, and being picked up by a parallel run of rpciod(). 760 * schedq, and being picked up by a parallel run of rpciod().
773 */ 761 */
774 task->tk_status = status; 762 task->tk_status = status;
775 763 if (status == -ECONNREFUSED)
776 switch (status) {
777 case -ECONNREFUSED:
778 rpc_sleep_on(&xprt->sending, task, NULL, NULL); 764 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
779 case -EAGAIN:
780 case -ENOTCONN:
781 return;
782 default:
783 break;
784 }
785 xprt_release_write(xprt, task);
786 return;
787} 765}
788 766
789static inline void do_xprt_reserve(struct rpc_task *task) 767static inline void do_xprt_reserve(struct rpc_task *task)
@@ -823,14 +801,14 @@ void xprt_reserve(struct rpc_task *task)
823 spin_unlock(&xprt->reserve_lock); 801 spin_unlock(&xprt->reserve_lock);
824} 802}
825 803
826static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) 804static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
827{ 805{
828 return xprt->xid++; 806 return xprt->xid++;
829} 807}
830 808
831static inline void xprt_init_xid(struct rpc_xprt *xprt) 809static inline void xprt_init_xid(struct rpc_xprt *xprt)
832{ 810{
833 get_random_bytes(&xprt->xid, sizeof(xprt->xid)); 811 xprt->xid = net_random();
834} 812}
835 813
836static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) 814static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
@@ -844,6 +822,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
844 req->rq_bufsize = 0; 822 req->rq_bufsize = 0;
845 req->rq_xid = xprt_alloc_xid(xprt); 823 req->rq_xid = xprt_alloc_xid(xprt);
846 req->rq_release_snd_buf = NULL; 824 req->rq_release_snd_buf = NULL;
825 xprt_reset_majortimeo(req);
847 dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, 826 dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
848 req, ntohl(req->rq_xid)); 827 req, ntohl(req->rq_xid));
849} 828}
@@ -902,17 +881,32 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i
902 to->to_exponential = 0; 881 to->to_exponential = 0;
903} 882}
904 883
905static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) 884/**
885 * xprt_create_transport - create an RPC transport
886 * @proto: requested transport protocol
887 * @ap: remote peer address
888 * @size: length of address
889 * @to: timeout parameters
890 *
891 */
892struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to)
906{ 893{
907 int result; 894 int result;
908 struct rpc_xprt *xprt; 895 struct rpc_xprt *xprt;
909 struct rpc_rqst *req; 896 struct rpc_rqst *req;
910 897
911 if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) 898 if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) {
899 dprintk("RPC: xprt_create_transport: no memory\n");
912 return ERR_PTR(-ENOMEM); 900 return ERR_PTR(-ENOMEM);
913 memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ 901 }
914 902 if (size <= sizeof(xprt->addr)) {
915 xprt->addr = *ap; 903 memcpy(&xprt->addr, ap, size);
904 xprt->addrlen = size;
905 } else {
906 kfree(xprt);
907 dprintk("RPC: xprt_create_transport: address too large\n");
908 return ERR_PTR(-EBADF);
909 }
916 910
917 switch (proto) { 911 switch (proto) {
918 case IPPROTO_UDP: 912 case IPPROTO_UDP:
@@ -924,14 +918,15 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
924 default: 918 default:
925 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", 919 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
926 proto); 920 proto);
927 result = -EIO; 921 return ERR_PTR(-EIO);
928 break;
929 } 922 }
930 if (result) { 923 if (result) {
931 kfree(xprt); 924 kfree(xprt);
925 dprintk("RPC: xprt_create_transport: failed, %d\n", result);
932 return ERR_PTR(result); 926 return ERR_PTR(result);
933 } 927 }
934 928
929 kref_init(&xprt->kref);
935 spin_lock_init(&xprt->transport_lock); 930 spin_lock_init(&xprt->transport_lock);
936 spin_lock_init(&xprt->reserve_lock); 931 spin_lock_init(&xprt->reserve_lock);
937 932
@@ -944,6 +939,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
944 xprt->last_used = jiffies; 939 xprt->last_used = jiffies;
945 xprt->cwnd = RPC_INITCWND; 940 xprt->cwnd = RPC_INITCWND;
946 941
942 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
947 rpc_init_wait_queue(&xprt->pending, "xprt_pending"); 943 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
948 rpc_init_wait_queue(&xprt->sending, "xprt_sending"); 944 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
949 rpc_init_wait_queue(&xprt->resend, "xprt_resend"); 945 rpc_init_wait_queue(&xprt->resend, "xprt_resend");
@@ -957,41 +953,43 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
957 953
958 dprintk("RPC: created transport %p with %u slots\n", xprt, 954 dprintk("RPC: created transport %p with %u slots\n", xprt,
959 xprt->max_reqs); 955 xprt->max_reqs);
960
961 return xprt;
962}
963 956
964/**
965 * xprt_create_proto - create an RPC client transport
966 * @proto: requested transport protocol
967 * @sap: remote peer's address
968 * @to: timeout parameters for new transport
969 *
970 */
971struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
972{
973 struct rpc_xprt *xprt;
974
975 xprt = xprt_setup(proto, sap, to);
976 if (IS_ERR(xprt))
977 dprintk("RPC: xprt_create_proto failed\n");
978 else
979 dprintk("RPC: xprt_create_proto created xprt %p\n", xprt);
980 return xprt; 957 return xprt;
981} 958}
982 959
983/** 960/**
984 * xprt_destroy - destroy an RPC transport, killing off all requests. 961 * xprt_destroy - destroy an RPC transport, killing off all requests.
985 * @xprt: transport to destroy 962 * @kref: kref for the transport to destroy
986 * 963 *
987 */ 964 */
988int xprt_destroy(struct rpc_xprt *xprt) 965static void xprt_destroy(struct kref *kref)
989{ 966{
967 struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);
968
990 dprintk("RPC: destroying transport %p\n", xprt); 969 dprintk("RPC: destroying transport %p\n", xprt);
991 xprt->shutdown = 1; 970 xprt->shutdown = 1;
992 del_timer_sync(&xprt->timer); 971 del_timer_sync(&xprt->timer);
993 xprt->ops->destroy(xprt); 972 xprt->ops->destroy(xprt);
994 kfree(xprt); 973 kfree(xprt);
974}
995 975
996 return 0; 976/**
977 * xprt_put - release a reference to an RPC transport.
978 * @xprt: pointer to the transport
979 *
980 */
981void xprt_put(struct rpc_xprt *xprt)
982{
983 kref_put(&xprt->kref, xprt_destroy);
984}
985
986/**
987 * xprt_get - return a reference to an RPC transport.
988 * @xprt: pointer to the transport
989 *
990 */
991struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
992{
993 kref_get(&xprt->kref);
994 return xprt;
997} 995}
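The xprt.c rework above moves transport lifetime onto a struct kref: kref_init() in xprt_create_transport(), xprt_get()/xprt_put() wrapping kref_get()/kref_put(), and xprt_destroy() demoted to the kref release callback. The general pattern, as a hedged sketch on a hypothetical object rather than the real rpc_xprt:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
	/* ... payload ... */
};

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);			/* runs when the last reference is dropped */
}

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}

static struct example_obj *example_get(struct example_obj *obj)
{
	kref_get(&obj->kref);
	return obj;
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->kref, example_release);	/* may call example_release() */
}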
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 4b4e7dfdff14..28100e019225 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -125,6 +125,47 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
125} 125}
126#endif 126#endif
127 127
128static void xs_format_peer_addresses(struct rpc_xprt *xprt)
129{
130 struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
131 char *buf;
132
133 buf = kzalloc(20, GFP_KERNEL);
134 if (buf) {
135 snprintf(buf, 20, "%u.%u.%u.%u",
136 NIPQUAD(addr->sin_addr.s_addr));
137 }
138 xprt->address_strings[RPC_DISPLAY_ADDR] = buf;
139
140 buf = kzalloc(8, GFP_KERNEL);
141 if (buf) {
142 snprintf(buf, 8, "%u",
143 ntohs(addr->sin_port));
144 }
145 xprt->address_strings[RPC_DISPLAY_PORT] = buf;
146
147 if (xprt->prot == IPPROTO_UDP)
148 xprt->address_strings[RPC_DISPLAY_PROTO] = "udp";
149 else
150 xprt->address_strings[RPC_DISPLAY_PROTO] = "tcp";
151
152 buf = kzalloc(48, GFP_KERNEL);
153 if (buf) {
154 snprintf(buf, 48, "addr=%u.%u.%u.%u port=%u proto=%s",
155 NIPQUAD(addr->sin_addr.s_addr),
156 ntohs(addr->sin_port),
157 xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
158 }
159 xprt->address_strings[RPC_DISPLAY_ALL] = buf;
160}
161
162static void xs_free_peer_addresses(struct rpc_xprt *xprt)
163{
164 kfree(xprt->address_strings[RPC_DISPLAY_ADDR]);
165 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
166 kfree(xprt->address_strings[RPC_DISPLAY_ALL]);
167}
168
128#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) 169#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
129 170
130static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) 171static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
@@ -174,7 +215,6 @@ static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int a
174 struct page **ppage = xdr->pages; 215 struct page **ppage = xdr->pages;
175 unsigned int len, pglen = xdr->page_len; 216 unsigned int len, pglen = xdr->page_len;
176 int err, ret = 0; 217 int err, ret = 0;
177 ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
178 218
179 if (unlikely(!sock)) 219 if (unlikely(!sock))
180 return -ENOTCONN; 220 return -ENOTCONN;
@@ -207,7 +247,6 @@ static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int a
207 base &= ~PAGE_CACHE_MASK; 247 base &= ~PAGE_CACHE_MASK;
208 } 248 }
209 249
210 sendpage = sock->ops->sendpage ? : sock_no_sendpage;
211 do { 250 do {
212 int flags = XS_SENDMSG_FLAGS; 251 int flags = XS_SENDMSG_FLAGS;
213 252
@@ -220,10 +259,7 @@ static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int a
220 if (pglen != len || xdr->tail[0].iov_len != 0) 259 if (pglen != len || xdr->tail[0].iov_len != 0)
221 flags |= MSG_MORE; 260 flags |= MSG_MORE;
222 261
223 /* Hmm... We might be dealing with highmem pages */ 262 err = kernel_sendpage(sock, *ppage, base, len, flags);
224 if (PageHighMem(*ppage))
225 sendpage = sock_no_sendpage;
226 err = sendpage(sock, *ppage, base, len, flags);
227 if (ret == 0) 263 if (ret == 0)
228 ret = err; 264 ret = err;
229 else if (err > 0) 265 else if (err > 0)
@@ -300,7 +336,7 @@ static int xs_udp_send_request(struct rpc_task *task)
300 336
301 req->rq_xtime = jiffies; 337 req->rq_xtime = jiffies;
302 status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, 338 status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
303 sizeof(xprt->addr), xdr, req->rq_bytes_sent); 339 xprt->addrlen, xdr, req->rq_bytes_sent);
304 340
305 dprintk("RPC: xs_udp_send_request(%u) = %d\n", 341 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
306 xdr->len - req->rq_bytes_sent, status); 342 xdr->len - req->rq_bytes_sent, status);
@@ -414,6 +450,33 @@ static int xs_tcp_send_request(struct rpc_task *task)
414} 450}
415 451
416/** 452/**
453 * xs_tcp_release_xprt - clean up after a tcp transmission
454 * @xprt: transport
455 * @task: rpc task
456 *
457 * This cleans up if an error causes us to abort the transmission of a request.
458 * In this case, the socket may need to be reset in order to avoid confusing
459 * the server.
460 */
461static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
462{
463 struct rpc_rqst *req;
464
465 if (task != xprt->snd_task)
466 return;
467 if (task == NULL)
468 goto out_release;
469 req = task->tk_rqstp;
470 if (req->rq_bytes_sent == 0)
471 goto out_release;
472 if (req->rq_bytes_sent == req->rq_snd_buf.len)
473 goto out_release;
474 set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
475out_release:
476 xprt_release_xprt(xprt, task);
477}
478
479/**
417 * xs_close - close a socket 480 * xs_close - close a socket
418 * @xprt: transport 481 * @xprt: transport
419 * 482 *
@@ -463,6 +526,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
463 526
464 xprt_disconnect(xprt); 527 xprt_disconnect(xprt);
465 xs_close(xprt); 528 xs_close(xprt);
529 xs_free_peer_addresses(xprt);
466 kfree(xprt->slot); 530 kfree(xprt->slot);
467} 531}
468 532
@@ -484,7 +548,8 @@ static void xs_udp_data_ready(struct sock *sk, int len)
484 struct rpc_rqst *rovr; 548 struct rpc_rqst *rovr;
485 struct sk_buff *skb; 549 struct sk_buff *skb;
486 int err, repsize, copied; 550 int err, repsize, copied;
487 u32 _xid, *xp; 551 u32 _xid;
552 __be32 *xp;
488 553
489 read_lock(&sk->sk_callback_lock); 554 read_lock(&sk->sk_callback_lock);
490 dprintk("RPC: xs_udp_data_ready...\n"); 555 dprintk("RPC: xs_udp_data_ready...\n");
@@ -930,6 +995,26 @@ static void xs_udp_timer(struct rpc_task *task)
930 xprt_adjust_cwnd(task, -ETIMEDOUT); 995 xprt_adjust_cwnd(task, -ETIMEDOUT);
931} 996}
932 997
998static unsigned short xs_get_random_port(void)
999{
1000 unsigned short range = xprt_max_resvport - xprt_min_resvport;
1001 unsigned short rand = (unsigned short) net_random() % range;
1002 return rand + xprt_min_resvport;
1003}
1004
1005/**
1006 * xs_print_peer_address - format an IPv4 address for printing
1007 * @xprt: generic transport
1008 * @format: flags field indicating which parts of the address to render
1009 */
1010static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format)
1011{
1012 if (xprt->address_strings[format] != NULL)
1013 return xprt->address_strings[format];
1014 else
1015 return "unprintable";
1016}
1017
933/** 1018/**
934 * xs_set_port - reset the port number in the remote endpoint address 1019 * xs_set_port - reset the port number in the remote endpoint address
935 * @xprt: generic transport 1020 * @xprt: generic transport
@@ -938,8 +1023,11 @@ static void xs_udp_timer(struct rpc_task *task)
938 */ 1023 */
939static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) 1024static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
940{ 1025{
1026 struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr;
1027
941 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); 1028 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
942 xprt->addr.sin_port = htons(port); 1029
1030 sap->sin_port = htons(port);
943} 1031}
944 1032
945static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) 1033static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
@@ -952,7 +1040,7 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
952 1040
953 do { 1041 do {
954 myaddr.sin_port = htons(port); 1042 myaddr.sin_port = htons(port);
955 err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, 1043 err = kernel_bind(sock, (struct sockaddr *) &myaddr,
956 sizeof(myaddr)); 1044 sizeof(myaddr));
957 if (err == 0) { 1045 if (err == 0) {
958 xprt->port = port; 1046 xprt->port = port;
@@ -982,11 +1070,9 @@ static void xs_udp_connect_worker(void *args)
982 struct socket *sock = xprt->sock; 1070 struct socket *sock = xprt->sock;
983 int err, status = -EIO; 1071 int err, status = -EIO;
984 1072
985 if (xprt->shutdown || xprt->addr.sin_port == 0) 1073 if (xprt->shutdown || !xprt_bound(xprt))
986 goto out; 1074 goto out;
987 1075
988 dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt);
989
990 /* Start by resetting any existing state */ 1076 /* Start by resetting any existing state */
991 xs_close(xprt); 1077 xs_close(xprt);
992 1078
@@ -1000,6 +1086,9 @@ static void xs_udp_connect_worker(void *args)
1000 goto out; 1086 goto out;
1001 } 1087 }
1002 1088
1089 dprintk("RPC: worker connecting xprt %p to address: %s\n",
1090 xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
1091
1003 if (!xprt->inet) { 1092 if (!xprt->inet) {
1004 struct sock *sk = sock->sk; 1093 struct sock *sk = sock->sk;
1005 1094
@@ -1047,7 +1136,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
1047 */ 1136 */
1048 memset(&any, 0, sizeof(any)); 1137 memset(&any, 0, sizeof(any));
1049 any.sa_family = AF_UNSPEC; 1138 any.sa_family = AF_UNSPEC;
1050 result = sock->ops->connect(sock, &any, sizeof(any), 0); 1139 result = kernel_connect(sock, &any, sizeof(any), 0);
1051 if (result) 1140 if (result)
1052 dprintk("RPC: AF_UNSPEC connect return code %d\n", 1141 dprintk("RPC: AF_UNSPEC connect return code %d\n",
1053 result); 1142 result);
@@ -1065,11 +1154,9 @@ static void xs_tcp_connect_worker(void *args)
1065 struct socket *sock = xprt->sock; 1154 struct socket *sock = xprt->sock;
1066 int err, status = -EIO; 1155 int err, status = -EIO;
1067 1156
1068 if (xprt->shutdown || xprt->addr.sin_port == 0) 1157 if (xprt->shutdown || !xprt_bound(xprt))
1069 goto out; 1158 goto out;
1070 1159
1071 dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt);
1072
1073 if (!xprt->sock) { 1160 if (!xprt->sock) {
1074 /* start from scratch */ 1161 /* start from scratch */
1075 if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { 1162 if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
@@ -1085,6 +1172,9 @@ static void xs_tcp_connect_worker(void *args)
1085 /* "close" the socket, preserving the local port */ 1172 /* "close" the socket, preserving the local port */
1086 xs_tcp_reuse_connection(xprt); 1173 xs_tcp_reuse_connection(xprt);
1087 1174
1175 dprintk("RPC: worker connecting xprt %p to address: %s\n",
1176 xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
1177
1088 if (!xprt->inet) { 1178 if (!xprt->inet) {
1089 struct sock *sk = sock->sk; 1179 struct sock *sk = sock->sk;
1090 1180
@@ -1117,8 +1207,8 @@ static void xs_tcp_connect_worker(void *args)
1117 /* Tell the socket layer to start connecting... */ 1207 /* Tell the socket layer to start connecting... */
1118 xprt->stat.connect_count++; 1208 xprt->stat.connect_count++;
1119 xprt->stat.connect_start = jiffies; 1209 xprt->stat.connect_start = jiffies;
1120 status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, 1210 status = kernel_connect(sock, (struct sockaddr *) &xprt->addr,
1121 sizeof(xprt->addr), O_NONBLOCK); 1211 xprt->addrlen, O_NONBLOCK);
1122 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 1212 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
1123 xprt, -status, xprt_connected(xprt), sock->sk->sk_state); 1213 xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
1124 if (status < 0) { 1214 if (status < 0) {
@@ -1226,8 +1316,10 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1226 1316
1227static struct rpc_xprt_ops xs_udp_ops = { 1317static struct rpc_xprt_ops xs_udp_ops = {
1228 .set_buffer_size = xs_udp_set_buffer_size, 1318 .set_buffer_size = xs_udp_set_buffer_size,
1319 .print_addr = xs_print_peer_address,
1229 .reserve_xprt = xprt_reserve_xprt_cong, 1320 .reserve_xprt = xprt_reserve_xprt_cong,
1230 .release_xprt = xprt_release_xprt_cong, 1321 .release_xprt = xprt_release_xprt_cong,
1322 .rpcbind = rpc_getport,
1231 .set_port = xs_set_port, 1323 .set_port = xs_set_port,
1232 .connect = xs_connect, 1324 .connect = xs_connect,
1233 .buf_alloc = rpc_malloc, 1325 .buf_alloc = rpc_malloc,
@@ -1242,8 +1334,10 @@ static struct rpc_xprt_ops xs_udp_ops = {
1242}; 1334};
1243 1335
1244static struct rpc_xprt_ops xs_tcp_ops = { 1336static struct rpc_xprt_ops xs_tcp_ops = {
1337 .print_addr = xs_print_peer_address,
1245 .reserve_xprt = xprt_reserve_xprt, 1338 .reserve_xprt = xprt_reserve_xprt,
1246 .release_xprt = xprt_release_xprt, 1339 .release_xprt = xs_tcp_release_xprt,
1340 .rpcbind = rpc_getport,
1247 .set_port = xs_set_port, 1341 .set_port = xs_set_port,
1248 .connect = xs_connect, 1342 .connect = xs_connect,
1249 .buf_alloc = rpc_malloc, 1343 .buf_alloc = rpc_malloc,
@@ -1264,20 +1358,20 @@ static struct rpc_xprt_ops xs_tcp_ops = {
1264int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) 1358int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1265{ 1359{
1266 size_t slot_table_size; 1360 size_t slot_table_size;
1267 1361 struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
1268 dprintk("RPC: setting up udp-ipv4 transport...\n");
1269 1362
1270 xprt->max_reqs = xprt_udp_slot_table_entries; 1363 xprt->max_reqs = xprt_udp_slot_table_entries;
1271 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1364 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
1272 xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1365 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
1273 if (xprt->slot == NULL) 1366 if (xprt->slot == NULL)
1274 return -ENOMEM; 1367 return -ENOMEM;
1275 memset(xprt->slot, 0, slot_table_size); 1368
 1369 	if (ntohs(addr->sin_port) != 0)
1370 xprt_set_bound(xprt);
1371 xprt->port = xs_get_random_port();
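
Both setup routines repeat the same bound-at-creation probe; a hedged consolidation (the helper name is illustrative, not part of the patch) makes the intent explicit and keeps the easy-to-misplace ntohs() parenthesization in one spot:

/* Illustrative helper, not in this patch: decide at setup time whether the
 * caller already supplied a destination port.  A non-zero port marks the
 * transport bound; port 0 leaves it for ->rpcbind (rpc_getport) to discover
 * and apply via ->set_port later.
 */
static void example_init_port_state(struct rpc_xprt *xprt)
{
	struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;

	if (ntohs(addr->sin_port) != 0)		/* ntohs() wraps sin_port, not the comparison */
		xprt_set_bound(xprt);
	xprt->port = xs_get_random_port();	/* starting point for the local reserved port search */
}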
1276 1372
1277 xprt->prot = IPPROTO_UDP; 1373 xprt->prot = IPPROTO_UDP;
1278 xprt->port = xprt_max_resvport;
1279 xprt->tsh_size = 0; 1374 xprt->tsh_size = 0;
1280 xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
1281 /* XXX: header size can vary due to auth type, IPv6, etc. */ 1375 /* XXX: header size can vary due to auth type, IPv6, etc. */
1282 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 1376 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
1283 1377
@@ -1294,6 +1388,10 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1294 else 1388 else
1295 xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); 1389 xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);
1296 1390
1391 xs_format_peer_addresses(xprt);
1392 dprintk("RPC: set up transport to address %s\n",
1393 xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
1394
1297 return 0; 1395 return 0;
1298} 1396}
1299 1397
@@ -1306,20 +1404,20 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1306int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) 1404int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1307{ 1405{
1308 size_t slot_table_size; 1406 size_t slot_table_size;
1309 1407 struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
1310 dprintk("RPC: setting up tcp-ipv4 transport...\n");
1311 1408
1312 xprt->max_reqs = xprt_tcp_slot_table_entries; 1409 xprt->max_reqs = xprt_tcp_slot_table_entries;
1313 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1410 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
1314 xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1411 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
1315 if (xprt->slot == NULL) 1412 if (xprt->slot == NULL)
1316 return -ENOMEM; 1413 return -ENOMEM;
1317 memset(xprt->slot, 0, slot_table_size); 1414
1415 if (ntohs(addr->sin_port) != 0)
1416 xprt_set_bound(xprt);
1417 xprt->port = xs_get_random_port();
1318 1418
1319 xprt->prot = IPPROTO_TCP; 1419 xprt->prot = IPPROTO_TCP;
1320 xprt->port = xprt_max_resvport;
1321 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 1420 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
1322 xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
1323 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 1421 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
1324 1422
1325 INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); 1423 INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
@@ -1335,5 +1433,9 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1335 else 1433 else
1336 xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); 1434 xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);
1337 1435
1436 xs_format_peer_addresses(xprt);
1437 dprintk("RPC: set up transport to address %s\n",
1438 xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
1439
1338 return 0; 1440 return 0;
1339} 1441}
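
Finally, a rough sketch of what a formatter such as xs_format_peer_addresses might do to populate the string cache consumed above. Only the RPC_DISPLAY_ALL slot is shown; the buffer size and layout are assumptions, and this is not claimed to be the patch's actual implementation.

/* Hedged sketch only: build one human-readable string for the peer and
 * park it where xs_print_peer_address() can find it.  A NULL entry simply
 * makes that helper fall back to "unprintable".
 */
static void example_format_peer_address(struct rpc_xprt *xprt)
{
	struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr;
	char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (buf != NULL)
		snprintf(buf, 64, "addr=%u.%u.%u.%u port=%u proto=%s",
			 NIPQUAD(sap->sin_addr.s_addr),
			 ntohs(sap->sin_port),
			 xprt->prot == IPPROTO_UDP ? "udp" : "tcp");

	xprt->address_strings[RPC_DISPLAY_ALL] = buf;
}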