diff options
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/auth.c | 2 | ||||
-rw-r--r-- | net/sunrpc/auth_gss/auth_gss.c | 6 | ||||
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_mech.c | 10 | ||||
-rw-r--r-- | net/sunrpc/auth_gss/gss_spkm3_mech.c | 10 | ||||
-rw-r--r-- | net/sunrpc/auth_gss/gss_spkm3_seal.c | 11 | ||||
-rw-r--r-- | net/sunrpc/auth_gss/gss_spkm3_token.c | 3 | ||||
-rw-r--r-- | net/sunrpc/auth_gss/gss_spkm3_unseal.c | 2 | ||||
-rw-r--r-- | net/sunrpc/auth_gss/svcauth_gss.c | 38 | ||||
-rw-r--r-- | net/sunrpc/auth_unix.c | 2 | ||||
-rw-r--r-- | net/sunrpc/cache.c | 5 | ||||
-rw-r--r-- | net/sunrpc/clnt.c | 114 | ||||
-rw-r--r-- | net/sunrpc/pmap_clnt.c | 17 | ||||
-rw-r--r-- | net/sunrpc/rpc_pipe.c | 99 | ||||
-rw-r--r-- | net/sunrpc/sched.c | 222 | ||||
-rw-r--r-- | net/sunrpc/sunrpc_syms.c | 4 | ||||
-rw-r--r-- | net/sunrpc/svc.c | 9 | ||||
-rw-r--r-- | net/sunrpc/svcauth_unix.c | 14 | ||||
-rw-r--r-- | net/sunrpc/svcsock.c | 7 | ||||
-rw-r--r-- | net/sunrpc/xdr.c | 21 | ||||
-rw-r--r-- | net/sunrpc/xprt.c | 66 | ||||
-rw-r--r-- | net/sunrpc/xprtsock.c | 33 |
21 files changed, 400 insertions, 295 deletions
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 8c7756036e..9ac1b8c26c 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -94,7 +94,7 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) | |||
94 | struct rpc_cred_cache *new; | 94 | struct rpc_cred_cache *new; |
95 | int i; | 95 | int i; |
96 | 96 | ||
97 | new = (struct rpc_cred_cache *)kmalloc(sizeof(*new), GFP_KERNEL); | 97 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
98 | if (!new) | 98 | if (!new) |
99 | return -ENOMEM; | 99 | return -ENOMEM; |
100 | for (i = 0; i < RPC_CREDCACHE_NR; i++) | 100 | for (i = 0; i < RPC_CREDCACHE_NR; i++) |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index f44f46f1d8..8d782282ec 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -638,7 +638,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) | |||
638 | gss_msg); | 638 | gss_msg); |
639 | atomic_inc(&gss_msg->count); | 639 | atomic_inc(&gss_msg->count); |
640 | gss_unhash_msg(gss_msg); | 640 | gss_unhash_msg(gss_msg); |
641 | if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) { | 641 | if (msg->errno == -ETIMEDOUT) { |
642 | unsigned long now = jiffies; | 642 | unsigned long now = jiffies; |
643 | if (time_after(now, ratelimit)) { | 643 | if (time_after(now, ratelimit)) { |
644 | printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n" | 644 | printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n" |
@@ -786,7 +786,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | |||
786 | cred->gc_flags = 0; | 786 | cred->gc_flags = 0; |
787 | cred->gc_base.cr_ops = &gss_credops; | 787 | cred->gc_base.cr_ops = &gss_credops; |
788 | cred->gc_service = gss_auth->service; | 788 | cred->gc_service = gss_auth->service; |
789 | err = gss_create_upcall(gss_auth, cred); | 789 | do { |
790 | err = gss_create_upcall(gss_auth, cred); | ||
791 | } while (err == -EAGAIN); | ||
790 | if (err < 0) | 792 | if (err < 0) |
791 | goto out_err; | 793 | goto out_err; |
792 | 794 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 5f1f806a0b..129e2bd36a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -97,13 +97,17 @@ get_key(const void *p, const void *end, struct crypto_tfm **res) | |||
97 | alg_mode = CRYPTO_TFM_MODE_CBC; | 97 | alg_mode = CRYPTO_TFM_MODE_CBC; |
98 | break; | 98 | break; |
99 | default: | 99 | default: |
100 | dprintk("RPC: get_key: unsupported algorithm %d\n", alg); | 100 | printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); |
101 | goto out_err_free_key; | 101 | goto out_err_free_key; |
102 | } | 102 | } |
103 | if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) | 103 | if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { |
104 | printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); | ||
104 | goto out_err_free_key; | 105 | goto out_err_free_key; |
105 | if (crypto_cipher_setkey(*res, key.data, key.len)) | 106 | } |
107 | if (crypto_cipher_setkey(*res, key.data, key.len)) { | ||
108 | printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); | ||
106 | goto out_err_free_tfm; | 109 | goto out_err_free_tfm; |
110 | } | ||
107 | 111 | ||
108 | kfree(key.data); | 112 | kfree(key.data); |
109 | return p; | 113 | return p; |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 39b3edc146..58400807d4 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
@@ -111,14 +111,18 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) | |||
111 | setkey = 0; | 111 | setkey = 0; |
112 | break; | 112 | break; |
113 | default: | 113 | default: |
114 | dprintk("RPC: SPKM3 get_key: unsupported algorithm %d", *resalg); | 114 | dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg); |
115 | goto out_err_free_key; | 115 | goto out_err_free_key; |
116 | } | 116 | } |
117 | if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) | 117 | if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { |
118 | printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name); | ||
118 | goto out_err_free_key; | 119 | goto out_err_free_key; |
120 | } | ||
119 | if (setkey) { | 121 | if (setkey) { |
120 | if (crypto_cipher_setkey(*res, key.data, key.len)) | 122 | if (crypto_cipher_setkey(*res, key.data, key.len)) { |
123 | printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name); | ||
121 | goto out_err_free_tfm; | 124 | goto out_err_free_tfm; |
125 | } | ||
122 | } | 126 | } |
123 | 127 | ||
124 | if(key.len > 0) | 128 | if(key.len > 0) |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index d1e12b25d6..86fbf7c3e3 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c | |||
@@ -59,7 +59,7 @@ spkm3_make_token(struct spkm3_ctx *ctx, | |||
59 | char tokhdrbuf[25]; | 59 | char tokhdrbuf[25]; |
60 | struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; | 60 | struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; |
61 | struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf}; | 61 | struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf}; |
62 | int tmsglen, tokenlen = 0; | 62 | int tokenlen = 0; |
63 | unsigned char *ptr; | 63 | unsigned char *ptr; |
64 | s32 now; | 64 | s32 now; |
65 | int ctxelen = 0, ctxzbit = 0; | 65 | int ctxelen = 0, ctxzbit = 0; |
@@ -92,24 +92,23 @@ spkm3_make_token(struct spkm3_ctx *ctx, | |||
92 | } | 92 | } |
93 | 93 | ||
94 | if (toktype == SPKM_MIC_TOK) { | 94 | if (toktype == SPKM_MIC_TOK) { |
95 | tmsglen = 0; | ||
96 | /* Calculate checksum over the mic-header */ | 95 | /* Calculate checksum over the mic-header */ |
97 | asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit); | 96 | asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit); |
98 | spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data, | 97 | spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data, |
99 | ctxelen, ctxzbit); | 98 | ctxelen, ctxzbit); |
100 | 99 | ||
101 | if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len, | 100 | if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len, |
102 | text, &md5cksum)) | 101 | text, 0, &md5cksum)) |
103 | goto out_err; | 102 | goto out_err; |
104 | 103 | ||
105 | asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit); | 104 | asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit); |
106 | tokenlen = 10 + ctxelen + 1 + 2 + md5elen + 1; | 105 | tokenlen = 10 + ctxelen + 1 + md5elen + 1; |
107 | 106 | ||
108 | /* Create token header using generic routines */ | 107 | /* Create token header using generic routines */ |
109 | token->len = g_token_size(&ctx->mech_used, tokenlen + tmsglen); | 108 | token->len = g_token_size(&ctx->mech_used, tokenlen); |
110 | 109 | ||
111 | ptr = token->data; | 110 | ptr = token->data; |
112 | g_make_token_header(&ctx->mech_used, tokenlen + tmsglen, &ptr); | 111 | g_make_token_header(&ctx->mech_used, tokenlen, &ptr); |
113 | 112 | ||
114 | spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit); | 113 | spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit); |
115 | } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */ | 114 | } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */ |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c index 1f824578d7..af0d7ce746 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_token.c +++ b/net/sunrpc/auth_gss/gss_spkm3_token.c | |||
@@ -182,6 +182,7 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct | |||
182 | * *tokp points to the beginning of the SPKM_MIC token described | 182 | * *tokp points to the beginning of the SPKM_MIC token described |
183 | * in rfc 2025, section 3.2.1: | 183 | * in rfc 2025, section 3.2.1: |
184 | * | 184 | * |
185 | * toklen is the inner token length | ||
185 | */ | 186 | */ |
186 | void | 187 | void |
187 | spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit) | 188 | spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit) |
@@ -189,7 +190,7 @@ spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hd | |||
189 | unsigned char *ict = *tokp; | 190 | unsigned char *ict = *tokp; |
190 | 191 | ||
191 | *(u8 *)ict++ = 0xa4; | 192 | *(u8 *)ict++ = 0xa4; |
192 | *(u8 *)ict++ = toklen - 2; | 193 | *(u8 *)ict++ = toklen; |
193 | memcpy(ict, mic_hdr->data, mic_hdr->len); | 194 | memcpy(ict, mic_hdr->data, mic_hdr->len); |
194 | ict += mic_hdr->len; | 195 | ict += mic_hdr->len; |
195 | 196 | ||
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c index 241d5b30df..96851b0ba1 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c | |||
@@ -95,7 +95,7 @@ spkm3_read_token(struct spkm3_ctx *ctx, | |||
95 | ret = GSS_S_DEFECTIVE_TOKEN; | 95 | ret = GSS_S_DEFECTIVE_TOKEN; |
96 | code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2, | 96 | code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2, |
97 | mic_hdrlen + 2, | 97 | mic_hdrlen + 2, |
98 | message_buffer, &md5cksum); | 98 | message_buffer, 0, &md5cksum); |
99 | 99 | ||
100 | if (code) | 100 | if (code) |
101 | goto out; | 101 | goto out; |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index e4ada15ed8..23632d84d8 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -420,7 +420,8 @@ static int rsc_parse(struct cache_detail *cd, | |||
420 | gss_mech_put(gm); | 420 | gss_mech_put(gm); |
421 | goto out; | 421 | goto out; |
422 | } | 422 | } |
423 | if (gss_import_sec_context(buf, len, gm, &rsci.mechctx)) { | 423 | status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); |
424 | if (status) { | ||
424 | gss_mech_put(gm); | 425 | gss_mech_put(gm); |
425 | goto out; | 426 | goto out; |
426 | } | 427 | } |
@@ -586,6 +587,20 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, | |||
586 | } | 587 | } |
587 | 588 | ||
588 | static int | 589 | static int |
590 | gss_write_null_verf(struct svc_rqst *rqstp) | ||
591 | { | ||
592 | u32 *p; | ||
593 | |||
594 | svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_NULL)); | ||
595 | p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; | ||
596 | /* don't really need to check if head->iov_len > PAGE_SIZE ... */ | ||
597 | *p++ = 0; | ||
598 | if (!xdr_ressize_check(rqstp, p)) | ||
599 | return -1; | ||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | static int | ||
589 | gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) | 604 | gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) |
590 | { | 605 | { |
591 | u32 xdr_seq; | 606 | u32 xdr_seq; |
@@ -741,6 +756,21 @@ svcauth_gss_set_client(struct svc_rqst *rqstp) | |||
741 | return SVC_OK; | 756 | return SVC_OK; |
742 | } | 757 | } |
743 | 758 | ||
759 | static inline int | ||
760 | gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip) | ||
761 | { | ||
762 | struct rsc *rsci; | ||
763 | |||
764 | if (rsip->major_status != GSS_S_COMPLETE) | ||
765 | return gss_write_null_verf(rqstp); | ||
766 | rsci = gss_svc_searchbyctx(&rsip->out_handle); | ||
767 | if (rsci == NULL) { | ||
768 | rsip->major_status = GSS_S_NO_CONTEXT; | ||
769 | return gss_write_null_verf(rqstp); | ||
770 | } | ||
771 | return gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN); | ||
772 | } | ||
773 | |||
744 | /* | 774 | /* |
745 | * Accept an rpcsec packet. | 775 | * Accept an rpcsec packet. |
746 | * If context establishment, punt to user space | 776 | * If context establishment, punt to user space |
@@ -876,11 +906,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp) | |||
876 | case -ENOENT: | 906 | case -ENOENT: |
877 | goto drop; | 907 | goto drop; |
878 | case 0: | 908 | case 0: |
879 | rsci = gss_svc_searchbyctx(&rsip->out_handle); | 909 | if (gss_write_init_verf(rqstp, rsip)) |
880 | if (!rsci) { | ||
881 | goto drop; | ||
882 | } | ||
883 | if (gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN)) | ||
884 | goto drop; | 910 | goto drop; |
885 | if (resv->iov_len + 4 > PAGE_SIZE) | 911 | if (resv->iov_len + 4 > PAGE_SIZE) |
886 | goto drop; | 912 | goto drop; |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 890fb5ea0d..1b3ed4fd19 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -70,7 +70,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
70 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", | 70 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", |
71 | acred->uid, acred->gid); | 71 | acred->uid, acred->gid); |
72 | 72 | ||
73 | if (!(cred = (struct unx_cred *) kmalloc(sizeof(*cred), GFP_KERNEL))) | 73 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) |
74 | return ERR_PTR(-ENOMEM); | 74 | return ERR_PTR(-ENOMEM); |
75 | 75 | ||
76 | atomic_set(&cred->uc_count, 1); | 76 | atomic_set(&cred->uc_count, 1); |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index f509e99927..dcaa0c4453 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -575,12 +575,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) | |||
575 | if (rp->q.list.next == &cd->queue) { | 575 | if (rp->q.list.next == &cd->queue) { |
576 | spin_unlock(&queue_lock); | 576 | spin_unlock(&queue_lock); |
577 | up(&queue_io_sem); | 577 | up(&queue_io_sem); |
578 | if (rp->offset) | 578 | BUG_ON(rp->offset); |
579 | BUG(); | ||
580 | return 0; | 579 | return 0; |
581 | } | 580 | } |
582 | rq = container_of(rp->q.list.next, struct cache_request, q.list); | 581 | rq = container_of(rp->q.list.next, struct cache_request, q.list); |
583 | if (rq->q.reader) BUG(); | 582 | BUG_ON(rq->q.reader); |
584 | if (rp->offset == 0) | 583 | if (rp->offset == 0) |
585 | rq->readers++; | 584 | rq->readers++; |
586 | spin_unlock(&queue_lock); | 585 | spin_unlock(&queue_lock); |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 61c3abeacc..d2f0550c4b 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -118,7 +118,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, | |||
118 | goto out_err; | 118 | goto out_err; |
119 | 119 | ||
120 | err = -ENOMEM; | 120 | err = -ENOMEM; |
121 | clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL); | 121 | clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); |
122 | if (!clnt) | 122 | if (!clnt) |
123 | goto out_err; | 123 | goto out_err; |
124 | memset(clnt, 0, sizeof(*clnt)); | 124 | memset(clnt, 0, sizeof(*clnt)); |
@@ -225,7 +225,7 @@ rpc_clone_client(struct rpc_clnt *clnt) | |||
225 | { | 225 | { |
226 | struct rpc_clnt *new; | 226 | struct rpc_clnt *new; |
227 | 227 | ||
228 | new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL); | 228 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
229 | if (!new) | 229 | if (!new) |
230 | goto out_no_clnt; | 230 | goto out_no_clnt; |
231 | memcpy(new, clnt, sizeof(*new)); | 231 | memcpy(new, clnt, sizeof(*new)); |
@@ -268,7 +268,8 @@ rpc_shutdown_client(struct rpc_clnt *clnt) | |||
268 | clnt->cl_oneshot = 0; | 268 | clnt->cl_oneshot = 0; |
269 | clnt->cl_dead = 0; | 269 | clnt->cl_dead = 0; |
270 | rpc_killall_tasks(clnt); | 270 | rpc_killall_tasks(clnt); |
271 | sleep_on_timeout(&destroy_wait, 1*HZ); | 271 | wait_event_timeout(destroy_wait, |
272 | !atomic_read(&clnt->cl_users), 1*HZ); | ||
272 | } | 273 | } |
273 | 274 | ||
274 | if (atomic_read(&clnt->cl_users) < 0) { | 275 | if (atomic_read(&clnt->cl_users) < 0) { |
@@ -374,19 +375,23 @@ out: | |||
374 | * Default callback for async RPC calls | 375 | * Default callback for async RPC calls |
375 | */ | 376 | */ |
376 | static void | 377 | static void |
377 | rpc_default_callback(struct rpc_task *task) | 378 | rpc_default_callback(struct rpc_task *task, void *data) |
378 | { | 379 | { |
379 | } | 380 | } |
380 | 381 | ||
382 | static const struct rpc_call_ops rpc_default_ops = { | ||
383 | .rpc_call_done = rpc_default_callback, | ||
384 | }; | ||
385 | |||
381 | /* | 386 | /* |
382 | * Export the signal mask handling for synchronous code that | 387 | * Export the signal mask handling for synchronous code that |
383 | * sleeps on RPC calls | 388 | * sleeps on RPC calls |
384 | */ | 389 | */ |
385 | #define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL)) | 390 | #define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM)) |
386 | 391 | ||
387 | static void rpc_save_sigmask(sigset_t *oldset, int intr) | 392 | static void rpc_save_sigmask(sigset_t *oldset, int intr) |
388 | { | 393 | { |
389 | unsigned long sigallow = 0; | 394 | unsigned long sigallow = sigmask(SIGKILL); |
390 | sigset_t sigmask; | 395 | sigset_t sigmask; |
391 | 396 | ||
392 | /* Block all signals except those listed in sigallow */ | 397 | /* Block all signals except those listed in sigallow */ |
@@ -432,7 +437,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | |||
432 | BUG_ON(flags & RPC_TASK_ASYNC); | 437 | BUG_ON(flags & RPC_TASK_ASYNC); |
433 | 438 | ||
434 | status = -ENOMEM; | 439 | status = -ENOMEM; |
435 | task = rpc_new_task(clnt, NULL, flags); | 440 | task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); |
436 | if (task == NULL) | 441 | if (task == NULL) |
437 | goto out; | 442 | goto out; |
438 | 443 | ||
@@ -442,14 +447,15 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | |||
442 | rpc_call_setup(task, msg, 0); | 447 | rpc_call_setup(task, msg, 0); |
443 | 448 | ||
444 | /* Set up the call info struct and execute the task */ | 449 | /* Set up the call info struct and execute the task */ |
445 | if (task->tk_status == 0) { | 450 | status = task->tk_status; |
451 | if (status == 0) { | ||
452 | atomic_inc(&task->tk_count); | ||
446 | status = rpc_execute(task); | 453 | status = rpc_execute(task); |
447 | } else { | 454 | if (status == 0) |
448 | status = task->tk_status; | 455 | status = task->tk_status; |
449 | rpc_release_task(task); | ||
450 | } | 456 | } |
451 | |||
452 | rpc_restore_sigmask(&oldset); | 457 | rpc_restore_sigmask(&oldset); |
458 | rpc_release_task(task); | ||
453 | out: | 459 | out: |
454 | return status; | 460 | return status; |
455 | } | 461 | } |
@@ -459,7 +465,7 @@ out: | |||
459 | */ | 465 | */ |
460 | int | 466 | int |
461 | rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, | 467 | rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, |
462 | rpc_action callback, void *data) | 468 | const struct rpc_call_ops *tk_ops, void *data) |
463 | { | 469 | { |
464 | struct rpc_task *task; | 470 | struct rpc_task *task; |
465 | sigset_t oldset; | 471 | sigset_t oldset; |
@@ -472,12 +478,9 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, | |||
472 | flags |= RPC_TASK_ASYNC; | 478 | flags |= RPC_TASK_ASYNC; |
473 | 479 | ||
474 | /* Create/initialize a new RPC task */ | 480 | /* Create/initialize a new RPC task */ |
475 | if (!callback) | ||
476 | callback = rpc_default_callback; | ||
477 | status = -ENOMEM; | 481 | status = -ENOMEM; |
478 | if (!(task = rpc_new_task(clnt, callback, flags))) | 482 | if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) |
479 | goto out; | 483 | goto out; |
480 | task->tk_calldata = data; | ||
481 | 484 | ||
482 | /* Mask signals on GSS_AUTH upcalls */ | 485 | /* Mask signals on GSS_AUTH upcalls */ |
483 | rpc_task_sigmask(task, &oldset); | 486 | rpc_task_sigmask(task, &oldset); |
@@ -511,7 +514,7 @@ rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) | |||
511 | if (task->tk_status == 0) | 514 | if (task->tk_status == 0) |
512 | task->tk_action = call_start; | 515 | task->tk_action = call_start; |
513 | else | 516 | else |
514 | task->tk_action = NULL; | 517 | task->tk_action = rpc_exit_task; |
515 | } | 518 | } |
516 | 519 | ||
517 | void | 520 | void |
@@ -536,6 +539,18 @@ size_t rpc_max_payload(struct rpc_clnt *clnt) | |||
536 | } | 539 | } |
537 | EXPORT_SYMBOL(rpc_max_payload); | 540 | EXPORT_SYMBOL(rpc_max_payload); |
538 | 541 | ||
542 | /** | ||
543 | * rpc_force_rebind - force transport to check that remote port is unchanged | ||
544 | * @clnt: client to rebind | ||
545 | * | ||
546 | */ | ||
547 | void rpc_force_rebind(struct rpc_clnt *clnt) | ||
548 | { | ||
549 | if (clnt->cl_autobind) | ||
550 | clnt->cl_port = 0; | ||
551 | } | ||
552 | EXPORT_SYMBOL(rpc_force_rebind); | ||
553 | |||
539 | /* | 554 | /* |
540 | * Restart an (async) RPC call. Usually called from within the | 555 | * Restart an (async) RPC call. Usually called from within the |
541 | * exit handler. | 556 | * exit handler. |
@@ -642,24 +657,26 @@ call_reserveresult(struct rpc_task *task) | |||
642 | 657 | ||
643 | /* | 658 | /* |
644 | * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. | 659 | * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. |
645 | * (Note: buffer memory is freed in rpc_task_release). | 660 | * (Note: buffer memory is freed in xprt_release). |
646 | */ | 661 | */ |
647 | static void | 662 | static void |
648 | call_allocate(struct rpc_task *task) | 663 | call_allocate(struct rpc_task *task) |
649 | { | 664 | { |
665 | struct rpc_rqst *req = task->tk_rqstp; | ||
666 | struct rpc_xprt *xprt = task->tk_xprt; | ||
650 | unsigned int bufsiz; | 667 | unsigned int bufsiz; |
651 | 668 | ||
652 | dprintk("RPC: %4d call_allocate (status %d)\n", | 669 | dprintk("RPC: %4d call_allocate (status %d)\n", |
653 | task->tk_pid, task->tk_status); | 670 | task->tk_pid, task->tk_status); |
654 | task->tk_action = call_bind; | 671 | task->tk_action = call_bind; |
655 | if (task->tk_buffer) | 672 | if (req->rq_buffer) |
656 | return; | 673 | return; |
657 | 674 | ||
658 | /* FIXME: compute buffer requirements more exactly using | 675 | /* FIXME: compute buffer requirements more exactly using |
659 | * auth->au_wslack */ | 676 | * auth->au_wslack */ |
660 | bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE; | 677 | bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE; |
661 | 678 | ||
662 | if (rpc_malloc(task, bufsiz << 1) != NULL) | 679 | if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) |
663 | return; | 680 | return; |
664 | printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); | 681 | printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); |
665 | 682 | ||
@@ -702,14 +719,14 @@ call_encode(struct rpc_task *task) | |||
702 | task->tk_pid, task->tk_status); | 719 | task->tk_pid, task->tk_status); |
703 | 720 | ||
704 | /* Default buffer setup */ | 721 | /* Default buffer setup */ |
705 | bufsiz = task->tk_bufsize >> 1; | 722 | bufsiz = req->rq_bufsize >> 1; |
706 | sndbuf->head[0].iov_base = (void *)task->tk_buffer; | 723 | sndbuf->head[0].iov_base = (void *)req->rq_buffer; |
707 | sndbuf->head[0].iov_len = bufsiz; | 724 | sndbuf->head[0].iov_len = bufsiz; |
708 | sndbuf->tail[0].iov_len = 0; | 725 | sndbuf->tail[0].iov_len = 0; |
709 | sndbuf->page_len = 0; | 726 | sndbuf->page_len = 0; |
710 | sndbuf->len = 0; | 727 | sndbuf->len = 0; |
711 | sndbuf->buflen = bufsiz; | 728 | sndbuf->buflen = bufsiz; |
712 | rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz); | 729 | rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz); |
713 | rcvbuf->head[0].iov_len = bufsiz; | 730 | rcvbuf->head[0].iov_len = bufsiz; |
714 | rcvbuf->tail[0].iov_len = 0; | 731 | rcvbuf->tail[0].iov_len = 0; |
715 | rcvbuf->page_len = 0; | 732 | rcvbuf->page_len = 0; |
@@ -849,8 +866,7 @@ call_connect_status(struct rpc_task *task) | |||
849 | } | 866 | } |
850 | 867 | ||
851 | /* Something failed: remote service port may have changed */ | 868 | /* Something failed: remote service port may have changed */ |
852 | if (clnt->cl_autobind) | 869 | rpc_force_rebind(clnt); |
853 | clnt->cl_port = 0; | ||
854 | 870 | ||
855 | switch (status) { | 871 | switch (status) { |
856 | case -ENOTCONN: | 872 | case -ENOTCONN: |
@@ -892,7 +908,7 @@ call_transmit(struct rpc_task *task) | |||
892 | if (task->tk_status < 0) | 908 | if (task->tk_status < 0) |
893 | return; | 909 | return; |
894 | if (!task->tk_msg.rpc_proc->p_decode) { | 910 | if (!task->tk_msg.rpc_proc->p_decode) { |
895 | task->tk_action = NULL; | 911 | task->tk_action = rpc_exit_task; |
896 | rpc_wake_up_task(task); | 912 | rpc_wake_up_task(task); |
897 | } | 913 | } |
898 | return; | 914 | return; |
@@ -931,8 +947,7 @@ call_status(struct rpc_task *task) | |||
931 | break; | 947 | break; |
932 | case -ECONNREFUSED: | 948 | case -ECONNREFUSED: |
933 | case -ENOTCONN: | 949 | case -ENOTCONN: |
934 | if (clnt->cl_autobind) | 950 | rpc_force_rebind(clnt); |
935 | clnt->cl_port = 0; | ||
936 | task->tk_action = call_bind; | 951 | task->tk_action = call_bind; |
937 | break; | 952 | break; |
938 | case -EAGAIN: | 953 | case -EAGAIN: |
@@ -943,8 +958,7 @@ call_status(struct rpc_task *task) | |||
943 | rpc_exit(task, status); | 958 | rpc_exit(task, status); |
944 | break; | 959 | break; |
945 | default: | 960 | default: |
946 | if (clnt->cl_chatty) | 961 | printk("%s: RPC call returned error %d\n", |
947 | printk("%s: RPC call returned error %d\n", | ||
948 | clnt->cl_protname, -status); | 962 | clnt->cl_protname, -status); |
949 | rpc_exit(task, status); | 963 | rpc_exit(task, status); |
950 | break; | 964 | break; |
@@ -979,20 +993,18 @@ call_timeout(struct rpc_task *task) | |||
979 | 993 | ||
980 | dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid); | 994 | dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid); |
981 | if (RPC_IS_SOFT(task)) { | 995 | if (RPC_IS_SOFT(task)) { |
982 | if (clnt->cl_chatty) | 996 | printk(KERN_NOTICE "%s: server %s not responding, timed out\n", |
983 | printk(KERN_NOTICE "%s: server %s not responding, timed out\n", | ||
984 | clnt->cl_protname, clnt->cl_server); | 997 | clnt->cl_protname, clnt->cl_server); |
985 | rpc_exit(task, -EIO); | 998 | rpc_exit(task, -EIO); |
986 | return; | 999 | return; |
987 | } | 1000 | } |
988 | 1001 | ||
989 | if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) { | 1002 | if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { |
990 | task->tk_flags |= RPC_CALL_MAJORSEEN; | 1003 | task->tk_flags |= RPC_CALL_MAJORSEEN; |
991 | printk(KERN_NOTICE "%s: server %s not responding, still trying\n", | 1004 | printk(KERN_NOTICE "%s: server %s not responding, still trying\n", |
992 | clnt->cl_protname, clnt->cl_server); | 1005 | clnt->cl_protname, clnt->cl_server); |
993 | } | 1006 | } |
994 | if (clnt->cl_autobind) | 1007 | rpc_force_rebind(clnt); |
995 | clnt->cl_port = 0; | ||
996 | 1008 | ||
997 | retry: | 1009 | retry: |
998 | clnt->cl_stats->rpcretrans++; | 1010 | clnt->cl_stats->rpcretrans++; |
@@ -1014,7 +1026,7 @@ call_decode(struct rpc_task *task) | |||
1014 | dprintk("RPC: %4d call_decode (status %d)\n", | 1026 | dprintk("RPC: %4d call_decode (status %d)\n", |
1015 | task->tk_pid, task->tk_status); | 1027 | task->tk_pid, task->tk_status); |
1016 | 1028 | ||
1017 | if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) { | 1029 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { |
1018 | printk(KERN_NOTICE "%s: server %s OK\n", | 1030 | printk(KERN_NOTICE "%s: server %s OK\n", |
1019 | clnt->cl_protname, clnt->cl_server); | 1031 | clnt->cl_protname, clnt->cl_server); |
1020 | task->tk_flags &= ~RPC_CALL_MAJORSEEN; | 1032 | task->tk_flags &= ~RPC_CALL_MAJORSEEN; |
@@ -1039,13 +1051,14 @@ call_decode(struct rpc_task *task) | |||
1039 | sizeof(req->rq_rcv_buf)) != 0); | 1051 | sizeof(req->rq_rcv_buf)) != 0); |
1040 | 1052 | ||
1041 | /* Verify the RPC header */ | 1053 | /* Verify the RPC header */ |
1042 | if (!(p = call_verify(task))) { | 1054 | p = call_verify(task); |
1043 | if (task->tk_action == NULL) | 1055 | if (IS_ERR(p)) { |
1044 | return; | 1056 | if (p == ERR_PTR(-EAGAIN)) |
1045 | goto out_retry; | 1057 | goto out_retry; |
1058 | return; | ||
1046 | } | 1059 | } |
1047 | 1060 | ||
1048 | task->tk_action = NULL; | 1061 | task->tk_action = rpc_exit_task; |
1049 | 1062 | ||
1050 | if (decode) | 1063 | if (decode) |
1051 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, | 1064 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, |
@@ -1138,7 +1151,7 @@ call_verify(struct rpc_task *task) | |||
1138 | 1151 | ||
1139 | if ((n = ntohl(*p++)) != RPC_REPLY) { | 1152 | if ((n = ntohl(*p++)) != RPC_REPLY) { |
1140 | printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); | 1153 | printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); |
1141 | goto out_retry; | 1154 | goto out_garbage; |
1142 | } | 1155 | } |
1143 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { | 1156 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { |
1144 | if (--len < 0) | 1157 | if (--len < 0) |
@@ -1168,7 +1181,7 @@ call_verify(struct rpc_task *task) | |||
1168 | task->tk_pid); | 1181 | task->tk_pid); |
1169 | rpcauth_invalcred(task); | 1182 | rpcauth_invalcred(task); |
1170 | task->tk_action = call_refresh; | 1183 | task->tk_action = call_refresh; |
1171 | return NULL; | 1184 | goto out_retry; |
1172 | case RPC_AUTH_BADCRED: | 1185 | case RPC_AUTH_BADCRED: |
1173 | case RPC_AUTH_BADVERF: | 1186 | case RPC_AUTH_BADVERF: |
1174 | /* possibly garbled cred/verf? */ | 1187 | /* possibly garbled cred/verf? */ |
@@ -1178,7 +1191,7 @@ call_verify(struct rpc_task *task) | |||
1178 | dprintk("RPC: %4d call_verify: retry garbled creds\n", | 1191 | dprintk("RPC: %4d call_verify: retry garbled creds\n", |
1179 | task->tk_pid); | 1192 | task->tk_pid); |
1180 | task->tk_action = call_bind; | 1193 | task->tk_action = call_bind; |
1181 | return NULL; | 1194 | goto out_retry; |
1182 | case RPC_AUTH_TOOWEAK: | 1195 | case RPC_AUTH_TOOWEAK: |
1183 | printk(KERN_NOTICE "call_verify: server requires stronger " | 1196 | printk(KERN_NOTICE "call_verify: server requires stronger " |
1184 | "authentication.\n"); | 1197 | "authentication.\n"); |
@@ -1193,7 +1206,7 @@ call_verify(struct rpc_task *task) | |||
1193 | } | 1206 | } |
1194 | if (!(p = rpcauth_checkverf(task, p))) { | 1207 | if (!(p = rpcauth_checkverf(task, p))) { |
1195 | printk(KERN_WARNING "call_verify: auth check failed\n"); | 1208 | printk(KERN_WARNING "call_verify: auth check failed\n"); |
1196 | goto out_retry; /* bad verifier, retry */ | 1209 | goto out_garbage; /* bad verifier, retry */ |
1197 | } | 1210 | } |
1198 | len = p - (u32 *)iov->iov_base - 1; | 1211 | len = p - (u32 *)iov->iov_base - 1; |
1199 | if (len < 0) | 1212 | if (len < 0) |
@@ -1230,23 +1243,24 @@ call_verify(struct rpc_task *task) | |||
1230 | /* Also retry */ | 1243 | /* Also retry */ |
1231 | } | 1244 | } |
1232 | 1245 | ||
1233 | out_retry: | 1246 | out_garbage: |
1234 | task->tk_client->cl_stats->rpcgarbage++; | 1247 | task->tk_client->cl_stats->rpcgarbage++; |
1235 | if (task->tk_garb_retry) { | 1248 | if (task->tk_garb_retry) { |
1236 | task->tk_garb_retry--; | 1249 | task->tk_garb_retry--; |
1237 | dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); | 1250 | dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); |
1238 | task->tk_action = call_bind; | 1251 | task->tk_action = call_bind; |
1239 | return NULL; | 1252 | out_retry: |
1253 | return ERR_PTR(-EAGAIN); | ||
1240 | } | 1254 | } |
1241 | printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); | 1255 | printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); |
1242 | out_eio: | 1256 | out_eio: |
1243 | error = -EIO; | 1257 | error = -EIO; |
1244 | out_err: | 1258 | out_err: |
1245 | rpc_exit(task, error); | 1259 | rpc_exit(task, error); |
1246 | return NULL; | 1260 | return ERR_PTR(error); |
1247 | out_overflow: | 1261 | out_overflow: |
1248 | printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); | 1262 | printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); |
1249 | goto out_retry; | 1263 | goto out_garbage; |
1250 | } | 1264 | } |
1251 | 1265 | ||
1252 | static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) | 1266 | static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) |
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index a398575f94..8139ce68e9 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c | |||
@@ -90,8 +90,7 @@ bailout: | |||
90 | map->pm_binding = 0; | 90 | map->pm_binding = 0; |
91 | rpc_wake_up(&map->pm_bindwait); | 91 | rpc_wake_up(&map->pm_bindwait); |
92 | spin_unlock(&pmap_lock); | 92 | spin_unlock(&pmap_lock); |
93 | task->tk_status = -EIO; | 93 | rpc_exit(task, -EIO); |
94 | task->tk_action = NULL; | ||
95 | } | 94 | } |
96 | 95 | ||
97 | #ifdef CONFIG_ROOT_NFS | 96 | #ifdef CONFIG_ROOT_NFS |
@@ -132,21 +131,22 @@ static void | |||
132 | pmap_getport_done(struct rpc_task *task) | 131 | pmap_getport_done(struct rpc_task *task) |
133 | { | 132 | { |
134 | struct rpc_clnt *clnt = task->tk_client; | 133 | struct rpc_clnt *clnt = task->tk_client; |
134 | struct rpc_xprt *xprt = task->tk_xprt; | ||
135 | struct rpc_portmap *map = clnt->cl_pmap; | 135 | struct rpc_portmap *map = clnt->cl_pmap; |
136 | 136 | ||
137 | dprintk("RPC: %4d pmap_getport_done(status %d, port %d)\n", | 137 | dprintk("RPC: %4d pmap_getport_done(status %d, port %d)\n", |
138 | task->tk_pid, task->tk_status, clnt->cl_port); | 138 | task->tk_pid, task->tk_status, clnt->cl_port); |
139 | |||
140 | xprt->ops->set_port(xprt, 0); | ||
139 | if (task->tk_status < 0) { | 141 | if (task->tk_status < 0) { |
140 | /* Make the calling task exit with an error */ | 142 | /* Make the calling task exit with an error */ |
141 | task->tk_action = NULL; | 143 | task->tk_action = rpc_exit_task; |
142 | } else if (clnt->cl_port == 0) { | 144 | } else if (clnt->cl_port == 0) { |
143 | /* Program not registered */ | 145 | /* Program not registered */ |
144 | task->tk_status = -EACCES; | 146 | rpc_exit(task, -EACCES); |
145 | task->tk_action = NULL; | ||
146 | } else { | 147 | } else { |
147 | /* byte-swap port number first */ | 148 | xprt->ops->set_port(xprt, clnt->cl_port); |
148 | clnt->cl_port = htons(clnt->cl_port); | 149 | clnt->cl_port = htons(clnt->cl_port); |
149 | clnt->cl_xprt->addr.sin_port = clnt->cl_port; | ||
150 | } | 150 | } |
151 | spin_lock(&pmap_lock); | 151 | spin_lock(&pmap_lock); |
152 | map->pm_binding = 0; | 152 | map->pm_binding = 0; |
@@ -207,7 +207,7 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileg | |||
207 | xprt = xprt_create_proto(proto, srvaddr, NULL); | 207 | xprt = xprt_create_proto(proto, srvaddr, NULL); |
208 | if (IS_ERR(xprt)) | 208 | if (IS_ERR(xprt)) |
209 | return (struct rpc_clnt *)xprt; | 209 | return (struct rpc_clnt *)xprt; |
210 | xprt->addr.sin_port = htons(RPC_PMAP_PORT); | 210 | xprt->ops->set_port(xprt, RPC_PMAP_PORT); |
211 | if (!privileged) | 211 | if (!privileged) |
212 | xprt->resvport = 0; | 212 | xprt->resvport = 0; |
213 | 213 | ||
@@ -217,7 +217,6 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileg | |||
217 | RPC_AUTH_UNIX); | 217 | RPC_AUTH_UNIX); |
218 | if (!IS_ERR(clnt)) { | 218 | if (!IS_ERR(clnt)) { |
219 | clnt->cl_softrtry = 1; | 219 | clnt->cl_softrtry = 1; |
220 | clnt->cl_chatty = 1; | ||
221 | clnt->cl_oneshot = 1; | 220 | clnt->cl_oneshot = 1; |
222 | } | 221 | } |
223 | return clnt; | 222 | return clnt; |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 81e00a6c19..9764c80ab0 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -39,23 +39,26 @@ static kmem_cache_t *rpc_inode_cachep __read_mostly; | |||
39 | #define RPC_UPCALL_TIMEOUT (30*HZ) | 39 | #define RPC_UPCALL_TIMEOUT (30*HZ) |
40 | 40 | ||
41 | static void | 41 | static void |
42 | __rpc_purge_upcall(struct inode *inode, int err) | 42 | __rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err) |
43 | { | 43 | { |
44 | struct rpc_inode *rpci = RPC_I(inode); | ||
45 | struct rpc_pipe_msg *msg; | 44 | struct rpc_pipe_msg *msg; |
45 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
46 | 46 | ||
47 | while (!list_empty(&rpci->pipe)) { | 47 | destroy_msg = rpci->ops->destroy_msg; |
48 | msg = list_entry(rpci->pipe.next, struct rpc_pipe_msg, list); | 48 | while (!list_empty(head)) { |
49 | list_del_init(&msg->list); | 49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); |
50 | msg->errno = err; | ||
51 | rpci->ops->destroy_msg(msg); | ||
52 | } | ||
53 | while (!list_empty(&rpci->in_upcall)) { | ||
54 | msg = list_entry(rpci->pipe.next, struct rpc_pipe_msg, list); | ||
55 | list_del_init(&msg->list); | 50 | list_del_init(&msg->list); |
56 | msg->errno = err; | 51 | msg->errno = err; |
57 | rpci->ops->destroy_msg(msg); | 52 | destroy_msg(msg); |
58 | } | 53 | } |
54 | } | ||
55 | |||
56 | static void | ||
57 | __rpc_purge_upcall(struct inode *inode, int err) | ||
58 | { | ||
59 | struct rpc_inode *rpci = RPC_I(inode); | ||
60 | |||
61 | __rpc_purge_list(rpci, &rpci->pipe, err); | ||
59 | rpci->pipelen = 0; | 62 | rpci->pipelen = 0; |
60 | wake_up(&rpci->waitq); | 63 | wake_up(&rpci->waitq); |
61 | } | 64 | } |
@@ -66,10 +69,13 @@ rpc_timeout_upcall_queue(void *data) | |||
66 | struct rpc_inode *rpci = (struct rpc_inode *)data; | 69 | struct rpc_inode *rpci = (struct rpc_inode *)data; |
67 | struct inode *inode = &rpci->vfs_inode; | 70 | struct inode *inode = &rpci->vfs_inode; |
68 | 71 | ||
69 | down(&inode->i_sem); | 72 | mutex_lock(&inode->i_mutex); |
73 | if (rpci->ops == NULL) | ||
74 | goto out; | ||
70 | if (rpci->nreaders == 0 && !list_empty(&rpci->pipe)) | 75 | if (rpci->nreaders == 0 && !list_empty(&rpci->pipe)) |
71 | __rpc_purge_upcall(inode, -ETIMEDOUT); | 76 | __rpc_purge_upcall(inode, -ETIMEDOUT); |
72 | up(&inode->i_sem); | 77 | out: |
78 | mutex_unlock(&inode->i_mutex); | ||
73 | } | 79 | } |
74 | 80 | ||
75 | int | 81 | int |
@@ -78,7 +84,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
78 | struct rpc_inode *rpci = RPC_I(inode); | 84 | struct rpc_inode *rpci = RPC_I(inode); |
79 | int res = -EPIPE; | 85 | int res = -EPIPE; |
80 | 86 | ||
81 | down(&inode->i_sem); | 87 | mutex_lock(&inode->i_mutex); |
82 | if (rpci->ops == NULL) | 88 | if (rpci->ops == NULL) |
83 | goto out; | 89 | goto out; |
84 | if (rpci->nreaders) { | 90 | if (rpci->nreaders) { |
@@ -94,7 +100,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
94 | res = 0; | 100 | res = 0; |
95 | } | 101 | } |
96 | out: | 102 | out: |
97 | up(&inode->i_sem); | 103 | mutex_unlock(&inode->i_mutex); |
98 | wake_up(&rpci->waitq); | 104 | wake_up(&rpci->waitq); |
99 | return res; | 105 | return res; |
100 | } | 106 | } |
@@ -110,11 +116,10 @@ rpc_close_pipes(struct inode *inode) | |||
110 | { | 116 | { |
111 | struct rpc_inode *rpci = RPC_I(inode); | 117 | struct rpc_inode *rpci = RPC_I(inode); |
112 | 118 | ||
113 | cancel_delayed_work(&rpci->queue_timeout); | 119 | mutex_lock(&inode->i_mutex); |
114 | flush_scheduled_work(); | ||
115 | down(&inode->i_sem); | ||
116 | if (rpci->ops != NULL) { | 120 | if (rpci->ops != NULL) { |
117 | rpci->nreaders = 0; | 121 | rpci->nreaders = 0; |
122 | __rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE); | ||
118 | __rpc_purge_upcall(inode, -EPIPE); | 123 | __rpc_purge_upcall(inode, -EPIPE); |
119 | rpci->nwriters = 0; | 124 | rpci->nwriters = 0; |
120 | if (rpci->ops->release_pipe) | 125 | if (rpci->ops->release_pipe) |
@@ -122,7 +127,9 @@ rpc_close_pipes(struct inode *inode) | |||
122 | rpci->ops = NULL; | 127 | rpci->ops = NULL; |
123 | } | 128 | } |
124 | rpc_inode_setowner(inode, NULL); | 129 | rpc_inode_setowner(inode, NULL); |
125 | up(&inode->i_sem); | 130 | mutex_unlock(&inode->i_mutex); |
131 | cancel_delayed_work(&rpci->queue_timeout); | ||
132 | flush_scheduled_work(); | ||
126 | } | 133 | } |
127 | 134 | ||
128 | static struct inode * | 135 | static struct inode * |
@@ -147,7 +154,7 @@ rpc_pipe_open(struct inode *inode, struct file *filp) | |||
147 | struct rpc_inode *rpci = RPC_I(inode); | 154 | struct rpc_inode *rpci = RPC_I(inode); |
148 | int res = -ENXIO; | 155 | int res = -ENXIO; |
149 | 156 | ||
150 | down(&inode->i_sem); | 157 | mutex_lock(&inode->i_mutex); |
151 | if (rpci->ops != NULL) { | 158 | if (rpci->ops != NULL) { |
152 | if (filp->f_mode & FMODE_READ) | 159 | if (filp->f_mode & FMODE_READ) |
153 | rpci->nreaders ++; | 160 | rpci->nreaders ++; |
@@ -155,22 +162,22 @@ rpc_pipe_open(struct inode *inode, struct file *filp) | |||
155 | rpci->nwriters ++; | 162 | rpci->nwriters ++; |
156 | res = 0; | 163 | res = 0; |
157 | } | 164 | } |
158 | up(&inode->i_sem); | 165 | mutex_unlock(&inode->i_mutex); |
159 | return res; | 166 | return res; |
160 | } | 167 | } |
161 | 168 | ||
162 | static int | 169 | static int |
163 | rpc_pipe_release(struct inode *inode, struct file *filp) | 170 | rpc_pipe_release(struct inode *inode, struct file *filp) |
164 | { | 171 | { |
165 | struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode); | 172 | struct rpc_inode *rpci = RPC_I(inode); |
166 | struct rpc_pipe_msg *msg; | 173 | struct rpc_pipe_msg *msg; |
167 | 174 | ||
168 | down(&inode->i_sem); | 175 | mutex_lock(&inode->i_mutex); |
169 | if (rpci->ops == NULL) | 176 | if (rpci->ops == NULL) |
170 | goto out; | 177 | goto out; |
171 | msg = (struct rpc_pipe_msg *)filp->private_data; | 178 | msg = (struct rpc_pipe_msg *)filp->private_data; |
172 | if (msg != NULL) { | 179 | if (msg != NULL) { |
173 | msg->errno = -EPIPE; | 180 | msg->errno = -EAGAIN; |
174 | list_del_init(&msg->list); | 181 | list_del_init(&msg->list); |
175 | rpci->ops->destroy_msg(msg); | 182 | rpci->ops->destroy_msg(msg); |
176 | } | 183 | } |
@@ -179,11 +186,11 @@ rpc_pipe_release(struct inode *inode, struct file *filp) | |||
179 | if (filp->f_mode & FMODE_READ) | 186 | if (filp->f_mode & FMODE_READ) |
180 | rpci->nreaders --; | 187 | rpci->nreaders --; |
181 | if (!rpci->nreaders) | 188 | if (!rpci->nreaders) |
182 | __rpc_purge_upcall(inode, -EPIPE); | 189 | __rpc_purge_upcall(inode, -EAGAIN); |
183 | if (rpci->ops->release_pipe) | 190 | if (rpci->ops->release_pipe) |
184 | rpci->ops->release_pipe(inode); | 191 | rpci->ops->release_pipe(inode); |
185 | out: | 192 | out: |
186 | up(&inode->i_sem); | 193 | mutex_unlock(&inode->i_mutex); |
187 | return 0; | 194 | return 0; |
188 | } | 195 | } |
189 | 196 | ||
@@ -195,7 +202,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
195 | struct rpc_pipe_msg *msg; | 202 | struct rpc_pipe_msg *msg; |
196 | int res = 0; | 203 | int res = 0; |
197 | 204 | ||
198 | down(&inode->i_sem); | 205 | mutex_lock(&inode->i_mutex); |
199 | if (rpci->ops == NULL) { | 206 | if (rpci->ops == NULL) { |
200 | res = -EPIPE; | 207 | res = -EPIPE; |
201 | goto out_unlock; | 208 | goto out_unlock; |
@@ -222,7 +229,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
222 | rpci->ops->destroy_msg(msg); | 229 | rpci->ops->destroy_msg(msg); |
223 | } | 230 | } |
224 | out_unlock: | 231 | out_unlock: |
225 | up(&inode->i_sem); | 232 | mutex_unlock(&inode->i_mutex); |
226 | return res; | 233 | return res; |
227 | } | 234 | } |
228 | 235 | ||
@@ -233,11 +240,11 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of | |||
233 | struct rpc_inode *rpci = RPC_I(inode); | 240 | struct rpc_inode *rpci = RPC_I(inode); |
234 | int res; | 241 | int res; |
235 | 242 | ||
236 | down(&inode->i_sem); | 243 | mutex_lock(&inode->i_mutex); |
237 | res = -EPIPE; | 244 | res = -EPIPE; |
238 | if (rpci->ops != NULL) | 245 | if (rpci->ops != NULL) |
239 | res = rpci->ops->downcall(filp, buf, len); | 246 | res = rpci->ops->downcall(filp, buf, len); |
240 | up(&inode->i_sem); | 247 | mutex_unlock(&inode->i_mutex); |
241 | return res; | 248 | return res; |
242 | } | 249 | } |
243 | 250 | ||
@@ -315,7 +322,7 @@ rpc_info_open(struct inode *inode, struct file *file) | |||
315 | 322 | ||
316 | if (!ret) { | 323 | if (!ret) { |
317 | struct seq_file *m = file->private_data; | 324 | struct seq_file *m = file->private_data; |
318 | down(&inode->i_sem); | 325 | mutex_lock(&inode->i_mutex); |
319 | clnt = RPC_I(inode)->private; | 326 | clnt = RPC_I(inode)->private; |
320 | if (clnt) { | 327 | if (clnt) { |
321 | atomic_inc(&clnt->cl_users); | 328 | atomic_inc(&clnt->cl_users); |
@@ -324,7 +331,7 @@ rpc_info_open(struct inode *inode, struct file *file) | |||
324 | single_release(inode, file); | 331 | single_release(inode, file); |
325 | ret = -EINVAL; | 332 | ret = -EINVAL; |
326 | } | 333 | } |
327 | up(&inode->i_sem); | 334 | mutex_unlock(&inode->i_mutex); |
328 | } | 335 | } |
329 | return ret; | 336 | return ret; |
330 | } | 337 | } |
@@ -484,11 +491,11 @@ rpc_depopulate(struct dentry *parent) | |||
484 | struct dentry *dentry, *dvec[10]; | 491 | struct dentry *dentry, *dvec[10]; |
485 | int n = 0; | 492 | int n = 0; |
486 | 493 | ||
487 | down(&dir->i_sem); | 494 | mutex_lock(&dir->i_mutex); |
488 | repeat: | 495 | repeat: |
489 | spin_lock(&dcache_lock); | 496 | spin_lock(&dcache_lock); |
490 | list_for_each_safe(pos, next, &parent->d_subdirs) { | 497 | list_for_each_safe(pos, next, &parent->d_subdirs) { |
491 | dentry = list_entry(pos, struct dentry, d_child); | 498 | dentry = list_entry(pos, struct dentry, d_u.d_child); |
492 | spin_lock(&dentry->d_lock); | 499 | spin_lock(&dentry->d_lock); |
493 | if (!d_unhashed(dentry)) { | 500 | if (!d_unhashed(dentry)) { |
494 | dget_locked(dentry); | 501 | dget_locked(dentry); |
@@ -512,7 +519,7 @@ repeat: | |||
512 | } while (n); | 519 | } while (n); |
513 | goto repeat; | 520 | goto repeat; |
514 | } | 521 | } |
515 | up(&dir->i_sem); | 522 | mutex_unlock(&dir->i_mutex); |
516 | } | 523 | } |
517 | 524 | ||
518 | static int | 525 | static int |
@@ -525,7 +532,7 @@ rpc_populate(struct dentry *parent, | |||
525 | struct dentry *dentry; | 532 | struct dentry *dentry; |
526 | int mode, i; | 533 | int mode, i; |
527 | 534 | ||
528 | down(&dir->i_sem); | 535 | mutex_lock(&dir->i_mutex); |
529 | for (i = start; i < eof; i++) { | 536 | for (i = start; i < eof; i++) { |
530 | dentry = d_alloc_name(parent, files[i].name); | 537 | dentry = d_alloc_name(parent, files[i].name); |
531 | if (!dentry) | 538 | if (!dentry) |
@@ -545,10 +552,10 @@ rpc_populate(struct dentry *parent, | |||
545 | dir->i_nlink++; | 552 | dir->i_nlink++; |
546 | d_add(dentry, inode); | 553 | d_add(dentry, inode); |
547 | } | 554 | } |
548 | up(&dir->i_sem); | 555 | mutex_unlock(&dir->i_mutex); |
549 | return 0; | 556 | return 0; |
550 | out_bad: | 557 | out_bad: |
551 | up(&dir->i_sem); | 558 | mutex_unlock(&dir->i_mutex); |
552 | printk(KERN_WARNING "%s: %s failed to populate directory %s\n", | 559 | printk(KERN_WARNING "%s: %s failed to populate directory %s\n", |
553 | __FILE__, __FUNCTION__, parent->d_name.name); | 560 | __FILE__, __FUNCTION__, parent->d_name.name); |
554 | return -ENOMEM; | 561 | return -ENOMEM; |
@@ -602,7 +609,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
602 | if ((error = rpc_lookup_parent(path, nd)) != 0) | 609 | if ((error = rpc_lookup_parent(path, nd)) != 0) |
603 | return ERR_PTR(error); | 610 | return ERR_PTR(error); |
604 | dir = nd->dentry->d_inode; | 611 | dir = nd->dentry->d_inode; |
605 | down(&dir->i_sem); | 612 | mutex_lock(&dir->i_mutex); |
606 | dentry = lookup_hash(nd); | 613 | dentry = lookup_hash(nd); |
607 | if (IS_ERR(dentry)) | 614 | if (IS_ERR(dentry)) |
608 | goto out_err; | 615 | goto out_err; |
@@ -613,7 +620,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
613 | } | 620 | } |
614 | return dentry; | 621 | return dentry; |
615 | out_err: | 622 | out_err: |
616 | up(&dir->i_sem); | 623 | mutex_unlock(&dir->i_mutex); |
617 | rpc_release_path(nd); | 624 | rpc_release_path(nd); |
618 | return dentry; | 625 | return dentry; |
619 | } | 626 | } |
@@ -639,7 +646,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client) | |||
639 | if (error) | 646 | if (error) |
640 | goto err_depopulate; | 647 | goto err_depopulate; |
641 | out: | 648 | out: |
642 | up(&dir->i_sem); | 649 | mutex_unlock(&dir->i_mutex); |
643 | rpc_release_path(&nd); | 650 | rpc_release_path(&nd); |
644 | return dentry; | 651 | return dentry; |
645 | err_depopulate: | 652 | err_depopulate: |
@@ -664,7 +671,7 @@ rpc_rmdir(char *path) | |||
664 | if ((error = rpc_lookup_parent(path, &nd)) != 0) | 671 | if ((error = rpc_lookup_parent(path, &nd)) != 0) |
665 | return error; | 672 | return error; |
666 | dir = nd.dentry->d_inode; | 673 | dir = nd.dentry->d_inode; |
667 | down(&dir->i_sem); | 674 | mutex_lock(&dir->i_mutex); |
668 | dentry = lookup_hash(&nd); | 675 | dentry = lookup_hash(&nd); |
669 | if (IS_ERR(dentry)) { | 676 | if (IS_ERR(dentry)) { |
670 | error = PTR_ERR(dentry); | 677 | error = PTR_ERR(dentry); |
@@ -674,7 +681,7 @@ rpc_rmdir(char *path) | |||
674 | error = __rpc_rmdir(dir, dentry); | 681 | error = __rpc_rmdir(dir, dentry); |
675 | dput(dentry); | 682 | dput(dentry); |
676 | out_release: | 683 | out_release: |
677 | up(&dir->i_sem); | 684 | mutex_unlock(&dir->i_mutex); |
678 | rpc_release_path(&nd); | 685 | rpc_release_path(&nd); |
679 | return error; | 686 | return error; |
680 | } | 687 | } |
@@ -703,7 +710,7 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) | |||
703 | rpci->ops = ops; | 710 | rpci->ops = ops; |
704 | inode_dir_notify(dir, DN_CREATE); | 711 | inode_dir_notify(dir, DN_CREATE); |
705 | out: | 712 | out: |
706 | up(&dir->i_sem); | 713 | mutex_unlock(&dir->i_mutex); |
707 | rpc_release_path(&nd); | 714 | rpc_release_path(&nd); |
708 | return dentry; | 715 | return dentry; |
709 | err_dput: | 716 | err_dput: |
@@ -725,7 +732,7 @@ rpc_unlink(char *path) | |||
725 | if ((error = rpc_lookup_parent(path, &nd)) != 0) | 732 | if ((error = rpc_lookup_parent(path, &nd)) != 0) |
726 | return error; | 733 | return error; |
727 | dir = nd.dentry->d_inode; | 734 | dir = nd.dentry->d_inode; |
728 | down(&dir->i_sem); | 735 | mutex_lock(&dir->i_mutex); |
729 | dentry = lookup_hash(&nd); | 736 | dentry = lookup_hash(&nd); |
730 | if (IS_ERR(dentry)) { | 737 | if (IS_ERR(dentry)) { |
731 | error = PTR_ERR(dentry); | 738 | error = PTR_ERR(dentry); |
@@ -739,7 +746,7 @@ rpc_unlink(char *path) | |||
739 | dput(dentry); | 746 | dput(dentry); |
740 | inode_dir_notify(dir, DN_DELETE); | 747 | inode_dir_notify(dir, DN_DELETE); |
741 | out_release: | 748 | out_release: |
742 | up(&dir->i_sem); | 749 | mutex_unlock(&dir->i_mutex); |
743 | rpc_release_path(&nd); | 750 | rpc_release_path(&nd); |
744 | return error; | 751 | return error; |
745 | } | 752 | } |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 54e60a6575..7415406aa1 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -41,8 +41,6 @@ static mempool_t *rpc_buffer_mempool __read_mostly; | |||
41 | 41 | ||
42 | static void __rpc_default_timer(struct rpc_task *task); | 42 | static void __rpc_default_timer(struct rpc_task *task); |
43 | static void rpciod_killall(void); | 43 | static void rpciod_killall(void); |
44 | static void rpc_free(struct rpc_task *task); | ||
45 | |||
46 | static void rpc_async_schedule(void *); | 44 | static void rpc_async_schedule(void *); |
47 | 45 | ||
48 | /* | 46 | /* |
@@ -264,6 +262,35 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) | |||
264 | } | 262 | } |
265 | EXPORT_SYMBOL(rpc_init_wait_queue); | 263 | EXPORT_SYMBOL(rpc_init_wait_queue); |
266 | 264 | ||
265 | static int rpc_wait_bit_interruptible(void *word) | ||
266 | { | ||
267 | if (signal_pending(current)) | ||
268 | return -ERESTARTSYS; | ||
269 | schedule(); | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Mark an RPC call as having completed by clearing the 'active' bit | ||
275 | */ | ||
276 | static inline void rpc_mark_complete_task(struct rpc_task *task) | ||
277 | { | ||
278 | rpc_clear_active(task); | ||
279 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); | ||
280 | } | ||
281 | |||
282 | /* | ||
283 | * Allow callers to wait for completion of an RPC call | ||
284 | */ | ||
285 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) | ||
286 | { | ||
287 | if (action == NULL) | ||
288 | action = rpc_wait_bit_interruptible; | ||
289 | return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, | ||
290 | action, TASK_INTERRUPTIBLE); | ||
291 | } | ||
292 | EXPORT_SYMBOL(__rpc_wait_for_completion_task); | ||
293 | |||
267 | /* | 294 | /* |
268 | * Make an RPC task runnable. | 295 | * Make an RPC task runnable. |
269 | * | 296 | * |
@@ -299,10 +326,7 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
299 | static inline void | 326 | static inline void |
300 | rpc_schedule_run(struct rpc_task *task) | 327 | rpc_schedule_run(struct rpc_task *task) |
301 | { | 328 | { |
302 | /* Don't run a child twice! */ | 329 | rpc_set_active(task); |
303 | if (RPC_IS_ACTIVATED(task)) | ||
304 | return; | ||
305 | task->tk_active = 1; | ||
306 | rpc_make_runnable(task); | 330 | rpc_make_runnable(task); |
307 | } | 331 | } |
308 | 332 | ||
@@ -324,8 +348,7 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
324 | } | 348 | } |
325 | 349 | ||
326 | /* Mark the task as being activated if so needed */ | 350 | /* Mark the task as being activated if so needed */ |
327 | if (!RPC_IS_ACTIVATED(task)) | 351 | rpc_set_active(task); |
328 | task->tk_active = 1; | ||
329 | 352 | ||
330 | __rpc_add_wait_queue(q, task); | 353 | __rpc_add_wait_queue(q, task); |
331 | 354 | ||
@@ -555,36 +578,29 @@ __rpc_atrun(struct rpc_task *task) | |||
555 | } | 578 | } |
556 | 579 | ||
557 | /* | 580 | /* |
558 | * Helper that calls task->tk_exit if it exists and then returns | 581 | * Helper to call task->tk_ops->rpc_call_prepare |
559 | * true if we should exit __rpc_execute. | ||
560 | */ | 582 | */ |
561 | static inline int __rpc_do_exit(struct rpc_task *task) | 583 | static void rpc_prepare_task(struct rpc_task *task) |
562 | { | 584 | { |
563 | if (task->tk_exit != NULL) { | 585 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); |
564 | lock_kernel(); | ||
565 | task->tk_exit(task); | ||
566 | unlock_kernel(); | ||
567 | /* If tk_action is non-null, we should restart the call */ | ||
568 | if (task->tk_action != NULL) { | ||
569 | if (!RPC_ASSASSINATED(task)) { | ||
570 | /* Release RPC slot and buffer memory */ | ||
571 | xprt_release(task); | ||
572 | rpc_free(task); | ||
573 | return 0; | ||
574 | } | ||
575 | printk(KERN_ERR "RPC: dead task tried to walk away.\n"); | ||
576 | } | ||
577 | } | ||
578 | return 1; | ||
579 | } | 586 | } |
580 | 587 | ||
581 | static int rpc_wait_bit_interruptible(void *word) | 588 | /* |
589 | * Helper that calls task->tk_ops->rpc_call_done if it exists | ||
590 | */ | ||
591 | void rpc_exit_task(struct rpc_task *task) | ||
582 | { | 592 | { |
583 | if (signal_pending(current)) | 593 | task->tk_action = NULL; |
584 | return -ERESTARTSYS; | 594 | if (task->tk_ops->rpc_call_done != NULL) { |
585 | schedule(); | 595 | task->tk_ops->rpc_call_done(task, task->tk_calldata); |
586 | return 0; | 596 | if (task->tk_action != NULL) { |
597 | WARN_ON(RPC_ASSASSINATED(task)); | ||
598 | /* Always release the RPC slot and buffer memory */ | ||
599 | xprt_release(task); | ||
600 | } | ||
601 | } | ||
587 | } | 602 | } |
603 | EXPORT_SYMBOL(rpc_exit_task); | ||
588 | 604 | ||
589 | /* | 605 | /* |
590 | * This is the RPC `scheduler' (or rather, the finite state machine). | 606 | * This is the RPC `scheduler' (or rather, the finite state machine). |
@@ -631,12 +647,11 @@ static int __rpc_execute(struct rpc_task *task) | |||
631 | * by someone else. | 647 | * by someone else. |
632 | */ | 648 | */ |
633 | if (!RPC_IS_QUEUED(task)) { | 649 | if (!RPC_IS_QUEUED(task)) { |
634 | if (task->tk_action != NULL) { | 650 | if (task->tk_action == NULL) |
635 | lock_kernel(); | ||
636 | task->tk_action(task); | ||
637 | unlock_kernel(); | ||
638 | } else if (__rpc_do_exit(task)) | ||
639 | break; | 651 | break; |
652 | lock_kernel(); | ||
653 | task->tk_action(task); | ||
654 | unlock_kernel(); | ||
640 | } | 655 | } |
641 | 656 | ||
642 | /* | 657 | /* |
@@ -676,9 +691,9 @@ static int __rpc_execute(struct rpc_task *task) | |||
676 | dprintk("RPC: %4d sync task resuming\n", task->tk_pid); | 691 | dprintk("RPC: %4d sync task resuming\n", task->tk_pid); |
677 | } | 692 | } |
678 | 693 | ||
679 | dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status); | 694 | dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status); |
680 | status = task->tk_status; | 695 | /* Wake up anyone who is waiting for task completion */ |
681 | 696 | rpc_mark_complete_task(task); | |
682 | /* Release all resources associated with the task */ | 697 | /* Release all resources associated with the task */ |
683 | rpc_release_task(task); | 698 | rpc_release_task(task); |
684 | return status; | 699 | return status; |
@@ -696,9 +711,7 @@ static int __rpc_execute(struct rpc_task *task) | |||
696 | int | 711 | int |
697 | rpc_execute(struct rpc_task *task) | 712 | rpc_execute(struct rpc_task *task) |
698 | { | 713 | { |
699 | BUG_ON(task->tk_active); | 714 | rpc_set_active(task); |
700 | |||
701 | task->tk_active = 1; | ||
702 | rpc_set_running(task); | 715 | rpc_set_running(task); |
703 | return __rpc_execute(task); | 716 | return __rpc_execute(task); |
704 | } | 717 | } |
@@ -708,17 +721,19 @@ static void rpc_async_schedule(void *arg) | |||
708 | __rpc_execute((struct rpc_task *)arg); | 721 | __rpc_execute((struct rpc_task *)arg); |
709 | } | 722 | } |
710 | 723 | ||
711 | /* | 724 | /** |
712 | * Allocate memory for RPC purposes. | 725 | * rpc_malloc - allocate an RPC buffer |
726 | * @task: RPC task that will use this buffer | ||
727 | * @size: requested byte size | ||
713 | * | 728 | * |
714 | * We try to ensure that some NFS reads and writes can always proceed | 729 | * We try to ensure that some NFS reads and writes can always proceed |
715 | * by using a mempool when allocating 'small' buffers. | 730 | * by using a mempool when allocating 'small' buffers. |
716 | * In order to avoid memory starvation triggering more writebacks of | 731 | * In order to avoid memory starvation triggering more writebacks of |
717 | * NFS requests, we use GFP_NOFS rather than GFP_KERNEL. | 732 | * NFS requests, we use GFP_NOFS rather than GFP_KERNEL. |
718 | */ | 733 | */ |
719 | void * | 734 | void * rpc_malloc(struct rpc_task *task, size_t size) |
720 | rpc_malloc(struct rpc_task *task, size_t size) | ||
721 | { | 735 | { |
736 | struct rpc_rqst *req = task->tk_rqstp; | ||
722 | gfp_t gfp; | 737 | gfp_t gfp; |
723 | 738 | ||
724 | if (task->tk_flags & RPC_TASK_SWAPPER) | 739 | if (task->tk_flags & RPC_TASK_SWAPPER) |
@@ -727,42 +742,52 @@ rpc_malloc(struct rpc_task *task, size_t size) | |||
727 | gfp = GFP_NOFS; | 742 | gfp = GFP_NOFS; |
728 | 743 | ||
729 | if (size > RPC_BUFFER_MAXSIZE) { | 744 | if (size > RPC_BUFFER_MAXSIZE) { |
730 | task->tk_buffer = kmalloc(size, gfp); | 745 | req->rq_buffer = kmalloc(size, gfp); |
731 | if (task->tk_buffer) | 746 | if (req->rq_buffer) |
732 | task->tk_bufsize = size; | 747 | req->rq_bufsize = size; |
733 | } else { | 748 | } else { |
734 | task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp); | 749 | req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp); |
735 | if (task->tk_buffer) | 750 | if (req->rq_buffer) |
736 | task->tk_bufsize = RPC_BUFFER_MAXSIZE; | 751 | req->rq_bufsize = RPC_BUFFER_MAXSIZE; |
737 | } | 752 | } |
738 | return task->tk_buffer; | 753 | return req->rq_buffer; |
739 | } | 754 | } |
740 | 755 | ||
741 | static void | 756 | /** |
742 | rpc_free(struct rpc_task *task) | 757 | * rpc_free - free buffer allocated via rpc_malloc |
758 | * @task: RPC task with a buffer to be freed | ||
759 | * | ||
760 | */ | ||
761 | void rpc_free(struct rpc_task *task) | ||
743 | { | 762 | { |
744 | if (task->tk_buffer) { | 763 | struct rpc_rqst *req = task->tk_rqstp; |
745 | if (task->tk_bufsize == RPC_BUFFER_MAXSIZE) | 764 | |
746 | mempool_free(task->tk_buffer, rpc_buffer_mempool); | 765 | if (req->rq_buffer) { |
766 | if (req->rq_bufsize == RPC_BUFFER_MAXSIZE) | ||
767 | mempool_free(req->rq_buffer, rpc_buffer_mempool); | ||
747 | else | 768 | else |
748 | kfree(task->tk_buffer); | 769 | kfree(req->rq_buffer); |
749 | task->tk_buffer = NULL; | 770 | req->rq_buffer = NULL; |
750 | task->tk_bufsize = 0; | 771 | req->rq_bufsize = 0; |
751 | } | 772 | } |
752 | } | 773 | } |
753 | 774 | ||
754 | /* | 775 | /* |
755 | * Creation and deletion of RPC task structures | 776 | * Creation and deletion of RPC task structures |
756 | */ | 777 | */ |
757 | void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action callback, int flags) | 778 | void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
758 | { | 779 | { |
759 | memset(task, 0, sizeof(*task)); | 780 | memset(task, 0, sizeof(*task)); |
760 | init_timer(&task->tk_timer); | 781 | init_timer(&task->tk_timer); |
761 | task->tk_timer.data = (unsigned long) task; | 782 | task->tk_timer.data = (unsigned long) task; |
762 | task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer; | 783 | task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer; |
784 | atomic_set(&task->tk_count, 1); | ||
763 | task->tk_client = clnt; | 785 | task->tk_client = clnt; |
764 | task->tk_flags = flags; | 786 | task->tk_flags = flags; |
765 | task->tk_exit = callback; | 787 | task->tk_ops = tk_ops; |
788 | if (tk_ops->rpc_call_prepare != NULL) | ||
789 | task->tk_action = rpc_prepare_task; | ||
790 | task->tk_calldata = calldata; | ||
766 | 791 | ||
767 | /* Initialize retry counters */ | 792 | /* Initialize retry counters */ |
768 | task->tk_garb_retry = 2; | 793 | task->tk_garb_retry = 2; |
@@ -791,6 +816,8 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call | |||
791 | list_add_tail(&task->tk_task, &all_tasks); | 816 | list_add_tail(&task->tk_task, &all_tasks); |
792 | spin_unlock(&rpc_sched_lock); | 817 | spin_unlock(&rpc_sched_lock); |
793 | 818 | ||
819 | BUG_ON(task->tk_ops == NULL); | ||
820 | |||
794 | dprintk("RPC: %4d new task procpid %d\n", task->tk_pid, | 821 | dprintk("RPC: %4d new task procpid %d\n", task->tk_pid, |
795 | current->pid); | 822 | current->pid); |
796 | } | 823 | } |
@@ -801,8 +828,7 @@ rpc_alloc_task(void) | |||
801 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); | 828 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); |
802 | } | 829 | } |
803 | 830 | ||
804 | static void | 831 | static void rpc_free_task(struct rpc_task *task) |
805 | rpc_default_free_task(struct rpc_task *task) | ||
806 | { | 832 | { |
807 | dprintk("RPC: %4d freeing task\n", task->tk_pid); | 833 | dprintk("RPC: %4d freeing task\n", task->tk_pid); |
808 | mempool_free(task, rpc_task_mempool); | 834 | mempool_free(task, rpc_task_mempool); |
@@ -813,8 +839,7 @@ rpc_default_free_task(struct rpc_task *task) | |||
813 | * clean up after an allocation failure, as the client may | 839 | * clean up after an allocation failure, as the client may |
814 | * have specified "oneshot". | 840 | * have specified "oneshot". |
815 | */ | 841 | */ |
816 | struct rpc_task * | 842 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
817 | rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags) | ||
818 | { | 843 | { |
819 | struct rpc_task *task; | 844 | struct rpc_task *task; |
820 | 845 | ||
@@ -822,10 +847,7 @@ rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags) | |||
822 | if (!task) | 847 | if (!task) |
823 | goto cleanup; | 848 | goto cleanup; |
824 | 849 | ||
825 | rpc_init_task(task, clnt, callback, flags); | 850 | rpc_init_task(task, clnt, flags, tk_ops, calldata); |
826 | |||
827 | /* Replace tk_release */ | ||
828 | task->tk_release = rpc_default_free_task; | ||
829 | 851 | ||
830 | dprintk("RPC: %4d allocated task\n", task->tk_pid); | 852 | dprintk("RPC: %4d allocated task\n", task->tk_pid); |
831 | task->tk_flags |= RPC_TASK_DYNAMIC; | 853 | task->tk_flags |= RPC_TASK_DYNAMIC; |
@@ -845,11 +867,15 @@ cleanup: | |||
845 | 867 | ||
846 | void rpc_release_task(struct rpc_task *task) | 868 | void rpc_release_task(struct rpc_task *task) |
847 | { | 869 | { |
848 | dprintk("RPC: %4d release task\n", task->tk_pid); | 870 | const struct rpc_call_ops *tk_ops = task->tk_ops; |
871 | void *calldata = task->tk_calldata; | ||
849 | 872 | ||
850 | #ifdef RPC_DEBUG | 873 | #ifdef RPC_DEBUG |
851 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 874 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
852 | #endif | 875 | #endif |
876 | if (!atomic_dec_and_test(&task->tk_count)) | ||
877 | return; | ||
878 | dprintk("RPC: %4d release task\n", task->tk_pid); | ||
853 | 879 | ||
854 | /* Remove from global task list */ | 880 | /* Remove from global task list */ |
855 | spin_lock(&rpc_sched_lock); | 881 | spin_lock(&rpc_sched_lock); |
@@ -857,7 +883,6 @@ void rpc_release_task(struct rpc_task *task) | |||
857 | spin_unlock(&rpc_sched_lock); | 883 | spin_unlock(&rpc_sched_lock); |
858 | 884 | ||
859 | BUG_ON (RPC_IS_QUEUED(task)); | 885 | BUG_ON (RPC_IS_QUEUED(task)); |
860 | task->tk_active = 0; | ||
861 | 886 | ||
862 | /* Synchronously delete any running timer */ | 887 | /* Synchronously delete any running timer */ |
863 | rpc_delete_timer(task); | 888 | rpc_delete_timer(task); |
@@ -867,7 +892,6 @@ void rpc_release_task(struct rpc_task *task) | |||
867 | xprt_release(task); | 892 | xprt_release(task); |
868 | if (task->tk_msg.rpc_cred) | 893 | if (task->tk_msg.rpc_cred) |
869 | rpcauth_unbindcred(task); | 894 | rpcauth_unbindcred(task); |
870 | rpc_free(task); | ||
871 | if (task->tk_client) { | 895 | if (task->tk_client) { |
872 | rpc_release_client(task->tk_client); | 896 | rpc_release_client(task->tk_client); |
873 | task->tk_client = NULL; | 897 | task->tk_client = NULL; |
@@ -876,11 +900,34 @@ void rpc_release_task(struct rpc_task *task) | |||
876 | #ifdef RPC_DEBUG | 900 | #ifdef RPC_DEBUG |
877 | task->tk_magic = 0; | 901 | task->tk_magic = 0; |
878 | #endif | 902 | #endif |
879 | if (task->tk_release) | 903 | if (task->tk_flags & RPC_TASK_DYNAMIC) |
880 | task->tk_release(task); | 904 | rpc_free_task(task); |
905 | if (tk_ops->rpc_release) | ||
906 | tk_ops->rpc_release(calldata); | ||
881 | } | 907 | } |
882 | 908 | ||
883 | /** | 909 | /** |
910 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | ||
911 | * @clnt - pointer to RPC client | ||
912 | * @flags - RPC flags | ||
913 | * @ops - RPC call ops | ||
914 | * @data - user call data | ||
915 | */ | ||
916 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | ||
917 | const struct rpc_call_ops *ops, | ||
918 | void *data) | ||
919 | { | ||
920 | struct rpc_task *task; | ||
921 | task = rpc_new_task(clnt, flags, ops, data); | ||
922 | if (task == NULL) | ||
923 | return ERR_PTR(-ENOMEM); | ||
924 | atomic_inc(&task->tk_count); | ||
925 | rpc_execute(task); | ||
926 | return task; | ||
927 | } | ||
928 | EXPORT_SYMBOL(rpc_run_task); | ||
929 | |||
930 | /** | ||
884 | * rpc_find_parent - find the parent of a child task. | 931 | * rpc_find_parent - find the parent of a child task. |
885 | * @child: child task | 932 | * @child: child task |
886 | * | 933 | * |
@@ -890,12 +937,11 @@ void rpc_release_task(struct rpc_task *task) | |||
890 | * | 937 | * |
891 | * Caller must hold childq.lock | 938 | * Caller must hold childq.lock |
892 | */ | 939 | */ |
893 | static inline struct rpc_task *rpc_find_parent(struct rpc_task *child) | 940 | static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent) |
894 | { | 941 | { |
895 | struct rpc_task *task, *parent; | 942 | struct rpc_task *task; |
896 | struct list_head *le; | 943 | struct list_head *le; |
897 | 944 | ||
898 | parent = (struct rpc_task *) child->tk_calldata; | ||
899 | task_for_each(task, le, &childq.tasks[0]) | 945 | task_for_each(task, le, &childq.tasks[0]) |
900 | if (task == parent) | 946 | if (task == parent) |
901 | return parent; | 947 | return parent; |
@@ -903,18 +949,22 @@ static inline struct rpc_task *rpc_find_parent(struct rpc_task *child) | |||
903 | return NULL; | 949 | return NULL; |
904 | } | 950 | } |
905 | 951 | ||
906 | static void rpc_child_exit(struct rpc_task *child) | 952 | static void rpc_child_exit(struct rpc_task *child, void *calldata) |
907 | { | 953 | { |
908 | struct rpc_task *parent; | 954 | struct rpc_task *parent; |
909 | 955 | ||
910 | spin_lock_bh(&childq.lock); | 956 | spin_lock_bh(&childq.lock); |
911 | if ((parent = rpc_find_parent(child)) != NULL) { | 957 | if ((parent = rpc_find_parent(child, calldata)) != NULL) { |
912 | parent->tk_status = child->tk_status; | 958 | parent->tk_status = child->tk_status; |
913 | __rpc_wake_up_task(parent); | 959 | __rpc_wake_up_task(parent); |
914 | } | 960 | } |
915 | spin_unlock_bh(&childq.lock); | 961 | spin_unlock_bh(&childq.lock); |
916 | } | 962 | } |
917 | 963 | ||
964 | static const struct rpc_call_ops rpc_child_ops = { | ||
965 | .rpc_call_done = rpc_child_exit, | ||
966 | }; | ||
967 | |||
918 | /* | 968 | /* |
919 | * Note: rpc_new_task releases the client after a failure. | 969 | * Note: rpc_new_task releases the client after a failure. |
920 | */ | 970 | */ |
@@ -923,11 +973,9 @@ rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent) | |||
923 | { | 973 | { |
924 | struct rpc_task *task; | 974 | struct rpc_task *task; |
925 | 975 | ||
926 | task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD); | 976 | task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent); |
927 | if (!task) | 977 | if (!task) |
928 | goto fail; | 978 | goto fail; |
929 | task->tk_exit = rpc_child_exit; | ||
930 | task->tk_calldata = parent; | ||
931 | return task; | 979 | return task; |
932 | 980 | ||
933 | fail: | 981 | fail: |
@@ -1063,7 +1111,7 @@ void rpc_show_tasks(void) | |||
1063 | return; | 1111 | return; |
1064 | } | 1112 | } |
1065 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " | 1113 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " |
1066 | "-rpcwait -action- --exit--\n"); | 1114 | "-rpcwait -action- ---ops--\n"); |
1067 | alltask_for_each(t, le, &all_tasks) { | 1115 | alltask_for_each(t, le, &all_tasks) { |
1068 | const char *rpc_waitq = "none"; | 1116 | const char *rpc_waitq = "none"; |
1069 | 1117 | ||
@@ -1078,7 +1126,7 @@ void rpc_show_tasks(void) | |||
1078 | (t->tk_client ? t->tk_client->cl_prog : 0), | 1126 | (t->tk_client ? t->tk_client->cl_prog : 0), |
1079 | t->tk_rqstp, t->tk_timeout, | 1127 | t->tk_rqstp, t->tk_timeout, |
1080 | rpc_waitq, | 1128 | rpc_waitq, |
1081 | t->tk_action, t->tk_exit); | 1129 | t->tk_action, t->tk_ops); |
1082 | } | 1130 | } |
1083 | spin_unlock(&rpc_sched_lock); | 1131 | spin_unlock(&rpc_sched_lock); |
1084 | } | 1132 | } |
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index a03d4b600c..9f73732035 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -30,8 +30,6 @@ EXPORT_SYMBOL(rpc_init_task); | |||
30 | EXPORT_SYMBOL(rpc_sleep_on); | 30 | EXPORT_SYMBOL(rpc_sleep_on); |
31 | EXPORT_SYMBOL(rpc_wake_up_next); | 31 | EXPORT_SYMBOL(rpc_wake_up_next); |
32 | EXPORT_SYMBOL(rpc_wake_up_task); | 32 | EXPORT_SYMBOL(rpc_wake_up_task); |
33 | EXPORT_SYMBOL(rpc_new_child); | ||
34 | EXPORT_SYMBOL(rpc_run_child); | ||
35 | EXPORT_SYMBOL(rpciod_down); | 33 | EXPORT_SYMBOL(rpciod_down); |
36 | EXPORT_SYMBOL(rpciod_up); | 34 | EXPORT_SYMBOL(rpciod_up); |
37 | EXPORT_SYMBOL(rpc_new_task); | 35 | EXPORT_SYMBOL(rpc_new_task); |
@@ -45,7 +43,6 @@ EXPORT_SYMBOL(rpc_clone_client); | |||
45 | EXPORT_SYMBOL(rpc_bind_new_program); | 43 | EXPORT_SYMBOL(rpc_bind_new_program); |
46 | EXPORT_SYMBOL(rpc_destroy_client); | 44 | EXPORT_SYMBOL(rpc_destroy_client); |
47 | EXPORT_SYMBOL(rpc_shutdown_client); | 45 | EXPORT_SYMBOL(rpc_shutdown_client); |
48 | EXPORT_SYMBOL(rpc_release_client); | ||
49 | EXPORT_SYMBOL(rpc_killall_tasks); | 46 | EXPORT_SYMBOL(rpc_killall_tasks); |
50 | EXPORT_SYMBOL(rpc_call_sync); | 47 | EXPORT_SYMBOL(rpc_call_sync); |
51 | EXPORT_SYMBOL(rpc_call_async); | 48 | EXPORT_SYMBOL(rpc_call_async); |
@@ -120,7 +117,6 @@ EXPORT_SYMBOL(unix_domain_find); | |||
120 | 117 | ||
121 | /* Generic XDR */ | 118 | /* Generic XDR */ |
122 | EXPORT_SYMBOL(xdr_encode_string); | 119 | EXPORT_SYMBOL(xdr_encode_string); |
123 | EXPORT_SYMBOL(xdr_decode_string); | ||
124 | EXPORT_SYMBOL(xdr_decode_string_inplace); | 120 | EXPORT_SYMBOL(xdr_decode_string_inplace); |
125 | EXPORT_SYMBOL(xdr_decode_netobj); | 121 | EXPORT_SYMBOL(xdr_decode_netobj); |
126 | EXPORT_SYMBOL(xdr_encode_netobj); | 122 | EXPORT_SYMBOL(xdr_encode_netobj); |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index e4296c8b86..b08419e1fc 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -32,7 +32,7 @@ svc_create(struct svc_program *prog, unsigned int bufsize) | |||
32 | int vers; | 32 | int vers; |
33 | unsigned int xdrsize; | 33 | unsigned int xdrsize; |
34 | 34 | ||
35 | if (!(serv = (struct svc_serv *) kmalloc(sizeof(*serv), GFP_KERNEL))) | 35 | if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL))) |
36 | return NULL; | 36 | return NULL; |
37 | memset(serv, 0, sizeof(*serv)); | 37 | memset(serv, 0, sizeof(*serv)); |
38 | serv->sv_name = prog->pg_name; | 38 | serv->sv_name = prog->pg_name; |
@@ -122,8 +122,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) | |||
122 | rqstp->rq_argused = 0; | 122 | rqstp->rq_argused = 0; |
123 | rqstp->rq_resused = 0; | 123 | rqstp->rq_resused = 0; |
124 | arghi = 0; | 124 | arghi = 0; |
125 | if (pages > RPCSVC_MAXPAGES) | 125 | BUG_ON(pages > RPCSVC_MAXPAGES); |
126 | BUG(); | ||
127 | while (pages) { | 126 | while (pages) { |
128 | struct page *p = alloc_page(GFP_KERNEL); | 127 | struct page *p = alloc_page(GFP_KERNEL); |
129 | if (!p) | 128 | if (!p) |
@@ -167,8 +166,8 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv) | |||
167 | memset(rqstp, 0, sizeof(*rqstp)); | 166 | memset(rqstp, 0, sizeof(*rqstp)); |
168 | init_waitqueue_head(&rqstp->rq_wait); | 167 | init_waitqueue_head(&rqstp->rq_wait); |
169 | 168 | ||
170 | if (!(rqstp->rq_argp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL)) | 169 | if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) |
171 | || !(rqstp->rq_resp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL)) | 170 | || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) |
172 | || !svc_init_buffer(rqstp, serv->sv_bufsz)) | 171 | || !svc_init_buffer(rqstp, serv->sv_bufsz)) |
173 | goto out_thread; | 172 | goto out_thread; |
174 | 173 | ||
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index cac2e774dd..3e6c694bba 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -101,10 +101,22 @@ static void ip_map_put(struct cache_head *item, struct cache_detail *cd) | |||
101 | } | 101 | } |
102 | } | 102 | } |
103 | 103 | ||
104 | #if IP_HASHBITS == 8 | ||
105 | /* hash_long on a 64 bit machine is currently REALLY BAD for | ||
106 | * IP addresses in reverse-endian (i.e. on a little-endian machine). | ||
107 | * So use a trivial but reliable hash instead | ||
108 | */ | ||
109 | static inline int hash_ip(unsigned long ip) | ||
110 | { | ||
111 | int hash = ip ^ (ip>>16); | ||
112 | return (hash ^ (hash>>8)) & 0xff; | ||
113 | } | ||
114 | #endif | ||
115 | |||
104 | static inline int ip_map_hash(struct ip_map *item) | 116 | static inline int ip_map_hash(struct ip_map *item) |
105 | { | 117 | { |
106 | return hash_str(item->m_class, IP_HASHBITS) ^ | 118 | return hash_str(item->m_class, IP_HASHBITS) ^ |
107 | hash_long((unsigned long)item->m_addr.s_addr, IP_HASHBITS); | 119 | hash_ip((unsigned long)item->m_addr.s_addr); |
108 | } | 120 | } |
109 | static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp) | 121 | static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp) |
110 | { | 122 | { |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index e50e7cf437..50580620e8 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -758,7 +758,7 @@ svc_tcp_accept(struct svc_sock *svsk) | |||
758 | struct svc_serv *serv = svsk->sk_server; | 758 | struct svc_serv *serv = svsk->sk_server; |
759 | struct socket *sock = svsk->sk_sock; | 759 | struct socket *sock = svsk->sk_sock; |
760 | struct socket *newsock; | 760 | struct socket *newsock; |
761 | struct proto_ops *ops; | 761 | const struct proto_ops *ops; |
762 | struct svc_sock *newsvsk; | 762 | struct svc_sock *newsvsk; |
763 | int err, slen; | 763 | int err, slen; |
764 | 764 | ||
@@ -1026,7 +1026,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
1026 | } else { | 1026 | } else { |
1027 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", | 1027 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", |
1028 | svsk->sk_server->sv_name, -len); | 1028 | svsk->sk_server->sv_name, -len); |
1029 | svc_sock_received(svsk); | 1029 | goto err_delete; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | return len; | 1032 | return len; |
@@ -1178,6 +1178,7 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout) | |||
1178 | arg->tail[0].iov_len = 0; | 1178 | arg->tail[0].iov_len = 0; |
1179 | 1179 | ||
1180 | try_to_freeze(); | 1180 | try_to_freeze(); |
1181 | cond_resched(); | ||
1181 | if (signalled()) | 1182 | if (signalled()) |
1182 | return -EINTR; | 1183 | return -EINTR; |
1183 | 1184 | ||
@@ -1526,6 +1527,7 @@ svc_defer(struct cache_req *req) | |||
1526 | dr->handle.owner = rqstp->rq_server; | 1527 | dr->handle.owner = rqstp->rq_server; |
1527 | dr->prot = rqstp->rq_prot; | 1528 | dr->prot = rqstp->rq_prot; |
1528 | dr->addr = rqstp->rq_addr; | 1529 | dr->addr = rqstp->rq_addr; |
1530 | dr->daddr = rqstp->rq_daddr; | ||
1529 | dr->argslen = rqstp->rq_arg.len >> 2; | 1531 | dr->argslen = rqstp->rq_arg.len >> 2; |
1530 | memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); | 1532 | memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); |
1531 | } | 1533 | } |
@@ -1551,6 +1553,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp) | |||
1551 | rqstp->rq_arg.len = dr->argslen<<2; | 1553 | rqstp->rq_arg.len = dr->argslen<<2; |
1552 | rqstp->rq_prot = dr->prot; | 1554 | rqstp->rq_prot = dr->prot; |
1553 | rqstp->rq_addr = dr->addr; | 1555 | rqstp->rq_addr = dr->addr; |
1556 | rqstp->rq_daddr = dr->daddr; | ||
1554 | return dr->argslen<<2; | 1557 | return dr->argslen<<2; |
1555 | } | 1558 | } |
1556 | 1559 | ||
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index aaf08cdd19..ca4bfa57e1 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -93,27 +93,6 @@ xdr_encode_string(u32 *p, const char *string) | |||
93 | } | 93 | } |
94 | 94 | ||
95 | u32 * | 95 | u32 * |
96 | xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen) | ||
97 | { | ||
98 | unsigned int len; | ||
99 | char *string; | ||
100 | |||
101 | if ((len = ntohl(*p++)) > maxlen) | ||
102 | return NULL; | ||
103 | if (lenp) | ||
104 | *lenp = len; | ||
105 | if ((len % 4) != 0) { | ||
106 | string = (char *) p; | ||
107 | } else { | ||
108 | string = (char *) (p - 1); | ||
109 | memmove(string, p, len); | ||
110 | } | ||
111 | string[len] = '\0'; | ||
112 | *sp = string; | ||
113 | return p + XDR_QUADLEN(len); | ||
114 | } | ||
115 | |||
116 | u32 * | ||
117 | xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen) | 96 | xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen) |
118 | { | 97 | { |
119 | unsigned int len; | 98 | unsigned int len; |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 6dda386035..8ff2c8acb2 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -119,6 +119,17 @@ out_sleep: | |||
119 | return 0; | 119 | return 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | static void xprt_clear_locked(struct rpc_xprt *xprt) | ||
123 | { | ||
124 | xprt->snd_task = NULL; | ||
125 | if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) { | ||
126 | smp_mb__before_clear_bit(); | ||
127 | clear_bit(XPRT_LOCKED, &xprt->state); | ||
128 | smp_mb__after_clear_bit(); | ||
129 | } else | ||
130 | schedule_work(&xprt->task_cleanup); | ||
131 | } | ||
132 | |||
122 | /* | 133 | /* |
123 | * xprt_reserve_xprt_cong - serialize write access to transports | 134 | * xprt_reserve_xprt_cong - serialize write access to transports |
124 | * @task: task that is requesting access to the transport | 135 | * @task: task that is requesting access to the transport |
@@ -145,9 +156,7 @@ int xprt_reserve_xprt_cong(struct rpc_task *task) | |||
145 | } | 156 | } |
146 | return 1; | 157 | return 1; |
147 | } | 158 | } |
148 | smp_mb__before_clear_bit(); | 159 | xprt_clear_locked(xprt); |
149 | clear_bit(XPRT_LOCKED, &xprt->state); | ||
150 | smp_mb__after_clear_bit(); | ||
151 | out_sleep: | 160 | out_sleep: |
152 | dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); | 161 | dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); |
153 | task->tk_timeout = 0; | 162 | task->tk_timeout = 0; |
@@ -193,9 +202,7 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt) | |||
193 | return; | 202 | return; |
194 | 203 | ||
195 | out_unlock: | 204 | out_unlock: |
196 | smp_mb__before_clear_bit(); | 205 | xprt_clear_locked(xprt); |
197 | clear_bit(XPRT_LOCKED, &xprt->state); | ||
198 | smp_mb__after_clear_bit(); | ||
199 | } | 206 | } |
200 | 207 | ||
201 | static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) | 208 | static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) |
@@ -222,9 +229,7 @@ static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) | |||
222 | return; | 229 | return; |
223 | } | 230 | } |
224 | out_unlock: | 231 | out_unlock: |
225 | smp_mb__before_clear_bit(); | 232 | xprt_clear_locked(xprt); |
226 | clear_bit(XPRT_LOCKED, &xprt->state); | ||
227 | smp_mb__after_clear_bit(); | ||
228 | } | 233 | } |
229 | 234 | ||
230 | /** | 235 | /** |
@@ -237,10 +242,7 @@ out_unlock: | |||
237 | void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) | 242 | void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) |
238 | { | 243 | { |
239 | if (xprt->snd_task == task) { | 244 | if (xprt->snd_task == task) { |
240 | xprt->snd_task = NULL; | 245 | xprt_clear_locked(xprt); |
241 | smp_mb__before_clear_bit(); | ||
242 | clear_bit(XPRT_LOCKED, &xprt->state); | ||
243 | smp_mb__after_clear_bit(); | ||
244 | __xprt_lock_write_next(xprt); | 246 | __xprt_lock_write_next(xprt); |
245 | } | 247 | } |
246 | } | 248 | } |
@@ -256,10 +258,7 @@ void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) | |||
256 | void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) | 258 | void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) |
257 | { | 259 | { |
258 | if (xprt->snd_task == task) { | 260 | if (xprt->snd_task == task) { |
259 | xprt->snd_task = NULL; | 261 | xprt_clear_locked(xprt); |
260 | smp_mb__before_clear_bit(); | ||
261 | clear_bit(XPRT_LOCKED, &xprt->state); | ||
262 | smp_mb__after_clear_bit(); | ||
263 | __xprt_lock_write_next_cong(xprt); | 262 | __xprt_lock_write_next_cong(xprt); |
264 | } | 263 | } |
265 | } | 264 | } |
@@ -535,10 +534,6 @@ void xprt_connect(struct rpc_task *task) | |||
535 | dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid, | 534 | dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid, |
536 | xprt, (xprt_connected(xprt) ? "is" : "is not")); | 535 | xprt, (xprt_connected(xprt) ? "is" : "is not")); |
537 | 536 | ||
538 | if (xprt->shutdown) { | ||
539 | task->tk_status = -EIO; | ||
540 | return; | ||
541 | } | ||
542 | if (!xprt->addr.sin_port) { | 537 | if (!xprt->addr.sin_port) { |
543 | task->tk_status = -EIO; | 538 | task->tk_status = -EIO; |
544 | return; | 539 | return; |
@@ -687,9 +682,6 @@ int xprt_prepare_transmit(struct rpc_task *task) | |||
687 | 682 | ||
688 | dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid); | 683 | dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid); |
689 | 684 | ||
690 | if (xprt->shutdown) | ||
691 | return -EIO; | ||
692 | |||
693 | spin_lock_bh(&xprt->transport_lock); | 685 | spin_lock_bh(&xprt->transport_lock); |
694 | if (req->rq_received && !req->rq_bytes_sent) { | 686 | if (req->rq_received && !req->rq_bytes_sent) { |
695 | err = req->rq_received; | 687 | err = req->rq_received; |
@@ -814,11 +806,9 @@ void xprt_reserve(struct rpc_task *task) | |||
814 | struct rpc_xprt *xprt = task->tk_xprt; | 806 | struct rpc_xprt *xprt = task->tk_xprt; |
815 | 807 | ||
816 | task->tk_status = -EIO; | 808 | task->tk_status = -EIO; |
817 | if (!xprt->shutdown) { | 809 | spin_lock(&xprt->reserve_lock); |
818 | spin_lock(&xprt->reserve_lock); | 810 | do_xprt_reserve(task); |
819 | do_xprt_reserve(task); | 811 | spin_unlock(&xprt->reserve_lock); |
820 | spin_unlock(&xprt->reserve_lock); | ||
821 | } | ||
822 | } | 812 | } |
823 | 813 | ||
824 | static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) | 814 | static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) |
@@ -838,6 +828,8 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | |||
838 | req->rq_timeout = xprt->timeout.to_initval; | 828 | req->rq_timeout = xprt->timeout.to_initval; |
839 | req->rq_task = task; | 829 | req->rq_task = task; |
840 | req->rq_xprt = xprt; | 830 | req->rq_xprt = xprt; |
831 | req->rq_buffer = NULL; | ||
832 | req->rq_bufsize = 0; | ||
841 | req->rq_xid = xprt_alloc_xid(xprt); | 833 | req->rq_xid = xprt_alloc_xid(xprt); |
842 | req->rq_release_snd_buf = NULL; | 834 | req->rq_release_snd_buf = NULL; |
843 | dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, | 835 | dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, |
@@ -863,10 +855,11 @@ void xprt_release(struct rpc_task *task) | |||
863 | if (!list_empty(&req->rq_list)) | 855 | if (!list_empty(&req->rq_list)) |
864 | list_del(&req->rq_list); | 856 | list_del(&req->rq_list); |
865 | xprt->last_used = jiffies; | 857 | xprt->last_used = jiffies; |
866 | if (list_empty(&xprt->recv) && !xprt->shutdown) | 858 | if (list_empty(&xprt->recv)) |
867 | mod_timer(&xprt->timer, | 859 | mod_timer(&xprt->timer, |
868 | xprt->last_used + xprt->idle_timeout); | 860 | xprt->last_used + xprt->idle_timeout); |
869 | spin_unlock_bh(&xprt->transport_lock); | 861 | spin_unlock_bh(&xprt->transport_lock); |
862 | xprt->ops->buf_free(task); | ||
870 | task->tk_rqstp = NULL; | 863 | task->tk_rqstp = NULL; |
871 | if (req->rq_release_snd_buf) | 864 | if (req->rq_release_snd_buf) |
872 | req->rq_release_snd_buf(req); | 865 | req->rq_release_snd_buf(req); |
@@ -974,16 +967,6 @@ struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rp | |||
974 | return xprt; | 967 | return xprt; |
975 | } | 968 | } |
976 | 969 | ||
977 | static void xprt_shutdown(struct rpc_xprt *xprt) | ||
978 | { | ||
979 | xprt->shutdown = 1; | ||
980 | rpc_wake_up(&xprt->sending); | ||
981 | rpc_wake_up(&xprt->resend); | ||
982 | xprt_wake_pending_tasks(xprt, -EIO); | ||
983 | rpc_wake_up(&xprt->backlog); | ||
984 | del_timer_sync(&xprt->timer); | ||
985 | } | ||
986 | |||
987 | /** | 970 | /** |
988 | * xprt_destroy - destroy an RPC transport, killing off all requests. | 971 | * xprt_destroy - destroy an RPC transport, killing off all requests. |
989 | * @xprt: transport to destroy | 972 | * @xprt: transport to destroy |
@@ -992,7 +975,8 @@ static void xprt_shutdown(struct rpc_xprt *xprt) | |||
992 | int xprt_destroy(struct rpc_xprt *xprt) | 975 | int xprt_destroy(struct rpc_xprt *xprt) |
993 | { | 976 | { |
994 | dprintk("RPC: destroying transport %p\n", xprt); | 977 | dprintk("RPC: destroying transport %p\n", xprt); |
995 | xprt_shutdown(xprt); | 978 | xprt->shutdown = 1; |
979 | del_timer_sync(&xprt->timer); | ||
996 | xprt->ops->destroy(xprt); | 980 | xprt->ops->destroy(xprt); |
997 | kfree(xprt); | 981 | kfree(xprt); |
998 | 982 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 0a51fd46a8..c458f8d1d6 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/udp.h> | 28 | #include <linux/udp.h> |
29 | #include <linux/tcp.h> | 29 | #include <linux/tcp.h> |
30 | #include <linux/sunrpc/clnt.h> | 30 | #include <linux/sunrpc/clnt.h> |
31 | #include <linux/sunrpc/sched.h> | ||
31 | #include <linux/file.h> | 32 | #include <linux/file.h> |
32 | 33 | ||
33 | #include <net/sock.h> | 34 | #include <net/sock.h> |
@@ -424,7 +425,7 @@ static void xs_close(struct rpc_xprt *xprt) | |||
424 | struct sock *sk = xprt->inet; | 425 | struct sock *sk = xprt->inet; |
425 | 426 | ||
426 | if (!sk) | 427 | if (!sk) |
427 | return; | 428 | goto clear_close_wait; |
428 | 429 | ||
429 | dprintk("RPC: xs_close xprt %p\n", xprt); | 430 | dprintk("RPC: xs_close xprt %p\n", xprt); |
430 | 431 | ||
@@ -441,6 +442,10 @@ static void xs_close(struct rpc_xprt *xprt) | |||
441 | sk->sk_no_check = 0; | 442 | sk->sk_no_check = 0; |
442 | 443 | ||
443 | sock_release(sock); | 444 | sock_release(sock); |
445 | clear_close_wait: | ||
446 | smp_mb__before_clear_bit(); | ||
447 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | ||
448 | smp_mb__after_clear_bit(); | ||
444 | } | 449 | } |
445 | 450 | ||
446 | /** | 451 | /** |
@@ -800,9 +805,13 @@ static void xs_tcp_state_change(struct sock *sk) | |||
800 | case TCP_SYN_SENT: | 805 | case TCP_SYN_SENT: |
801 | case TCP_SYN_RECV: | 806 | case TCP_SYN_RECV: |
802 | break; | 807 | break; |
808 | case TCP_CLOSE_WAIT: | ||
809 | /* Try to schedule an autoclose RPC calls */ | ||
810 | set_bit(XPRT_CLOSE_WAIT, &xprt->state); | ||
811 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) | ||
812 | schedule_work(&xprt->task_cleanup); | ||
803 | default: | 813 | default: |
804 | xprt_disconnect(xprt); | 814 | xprt_disconnect(xprt); |
805 | break; | ||
806 | } | 815 | } |
807 | out: | 816 | out: |
808 | read_unlock(&sk->sk_callback_lock); | 817 | read_unlock(&sk->sk_callback_lock); |
@@ -920,6 +929,18 @@ static void xs_udp_timer(struct rpc_task *task) | |||
920 | xprt_adjust_cwnd(task, -ETIMEDOUT); | 929 | xprt_adjust_cwnd(task, -ETIMEDOUT); |
921 | } | 930 | } |
922 | 931 | ||
932 | /** | ||
933 | * xs_set_port - reset the port number in the remote endpoint address | ||
934 | * @xprt: generic transport | ||
935 | * @port: new port number | ||
936 | * | ||
937 | */ | ||
938 | static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | ||
939 | { | ||
940 | dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); | ||
941 | xprt->addr.sin_port = htons(port); | ||
942 | } | ||
943 | |||
923 | static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | 944 | static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) |
924 | { | 945 | { |
925 | struct sockaddr_in myaddr = { | 946 | struct sockaddr_in myaddr = { |
@@ -990,6 +1011,7 @@ static void xs_udp_connect_worker(void *args) | |||
990 | sk->sk_data_ready = xs_udp_data_ready; | 1011 | sk->sk_data_ready = xs_udp_data_ready; |
991 | sk->sk_write_space = xs_udp_write_space; | 1012 | sk->sk_write_space = xs_udp_write_space; |
992 | sk->sk_no_check = UDP_CSUM_NORCV; | 1013 | sk->sk_no_check = UDP_CSUM_NORCV; |
1014 | sk->sk_allocation = GFP_ATOMIC; | ||
993 | 1015 | ||
994 | xprt_set_connected(xprt); | 1016 | xprt_set_connected(xprt); |
995 | 1017 | ||
@@ -1074,6 +1096,7 @@ static void xs_tcp_connect_worker(void *args) | |||
1074 | sk->sk_data_ready = xs_tcp_data_ready; | 1096 | sk->sk_data_ready = xs_tcp_data_ready; |
1075 | sk->sk_state_change = xs_tcp_state_change; | 1097 | sk->sk_state_change = xs_tcp_state_change; |
1076 | sk->sk_write_space = xs_tcp_write_space; | 1098 | sk->sk_write_space = xs_tcp_write_space; |
1099 | sk->sk_allocation = GFP_ATOMIC; | ||
1077 | 1100 | ||
1078 | /* socket options */ | 1101 | /* socket options */ |
1079 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | 1102 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; |
@@ -1158,7 +1181,10 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
1158 | .set_buffer_size = xs_udp_set_buffer_size, | 1181 | .set_buffer_size = xs_udp_set_buffer_size, |
1159 | .reserve_xprt = xprt_reserve_xprt_cong, | 1182 | .reserve_xprt = xprt_reserve_xprt_cong, |
1160 | .release_xprt = xprt_release_xprt_cong, | 1183 | .release_xprt = xprt_release_xprt_cong, |
1184 | .set_port = xs_set_port, | ||
1161 | .connect = xs_connect, | 1185 | .connect = xs_connect, |
1186 | .buf_alloc = rpc_malloc, | ||
1187 | .buf_free = rpc_free, | ||
1162 | .send_request = xs_udp_send_request, | 1188 | .send_request = xs_udp_send_request, |
1163 | .set_retrans_timeout = xprt_set_retrans_timeout_rtt, | 1189 | .set_retrans_timeout = xprt_set_retrans_timeout_rtt, |
1164 | .timer = xs_udp_timer, | 1190 | .timer = xs_udp_timer, |
@@ -1170,7 +1196,10 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
1170 | static struct rpc_xprt_ops xs_tcp_ops = { | 1196 | static struct rpc_xprt_ops xs_tcp_ops = { |
1171 | .reserve_xprt = xprt_reserve_xprt, | 1197 | .reserve_xprt = xprt_reserve_xprt, |
1172 | .release_xprt = xprt_release_xprt, | 1198 | .release_xprt = xprt_release_xprt, |
1199 | .set_port = xs_set_port, | ||
1173 | .connect = xs_connect, | 1200 | .connect = xs_connect, |
1201 | .buf_alloc = rpc_malloc, | ||
1202 | .buf_free = rpc_free, | ||
1174 | .send_request = xs_tcp_send_request, | 1203 | .send_request = xs_tcp_send_request, |
1175 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 1204 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
1176 | .close = xs_close, | 1205 | .close = xs_close, |