56 files changed, 3321 insertions, 1810 deletions
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 96070bff93fc..572601e98dcd 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -44,9 +44,8 @@ static struct nsm_handle * nsm_find(const struct sockaddr_in *sin,
  */
 static struct nlm_host *
 nlm_lookup_host(int server, const struct sockaddr_in *sin,
-		int proto, int version,
-		const char *hostname,
-		int hostname_len)
+		int proto, int version, const char *hostname,
+		int hostname_len, const struct sockaddr_in *ssin)
 {
 	struct hlist_head *chain;
 	struct hlist_node *pos;
@@ -54,7 +53,9 @@ nlm_lookup_host(int server, const struct sockaddr_in *sin,
 	struct nsm_handle *nsm = NULL;
 	int		hash;
 
-	dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
+	dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT
+			", p=%d, v=%d, my role=%s, name=%.*s)\n",
+			NIPQUAD(ssin->sin_addr.s_addr),
 			NIPQUAD(sin->sin_addr.s_addr), proto, version,
 			server? "server" : "client",
 			hostname_len,
@@ -91,6 +92,8 @@ nlm_lookup_host(int server, const struct sockaddr_in *sin,
 			continue;
 		if (host->h_server != server)
 			continue;
+		if (!nlm_cmp_addr(&host->h_saddr, ssin))
+			continue;
 
 		/* Move to head of hash chain. */
 		hlist_del(&host->h_hash);
@@ -118,6 +121,7 @@ nlm_lookup_host(int server, const struct sockaddr_in *sin,
 	host->h_name	   = nsm->sm_name;
 	host->h_addr	   = *sin;
 	host->h_addr.sin_port = 0;	/* ouch! */
+	host->h_saddr	   = *ssin;
 	host->h_version    = version;
 	host->h_proto      = proto;
 	host->h_rpcclnt    = NULL;
@@ -161,15 +165,9 @@ nlm_destroy_host(struct nlm_host *host)
 	 */
 	nsm_unmonitor(host);
 
-	if ((clnt = host->h_rpcclnt) != NULL) {
-		if (atomic_read(&clnt->cl_users)) {
-			printk(KERN_WARNING
-				"lockd: active RPC handle\n");
-			clnt->cl_dead = 1;
-		} else {
-			rpc_destroy_client(host->h_rpcclnt);
-		}
-	}
+	clnt = host->h_rpcclnt;
+	if (clnt != NULL)
+		rpc_shutdown_client(clnt);
 	kfree(host);
 }
 
@@ -180,8 +178,10 @@ struct nlm_host *
 nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
 			const char *hostname, int hostname_len)
 {
+	struct sockaddr_in ssin = {0};
+
 	return nlm_lookup_host(0, sin, proto, version,
-			       hostname, hostname_len);
+			       hostname, hostname_len, &ssin);
 }
 
 /*
@@ -191,9 +191,12 @@ struct nlm_host *
 nlmsvc_lookup_host(struct svc_rqst *rqstp,
 			const char *hostname, int hostname_len)
 {
+	struct sockaddr_in ssin = {0};
+
+	ssin.sin_addr = rqstp->rq_daddr.addr;
 	return nlm_lookup_host(1, svc_addr_in(rqstp),
 			       rqstp->rq_prot, rqstp->rq_vers,
-			       hostname, hostname_len);
+			       hostname, hostname_len, &ssin);
 }
 
 /*
@@ -204,8 +207,9 @@ nlm_bind_host(struct nlm_host *host)
 {
 	struct rpc_clnt	*clnt;
 
-	dprintk("lockd: nlm_bind_host(%08x)\n",
-			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));
+	dprintk("lockd: nlm_bind_host("NIPQUAD_FMT"->"NIPQUAD_FMT")\n",
+			NIPQUAD(host->h_saddr.sin_addr),
+			NIPQUAD(host->h_addr.sin_addr));
 
 	/* Lock host handle */
 	mutex_lock(&host->h_mutex);
@@ -232,6 +236,7 @@ nlm_bind_host(struct nlm_host *host)
 			.protocol	= host->h_proto,
 			.address	= (struct sockaddr *)&host->h_addr,
 			.addrsize	= sizeof(host->h_addr),
+			.saddress	= (struct sockaddr *)&host->h_saddr,
 			.timeout	= &timeparms,
 			.servername	= host->h_name,
 			.program	= &nlm_program,
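With the fs/lockd/host.c changes above, the nlm_host cache is keyed on the local source address (h_saddr) as well as the peer address, so the same remote server reached through different local interfaces gets distinct host entries. A minimal sketch of that kind of match, with an illustrative helper name (the real check is the nlm_cmp_addr() call on h_saddr shown in the hunk above); the peer port is ignored because h_addr.sin_port is cleared at creation time:

	#include <linux/in.h>

	/* Illustration only: a cache hit now requires both the peer address
	 * (port ignored) and the local source address to match. */
	static int example_host_match(const struct sockaddr_in *h_addr,
				      const struct sockaddr_in *h_saddr,
				      const struct sockaddr_in *sin,
				      const struct sockaddr_in *ssin)
	{
		return h_addr->sin_addr.s_addr == sin->sin_addr.s_addr &&
		       h_saddr->sin_addr.s_addr == ssin->sin_addr.s_addr;
	}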
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 2102e2d0134d..3353ed8421a7 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -61,6 +61,7 @@ nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
 				status);
 	else
 		status = 0;
+	rpc_shutdown_client(clnt);
 out:
 	return status;
 }
@@ -138,7 +139,6 @@ nsm_create(void)
 		.program	= &nsm_program,
 		.version	= SM_VERSION,
 		.authflavor	= RPC_AUTH_NULL,
-		.flags		= (RPC_CLNT_CREATE_ONESHOT),
 	};
 
 	return rpc_create(&args);
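The mon.c change drops RPC_CLNT_CREATE_ONESHOT and instead shuts the transient NSM client down explicitly once the call returns. A hedged sketch of that create/call/shutdown pattern (argument setup omitted; not the exact mon.c code):

	/* Sketch only: one-off RPC call with explicit client teardown,
	 * the shape nsm_mon_unmon() now follows. */
	static int example_one_shot_rpc(struct rpc_create_args *args,
					struct rpc_message *msg)
	{
		struct rpc_clnt *clnt = rpc_create(args);
		int status;

		if (IS_ERR(clnt))
			return PTR_ERR(clnt);
		status = rpc_call_sync(clnt, msg, 0);
		rpc_shutdown_client(clnt);
		return status;
	}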
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 126b1bf02c0e..26809325469c 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -123,9 +123,6 @@ lockd(struct svc_rqst *rqstp)
 	/* Process request with signals blocked, but allow SIGKILL. */
 	allow_signal(SIGKILL);
 
-	/* kick rpciod */
-	rpciod_up();
-
 	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
 	if (!nlm_timeout)
@@ -202,9 +199,6 @@ lockd(struct svc_rqst *rqstp)
 	/* Exit the RPC thread */
 	svc_exit_thread(rqstp);
 
-	/* release rpciod */
-	rpciod_down();
-
 	/* Release module */
 	unlock_kernel();
 	module_put_and_exit(0);
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index f4580b44eef4..b55cb236cf74 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -6,8 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
 
 nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
 			   pagelist.o proc.o read.o symlink.o unlink.o \
-			   write.o namespace.o
-nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o mount_clnt.o
+			   write.o namespace.o mount_clnt.o
+nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
 nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
 nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
 nfs-$(CONFIG_NFS_V4)	+= nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 881fa4900923..ccb455053ee4 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -102,19 +102,10 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
 					   int nfsversion)
 {
 	struct nfs_client *clp;
-	int error;
 
 	if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
 		goto error_0;
 
-	error = rpciod_up();
-	if (error < 0) {
-		dprintk("%s: couldn't start rpciod! Error = %d\n",
-				__FUNCTION__, error);
-		goto error_1;
-	}
-	__set_bit(NFS_CS_RPCIOD, &clp->cl_res_state);
-
 	if (nfsversion == 4) {
 		if (nfs_callback_up() < 0)
 			goto error_2;
@@ -139,8 +130,6 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
 #ifdef CONFIG_NFS_V4
 	init_rwsem(&clp->cl_sem);
 	INIT_LIST_HEAD(&clp->cl_delegations);
-	INIT_LIST_HEAD(&clp->cl_state_owners);
-	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -154,9 +143,6 @@ error_3:
 	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
 		nfs_callback_down();
 error_2:
-	rpciod_down();
-	__clear_bit(NFS_CS_RPCIOD, &clp->cl_res_state);
-error_1:
 	kfree(clp);
 error_0:
 	return NULL;
@@ -167,16 +153,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
 #ifdef CONFIG_NFS_V4
 	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
 		nfs4_kill_renewd(clp);
-	while (!list_empty(&clp->cl_unused)) {
-		struct nfs4_state_owner *sp;
-
-		sp = list_entry(clp->cl_unused.next,
-				struct nfs4_state_owner,
-				so_list);
-		list_del(&sp->so_list);
-		kfree(sp);
-	}
-	BUG_ON(!list_empty(&clp->cl_state_owners));
+	BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
 	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
 		nfs_idmap_delete(clp);
 #endif
@@ -198,9 +175,6 @@ static void nfs_free_client(struct nfs_client *clp)
 	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
 		nfs_callback_down();
 
-	if (__test_and_clear_bit(NFS_CS_RPCIOD, &clp->cl_res_state))
-		rpciod_down();
-
 	kfree(clp->cl_hostname);
 	kfree(clp);
 
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 7f37d1bea83f..20ac403469a0 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -27,6 +27,13 @@ static void nfs_free_delegation(struct nfs_delegation *delegation)
 	kfree(delegation);
 }
 
+static void nfs_free_delegation_callback(struct rcu_head *head)
+{
+	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);
+
+	nfs_free_delegation(delegation);
+}
+
 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
 {
 	struct inode *inode = state->inode;
@@ -57,7 +64,7 @@ out_err:
 	return status;
 }
 
-static void nfs_delegation_claim_opens(struct inode *inode)
+static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_open_context *ctx;
@@ -72,9 +79,11 @@ again:
 			continue;
 		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
 			continue;
+		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
+			continue;
 		get_nfs_open_context(ctx);
 		spin_unlock(&inode->i_lock);
-		err = nfs4_open_delegation_recall(ctx->dentry, state);
+		err = nfs4_open_delegation_recall(ctx, state, stateid);
 		if (err >= 0)
 			err = nfs_delegation_claim_locks(ctx, state);
 		put_nfs_open_context(ctx);
@@ -115,10 +124,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 	struct nfs_delegation *delegation;
 	int status = 0;
 
-	/* Ensure we first revalidate the attributes and page cache! */
-	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
-		__nfs_revalidate_inode(NFS_SERVER(inode), inode);
-
 	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
 	if (delegation == NULL)
 		return -ENOMEM;
@@ -131,10 +136,10 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 	delegation->inode = inode;
 
 	spin_lock(&clp->cl_lock);
-	if (nfsi->delegation == NULL) {
-		list_add(&delegation->super_list, &clp->cl_delegations);
-		nfsi->delegation = delegation;
+	if (rcu_dereference(nfsi->delegation) == NULL) {
+		list_add_rcu(&delegation->super_list, &clp->cl_delegations);
 		nfsi->delegation_state = delegation->type;
+		rcu_assign_pointer(nfsi->delegation, delegation);
 		delegation = NULL;
 	} else {
 		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
@@ -145,6 +150,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 			status = -EIO;
 		}
 	}
+
+	/* Ensure we revalidate the attributes and page cache! */
+	spin_lock(&inode->i_lock);
+	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
+	spin_unlock(&inode->i_lock);
+
 	spin_unlock(&clp->cl_lock);
 	kfree(delegation);
 	return status;
@@ -155,7 +166,7 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
 	int res = 0;
 
 	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
-	nfs_free_delegation(delegation);
+	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
 	return res;
 }
 
@@ -170,33 +181,55 @@ static void nfs_msync_inode(struct inode *inode)
 /*
  * Basic procedure for returning a delegation to the server
  */
-int __nfs_inode_return_delegation(struct inode *inode)
+static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
 {
 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 	struct nfs_inode *nfsi = NFS_I(inode);
-	struct nfs_delegation *delegation;
-	int res = 0;
 
 	nfs_msync_inode(inode);
 	down_read(&clp->cl_sem);
 	/* Guard against new delegated open calls */
 	down_write(&nfsi->rwsem);
-	spin_lock(&clp->cl_lock);
-	delegation = nfsi->delegation;
-	if (delegation != NULL) {
-		list_del_init(&delegation->super_list);
-		nfsi->delegation = NULL;
-		nfsi->delegation_state = 0;
-	}
-	spin_unlock(&clp->cl_lock);
-	nfs_delegation_claim_opens(inode);
+	nfs_delegation_claim_opens(inode, &delegation->stateid);
 	up_write(&nfsi->rwsem);
 	up_read(&clp->cl_sem);
 	nfs_msync_inode(inode);
 
-	if (delegation != NULL)
-		res = nfs_do_return_delegation(inode, delegation);
-	return res;
+	return nfs_do_return_delegation(inode, delegation);
+}
+
+static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
+{
+	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
+
+	if (delegation == NULL)
+		goto nomatch;
+	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
+				sizeof(delegation->stateid.data)) != 0)
+		goto nomatch;
+	list_del_rcu(&delegation->super_list);
+	nfsi->delegation_state = 0;
+	rcu_assign_pointer(nfsi->delegation, NULL);
+	return delegation;
+nomatch:
+	return NULL;
+}
+
+int nfs_inode_return_delegation(struct inode *inode)
+{
+	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_delegation *delegation;
+	int err = 0;
+
+	if (rcu_dereference(nfsi->delegation) != NULL) {
+		spin_lock(&clp->cl_lock);
+		delegation = nfs_detach_delegation_locked(nfsi, NULL);
+		spin_unlock(&clp->cl_lock);
+		if (delegation != NULL)
+			err = __nfs_inode_return_delegation(inode, delegation);
+	}
+	return err;
 }
 
 /*
@@ -211,19 +244,23 @@ void nfs_return_all_delegations(struct super_block *sb)
 	if (clp == NULL)
 		return;
 restart:
-	spin_lock(&clp->cl_lock);
-	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
 		if (delegation->inode->i_sb != sb)
 			continue;
 		inode = igrab(delegation->inode);
 		if (inode == NULL)
 			continue;
+		spin_lock(&clp->cl_lock);
+		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
 		spin_unlock(&clp->cl_lock);
-		nfs_inode_return_delegation(inode);
+		rcu_read_unlock();
+		if (delegation != NULL)
+			__nfs_inode_return_delegation(inode, delegation);
 		iput(inode);
 		goto restart;
 	}
-	spin_unlock(&clp->cl_lock);
+	rcu_read_unlock();
 }
 
 static int nfs_do_expire_all_delegations(void *ptr)
@@ -234,22 +271,26 @@ static int nfs_do_expire_all_delegations(void *ptr)
 
 	allow_signal(SIGKILL);
 restart:
-	spin_lock(&clp->cl_lock);
 	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
 		goto out;
 	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
 		goto out;
-	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
 		inode = igrab(delegation->inode);
 		if (inode == NULL)
 			continue;
+		spin_lock(&clp->cl_lock);
+		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
 		spin_unlock(&clp->cl_lock);
-		nfs_inode_return_delegation(inode);
+		rcu_read_unlock();
+		if (delegation)
+			__nfs_inode_return_delegation(inode, delegation);
 		iput(inode);
 		goto restart;
 	}
+	rcu_read_unlock();
 out:
-	spin_unlock(&clp->cl_lock);
 	nfs_put_client(clp);
 	module_put_and_exit(0);
 }
@@ -280,17 +321,21 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp)
 	if (clp == NULL)
 		return;
 restart:
-	spin_lock(&clp->cl_lock);
-	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
 		inode = igrab(delegation->inode);
 		if (inode == NULL)
 			continue;
+		spin_lock(&clp->cl_lock);
+		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
 		spin_unlock(&clp->cl_lock);
-		nfs_inode_return_delegation(inode);
+		rcu_read_unlock();
+		if (delegation != NULL)
+			__nfs_inode_return_delegation(inode, delegation);
 		iput(inode);
 		goto restart;
 	}
-	spin_unlock(&clp->cl_lock);
+	rcu_read_unlock();
 }
 
 struct recall_threadargs {
@@ -316,21 +361,14 @@ static int recall_thread(void *data)
 	down_read(&clp->cl_sem);
 	down_write(&nfsi->rwsem);
 	spin_lock(&clp->cl_lock);
-	delegation = nfsi->delegation;
-	if (delegation != NULL && memcmp(delegation->stateid.data,
-				args->stateid->data,
-				sizeof(delegation->stateid.data)) == 0) {
-		list_del_init(&delegation->super_list);
-		nfsi->delegation = NULL;
-		nfsi->delegation_state = 0;
+	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
+	if (delegation != NULL)
 		args->result = 0;
-	} else {
-		delegation = NULL;
+	else
 		args->result = -ENOENT;
-	}
 	spin_unlock(&clp->cl_lock);
 	complete(&args->started);
-	nfs_delegation_claim_opens(inode);
+	nfs_delegation_claim_opens(inode, args->stateid);
 	up_write(&nfsi->rwsem);
 	up_read(&clp->cl_sem);
 	nfs_msync_inode(inode);
@@ -371,14 +409,14 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs
 {
 	struct nfs_delegation *delegation;
 	struct inode *res = NULL;
-	spin_lock(&clp->cl_lock);
-	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
 		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
 			res = igrab(delegation->inode);
 			break;
 		}
 	}
-	spin_unlock(&clp->cl_lock);
+	rcu_read_unlock();
 	return res;
 }
 
@@ -388,10 +426,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs
 void nfs_delegation_mark_reclaim(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
-	spin_lock(&clp->cl_lock);
-	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
+	rcu_read_lock();
+	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
 		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
-	spin_unlock(&clp->cl_lock);
+	rcu_read_unlock();
 }
 
 /*
@@ -399,39 +437,35 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp)
  */
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
 {
-	struct nfs_delegation *delegation, *n;
-	LIST_HEAD(head);
-	spin_lock(&clp->cl_lock);
-	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
+	struct nfs_delegation *delegation;
+restart:
+	rcu_read_lock();
+	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
 		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
 			continue;
-		list_move(&delegation->super_list, &head);
-		NFS_I(delegation->inode)->delegation = NULL;
-		NFS_I(delegation->inode)->delegation_state = 0;
-	}
-	spin_unlock(&clp->cl_lock);
-	while(!list_empty(&head)) {
-		delegation = list_entry(head.next, struct nfs_delegation, super_list);
-		list_del(&delegation->super_list);
-		nfs_free_delegation(delegation);
+		spin_lock(&clp->cl_lock);
+		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
+		spin_unlock(&clp->cl_lock);
+		rcu_read_unlock();
+		if (delegation != NULL)
+			call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+		goto restart;
 	}
+	rcu_read_unlock();
 }
 
 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
-	int res = 0;
+	int ret = 0;
 
-	if (nfsi->delegation_state == 0)
-		return 0;
-	spin_lock(&clp->cl_lock);
-	delegation = nfsi->delegation;
+	rcu_read_lock();
+	delegation = rcu_dereference(nfsi->delegation);
 	if (delegation != NULL) {
 		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
-		res = 1;
+		ret = 1;
 	}
-	spin_unlock(&clp->cl_lock);
-	return res;
+	rcu_read_unlock();
+	return ret;
 }
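The delegation.c conversion above is the standard RCU list pattern: readers walk clp->cl_delegations under rcu_read_lock() with list_for_each_entry_rcu(), writers unlink entries under cl_lock with list_del_rcu()/rcu_assign_pointer(), and the memory is freed only after a grace period via call_rcu(). A stripped-down sketch of that shape with made-up types (not the NFS structures):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct example_entry {
		struct list_head	list;
		struct rcu_head		rcu;
	};

	static void example_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct example_entry, rcu));
	}

	/* Writer: unlink under the lock, defer the free past a grace period. */
	static void example_detach(spinlock_t *lock, struct example_entry *e)
	{
		spin_lock(lock);
		list_del_rcu(&e->list);
		spin_unlock(lock);
		call_rcu(&e->rcu, example_free_rcu);
	}

	/* Reader: traversal only needs rcu_read_lock(), no spinlock. */
	static int example_contains(struct list_head *head, struct example_entry *needle)
	{
		struct example_entry *e;
		int found = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(e, head, list) {
			if (e == needle) {
				found = 1;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}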
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2cfd4b24c7fe..5874ce7fdbae 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -22,11 +22,12 @@ struct nfs_delegation {
 	long flags;
 	loff_t maxsize;
 	__u64 change_attr;
+	struct rcu_head rcu;
 };
 
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
-int __nfs_inode_return_delegation(struct inode *inode);
+int nfs_inode_return_delegation(struct inode *inode);
 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
 
 struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
@@ -39,27 +40,24 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
 
 /* NFSv4 delegation-related procedures */
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid);
-int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
 
 static inline int nfs_have_delegation(struct inode *inode, int flags)
 {
+	struct nfs_delegation *delegation;
+	int ret = 0;
+
 	flags &= FMODE_READ|FMODE_WRITE;
-	smp_rmb();
-	if ((NFS_I(inode)->delegation_state & flags) == flags)
-		return 1;
-	return 0;
+	rcu_read_lock();
+	delegation = rcu_dereference(NFS_I(inode)->delegation);
+	if (delegation != NULL && (delegation->type & flags) == flags)
+		ret = 1;
+	rcu_read_unlock();
+	return ret;
 }
 
-static inline int nfs_inode_return_delegation(struct inode *inode)
-{
-	int err = 0;
-
-	if (NFS_I(inode)->delegation != NULL)
-		err = __nfs_inode_return_delegation(inode);
-	return err;
-}
 #else
 static inline int nfs_have_delegation(struct inode *inode, int flags)
 {
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c27258b5d3e1..322141f4ab48 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -897,14 +897,13 @@ int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
 	return (nd->intent.open.flags & O_EXCL) != 0;
 }
 
-static inline int nfs_reval_fsid(struct vfsmount *mnt, struct inode *dir,
-				 struct nfs_fh *fh, struct nfs_fattr *fattr)
+static inline int nfs_reval_fsid(struct inode *dir, const struct nfs_fattr *fattr)
 {
 	struct nfs_server *server = NFS_SERVER(dir);
 
 	if (!nfs_fsid_equal(&server->fsid, &fattr->fsid))
-		/* Revalidate fsid on root dir */
-		return __nfs_revalidate_inode(server, mnt->mnt_root->d_inode);
+		/* Revalidate fsid using the parent directory */
+		return __nfs_revalidate_inode(server, dir);
 	return 0;
 }
 
@@ -946,7 +945,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
 		res = ERR_PTR(error);
 		goto out_unlock;
 	}
-	error = nfs_reval_fsid(nd->mnt, dir, &fhandle, &fattr);
+	error = nfs_reval_fsid(dir, &fattr);
 	if (error < 0) {
 		res = ERR_PTR(error);
 		goto out_unlock;
@@ -1244,7 +1243,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
 	attr.ia_mode = mode;
 	attr.ia_valid = ATTR_MODE;
 
-	if (nd && (nd->flags & LOOKUP_CREATE))
+	if ((nd->flags & LOOKUP_CREATE) != 0)
 		open_flags = nd->intent.open.flags;
 
 	lock_kernel();
@@ -1535,7 +1534,7 @@ static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *sym
 
 	lock_kernel();
 
-	page = alloc_page(GFP_KERNEL);
+	page = alloc_page(GFP_HIGHUSER);
 	if (!page) {
 		unlock_kernel();
 		return -ENOMEM;
@@ -1744,8 +1743,8 @@ int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
 	struct nfs_inode *nfsi;
 	struct nfs_access_entry *cache;
 
-	spin_lock(&nfs_access_lru_lock);
 restart:
+	spin_lock(&nfs_access_lru_lock);
 	list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
 		struct inode *inode;
 
@@ -1770,6 +1769,7 @@ remove_lru_entry:
 			clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
 		}
 		spin_unlock(&inode->i_lock);
+		spin_unlock(&nfs_access_lru_lock);
 		iput(inode);
 		goto restart;
 	}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 00eee87510fe..a5c82b6f3b45 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -266,7 +266,7 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
 static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
-	struct inode *inode = ctx->dentry->d_inode;
+	struct inode *inode = ctx->path.dentry->d_inode;
 	size_t rsize = NFS_SERVER(inode)->rsize;
 	unsigned int pgbase;
 	int result;
@@ -295,9 +295,14 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 			break;
 		}
 		if ((unsigned)result < data->npages) {
-			nfs_direct_release_pages(data->pagevec, result);
-			nfs_readdata_release(data);
-			break;
+			bytes = result * PAGE_SIZE;
+			if (bytes <= pgbase) {
+				nfs_direct_release_pages(data->pagevec, result);
+				nfs_readdata_release(data);
+				break;
+			}
+			bytes -= pgbase;
+			data->npages = result;
 		}
 
 		get_dreq(dreq);
@@ -601,7 +606,7 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
 static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
-	struct inode *inode = ctx->dentry->d_inode;
+	struct inode *inode = ctx->path.dentry->d_inode;
 	size_t wsize = NFS_SERVER(inode)->wsize;
 	unsigned int pgbase;
 	int result;
@@ -630,9 +635,14 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 			break;
 		}
 		if ((unsigned)result < data->npages) {
-			nfs_direct_release_pages(data->pagevec, result);
-			nfs_writedata_release(data);
-			break;
+			bytes = result * PAGE_SIZE;
+			if (bytes <= pgbase) {
+				nfs_direct_release_pages(data->pagevec, result);
+				nfs_writedata_release(data);
+				break;
+			}
+			bytes -= pgbase;
+			data->npages = result;
 		}
 
 		get_dreq(dreq);
@@ -763,10 +773,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
 		(unsigned long) count, (long long) pos);
 
 	if (nr_segs != 1)
-		return -EINVAL;
-
-	if (count < 0)
 		goto out;
+
 	retval = -EFAULT;
 	if (!access_ok(VERIFY_WRITE, buf, count))
 		goto out;
@@ -814,7 +822,7 @@ out:
 ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 				unsigned long nr_segs, loff_t pos)
 {
-	ssize_t retval;
+	ssize_t retval = -EINVAL;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	/* XXX: temporary */
@@ -827,7 +835,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 		(unsigned long) count, (long long) pos);
 
 	if (nr_segs != 1)
-		return -EINVAL;
+		goto out;
 
 	retval = generic_write_checks(file, &pos, &count, 0);
 	if (retval)
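The two direct.c hunks above cope with get_user_pages() pinning fewer pages than asked for: the transfer is trimmed to what the pinned pages can actually hold, result * PAGE_SIZE minus the offset into the first page, and abandoned only when even that leaves nothing. A small worked sketch of the arithmetic (illustrative helper, not the kernel function):

	#include <stddef.h>

	#define EXAMPLE_PAGE_SIZE 4096UL

	/* Bytes usable from a partially pinned page run starting at pgbase,
	 * clamped to the originally requested length; 0 means bail out. */
	static size_t example_trim_to_pinned(unsigned int pinned_pages,
					     size_t pgbase, size_t requested)
	{
		size_t bytes = (size_t)pinned_pages * EXAMPLE_PAGE_SIZE;

		if (bytes <= pgbase)
			return 0;	/* nothing reaches past the offset */
		bytes -= pgbase;
		return bytes < requested ? bytes : requested;
	}

For example, pinning 2 pages for an I/O that starts 1024 bytes into the first page leaves 2 * 4096 - 1024 = 7168 usable bytes.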
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index bd9f5a836592..3d9fccf4ef93 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -461,14 +461,14 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx != NULL) {
-		atomic_set(&ctx->count, 1);
-		ctx->dentry = dget(dentry);
-		ctx->vfsmnt = mntget(mnt);
+		ctx->path.dentry = dget(dentry);
+		ctx->path.mnt = mntget(mnt);
 		ctx->cred = get_rpccred(cred);
 		ctx->state = NULL;
 		ctx->lockowner = current->files;
 		ctx->error = 0;
 		ctx->dir_cookie = 0;
+		kref_init(&ctx->kref);
 	}
 	return ctx;
 }
@@ -476,27 +476,33 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
 {
 	if (ctx != NULL)
-		atomic_inc(&ctx->count);
+		kref_get(&ctx->kref);
 	return ctx;
 }
 
-void put_nfs_open_context(struct nfs_open_context *ctx)
+static void nfs_free_open_context(struct kref *kref)
 {
-	if (atomic_dec_and_test(&ctx->count)) {
-		if (!list_empty(&ctx->list)) {
-			struct inode *inode = ctx->dentry->d_inode;
-			spin_lock(&inode->i_lock);
-			list_del(&ctx->list);
-			spin_unlock(&inode->i_lock);
-		}
-		if (ctx->state != NULL)
-			nfs4_close_state(ctx->state, ctx->mode);
-		if (ctx->cred != NULL)
-			put_rpccred(ctx->cred);
-		dput(ctx->dentry);
-		mntput(ctx->vfsmnt);
-		kfree(ctx);
+	struct nfs_open_context *ctx = container_of(kref,
+			struct nfs_open_context, kref);
+
+	if (!list_empty(&ctx->list)) {
+		struct inode *inode = ctx->path.dentry->d_inode;
+		spin_lock(&inode->i_lock);
+		list_del(&ctx->list);
+		spin_unlock(&inode->i_lock);
 	}
+	if (ctx->state != NULL)
+		nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
+	if (ctx->cred != NULL)
+		put_rpccred(ctx->cred);
+	dput(ctx->path.dentry);
+	mntput(ctx->path.mnt);
+	kfree(ctx);
+}
+
+void put_nfs_open_context(struct nfs_open_context *ctx)
+{
+	kref_put(&ctx->kref, nfs_free_open_context);
 }
 
 /*
@@ -961,8 +967,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 		goto out_changed;
 
 	server = NFS_SERVER(inode);
-	/* Update the fsid if and only if this is the root directory */
-	if (inode == inode->i_sb->s_root->d_inode
+	/* Update the fsid? */
+	if (S_ISDIR(inode->i_mode)
 			&& !nfs_fsid_equal(&server->fsid, &fattr->fsid))
 		server->fsid = fattr->fsid;
 
@@ -1066,8 +1072,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 		invalid &= ~NFS_INO_INVALID_DATA;
 	if (data_stable)
 		invalid &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE);
-	if (!nfs_have_delegation(inode, FMODE_READ))
+	if (!nfs_have_delegation(inode, FMODE_READ) ||
+			(nfsi->cache_validity & NFS_INO_REVAL_FORCED))
 		nfsi->cache_validity |= invalid;
+	nfsi->cache_validity &= ~NFS_INO_REVAL_FORCED;
 
 	return 0;
 out_changed:
@@ -1103,27 +1111,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
  */
 void nfs4_clear_inode(struct inode *inode)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
-
 	/* If we are holding a delegation, return it! */
 	nfs_inode_return_delegation(inode);
 	/* First call standard NFS clear_inode() code */
 	nfs_clear_inode(inode);
-	/* Now clear out any remaining state */
-	while (!list_empty(&nfsi->open_states)) {
-		struct nfs4_state *state;
-
-		state = list_entry(nfsi->open_states.next,
-				struct nfs4_state,
-				inode_states);
-		dprintk("%s(%s/%Ld): found unclaimed NFSv4 state %p\n",
-				__FUNCTION__,
-				inode->i_sb->s_id,
-				(long long)NFS_FILEID(inode),
-				state);
-		BUG_ON(atomic_read(&state->count) != 1);
-		nfs4_close_state(state, state->state);
-	}
 }
 #endif
 
@@ -1165,15 +1156,11 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
 	inode_init_once(&nfsi->vfs_inode);
-	spin_lock_init(&nfsi->req_lock);
-	INIT_LIST_HEAD(&nfsi->dirty);
-	INIT_LIST_HEAD(&nfsi->commit);
 	INIT_LIST_HEAD(&nfsi->open_files);
 	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
 	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
 	INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
 	atomic_set(&nfsi->data_updates, 0);
-	nfsi->ndirty = 0;
 	nfsi->ncommit = 0;
 	nfsi->npages = 0;
 	nfs4_init_once(nfsi);
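The inode.c hunks above switch struct nfs_open_context from a hand-rolled atomic_t refcount to a kref, with all teardown funnelled through a single release callback, and drop the explicit per-inode NFSv4 state sweep from nfs4_clear_inode(). The general kref shape, as a hedged sketch with made-up names:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct example_ctx {
		struct kref	kref;
		/* ... resources owned by the context ... */
	};

	static void example_ctx_release(struct kref *kref)
	{
		struct example_ctx *ctx = container_of(kref, struct example_ctx, kref);

		/* drop whatever the context pins (dentry, vfsmount, cred, ...) */
		kfree(ctx);
	}

	static struct example_ctx *example_ctx_alloc(void)
	{
		struct example_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

		if (ctx != NULL)
			kref_init(&ctx->kref);	/* refcount starts at 1 */
		return ctx;
	}

	/* kref_get() takes an extra reference; the last put runs the release. */
	static void example_ctx_put(struct example_ctx *ctx)
	{
		kref_put(&ctx->kref, example_ctx_release);
	}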
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ad2b40db1e65..76cf55d57101 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -183,9 +183,9 @@ unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
 /*
  * Calculate the number of 512byte blocks used.
  */
-static inline unsigned long nfs_calc_block_size(u64 tsize)
+static inline blkcnt_t nfs_calc_block_size(u64 tsize)
 {
-	loff_t used = (tsize + 511) >> 9;
+	blkcnt_t used = (tsize + 511) >> 9;
 	return (used > ULONG_MAX) ? ULONG_MAX : used;
 }
 
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index ca5a266a3140..8afd9f7e7a97 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -1,7 +1,5 @@
 /*
- * linux/fs/nfs/mount_clnt.c
- *
- * MOUNT client to support NFSroot.
+ * In-kernel MOUNT protocol client
  *
  * Copyright (C) 1997, Olaf Kirch <okir@monad.swb.de>
  */
@@ -18,33 +16,31 @@
 #include <linux/nfs_fs.h>
 
 #ifdef RPC_DEBUG
-# define NFSDBG_FACILITY	NFSDBG_ROOT
+# define NFSDBG_FACILITY	NFSDBG_MOUNT
 #endif
 
-/*
-#define MOUNT_PROGRAM	100005
-#define MOUNT_VERSION	1
-#define MOUNT_MNT	1
-#define MOUNT_UMNT	3
- */
-
-static struct rpc_clnt *	mnt_create(char *, struct sockaddr_in *,
-					   int, int);
 static struct rpc_program	mnt_program;
 
 struct mnt_fhstatus {
-	unsigned int		status;
-	struct nfs_fh *		fh;
+	u32			status;
+	struct nfs_fh		*fh;
 };
 
-/*
- * Obtain an NFS file handle for the given host and path
+/**
+ * nfs_mount - Obtain an NFS file handle for the given host and path
+ * @addr: pointer to server's address
+ * @len: size of server's address
+ * @hostname: name of server host, or NULL
+ * @path: pointer to string containing export path to mount
+ * @version: mount version to use for this request
+ * @protocol: transport protocol to use for thie request
+ * @fh: pointer to location to place returned file handle
+ *
+ * Uses default timeout parameters specified by underlying transport.
  */
-int
-nfsroot_mount(struct sockaddr_in *addr, char *path, struct nfs_fh *fh,
-		int version, int protocol)
+int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path,
+	      int version, int protocol, struct nfs_fh *fh)
 {
-	struct rpc_clnt		*mnt_clnt;
 	struct mnt_fhstatus	result = {
 		.fh		= fh
 	};
@@ -52,16 +48,25 @@ nfsroot_mount(struct sockaddr_in *addr, char *path, struct nfs_fh *fh,
 		.rpc_argp	= path,
 		.rpc_resp	= &result,
 	};
-	char			hostname[32];
+	struct rpc_create_args args = {
+		.protocol	= protocol,
+		.address	= addr,
+		.addrsize	= len,
+		.servername	= hostname,
+		.program	= &mnt_program,
+		.version	= version,
+		.authflavor	= RPC_AUTH_UNIX,
+		.flags		= RPC_CLNT_CREATE_INTR,
+	};
+	struct rpc_clnt		*mnt_clnt;
 	int			status;
 
-	dprintk("NFS: nfs_mount(%08x:%s)\n",
-			(unsigned)ntohl(addr->sin_addr.s_addr), path);
+	dprintk("NFS: sending MNT request for %s:%s\n",
+			(hostname ? hostname : "server"), path);
 
-	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(addr->sin_addr.s_addr));
-	mnt_clnt = mnt_create(hostname, addr, version, protocol);
+	mnt_clnt = rpc_create(&args);
 	if (IS_ERR(mnt_clnt))
-		return PTR_ERR(mnt_clnt);
+		goto out_clnt_err;
 
 	if (version == NFS_MNT3_VERSION)
 		msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
@@ -69,33 +74,39 @@ nfsroot_mount(struct sockaddr_in *addr, char *path, struct nfs_fh *fh,
 		msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT];
 
 	status = rpc_call_sync(mnt_clnt, &msg, 0);
-	return status < 0? status : (result.status? -EACCES : 0);
-}
+	rpc_shutdown_client(mnt_clnt);
 
-static struct rpc_clnt *
-mnt_create(char *hostname, struct sockaddr_in *srvaddr, int version,
-		int protocol)
-{
-	struct rpc_create_args args = {
-		.protocol	= protocol,
-		.address	= (struct sockaddr *)srvaddr,
-		.addrsize	= sizeof(*srvaddr),
-		.servername	= hostname,
-		.program	= &mnt_program,
-		.version	= version,
-		.authflavor	= RPC_AUTH_UNIX,
-		.flags		= (RPC_CLNT_CREATE_ONESHOT |
-				   RPC_CLNT_CREATE_INTR),
-	};
+	if (status < 0)
+		goto out_call_err;
+	if (result.status != 0)
+		goto out_mnt_err;
+
+	dprintk("NFS: MNT request succeeded\n");
+	status = 0;
+
+out:
+	return status;
+
+out_clnt_err:
+	status = PTR_ERR(mnt_clnt);
+	dprintk("NFS: failed to create RPC client, status=%d\n", status);
+	goto out;
+
+out_call_err:
+	dprintk("NFS: failed to start MNT request, status=%d\n", status);
+	goto out;
 
-	return rpc_create(&args);
+out_mnt_err:
+	dprintk("NFS: MNT server returned result %d\n", result.status);
+	status = -EACCES;
+	goto out;
 }
 
 /*
  * XDR encode/decode functions for MOUNT
  */
-static int
-xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p, const char *path)
+static int xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p,
+			      const char *path)
 {
 	p = xdr_encode_string(p, path);
 
@@ -103,8 +114,8 @@ xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p, const char *path)
 	return 0;
 }
 
-static int
-xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res)
+static int xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p,
+			       struct mnt_fhstatus *res)
 {
 	struct nfs_fh *fh = res->fh;
 
@@ -115,8 +126,8 @@ xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res)
 	return 0;
 }
 
-static int
-xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res)
+static int xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p,
+				struct mnt_fhstatus *res)
 {
 	struct nfs_fh *fh = res->fh;
 
| @@ -135,53 +146,53 @@ xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res) | |||
| 135 | #define MNT_fhstatus_sz (1 + 8) | 146 | #define MNT_fhstatus_sz (1 + 8) |
| 136 | #define MNT_fhstatus3_sz (1 + 16) | 147 | #define MNT_fhstatus3_sz (1 + 16) |
| 137 | 148 | ||
| 138 | static struct rpc_procinfo mnt_procedures[] = { | 149 | static struct rpc_procinfo mnt_procedures[] = { |
| 139 | [MNTPROC_MNT] = { | 150 | [MNTPROC_MNT] = { |
| 140 | .p_proc = MNTPROC_MNT, | 151 | .p_proc = MNTPROC_MNT, |
| 141 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, | 152 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, |
| 142 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus, | 153 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus, |
| 143 | .p_arglen = MNT_dirpath_sz, | 154 | .p_arglen = MNT_dirpath_sz, |
| 144 | .p_replen = MNT_fhstatus_sz, | 155 | .p_replen = MNT_fhstatus_sz, |
| 145 | .p_statidx = MNTPROC_MNT, | 156 | .p_statidx = MNTPROC_MNT, |
| 146 | .p_name = "MOUNT", | 157 | .p_name = "MOUNT", |
| 147 | }, | 158 | }, |
| 148 | }; | 159 | }; |
| 149 | 160 | ||
| 150 | static struct rpc_procinfo mnt3_procedures[] = { | 161 | static struct rpc_procinfo mnt3_procedures[] = { |
| 151 | [MOUNTPROC3_MNT] = { | 162 | [MOUNTPROC3_MNT] = { |
| 152 | .p_proc = MOUNTPROC3_MNT, | 163 | .p_proc = MOUNTPROC3_MNT, |
| 153 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, | 164 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, |
| 154 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus3, | 165 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus3, |
| 155 | .p_arglen = MNT_dirpath_sz, | 166 | .p_arglen = MNT_dirpath_sz, |
| 156 | .p_replen = MNT_fhstatus3_sz, | 167 | .p_replen = MNT_fhstatus3_sz, |
| 157 | .p_statidx = MOUNTPROC3_MNT, | 168 | .p_statidx = MOUNTPROC3_MNT, |
| 158 | .p_name = "MOUNT", | 169 | .p_name = "MOUNT", |
| 159 | }, | 170 | }, |
| 160 | }; | 171 | }; |
| 161 | 172 | ||
| 162 | 173 | ||
| 163 | static struct rpc_version mnt_version1 = { | 174 | static struct rpc_version mnt_version1 = { |
| 164 | .number = 1, | 175 | .number = 1, |
| 165 | .nrprocs = 2, | 176 | .nrprocs = 2, |
| 166 | .procs = mnt_procedures | 177 | .procs = mnt_procedures, |
| 167 | }; | 178 | }; |
| 168 | 179 | ||
| 169 | static struct rpc_version mnt_version3 = { | 180 | static struct rpc_version mnt_version3 = { |
| 170 | .number = 3, | 181 | .number = 3, |
| 171 | .nrprocs = 2, | 182 | .nrprocs = 2, |
| 172 | .procs = mnt3_procedures | 183 | .procs = mnt3_procedures, |
| 173 | }; | 184 | }; |
| 174 | 185 | ||
| 175 | static struct rpc_version * mnt_version[] = { | 186 | static struct rpc_version *mnt_version[] = { |
| 176 | NULL, | 187 | NULL, |
| 177 | &mnt_version1, | 188 | &mnt_version1, |
| 178 | NULL, | 189 | NULL, |
| 179 | &mnt_version3, | 190 | &mnt_version3, |
| 180 | }; | 191 | }; |
| 181 | 192 | ||
| 182 | static struct rpc_stat mnt_stats; | 193 | static struct rpc_stat mnt_stats; |
| 183 | 194 | ||
| 184 | static struct rpc_program mnt_program = { | 195 | static struct rpc_program mnt_program = { |
| 185 | .name = "mount", | 196 | .name = "mount", |
| 186 | .number = NFS_MNT_PROGRAM, | 197 | .number = NFS_MNT_PROGRAM, |
| 187 | .nrvers = ARRAY_SIZE(mnt_version), | 198 | .nrvers = ARRAY_SIZE(mnt_version), |
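For orientation, here is a hypothetical caller of the reworked nfs_mount() shown above. This is a sketch only, not part of the patch: the prototype and NFS_MNT3_VERSION come from the hunks in this file, while the example function name, the server/path strings, the header choices and the use of IPPROTO_UDP are assumptions (the declaration itself is expected to live in the NFS client's internal header).

#include <linux/in.h>
#include <linux/nfs.h>

/* Hypothetical caller sketch: obtain the root file handle for an export
 * over UDP using the new nfs_mount() signature introduced above. */
static int example_fetch_root_fh(__be32 server_ip, struct nfs_fh *fh)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr   = { .s_addr = server_ip },
	};

	/* The hostname argument only names the transient MNT RPC client. */
	return nfs_mount((struct sockaddr *)&sin, sizeof(sin),
			 "nfs-server", "/export/root",
			 NFS_MNT3_VERSION, IPPROTO_UDP, fh);
}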
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index cd3ca7b5d3db..7fcc78f2aa71 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c | |||
| @@ -223,7 +223,7 @@ nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args) | |||
| 223 | static int | 223 | static int |
| 224 | nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) | 224 | nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) |
| 225 | { | 225 | { |
| 226 | struct rpc_auth *auth = req->rq_task->tk_auth; | 226 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 227 | unsigned int replen; | 227 | unsigned int replen; |
| 228 | u32 offset = (u32)args->offset; | 228 | u32 offset = (u32)args->offset; |
| 229 | u32 count = args->count; | 229 | u32 count = args->count; |
| @@ -380,7 +380,7 @@ static int | |||
| 380 | nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args) | 380 | nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args) |
| 381 | { | 381 | { |
| 382 | struct rpc_task *task = req->rq_task; | 382 | struct rpc_task *task = req->rq_task; |
| 383 | struct rpc_auth *auth = task->tk_auth; | 383 | struct rpc_auth *auth = task->tk_msg.rpc_cred->cr_auth; |
| 384 | unsigned int replen; | 384 | unsigned int replen; |
| 385 | u32 count = args->count; | 385 | u32 count = args->count; |
| 386 | 386 | ||
| @@ -541,7 +541,7 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res) | |||
| 541 | static int | 541 | static int |
| 542 | nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args) | 542 | nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args) |
| 543 | { | 543 | { |
| 544 | struct rpc_auth *auth = req->rq_task->tk_auth; | 544 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 545 | unsigned int replen; | 545 | unsigned int replen; |
| 546 | 546 | ||
| 547 | p = xdr_encode_fhandle(p, args->fh); | 547 | p = xdr_encode_fhandle(p, args->fh); |
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 45268d6def2e..814d886b6aa4 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
| @@ -335,9 +335,7 @@ again: | |||
| 335 | * not sure this buys us anything (and I'd have | 335 | * not sure this buys us anything (and I'd have |
| 336 | * to revamp the NFSv3 XDR code) */ | 336 | * to revamp the NFSv3 XDR code) */ |
| 337 | status = nfs3_proc_setattr(dentry, &fattr, sattr); | 337 | status = nfs3_proc_setattr(dentry, &fattr, sattr); |
| 338 | if (status == 0) | 338 | nfs_post_op_update_inode(dentry->d_inode, &fattr); |
| 339 | nfs_setattr_update_inode(dentry->d_inode, sattr); | ||
| 340 | nfs_refresh_inode(dentry->d_inode, &fattr); | ||
| 341 | dprintk("NFS reply setattr (post-create): %d\n", status); | 339 | dprintk("NFS reply setattr (post-create): %d\n", status); |
| 342 | } | 340 | } |
| 343 | if (status != 0) | 341 | if (status != 0) |
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index b51df8eb9f01..b4647a22f349 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c | |||
| @@ -319,7 +319,7 @@ nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *arg | |||
| 319 | static int | 319 | static int |
| 320 | nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) | 320 | nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) |
| 321 | { | 321 | { |
| 322 | struct rpc_auth *auth = req->rq_task->tk_auth; | 322 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 323 | unsigned int replen; | 323 | unsigned int replen; |
| 324 | u32 count = args->count; | 324 | u32 count = args->count; |
| 325 | 325 | ||
| @@ -458,7 +458,7 @@ nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args) | |||
| 458 | static int | 458 | static int |
| 459 | nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args) | 459 | nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args) |
| 460 | { | 460 | { |
| 461 | struct rpc_auth *auth = req->rq_task->tk_auth; | 461 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 462 | unsigned int replen; | 462 | unsigned int replen; |
| 463 | u32 count = args->count; | 463 | u32 count = args->count; |
| 464 | 464 | ||
| @@ -643,7 +643,7 @@ static int | |||
| 643 | nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p, | 643 | nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p, |
| 644 | struct nfs3_getaclargs *args) | 644 | struct nfs3_getaclargs *args) |
| 645 | { | 645 | { |
| 646 | struct rpc_auth *auth = req->rq_task->tk_auth; | 646 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 647 | unsigned int replen; | 647 | unsigned int replen; |
| 648 | 648 | ||
| 649 | p = xdr_encode_fhandle(p, args->fh); | 649 | p = xdr_encode_fhandle(p, args->fh); |
| @@ -773,7 +773,7 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res) | |||
| 773 | static int | 773 | static int |
| 774 | nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args) | 774 | nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args) |
| 775 | { | 775 | { |
| 776 | struct rpc_auth *auth = req->rq_task->tk_auth; | 776 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 777 | unsigned int replen; | 777 | unsigned int replen; |
| 778 | 778 | ||
| 779 | p = xdr_encode_fhandle(p, args->fh); | 779 | p = xdr_encode_fhandle(p, args->fh); |
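The recurring change in the NFSv2/NFSv3 encoders above replaces req->rq_task->tk_auth with the rpc_auth hanging off the request's credential. A condensed sketch of the reply-buffer sizing those encoders perform with it follows; it is illustrative only (the helper name and the res_sz parameter are invented, and the RPC_REPHDRSIZE/au_rslack arithmetic reflects the usual pattern in these encoders rather than lines visible in the hunks shown).

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>

/* Sketch: compute the reply length (in bytes) the way the encoders above
 * do, now that the rpc_auth is taken from the request's credential. */
static unsigned int example_reply_len(struct rpc_rqst *req, unsigned int res_sz)
{
	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;

	/* RPC reply header + per-flavor verifier slack + procedure results */
	return (RPC_REPHDRSIZE + auth->au_rslack + res_sz) << 2;
}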
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index cf3a17eb5c09..6c028e734fe6 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
| @@ -70,19 +70,26 @@ static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status | |||
| 70 | seqid->flags |= NFS_SEQID_CONFIRMED; | 70 | seqid->flags |= NFS_SEQID_CONFIRMED; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | struct nfs_unique_id { | ||
| 74 | struct rb_node rb_node; | ||
| 75 | __u64 id; | ||
| 76 | }; | ||
| 77 | |||
| 73 | /* | 78 | /* |
| 74 | * NFS4 state_owners and lock_owners are simply labels for ordered | 79 | * NFS4 state_owners and lock_owners are simply labels for ordered |
| 75 | * sequences of RPC calls. Their sole purpose is to provide once-only | 80 | * sequences of RPC calls. Their sole purpose is to provide once-only |
| 76 | * semantics by allowing the server to identify replayed requests. | 81 | * semantics by allowing the server to identify replayed requests. |
| 77 | */ | 82 | */ |
| 78 | struct nfs4_state_owner { | 83 | struct nfs4_state_owner { |
| 79 | spinlock_t so_lock; | 84 | struct nfs_unique_id so_owner_id; |
| 80 | struct list_head so_list; /* per-clientid list of state_owners */ | ||
| 81 | struct nfs_client *so_client; | 85 | struct nfs_client *so_client; |
| 82 | u32 so_id; /* 32-bit identifier, unique */ | 86 | struct nfs_server *so_server; |
| 83 | atomic_t so_count; | 87 | struct rb_node so_client_node; |
| 84 | 88 | ||
| 85 | struct rpc_cred *so_cred; /* Associated cred */ | 89 | struct rpc_cred *so_cred; /* Associated cred */ |
| 90 | |||
| 91 | spinlock_t so_lock; | ||
| 92 | atomic_t so_count; | ||
| 86 | struct list_head so_states; | 93 | struct list_head so_states; |
| 87 | struct list_head so_delegations; | 94 | struct list_head so_delegations; |
| 88 | struct nfs_seqid_counter so_seqid; | 95 | struct nfs_seqid_counter so_seqid; |
| @@ -108,7 +115,7 @@ struct nfs4_lock_state { | |||
| 108 | #define NFS_LOCK_INITIALIZED 1 | 115 | #define NFS_LOCK_INITIALIZED 1 |
| 109 | int ls_flags; | 116 | int ls_flags; |
| 110 | struct nfs_seqid_counter ls_seqid; | 117 | struct nfs_seqid_counter ls_seqid; |
| 111 | u32 ls_id; | 118 | struct nfs_unique_id ls_id; |
| 112 | nfs4_stateid ls_stateid; | 119 | nfs4_stateid ls_stateid; |
| 113 | atomic_t ls_count; | 120 | atomic_t ls_count; |
| 114 | }; | 121 | }; |
| @@ -116,7 +123,10 @@ struct nfs4_lock_state { | |||
| 116 | /* bits for nfs4_state->flags */ | 123 | /* bits for nfs4_state->flags */ |
| 117 | enum { | 124 | enum { |
| 118 | LK_STATE_IN_USE, | 125 | LK_STATE_IN_USE, |
| 119 | NFS_DELEGATED_STATE, | 126 | NFS_DELEGATED_STATE, /* Current stateid is delegation */ |
| 127 | NFS_O_RDONLY_STATE, /* OPEN stateid has read-only state */ | ||
| 128 | NFS_O_WRONLY_STATE, /* OPEN stateid has write-only state */ | ||
| 129 | NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ | ||
| 120 | }; | 130 | }; |
| 121 | 131 | ||
| 122 | struct nfs4_state { | 132 | struct nfs4_state { |
| @@ -130,11 +140,14 @@ struct nfs4_state { | |||
| 130 | unsigned long flags; /* Do we hold any locks? */ | 140 | unsigned long flags; /* Do we hold any locks? */ |
| 131 | spinlock_t state_lock; /* Protects the lock_states list */ | 141 | spinlock_t state_lock; /* Protects the lock_states list */ |
| 132 | 142 | ||
| 133 | nfs4_stateid stateid; | 143 | seqlock_t seqlock; /* Protects the stateid/open_stateid */ |
| 144 | nfs4_stateid stateid; /* Current stateid: may be delegation */ | ||
| 145 | nfs4_stateid open_stateid; /* OPEN stateid */ | ||
| 134 | 146 | ||
| 135 | unsigned int n_rdonly; | 147 | /* The following 3 fields are protected by owner->so_lock */ |
| 136 | unsigned int n_wronly; | 148 | unsigned int n_rdonly; /* Number of read-only references */ |
| 137 | unsigned int n_rdwr; | 149 | unsigned int n_wronly; /* Number of write-only references */ |
| 150 | unsigned int n_rdwr; /* Number of read/write references */ | ||
| 138 | int state; /* State on the server (R,W, or RW) */ | 151 | int state; /* State on the server (R,W, or RW) */ |
| 139 | atomic_t count; | 152 | atomic_t count; |
| 140 | }; | 153 | }; |
| @@ -165,7 +178,7 @@ extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struc | |||
| 165 | extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *); | 178 | extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *); |
| 166 | extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); | 179 | extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); |
| 167 | extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); | 180 | extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); |
| 168 | extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state); | 181 | extern int nfs4_do_close(struct path *path, struct nfs4_state *state); |
| 169 | extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); | 182 | extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); |
| 170 | extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); | 183 | extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); |
| 171 | extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); | 184 | extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); |
| @@ -189,14 +202,13 @@ extern void nfs4_renew_state(struct work_struct *); | |||
| 189 | 202 | ||
| 190 | /* nfs4state.c */ | 203 | /* nfs4state.c */ |
| 191 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); | 204 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); |
| 192 | extern u32 nfs4_alloc_lockowner_id(struct nfs_client *); | ||
| 193 | 205 | ||
| 194 | extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); | 206 | extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); |
| 195 | extern void nfs4_put_state_owner(struct nfs4_state_owner *); | 207 | extern void nfs4_put_state_owner(struct nfs4_state_owner *); |
| 196 | extern void nfs4_drop_state_owner(struct nfs4_state_owner *); | 208 | extern void nfs4_drop_state_owner(struct nfs4_state_owner *); |
| 197 | extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); | 209 | extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); |
| 198 | extern void nfs4_put_open_state(struct nfs4_state *); | 210 | extern void nfs4_put_open_state(struct nfs4_state *); |
| 199 | extern void nfs4_close_state(struct nfs4_state *, mode_t); | 211 | extern void nfs4_close_state(struct path *, struct nfs4_state *, mode_t); |
| 200 | extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t); | 212 | extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t); |
| 201 | extern void nfs4_schedule_state_recovery(struct nfs_client *); | 213 | extern void nfs4_schedule_state_recovery(struct nfs_client *); |
| 202 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); | 214 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); |
| @@ -222,7 +234,7 @@ extern struct svc_version nfs4_callback_version1; | |||
| 222 | 234 | ||
| 223 | #else | 235 | #else |
| 224 | 236 | ||
| 225 | #define nfs4_close_state(a, b) do { } while (0) | 237 | #define nfs4_close_state(a, b, c) do { } while (0) |
| 226 | 238 | ||
| 227 | #endif /* CONFIG_NFS_V4 */ | 239 | #endif /* CONFIG_NFS_V4 */ |
| 228 | #endif /* __LINUX_FS_NFS_NFS4_FS.H */ | 240 | #endif /* __LINUX_FS_NFS_NFS4_FS.H */ |
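The seqlock added to struct nfs4_state above is what the nfs4proc.c changes below take with write_seqlock() when updating stateid/open_stateid. A reader-side counterpart is sketched here for illustration (the helper name is invented and assumes the NFSv4 client headers are in scope; the read_seqbegin()/read_seqretry() loop is the standard seqlock idiom):

#include <linux/seqlock.h>
#include <linux/string.h>

/* Illustrative reader of the seqlock-protected stateid; not from this patch. */
static void example_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(dst->data, state->stateid.data, sizeof(dst->data));
	} while (read_seqretry(&state->seqlock, seq));
}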
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 648e0ac0f90e..fee2da856c95 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -65,6 +65,7 @@ static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *) | |||
| 65 | static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry); | 65 | static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry); |
| 66 | static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception); | 66 | static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception); |
| 67 | static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp); | 67 | static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp); |
| 68 | static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int openflags); | ||
| 68 | 69 | ||
| 69 | /* Prevent leaks of NFSv4 errors into userland */ | 70 | /* Prevent leaks of NFSv4 errors into userland */ |
| 70 | int nfs4_map_errors(int err) | 71 | int nfs4_map_errors(int err) |
| @@ -214,27 +215,39 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) | |||
| 214 | } | 215 | } |
| 215 | 216 | ||
| 216 | struct nfs4_opendata { | 217 | struct nfs4_opendata { |
| 217 | atomic_t count; | 218 | struct kref kref; |
| 218 | struct nfs_openargs o_arg; | 219 | struct nfs_openargs o_arg; |
| 219 | struct nfs_openres o_res; | 220 | struct nfs_openres o_res; |
| 220 | struct nfs_open_confirmargs c_arg; | 221 | struct nfs_open_confirmargs c_arg; |
| 221 | struct nfs_open_confirmres c_res; | 222 | struct nfs_open_confirmres c_res; |
| 222 | struct nfs_fattr f_attr; | 223 | struct nfs_fattr f_attr; |
| 223 | struct nfs_fattr dir_attr; | 224 | struct nfs_fattr dir_attr; |
| 224 | struct dentry *dentry; | 225 | struct path path; |
| 225 | struct dentry *dir; | 226 | struct dentry *dir; |
| 226 | struct nfs4_state_owner *owner; | 227 | struct nfs4_state_owner *owner; |
| 228 | struct nfs4_state *state; | ||
| 227 | struct iattr attrs; | 229 | struct iattr attrs; |
| 228 | unsigned long timestamp; | 230 | unsigned long timestamp; |
| 231 | unsigned int rpc_done : 1; | ||
| 229 | int rpc_status; | 232 | int rpc_status; |
| 230 | int cancelled; | 233 | int cancelled; |
| 231 | }; | 234 | }; |
| 232 | 235 | ||
| 233 | static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, | 236 | |
| 237 | static void nfs4_init_opendata_res(struct nfs4_opendata *p) | ||
| 238 | { | ||
| 239 | p->o_res.f_attr = &p->f_attr; | ||
| 240 | p->o_res.dir_attr = &p->dir_attr; | ||
| 241 | p->o_res.server = p->o_arg.server; | ||
| 242 | nfs_fattr_init(&p->f_attr); | ||
| 243 | nfs_fattr_init(&p->dir_attr); | ||
| 244 | } | ||
| 245 | |||
| 246 | static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, | ||
| 234 | struct nfs4_state_owner *sp, int flags, | 247 | struct nfs4_state_owner *sp, int flags, |
| 235 | const struct iattr *attrs) | 248 | const struct iattr *attrs) |
| 236 | { | 249 | { |
| 237 | struct dentry *parent = dget_parent(dentry); | 250 | struct dentry *parent = dget_parent(path->dentry); |
| 238 | struct inode *dir = parent->d_inode; | 251 | struct inode *dir = parent->d_inode; |
| 239 | struct nfs_server *server = NFS_SERVER(dir); | 252 | struct nfs_server *server = NFS_SERVER(dir); |
| 240 | struct nfs4_opendata *p; | 253 | struct nfs4_opendata *p; |
| @@ -245,24 +258,19 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, | |||
| 245 | p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); | 258 | p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); |
| 246 | if (p->o_arg.seqid == NULL) | 259 | if (p->o_arg.seqid == NULL) |
| 247 | goto err_free; | 260 | goto err_free; |
| 248 | atomic_set(&p->count, 1); | 261 | p->path.mnt = mntget(path->mnt); |
| 249 | p->dentry = dget(dentry); | 262 | p->path.dentry = dget(path->dentry); |
| 250 | p->dir = parent; | 263 | p->dir = parent; |
| 251 | p->owner = sp; | 264 | p->owner = sp; |
| 252 | atomic_inc(&sp->so_count); | 265 | atomic_inc(&sp->so_count); |
| 253 | p->o_arg.fh = NFS_FH(dir); | 266 | p->o_arg.fh = NFS_FH(dir); |
| 254 | p->o_arg.open_flags = flags, | 267 | p->o_arg.open_flags = flags, |
| 255 | p->o_arg.clientid = server->nfs_client->cl_clientid; | 268 | p->o_arg.clientid = server->nfs_client->cl_clientid; |
| 256 | p->o_arg.id = sp->so_id; | 269 | p->o_arg.id = sp->so_owner_id.id; |
| 257 | p->o_arg.name = &dentry->d_name; | 270 | p->o_arg.name = &p->path.dentry->d_name; |
| 258 | p->o_arg.server = server; | 271 | p->o_arg.server = server; |
| 259 | p->o_arg.bitmask = server->attr_bitmask; | 272 | p->o_arg.bitmask = server->attr_bitmask; |
| 260 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; | 273 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; |
| 261 | p->o_res.f_attr = &p->f_attr; | ||
| 262 | p->o_res.dir_attr = &p->dir_attr; | ||
| 263 | p->o_res.server = server; | ||
| 264 | nfs_fattr_init(&p->f_attr); | ||
| 265 | nfs_fattr_init(&p->dir_attr); | ||
| 266 | if (flags & O_EXCL) { | 274 | if (flags & O_EXCL) { |
| 267 | u32 *s = (u32 *) p->o_arg.u.verifier.data; | 275 | u32 *s = (u32 *) p->o_arg.u.verifier.data; |
| 268 | s[0] = jiffies; | 276 | s[0] = jiffies; |
| @@ -274,6 +282,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, | |||
| 274 | p->c_arg.fh = &p->o_res.fh; | 282 | p->c_arg.fh = &p->o_res.fh; |
| 275 | p->c_arg.stateid = &p->o_res.stateid; | 283 | p->c_arg.stateid = &p->o_res.stateid; |
| 276 | p->c_arg.seqid = p->o_arg.seqid; | 284 | p->c_arg.seqid = p->o_arg.seqid; |
| 285 | nfs4_init_opendata_res(p); | ||
| 286 | kref_init(&p->kref); | ||
| 277 | return p; | 287 | return p; |
| 278 | err_free: | 288 | err_free: |
| 279 | kfree(p); | 289 | kfree(p); |
| @@ -282,27 +292,25 @@ err: | |||
| 282 | return NULL; | 292 | return NULL; |
| 283 | } | 293 | } |
| 284 | 294 | ||
| 285 | static void nfs4_opendata_free(struct nfs4_opendata *p) | 295 | static void nfs4_opendata_free(struct kref *kref) |
| 286 | { | 296 | { |
| 287 | if (p != NULL && atomic_dec_and_test(&p->count)) { | 297 | struct nfs4_opendata *p = container_of(kref, |
| 288 | nfs_free_seqid(p->o_arg.seqid); | 298 | struct nfs4_opendata, kref); |
| 289 | nfs4_put_state_owner(p->owner); | 299 | |
| 290 | dput(p->dir); | 300 | nfs_free_seqid(p->o_arg.seqid); |
| 291 | dput(p->dentry); | 301 | if (p->state != NULL) |
| 292 | kfree(p); | 302 | nfs4_put_open_state(p->state); |
| 293 | } | 303 | nfs4_put_state_owner(p->owner); |
| 304 | dput(p->dir); | ||
| 305 | dput(p->path.dentry); | ||
| 306 | mntput(p->path.mnt); | ||
| 307 | kfree(p); | ||
| 294 | } | 308 | } |
| 295 | 309 | ||
| 296 | /* Helper for asynchronous RPC calls */ | 310 | static void nfs4_opendata_put(struct nfs4_opendata *p) |
| 297 | static int nfs4_call_async(struct rpc_clnt *clnt, | ||
| 298 | const struct rpc_call_ops *tk_ops, void *calldata) | ||
| 299 | { | 311 | { |
| 300 | struct rpc_task *task; | 312 | if (p != NULL) |
| 301 | 313 | kref_put(&p->kref, nfs4_opendata_free); | |
| 302 | if (!(task = rpc_new_task(clnt, RPC_TASK_ASYNC, tk_ops, calldata))) | ||
| 303 | return -ENOMEM; | ||
| 304 | rpc_execute(task); | ||
| 305 | return 0; | ||
| 306 | } | 314 | } |
| 307 | 315 | ||
| 308 | static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) | 316 | static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) |
| @@ -316,7 +324,34 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) | |||
| 316 | return ret; | 324 | return ret; |
| 317 | } | 325 | } |
| 318 | 326 | ||
| 319 | static inline void update_open_stateflags(struct nfs4_state *state, mode_t open_flags) | 327 | static int can_open_cached(struct nfs4_state *state, int mode) |
| 328 | { | ||
| 329 | int ret = 0; | ||
| 330 | switch (mode & (FMODE_READ|FMODE_WRITE|O_EXCL)) { | ||
| 331 | case FMODE_READ: | ||
| 332 | ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0; | ||
| 333 | ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; | ||
| 334 | break; | ||
| 335 | case FMODE_WRITE: | ||
| 336 | ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0; | ||
| 337 | ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; | ||
| 338 | break; | ||
| 339 | case FMODE_READ|FMODE_WRITE: | ||
| 340 | ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; | ||
| 341 | } | ||
| 342 | return ret; | ||
| 343 | } | ||
| 344 | |||
| 345 | static int can_open_delegated(struct nfs_delegation *delegation, mode_t open_flags) | ||
| 346 | { | ||
| 347 | if ((delegation->type & open_flags) != open_flags) | ||
| 348 | return 0; | ||
| 349 | if (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) | ||
| 350 | return 0; | ||
| 351 | return 1; | ||
| 352 | } | ||
| 353 | |||
| 354 | static void update_open_stateflags(struct nfs4_state *state, mode_t open_flags) | ||
| 320 | { | 355 | { |
| 321 | switch (open_flags) { | 356 | switch (open_flags) { |
| 322 | case FMODE_WRITE: | 357 | case FMODE_WRITE: |
| @@ -328,41 +363,176 @@ static inline void update_open_stateflags(struct nfs4_state *state, mode_t open_ | |||
| 328 | case FMODE_READ|FMODE_WRITE: | 363 | case FMODE_READ|FMODE_WRITE: |
| 329 | state->n_rdwr++; | 364 | state->n_rdwr++; |
| 330 | } | 365 | } |
| 366 | nfs4_state_set_mode_locked(state, state->state | open_flags); | ||
| 331 | } | 367 | } |
| 332 | 368 | ||
| 333 | static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) | 369 | static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) |
| 334 | { | 370 | { |
| 335 | struct inode *inode = state->inode; | 371 | if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) |
| 372 | memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data)); | ||
| 373 | memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data)); | ||
| 374 | switch (open_flags) { | ||
| 375 | case FMODE_READ: | ||
| 376 | set_bit(NFS_O_RDONLY_STATE, &state->flags); | ||
| 377 | break; | ||
| 378 | case FMODE_WRITE: | ||
| 379 | set_bit(NFS_O_WRONLY_STATE, &state->flags); | ||
| 380 | break; | ||
| 381 | case FMODE_READ|FMODE_WRITE: | ||
| 382 | set_bit(NFS_O_RDWR_STATE, &state->flags); | ||
| 383 | } | ||
| 384 | } | ||
| 385 | |||
| 386 | static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) | ||
| 387 | { | ||
| 388 | write_seqlock(&state->seqlock); | ||
| 389 | nfs_set_open_stateid_locked(state, stateid, open_flags); | ||
| 390 | write_sequnlock(&state->seqlock); | ||
| 391 | } | ||
| 336 | 392 | ||
| 393 | static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *deleg_stateid, int open_flags) | ||
| 394 | { | ||
| 337 | open_flags &= (FMODE_READ|FMODE_WRITE); | 395 | open_flags &= (FMODE_READ|FMODE_WRITE); |
| 338 | /* Protect against nfs4_find_state_byowner() */ | 396 | /* |
| 397 | * Protect the call to nfs4_state_set_mode_locked and | ||
| 398 | * serialise the stateid update | ||
| 399 | */ | ||
| 400 | write_seqlock(&state->seqlock); | ||
| 401 | if (deleg_stateid != NULL) { | ||
| 402 | memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data)); | ||
| 403 | set_bit(NFS_DELEGATED_STATE, &state->flags); | ||
| 404 | } | ||
| 405 | if (open_stateid != NULL) | ||
| 406 | nfs_set_open_stateid_locked(state, open_stateid, open_flags); | ||
| 407 | write_sequnlock(&state->seqlock); | ||
| 339 | spin_lock(&state->owner->so_lock); | 408 | spin_lock(&state->owner->so_lock); |
| 340 | spin_lock(&inode->i_lock); | ||
| 341 | memcpy(&state->stateid, stateid, sizeof(state->stateid)); | ||
| 342 | update_open_stateflags(state, open_flags); | 409 | update_open_stateflags(state, open_flags); |
| 343 | nfs4_state_set_mode_locked(state, state->state | open_flags); | ||
| 344 | spin_unlock(&inode->i_lock); | ||
| 345 | spin_unlock(&state->owner->so_lock); | 410 | spin_unlock(&state->owner->so_lock); |
| 346 | } | 411 | } |
| 347 | 412 | ||
| 413 | static void nfs4_return_incompatible_delegation(struct inode *inode, mode_t open_flags) | ||
| 414 | { | ||
| 415 | struct nfs_delegation *delegation; | ||
| 416 | |||
| 417 | rcu_read_lock(); | ||
| 418 | delegation = rcu_dereference(NFS_I(inode)->delegation); | ||
| 419 | if (delegation == NULL || (delegation->type & open_flags) == open_flags) { | ||
| 420 | rcu_read_unlock(); | ||
| 421 | return; | ||
| 422 | } | ||
| 423 | rcu_read_unlock(); | ||
| 424 | nfs_inode_return_delegation(inode); | ||
| 425 | } | ||
| 426 | |||
| 427 | static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) | ||
| 428 | { | ||
| 429 | struct nfs4_state *state = opendata->state; | ||
| 430 | struct nfs_inode *nfsi = NFS_I(state->inode); | ||
| 431 | struct nfs_delegation *delegation; | ||
| 432 | int open_mode = opendata->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL); | ||
| 433 | nfs4_stateid stateid; | ||
| 434 | int ret = -EAGAIN; | ||
| 435 | |||
| 436 | rcu_read_lock(); | ||
| 437 | delegation = rcu_dereference(nfsi->delegation); | ||
| 438 | for (;;) { | ||
| 439 | if (can_open_cached(state, open_mode)) { | ||
| 440 | spin_lock(&state->owner->so_lock); | ||
| 441 | if (can_open_cached(state, open_mode)) { | ||
| 442 | update_open_stateflags(state, open_mode); | ||
| 443 | spin_unlock(&state->owner->so_lock); | ||
| 444 | rcu_read_unlock(); | ||
| 445 | goto out_return_state; | ||
| 446 | } | ||
| 447 | spin_unlock(&state->owner->so_lock); | ||
| 448 | } | ||
| 449 | if (delegation == NULL) | ||
| 450 | break; | ||
| 451 | if (!can_open_delegated(delegation, open_mode)) | ||
| 452 | break; | ||
| 453 | /* Save the delegation */ | ||
| 454 | memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data)); | ||
| 455 | rcu_read_unlock(); | ||
| 456 | lock_kernel(); | ||
| 457 | ret = _nfs4_do_access(state->inode, state->owner->so_cred, open_mode); | ||
| 458 | unlock_kernel(); | ||
| 459 | if (ret != 0) | ||
| 460 | goto out; | ||
| 461 | ret = -EAGAIN; | ||
| 462 | rcu_read_lock(); | ||
| 463 | delegation = rcu_dereference(nfsi->delegation); | ||
| 464 | /* If no delegation, try a cached open */ | ||
| 465 | if (delegation == NULL) | ||
| 466 | continue; | ||
| 467 | /* Is the delegation still valid? */ | ||
| 468 | if (memcmp(stateid.data, delegation->stateid.data, sizeof(stateid.data)) != 0) | ||
| 469 | continue; | ||
| 470 | rcu_read_unlock(); | ||
| 471 | update_open_stateid(state, NULL, &stateid, open_mode); | ||
| 472 | goto out_return_state; | ||
| 473 | } | ||
| 474 | rcu_read_unlock(); | ||
| 475 | out: | ||
| 476 | return ERR_PTR(ret); | ||
| 477 | out_return_state: | ||
| 478 | atomic_inc(&state->count); | ||
| 479 | return state; | ||
| 480 | } | ||
| 481 | |||
| 348 | static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) | 482 | static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) |
| 349 | { | 483 | { |
| 350 | struct inode *inode; | 484 | struct inode *inode; |
| 351 | struct nfs4_state *state = NULL; | 485 | struct nfs4_state *state = NULL; |
| 486 | struct nfs_delegation *delegation; | ||
| 487 | nfs4_stateid *deleg_stateid = NULL; | ||
| 488 | int ret; | ||
| 352 | 489 | ||
| 353 | if (!(data->f_attr.valid & NFS_ATTR_FATTR)) | 490 | if (!data->rpc_done) { |
| 491 | state = nfs4_try_open_cached(data); | ||
| 354 | goto out; | 492 | goto out; |
| 493 | } | ||
| 494 | |||
| 495 | ret = -EAGAIN; | ||
| 496 | if (!(data->f_attr.valid & NFS_ATTR_FATTR)) | ||
| 497 | goto err; | ||
| 355 | inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); | 498 | inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); |
| 499 | ret = PTR_ERR(inode); | ||
| 356 | if (IS_ERR(inode)) | 500 | if (IS_ERR(inode)) |
| 357 | goto out; | 501 | goto err; |
| 502 | ret = -ENOMEM; | ||
| 358 | state = nfs4_get_open_state(inode, data->owner); | 503 | state = nfs4_get_open_state(inode, data->owner); |
| 359 | if (state == NULL) | 504 | if (state == NULL) |
| 360 | goto put_inode; | 505 | goto err_put_inode; |
| 361 | update_open_stateid(state, &data->o_res.stateid, data->o_arg.open_flags); | 506 | if (data->o_res.delegation_type != 0) { |
| 362 | put_inode: | 507 | int delegation_flags = 0; |
| 508 | |||
| 509 | rcu_read_lock(); | ||
| 510 | delegation = rcu_dereference(NFS_I(inode)->delegation); | ||
| 511 | if (delegation) | ||
| 512 | delegation_flags = delegation->flags; | ||
| 513 | rcu_read_unlock(); | ||
| 514 | if (!(delegation_flags & NFS_DELEGATION_NEED_RECLAIM)) | ||
| 515 | nfs_inode_set_delegation(state->inode, | ||
| 516 | data->owner->so_cred, | ||
| 517 | &data->o_res); | ||
| 518 | else | ||
| 519 | nfs_inode_reclaim_delegation(state->inode, | ||
| 520 | data->owner->so_cred, | ||
| 521 | &data->o_res); | ||
| 522 | } | ||
| 523 | rcu_read_lock(); | ||
| 524 | delegation = rcu_dereference(NFS_I(inode)->delegation); | ||
| 525 | if (delegation != NULL) | ||
| 526 | deleg_stateid = &delegation->stateid; | ||
| 527 | update_open_stateid(state, &data->o_res.stateid, deleg_stateid, data->o_arg.open_flags); | ||
| 528 | rcu_read_unlock(); | ||
| 363 | iput(inode); | 529 | iput(inode); |
| 364 | out: | 530 | out: |
| 365 | return state; | 531 | return state; |
| 532 | err_put_inode: | ||
| 533 | iput(inode); | ||
| 534 | err: | ||
| 535 | return ERR_PTR(ret); | ||
| 366 | } | 536 | } |
| 367 | 537 | ||
| 368 | static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) | 538 | static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) |
| @@ -382,79 +552,66 @@ static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state * | |||
| 382 | return ERR_PTR(-ENOENT); | 552 | return ERR_PTR(-ENOENT); |
| 383 | } | 553 | } |
| 384 | 554 | ||
| 385 | static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, nfs4_stateid *stateid) | 555 | static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, struct nfs4_state **res) |
| 386 | { | 556 | { |
| 557 | struct nfs4_state *newstate; | ||
| 387 | int ret; | 558 | int ret; |
| 388 | 559 | ||
| 389 | opendata->o_arg.open_flags = openflags; | 560 | opendata->o_arg.open_flags = openflags; |
| 561 | memset(&opendata->o_res, 0, sizeof(opendata->o_res)); | ||
| 562 | memset(&opendata->c_res, 0, sizeof(opendata->c_res)); | ||
| 563 | nfs4_init_opendata_res(opendata); | ||
| 390 | ret = _nfs4_proc_open(opendata); | 564 | ret = _nfs4_proc_open(opendata); |
| 391 | if (ret != 0) | 565 | if (ret != 0) |
| 392 | return ret; | 566 | return ret; |
| 393 | memcpy(stateid->data, opendata->o_res.stateid.data, | 567 | newstate = nfs4_opendata_to_nfs4_state(opendata); |
| 394 | sizeof(stateid->data)); | 568 | if (IS_ERR(newstate)) |
| 569 | return PTR_ERR(newstate); | ||
| 570 | nfs4_close_state(&opendata->path, newstate, openflags); | ||
| 571 | *res = newstate; | ||
| 395 | return 0; | 572 | return 0; |
| 396 | } | 573 | } |
| 397 | 574 | ||
| 398 | static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) | 575 | static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) |
| 399 | { | 576 | { |
| 400 | nfs4_stateid stateid; | ||
| 401 | struct nfs4_state *newstate; | 577 | struct nfs4_state *newstate; |
| 402 | int mode = 0; | ||
| 403 | int delegation = 0; | ||
| 404 | int ret; | 578 | int ret; |
| 405 | 579 | ||
| 406 | /* memory barrier prior to reading state->n_* */ | 580 | /* memory barrier prior to reading state->n_* */ |
| 581 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | ||
| 407 | smp_rmb(); | 582 | smp_rmb(); |
| 408 | if (state->n_rdwr != 0) { | 583 | if (state->n_rdwr != 0) { |
| 409 | ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &stateid); | 584 | ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); |
| 410 | if (ret != 0) | 585 | if (ret != 0) |
| 411 | return ret; | 586 | return ret; |
| 412 | mode |= FMODE_READ|FMODE_WRITE; | 587 | if (newstate != state) |
| 413 | if (opendata->o_res.delegation_type != 0) | 588 | return -ESTALE; |
| 414 | delegation = opendata->o_res.delegation_type; | ||
| 415 | smp_rmb(); | ||
| 416 | } | 589 | } |
| 417 | if (state->n_wronly != 0) { | 590 | if (state->n_wronly != 0) { |
| 418 | ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &stateid); | 591 | ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); |
| 419 | if (ret != 0) | 592 | if (ret != 0) |
| 420 | return ret; | 593 | return ret; |
| 421 | mode |= FMODE_WRITE; | 594 | if (newstate != state) |
| 422 | if (opendata->o_res.delegation_type != 0) | 595 | return -ESTALE; |
| 423 | delegation = opendata->o_res.delegation_type; | ||
| 424 | smp_rmb(); | ||
| 425 | } | 596 | } |
| 426 | if (state->n_rdonly != 0) { | 597 | if (state->n_rdonly != 0) { |
| 427 | ret = nfs4_open_recover_helper(opendata, FMODE_READ, &stateid); | 598 | ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); |
| 428 | if (ret != 0) | 599 | if (ret != 0) |
| 429 | return ret; | 600 | return ret; |
| 430 | mode |= FMODE_READ; | 601 | if (newstate != state) |
| 602 | return -ESTALE; | ||
| 431 | } | 603 | } |
| 432 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | 604 | /* |
| 433 | if (mode == 0) | 605 | * We may have performed cached opens for all three recoveries. |
| 434 | return 0; | 606 | * Check if we need to update the current stateid. |
| 435 | if (opendata->o_res.delegation_type == 0) | 607 | */ |
| 436 | opendata->o_res.delegation_type = delegation; | 608 | if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && |
| 437 | opendata->o_arg.open_flags |= mode; | 609 | memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) { |
| 438 | newstate = nfs4_opendata_to_nfs4_state(opendata); | 610 | write_seqlock(&state->seqlock); |
| 439 | if (newstate != NULL) { | 611 | if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) |
| 440 | if (opendata->o_res.delegation_type != 0) { | 612 | memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)); |
| 441 | struct nfs_inode *nfsi = NFS_I(newstate->inode); | 613 | write_sequnlock(&state->seqlock); |
| 442 | int delegation_flags = 0; | ||
| 443 | if (nfsi->delegation) | ||
| 444 | delegation_flags = nfsi->delegation->flags; | ||
| 445 | if (!(delegation_flags & NFS_DELEGATION_NEED_RECLAIM)) | ||
| 446 | nfs_inode_set_delegation(newstate->inode, | ||
| 447 | opendata->owner->so_cred, | ||
| 448 | &opendata->o_res); | ||
| 449 | else | ||
| 450 | nfs_inode_reclaim_delegation(newstate->inode, | ||
| 451 | opendata->owner->so_cred, | ||
| 452 | &opendata->o_res); | ||
| 453 | } | ||
| 454 | nfs4_close_state(newstate, opendata->o_arg.open_flags); | ||
| 455 | } | 614 | } |
| 456 | if (newstate != state) | ||
| 457 | return -ESTALE; | ||
| 458 | return 0; | 615 | return 0; |
| 459 | } | 616 | } |
| 460 | 617 | ||
| @@ -462,41 +619,37 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * | |||
| 462 | * OPEN_RECLAIM: | 619 | * OPEN_RECLAIM: |
| 463 | * reclaim state on the server after a reboot. | 620 | * reclaim state on the server after a reboot. |
| 464 | */ | 621 | */ |
| 465 | static int _nfs4_do_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) | 622 | static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) |
| 466 | { | 623 | { |
| 467 | struct nfs_delegation *delegation = NFS_I(state->inode)->delegation; | 624 | struct nfs_delegation *delegation; |
| 468 | struct nfs4_opendata *opendata; | 625 | struct nfs4_opendata *opendata; |
| 469 | int delegation_type = 0; | 626 | int delegation_type = 0; |
| 470 | int status; | 627 | int status; |
| 471 | 628 | ||
| 472 | if (delegation != NULL) { | 629 | opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL); |
| 473 | if (!(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) { | ||
| 474 | memcpy(&state->stateid, &delegation->stateid, | ||
| 475 | sizeof(state->stateid)); | ||
| 476 | set_bit(NFS_DELEGATED_STATE, &state->flags); | ||
| 477 | return 0; | ||
| 478 | } | ||
| 479 | delegation_type = delegation->type; | ||
| 480 | } | ||
| 481 | opendata = nfs4_opendata_alloc(dentry, sp, 0, NULL); | ||
| 482 | if (opendata == NULL) | 630 | if (opendata == NULL) |
| 483 | return -ENOMEM; | 631 | return -ENOMEM; |
| 484 | opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; | 632 | opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; |
| 485 | opendata->o_arg.fh = NFS_FH(state->inode); | 633 | opendata->o_arg.fh = NFS_FH(state->inode); |
| 486 | nfs_copy_fh(&opendata->o_res.fh, opendata->o_arg.fh); | 634 | nfs_copy_fh(&opendata->o_res.fh, opendata->o_arg.fh); |
| 635 | rcu_read_lock(); | ||
| 636 | delegation = rcu_dereference(NFS_I(state->inode)->delegation); | ||
| 637 | if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0) | ||
| 638 | delegation_type = delegation->flags; | ||
| 639 | rcu_read_unlock(); | ||
| 487 | opendata->o_arg.u.delegation_type = delegation_type; | 640 | opendata->o_arg.u.delegation_type = delegation_type; |
| 488 | status = nfs4_open_recover(opendata, state); | 641 | status = nfs4_open_recover(opendata, state); |
| 489 | nfs4_opendata_free(opendata); | 642 | nfs4_opendata_put(opendata); |
| 490 | return status; | 643 | return status; |
| 491 | } | 644 | } |
| 492 | 645 | ||
| 493 | static int nfs4_do_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) | 646 | static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) |
| 494 | { | 647 | { |
| 495 | struct nfs_server *server = NFS_SERVER(state->inode); | 648 | struct nfs_server *server = NFS_SERVER(state->inode); |
| 496 | struct nfs4_exception exception = { }; | 649 | struct nfs4_exception exception = { }; |
| 497 | int err; | 650 | int err; |
| 498 | do { | 651 | do { |
| 499 | err = _nfs4_do_open_reclaim(sp, state, dentry); | 652 | err = _nfs4_do_open_reclaim(ctx, state); |
| 500 | if (err != -NFS4ERR_DELAY) | 653 | if (err != -NFS4ERR_DELAY) |
| 501 | break; | 654 | break; |
| 502 | nfs4_handle_exception(server, err, &exception); | 655 | nfs4_handle_exception(server, err, &exception); |
| @@ -512,37 +665,35 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta | |||
| 512 | ctx = nfs4_state_find_open_context(state); | 665 | ctx = nfs4_state_find_open_context(state); |
| 513 | if (IS_ERR(ctx)) | 666 | if (IS_ERR(ctx)) |
| 514 | return PTR_ERR(ctx); | 667 | return PTR_ERR(ctx); |
| 515 | ret = nfs4_do_open_reclaim(sp, state, ctx->dentry); | 668 | ret = nfs4_do_open_reclaim(ctx, state); |
| 516 | put_nfs_open_context(ctx); | 669 | put_nfs_open_context(ctx); |
| 517 | return ret; | 670 | return ret; |
| 518 | } | 671 | } |
| 519 | 672 | ||
| 520 | static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) | 673 | static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) |
| 521 | { | 674 | { |
| 522 | struct nfs4_state_owner *sp = state->owner; | 675 | struct nfs4_state_owner *sp = state->owner; |
| 523 | struct nfs4_opendata *opendata; | 676 | struct nfs4_opendata *opendata; |
| 524 | int ret; | 677 | int ret; |
| 525 | 678 | ||
| 526 | if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) | 679 | opendata = nfs4_opendata_alloc(&ctx->path, sp, 0, NULL); |
| 527 | return 0; | ||
| 528 | opendata = nfs4_opendata_alloc(dentry, sp, 0, NULL); | ||
| 529 | if (opendata == NULL) | 680 | if (opendata == NULL) |
| 530 | return -ENOMEM; | 681 | return -ENOMEM; |
| 531 | opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; | 682 | opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; |
| 532 | memcpy(opendata->o_arg.u.delegation.data, state->stateid.data, | 683 | memcpy(opendata->o_arg.u.delegation.data, stateid->data, |
| 533 | sizeof(opendata->o_arg.u.delegation.data)); | 684 | sizeof(opendata->o_arg.u.delegation.data)); |
| 534 | ret = nfs4_open_recover(opendata, state); | 685 | ret = nfs4_open_recover(opendata, state); |
| 535 | nfs4_opendata_free(opendata); | 686 | nfs4_opendata_put(opendata); |
| 536 | return ret; | 687 | return ret; |
| 537 | } | 688 | } |
| 538 | 689 | ||
| 539 | int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) | 690 | int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) |
| 540 | { | 691 | { |
| 541 | struct nfs4_exception exception = { }; | 692 | struct nfs4_exception exception = { }; |
| 542 | struct nfs_server *server = NFS_SERVER(dentry->d_inode); | 693 | struct nfs_server *server = NFS_SERVER(state->inode); |
| 543 | int err; | 694 | int err; |
| 544 | do { | 695 | do { |
| 545 | err = _nfs4_open_delegation_recall(dentry, state); | 696 | err = _nfs4_open_delegation_recall(ctx, state, stateid); |
| 546 | switch (err) { | 697 | switch (err) { |
| 547 | case 0: | 698 | case 0: |
| 548 | return err; | 699 | return err; |
| @@ -582,9 +733,10 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) | |||
| 582 | memcpy(data->o_res.stateid.data, data->c_res.stateid.data, | 733 | memcpy(data->o_res.stateid.data, data->c_res.stateid.data, |
| 583 | sizeof(data->o_res.stateid.data)); | 734 | sizeof(data->o_res.stateid.data)); |
| 584 | renew_lease(data->o_res.server, data->timestamp); | 735 | renew_lease(data->o_res.server, data->timestamp); |
| 736 | data->rpc_done = 1; | ||
| 585 | } | 737 | } |
| 586 | nfs_increment_open_seqid(data->rpc_status, data->c_arg.seqid); | ||
| 587 | nfs_confirm_seqid(&data->owner->so_seqid, data->rpc_status); | 738 | nfs_confirm_seqid(&data->owner->so_seqid, data->rpc_status); |
| 739 | nfs_increment_open_seqid(data->rpc_status, data->c_arg.seqid); | ||
| 588 | } | 740 | } |
| 589 | 741 | ||
| 590 | static void nfs4_open_confirm_release(void *calldata) | 742 | static void nfs4_open_confirm_release(void *calldata) |
| @@ -596,14 +748,14 @@ static void nfs4_open_confirm_release(void *calldata) | |||
| 596 | if (data->cancelled == 0) | 748 | if (data->cancelled == 0) |
| 597 | goto out_free; | 749 | goto out_free; |
| 598 | /* In case of error, no cleanup! */ | 750 | /* In case of error, no cleanup! */ |
| 599 | if (data->rpc_status != 0) | 751 | if (!data->rpc_done) |
| 600 | goto out_free; | 752 | goto out_free; |
| 601 | nfs_confirm_seqid(&data->owner->so_seqid, 0); | 753 | nfs_confirm_seqid(&data->owner->so_seqid, 0); |
| 602 | state = nfs4_opendata_to_nfs4_state(data); | 754 | state = nfs4_opendata_to_nfs4_state(data); |
| 603 | if (state != NULL) | 755 | if (!IS_ERR(state)) |
| 604 | nfs4_close_state(state, data->o_arg.open_flags); | 756 | nfs4_close_state(&data->path, state, data->o_arg.open_flags); |
| 605 | out_free: | 757 | out_free: |
| 606 | nfs4_opendata_free(data); | 758 | nfs4_opendata_put(data); |
| 607 | } | 759 | } |
| 608 | 760 | ||
| 609 | static const struct rpc_call_ops nfs4_open_confirm_ops = { | 761 | static const struct rpc_call_ops nfs4_open_confirm_ops = { |
| @@ -621,12 +773,9 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) | |||
| 621 | struct rpc_task *task; | 773 | struct rpc_task *task; |
| 622 | int status; | 774 | int status; |
| 623 | 775 | ||
| 624 | atomic_inc(&data->count); | 776 | kref_get(&data->kref); |
| 625 | /* | 777 | data->rpc_done = 0; |
| 626 | * If rpc_run_task() ends up calling ->rpc_release(), we | 778 | data->rpc_status = 0; |
| 627 | * want to ensure that it takes the 'error' code path. | ||
| 628 | */ | ||
| 629 | data->rpc_status = -ENOMEM; | ||
| 630 | task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_confirm_ops, data); | 779 | task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_confirm_ops, data); |
| 631 | if (IS_ERR(task)) | 780 | if (IS_ERR(task)) |
| 632 | return PTR_ERR(task); | 781 | return PTR_ERR(task); |
| @@ -653,13 +802,35 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) | |||
| 653 | 802 | ||
| 654 | if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) | 803 | if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) |
| 655 | return; | 804 | return; |
| 805 | /* | ||
| 806 | * Check if we still need to send an OPEN call, or if we can use | ||
| 807 | * a delegation instead. | ||
| 808 | */ | ||
| 809 | if (data->state != NULL) { | ||
| 810 | struct nfs_delegation *delegation; | ||
| 811 | |||
| 812 | if (can_open_cached(data->state, data->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL))) | ||
| 813 | goto out_no_action; | ||
| 814 | rcu_read_lock(); | ||
| 815 | delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); | ||
| 816 | if (delegation != NULL && | ||
| 817 | (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) { | ||
| 818 | rcu_read_unlock(); | ||
| 819 | goto out_no_action; | ||
| 820 | } | ||
| 821 | rcu_read_unlock(); | ||
| 822 | } | ||
| 656 | /* Update sequence id. */ | 823 | /* Update sequence id. */ |
| 657 | data->o_arg.id = sp->so_id; | 824 | data->o_arg.id = sp->so_owner_id.id; |
| 658 | data->o_arg.clientid = sp->so_client->cl_clientid; | 825 | data->o_arg.clientid = sp->so_client->cl_clientid; |
| 659 | if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) | 826 | if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) |
| 660 | msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; | 827 | msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; |
| 661 | data->timestamp = jiffies; | 828 | data->timestamp = jiffies; |
| 662 | rpc_call_setup(task, &msg, 0); | 829 | rpc_call_setup(task, &msg, 0); |
| 830 | return; | ||
| 831 | out_no_action: | ||
| 832 | task->tk_action = NULL; | ||
| 833 | |||
| 663 | } | 834 | } |
| 664 | 835 | ||
| 665 | static void nfs4_open_done(struct rpc_task *task, void *calldata) | 836 | static void nfs4_open_done(struct rpc_task *task, void *calldata) |
| @@ -683,8 +854,11 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata) | |||
| 683 | data->rpc_status = -ENOTDIR; | 854 | data->rpc_status = -ENOTDIR; |
| 684 | } | 855 | } |
| 685 | renew_lease(data->o_res.server, data->timestamp); | 856 | renew_lease(data->o_res.server, data->timestamp); |
| 857 | if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) | ||
| 858 | nfs_confirm_seqid(&data->owner->so_seqid, 0); | ||
| 686 | } | 859 | } |
| 687 | nfs_increment_open_seqid(data->rpc_status, data->o_arg.seqid); | 860 | nfs_increment_open_seqid(data->rpc_status, data->o_arg.seqid); |
| 861 | data->rpc_done = 1; | ||
| 688 | } | 862 | } |
| 689 | 863 | ||
| 690 | static void nfs4_open_release(void *calldata) | 864 | static void nfs4_open_release(void *calldata) |
| @@ -696,17 +870,17 @@ static void nfs4_open_release(void *calldata) | |||
| 696 | if (data->cancelled == 0) | 870 | if (data->cancelled == 0) |
| 697 | goto out_free; | 871 | goto out_free; |
| 698 | /* In case of error, no cleanup! */ | 872 | /* In case of error, no cleanup! */ |
| 699 | if (data->rpc_status != 0) | 873 | if (data->rpc_status != 0 || !data->rpc_done) |
| 700 | goto out_free; | 874 | goto out_free; |
| 701 | /* In case we need an open_confirm, no cleanup! */ | 875 | /* In case we need an open_confirm, no cleanup! */ |
| 702 | if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) | 876 | if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) |
| 703 | goto out_free; | 877 | goto out_free; |
| 704 | nfs_confirm_seqid(&data->owner->so_seqid, 0); | 878 | nfs_confirm_seqid(&data->owner->so_seqid, 0); |
| 705 | state = nfs4_opendata_to_nfs4_state(data); | 879 | state = nfs4_opendata_to_nfs4_state(data); |
| 706 | if (state != NULL) | 880 | if (!IS_ERR(state)) |
| 707 | nfs4_close_state(state, data->o_arg.open_flags); | 881 | nfs4_close_state(&data->path, state, data->o_arg.open_flags); |
| 708 | out_free: | 882 | out_free: |
| 709 | nfs4_opendata_free(data); | 883 | nfs4_opendata_put(data); |
| 710 | } | 884 | } |
| 711 | 885 | ||
| 712 | static const struct rpc_call_ops nfs4_open_ops = { | 886 | static const struct rpc_call_ops nfs4_open_ops = { |
| @@ -727,12 +901,10 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
| 727 | struct rpc_task *task; | 901 | struct rpc_task *task; |
| 728 | int status; | 902 | int status; |
| 729 | 903 | ||
| 730 | atomic_inc(&data->count); | 904 | kref_get(&data->kref); |
| 731 | /* | 905 | data->rpc_done = 0; |
| 732 | * If rpc_run_task() ends up calling ->rpc_release(), we | 906 | data->rpc_status = 0; |
| 733 | * want to ensure that it takes the 'error' code path. | 907 | data->cancelled = 0; |
| 734 | */ | ||
| 735 | data->rpc_status = -ENOMEM; | ||
| 736 | task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_ops, data); | 908 | task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_ops, data); |
| 737 | if (IS_ERR(task)) | 909 | if (IS_ERR(task)) |
| 738 | return PTR_ERR(task); | 910 | return PTR_ERR(task); |
| @@ -743,7 +915,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
| 743 | } else | 915 | } else |
| 744 | status = data->rpc_status; | 916 | status = data->rpc_status; |
| 745 | rpc_put_task(task); | 917 | rpc_put_task(task); |
| 746 | if (status != 0) | 918 | if (status != 0 || !data->rpc_done) |
| 747 | return status; | 919 | return status; |
| 748 | 920 | ||
| 749 | if (o_arg->open_flags & O_CREAT) { | 921 | if (o_arg->open_flags & O_CREAT) { |
| @@ -756,7 +928,6 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
| 756 | if (status != 0) | 928 | if (status != 0) |
| 757 | return status; | 929 | return status; |
| 758 | } | 930 | } |
| 759 | nfs_confirm_seqid(&data->owner->so_seqid, 0); | ||
| 760 | if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) | 931 | if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) |
| 761 | return server->nfs_client->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr); | 932 | return server->nfs_client->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr); |
| 762 | return 0; | 933 | return 0; |
| @@ -772,6 +943,8 @@ static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int openf | |||
| 772 | mask |= MAY_READ; | 943 | mask |= MAY_READ; |
| 773 | if (openflags & FMODE_WRITE) | 944 | if (openflags & FMODE_WRITE) |
| 774 | mask |= MAY_WRITE; | 945 | mask |= MAY_WRITE; |
| 946 | if (openflags & FMODE_EXEC) | ||
| 947 | mask |= MAY_EXEC; | ||
| 775 | status = nfs_access_get_cached(inode, cred, &cache); | 948 | status = nfs_access_get_cached(inode, cred, &cache); |
| 776 | if (status == 0) | 949 | if (status == 0) |
| 777 | goto out; | 950 | goto out; |
| @@ -811,43 +984,32 @@ static int nfs4_recover_expired_lease(struct nfs_server *server) | |||
| 811 | * reclaim state on the server after a network partition. | 984 | * reclaim state on the server after a network partition. |
| 812 | * Assumes caller holds the appropriate lock | 985 | * Assumes caller holds the appropriate lock |
| 813 | */ | 986 | */ |
| 814 | static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) | 987 | static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) |
| 815 | { | 988 | { |
| 816 | struct inode *inode = state->inode; | ||
| 817 | struct nfs_delegation *delegation = NFS_I(inode)->delegation; | ||
| 818 | struct nfs4_opendata *opendata; | 989 | struct nfs4_opendata *opendata; |
| 819 | int openflags = state->state & (FMODE_READ|FMODE_WRITE); | ||
| 820 | int ret; | 990 | int ret; |
| 821 | 991 | ||
| 822 | if (delegation != NULL && !(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) { | 992 | opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL); |
| 823 | ret = _nfs4_do_access(inode, sp->so_cred, openflags); | ||
| 824 | if (ret < 0) | ||
| 825 | return ret; | ||
| 826 | memcpy(&state->stateid, &delegation->stateid, sizeof(state->stateid)); | ||
| 827 | set_bit(NFS_DELEGATED_STATE, &state->flags); | ||
| 828 | return 0; | ||
| 829 | } | ||
| 830 | opendata = nfs4_opendata_alloc(dentry, sp, openflags, NULL); | ||
| 831 | if (opendata == NULL) | 993 | if (opendata == NULL) |
| 832 | return -ENOMEM; | 994 | return -ENOMEM; |
| 833 | ret = nfs4_open_recover(opendata, state); | 995 | ret = nfs4_open_recover(opendata, state); |
| 834 | if (ret == -ESTALE) { | 996 | if (ret == -ESTALE) { |
| 835 | /* Invalidate the state owner so we don't ever use it again */ | 997 | /* Invalidate the state owner so we don't ever use it again */ |
| 836 | nfs4_drop_state_owner(sp); | 998 | nfs4_drop_state_owner(state->owner); |
| 837 | d_drop(dentry); | 999 | d_drop(ctx->path.dentry); |
| 838 | } | 1000 | } |
| 839 | nfs4_opendata_free(opendata); | 1001 | nfs4_opendata_put(opendata); |
| 840 | return ret; | 1002 | return ret; |
| 841 | } | 1003 | } |
| 842 | 1004 | ||
| 843 | static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) | 1005 | static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) |
| 844 | { | 1006 | { |
| 845 | struct nfs_server *server = NFS_SERVER(dentry->d_inode); | 1007 | struct nfs_server *server = NFS_SERVER(state->inode); |
| 846 | struct nfs4_exception exception = { }; | 1008 | struct nfs4_exception exception = { }; |
| 847 | int err; | 1009 | int err; |
| 848 | 1010 | ||
| 849 | do { | 1011 | do { |
| 850 | err = _nfs4_open_expired(sp, state, dentry); | 1012 | err = _nfs4_open_expired(ctx, state); |
| 851 | if (err == -NFS4ERR_DELAY) | 1013 | if (err == -NFS4ERR_DELAY) |
| 852 | nfs4_handle_exception(server, err, &exception); | 1014 | nfs4_handle_exception(server, err, &exception); |
| 853 | } while (exception.retry); | 1015 | } while (exception.retry); |
| @@ -862,107 +1024,38 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta | |||
| 862 | ctx = nfs4_state_find_open_context(state); | 1024 | ctx = nfs4_state_find_open_context(state); |
| 863 | if (IS_ERR(ctx)) | 1025 | if (IS_ERR(ctx)) |
| 864 | return PTR_ERR(ctx); | 1026 | return PTR_ERR(ctx); |
| 865 | ret = nfs4_do_open_expired(sp, state, ctx->dentry); | 1027 | ret = nfs4_do_open_expired(ctx, state); |
| 866 | put_nfs_open_context(ctx); | 1028 | put_nfs_open_context(ctx); |
| 867 | return ret; | 1029 | return ret; |
| 868 | } | 1030 | } |
| 869 | 1031 | ||
| 870 | /* | 1032 | /* |
| 871 | * Returns a referenced nfs4_state if there is an open delegation on the file | 1033 | * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* |
| 1034 | * fields corresponding to attributes that were used to store the verifier. | ||
| 1035 | * Make sure we clobber those fields in the later setattr call | ||
| 872 | */ | 1036 | */ |
| 873 | static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred, struct nfs4_state **res) | 1037 | static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) |
| 874 | { | ||
| 875 | struct nfs_delegation *delegation; | ||
| 876 | struct nfs_server *server = NFS_SERVER(inode); | ||
| 877 | struct nfs_client *clp = server->nfs_client; | ||
| 878 | struct nfs_inode *nfsi = NFS_I(inode); | ||
| 879 | struct nfs4_state_owner *sp = NULL; | ||
| 880 | struct nfs4_state *state = NULL; | ||
| 881 | int open_flags = flags & (FMODE_READ|FMODE_WRITE); | ||
| 882 | int err; | ||
| 883 | |||
| 884 | err = -ENOMEM; | ||
| 885 | if (!(sp = nfs4_get_state_owner(server, cred))) { | ||
| 886 | dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__); | ||
| 887 | return err; | ||
| 888 | } | ||
| 889 | err = nfs4_recover_expired_lease(server); | ||
| 890 | if (err != 0) | ||
| 891 | goto out_put_state_owner; | ||
| 892 | /* Protect against reboot recovery - NOTE ORDER! */ | ||
| 893 | down_read(&clp->cl_sem); | ||
| 894 | /* Protect against delegation recall */ | ||
| 895 | down_read(&nfsi->rwsem); | ||
| 896 | delegation = NFS_I(inode)->delegation; | ||
| 897 | err = -ENOENT; | ||
| 898 | if (delegation == NULL || (delegation->type & open_flags) != open_flags) | ||
| 899 | goto out_err; | ||
| 900 | err = -ENOMEM; | ||
| 901 | state = nfs4_get_open_state(inode, sp); | ||
| 902 | if (state == NULL) | ||
| 903 | goto out_err; | ||
| 904 | |||
| 905 | err = -ENOENT; | ||
| 906 | if ((state->state & open_flags) == open_flags) { | ||
| 907 | spin_lock(&inode->i_lock); | ||
| 908 | update_open_stateflags(state, open_flags); | ||
| 909 | spin_unlock(&inode->i_lock); | ||
| 910 | goto out_ok; | ||
| 911 | } else if (state->state != 0) | ||
| 912 | goto out_put_open_state; | ||
| 913 | |||
| 914 | lock_kernel(); | ||
| 915 | err = _nfs4_do_access(inode, cred, open_flags); | ||
| 916 | unlock_kernel(); | ||
| 917 | if (err != 0) | ||
| 918 | goto out_put_open_state; | ||
| 919 | set_bit(NFS_DELEGATED_STATE, &state->flags); | ||
| 920 | update_open_stateid(state, &delegation->stateid, open_flags); | ||
| 921 | out_ok: | ||
| 922 | nfs4_put_state_owner(sp); | ||
| 923 | up_read(&nfsi->rwsem); | ||
| 924 | up_read(&clp->cl_sem); | ||
| 925 | *res = state; | ||
| 926 | return 0; | ||
| 927 | out_put_open_state: | ||
| 928 | nfs4_put_open_state(state); | ||
| 929 | out_err: | ||
| 930 | up_read(&nfsi->rwsem); | ||
| 931 | up_read(&clp->cl_sem); | ||
| 932 | if (err != -EACCES) | ||
| 933 | nfs_inode_return_delegation(inode); | ||
| 934 | out_put_state_owner: | ||
| 935 | nfs4_put_state_owner(sp); | ||
| 936 | return err; | ||
| 937 | } | ||
| 938 | |||
| 939 | static struct nfs4_state *nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred) | ||
| 940 | { | 1038 | { |
| 941 | struct nfs4_exception exception = { }; | 1039 | if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && |
| 942 | struct nfs4_state *res = ERR_PTR(-EIO); | 1040 | !(sattr->ia_valid & ATTR_ATIME_SET)) |
| 943 | int err; | 1041 | sattr->ia_valid |= ATTR_ATIME; |
| 944 | 1042 | ||
| 945 | do { | 1043 | if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && |
| 946 | err = _nfs4_open_delegated(inode, flags, cred, &res); | 1044 | !(sattr->ia_valid & ATTR_MTIME_SET)) |
| 947 | if (err == 0) | 1045 | sattr->ia_valid |= ATTR_MTIME; |
| 948 | break; | ||
| 949 | res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(inode), | ||
| 950 | err, &exception)); | ||
| 951 | } while (exception.retry); | ||
| 952 | return res; | ||
| 953 | } | 1046 | } |
| 954 | 1047 | ||
| 955 | /* | 1048 | /* |
| 956 | * Returns a referenced nfs4_state | 1049 | * Returns a referenced nfs4_state |
| 957 | */ | 1050 | */ |
| 958 | static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) | 1051 | static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) |
| 959 | { | 1052 | { |
| 960 | struct nfs4_state_owner *sp; | 1053 | struct nfs4_state_owner *sp; |
| 961 | struct nfs4_state *state = NULL; | 1054 | struct nfs4_state *state = NULL; |
| 962 | struct nfs_server *server = NFS_SERVER(dir); | 1055 | struct nfs_server *server = NFS_SERVER(dir); |
| 963 | struct nfs_client *clp = server->nfs_client; | 1056 | struct nfs_client *clp = server->nfs_client; |
| 964 | struct nfs4_opendata *opendata; | 1057 | struct nfs4_opendata *opendata; |
| 965 | int status; | 1058 | int status; |
| 966 | 1059 | ||
| 967 | /* Protect against reboot recovery conflicts */ | 1060 | /* Protect against reboot recovery conflicts */ |
| 968 | status = -ENOMEM; | 1061 | status = -ENOMEM; |
| @@ -973,29 +1066,35 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st | |||
| 973 | status = nfs4_recover_expired_lease(server); | 1066 | status = nfs4_recover_expired_lease(server); |
| 974 | if (status != 0) | 1067 | if (status != 0) |
| 975 | goto err_put_state_owner; | 1068 | goto err_put_state_owner; |
| 1069 | if (path->dentry->d_inode != NULL) | ||
| 1070 | nfs4_return_incompatible_delegation(path->dentry->d_inode, flags & (FMODE_READ|FMODE_WRITE)); | ||
| 976 | down_read(&clp->cl_sem); | 1071 | down_read(&clp->cl_sem); |
| 977 | status = -ENOMEM; | 1072 | status = -ENOMEM; |
| 978 | opendata = nfs4_opendata_alloc(dentry, sp, flags, sattr); | 1073 | opendata = nfs4_opendata_alloc(path, sp, flags, sattr); |
| 979 | if (opendata == NULL) | 1074 | if (opendata == NULL) |
| 980 | goto err_release_rwsem; | 1075 | goto err_release_rwsem; |
| 981 | 1076 | ||
| 1077 | if (path->dentry->d_inode != NULL) | ||
| 1078 | opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp); | ||
| 1079 | |||
| 982 | status = _nfs4_proc_open(opendata); | 1080 | status = _nfs4_proc_open(opendata); |
| 983 | if (status != 0) | 1081 | if (status != 0) |
| 984 | goto err_opendata_free; | 1082 | goto err_opendata_put; |
| 1083 | |||
| 1084 | if (opendata->o_arg.open_flags & O_EXCL) | ||
| 1085 | nfs4_exclusive_attrset(opendata, sattr); | ||
| 985 | 1086 | ||
| 986 | status = -ENOMEM; | ||
| 987 | state = nfs4_opendata_to_nfs4_state(opendata); | 1087 | state = nfs4_opendata_to_nfs4_state(opendata); |
| 988 | if (state == NULL) | 1088 | status = PTR_ERR(state); |
| 989 | goto err_opendata_free; | 1089 | if (IS_ERR(state)) |
| 990 | if (opendata->o_res.delegation_type != 0) | 1090 | goto err_opendata_put; |
| 991 | nfs_inode_set_delegation(state->inode, cred, &opendata->o_res); | 1091 | nfs4_opendata_put(opendata); |
| 992 | nfs4_opendata_free(opendata); | ||
| 993 | nfs4_put_state_owner(sp); | 1092 | nfs4_put_state_owner(sp); |
| 994 | up_read(&clp->cl_sem); | 1093 | up_read(&clp->cl_sem); |
| 995 | *res = state; | 1094 | *res = state; |
| 996 | return 0; | 1095 | return 0; |
| 997 | err_opendata_free: | 1096 | err_opendata_put: |
| 998 | nfs4_opendata_free(opendata); | 1097 | nfs4_opendata_put(opendata); |
| 999 | err_release_rwsem: | 1098 | err_release_rwsem: |
| 1000 | up_read(&clp->cl_sem); | 1099 | up_read(&clp->cl_sem); |
| 1001 | err_put_state_owner: | 1100 | err_put_state_owner: |
| @@ -1006,14 +1105,14 @@ out_err: | |||
| 1006 | } | 1105 | } |
| 1007 | 1106 | ||
| 1008 | 1107 | ||
| 1009 | static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, struct iattr *sattr, struct rpc_cred *cred) | 1108 | static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred) |
| 1010 | { | 1109 | { |
| 1011 | struct nfs4_exception exception = { }; | 1110 | struct nfs4_exception exception = { }; |
| 1012 | struct nfs4_state *res; | 1111 | struct nfs4_state *res; |
| 1013 | int status; | 1112 | int status; |
| 1014 | 1113 | ||
| 1015 | do { | 1114 | do { |
| 1016 | status = _nfs4_do_open(dir, dentry, flags, sattr, cred, &res); | 1115 | status = _nfs4_do_open(dir, path, flags, sattr, cred, &res); |
| 1017 | if (status == 0) | 1116 | if (status == 0) |
| 1018 | break; | 1117 | break; |
| 1019 | /* NOTE: BAD_SEQID means the server and client disagree about the | 1118 | /* NOTE: BAD_SEQID means the server and client disagree about the |
| @@ -1028,7 +1127,9 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, | |||
| 1028 | * the user though... | 1127 | * the user though... |
| 1029 | */ | 1128 | */ |
| 1030 | if (status == -NFS4ERR_BAD_SEQID) { | 1129 | if (status == -NFS4ERR_BAD_SEQID) { |
| 1031 | printk(KERN_WARNING "NFS: v4 server returned a bad sequence-id error!\n"); | 1130 | printk(KERN_WARNING "NFS: v4 server %s " |
| 1131 | " returned a bad sequence-id error!\n", | ||
| 1132 | NFS_SERVER(dir)->nfs_client->cl_hostname); | ||
| 1032 | exception.retry = 1; | 1133 | exception.retry = 1; |
| 1033 | continue; | 1134 | continue; |
| 1034 | } | 1135 | } |
| @@ -1042,6 +1143,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, | |||
| 1042 | exception.retry = 1; | 1143 | exception.retry = 1; |
| 1043 | continue; | 1144 | continue; |
| 1044 | } | 1145 | } |
| 1146 | if (status == -EAGAIN) { | ||
| 1147 | /* We must have found a delegation */ | ||
| 1148 | exception.retry = 1; | ||
| 1149 | continue; | ||
| 1150 | } | ||
| 1045 | res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), | 1151 | res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), |
| 1046 | status, &exception)); | 1152 | status, &exception)); |
| 1047 | } while (exception.retry); | 1153 | } while (exception.retry); |
| @@ -1101,6 +1207,7 @@ static int nfs4_do_setattr(struct inode *inode, struct nfs_fattr *fattr, | |||
| 1101 | } | 1207 | } |
| 1102 | 1208 | ||
| 1103 | struct nfs4_closedata { | 1209 | struct nfs4_closedata { |
| 1210 | struct path path; | ||
| 1104 | struct inode *inode; | 1211 | struct inode *inode; |
| 1105 | struct nfs4_state *state; | 1212 | struct nfs4_state *state; |
| 1106 | struct nfs_closeargs arg; | 1213 | struct nfs_closeargs arg; |
| @@ -1117,6 +1224,8 @@ static void nfs4_free_closedata(void *data) | |||
| 1117 | nfs4_put_open_state(calldata->state); | 1224 | nfs4_put_open_state(calldata->state); |
| 1118 | nfs_free_seqid(calldata->arg.seqid); | 1225 | nfs_free_seqid(calldata->arg.seqid); |
| 1119 | nfs4_put_state_owner(sp); | 1226 | nfs4_put_state_owner(sp); |
| 1227 | dput(calldata->path.dentry); | ||
| 1228 | mntput(calldata->path.mnt); | ||
| 1120 | kfree(calldata); | 1229 | kfree(calldata); |
| 1121 | } | 1230 | } |
| 1122 | 1231 | ||
| @@ -1134,8 +1243,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) | |||
| 1134 | nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid); | 1243 | nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid); |
| 1135 | switch (task->tk_status) { | 1244 | switch (task->tk_status) { |
| 1136 | case 0: | 1245 | case 0: |
| 1137 | memcpy(&state->stateid, &calldata->res.stateid, | 1246 | nfs_set_open_stateid(state, &calldata->res.stateid, calldata->arg.open_flags); |
| 1138 | sizeof(state->stateid)); | ||
| 1139 | renew_lease(server, calldata->timestamp); | 1247 | renew_lease(server, calldata->timestamp); |
| 1140 | break; | 1248 | break; |
| 1141 | case -NFS4ERR_STALE_STATEID: | 1249 | case -NFS4ERR_STALE_STATEID: |
| @@ -1160,26 +1268,30 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) | |||
| 1160 | .rpc_resp = &calldata->res, | 1268 | .rpc_resp = &calldata->res, |
| 1161 | .rpc_cred = state->owner->so_cred, | 1269 | .rpc_cred = state->owner->so_cred, |
| 1162 | }; | 1270 | }; |
| 1163 | int mode = 0, old_mode; | 1271 | int clear_rd, clear_wr, clear_rdwr; |
| 1272 | int mode; | ||
| 1164 | 1273 | ||
| 1165 | if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) | 1274 | if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) |
| 1166 | return; | 1275 | return; |
| 1167 | /* Recalculate the new open mode in case someone reopened the file | 1276 | |
| 1168 | * while we were waiting in line to be scheduled. | 1277 | mode = FMODE_READ|FMODE_WRITE; |
| 1169 | */ | 1278 | clear_rd = clear_wr = clear_rdwr = 0; |
| 1170 | spin_lock(&state->owner->so_lock); | 1279 | spin_lock(&state->owner->so_lock); |
| 1171 | spin_lock(&calldata->inode->i_lock); | 1280 | /* Calculate the change in open mode */ |
| 1172 | mode = old_mode = state->state; | ||
| 1173 | if (state->n_rdwr == 0) { | 1281 | if (state->n_rdwr == 0) { |
| 1174 | if (state->n_rdonly == 0) | 1282 | if (state->n_rdonly == 0) { |
| 1175 | mode &= ~FMODE_READ; | 1283 | mode &= ~FMODE_READ; |
| 1176 | if (state->n_wronly == 0) | 1284 | clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags); |
| 1285 | clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags); | ||
| 1286 | } | ||
| 1287 | if (state->n_wronly == 0) { | ||
| 1177 | mode &= ~FMODE_WRITE; | 1288 | mode &= ~FMODE_WRITE; |
| 1289 | clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags); | ||
| 1290 | clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags); | ||
| 1291 | } | ||
| 1178 | } | 1292 | } |
| 1179 | nfs4_state_set_mode_locked(state, mode); | ||
| 1180 | spin_unlock(&calldata->inode->i_lock); | ||
| 1181 | spin_unlock(&state->owner->so_lock); | 1293 | spin_unlock(&state->owner->so_lock); |
| 1182 | if (mode == old_mode || test_bit(NFS_DELEGATED_STATE, &state->flags)) { | 1294 | if (!clear_rd && !clear_wr && !clear_rdwr) { |
| 1183 | /* Note: exit _without_ calling nfs4_close_done */ | 1295 | /* Note: exit _without_ calling nfs4_close_done */ |
| 1184 | task->tk_action = NULL; | 1296 | task->tk_action = NULL; |
| 1185 | return; | 1297 | return; |
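Editor's note: nfs4_close_prepare now derives the remaining open mode from the per-mode open counters and lets the CLOSE proceed only if one of the NFS_O_*_STATE bits was actually cleared. A rough sketch of the mode calculation follows; demo_remaining_open_mode is a hypothetical helper used purely to restate the logic shown above.

#include <linux/fs.h>

/*
 * Keep FMODE_READ while any read-only or read-write opener remains,
 * and FMODE_WRITE while any write-only or read-write opener remains.
 */
static int demo_remaining_open_mode(unsigned int n_rdonly,
				    unsigned int n_wronly,
				    unsigned int n_rdwr)
{
	int mode = FMODE_READ | FMODE_WRITE;

	if (n_rdwr == 0) {
		if (n_rdonly == 0)
			mode &= ~FMODE_READ;
		if (n_wronly == 0)
			mode &= ~FMODE_WRITE;
	}
	return mode;
}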
| @@ -1209,19 +1321,21 @@ static const struct rpc_call_ops nfs4_close_ops = { | |||
| 1209 | * | 1321 | * |
| 1210 | * NOTE: Caller must be holding the sp->so_owner semaphore! | 1322 | * NOTE: Caller must be holding the sp->so_owner semaphore! |
| 1211 | */ | 1323 | */ |
| 1212 | int nfs4_do_close(struct inode *inode, struct nfs4_state *state) | 1324 | int nfs4_do_close(struct path *path, struct nfs4_state *state) |
| 1213 | { | 1325 | { |
| 1214 | struct nfs_server *server = NFS_SERVER(inode); | 1326 | struct nfs_server *server = NFS_SERVER(state->inode); |
| 1215 | struct nfs4_closedata *calldata; | 1327 | struct nfs4_closedata *calldata; |
| 1328 | struct nfs4_state_owner *sp = state->owner; | ||
| 1329 | struct rpc_task *task; | ||
| 1216 | int status = -ENOMEM; | 1330 | int status = -ENOMEM; |
| 1217 | 1331 | ||
| 1218 | calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); | 1332 | calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); |
| 1219 | if (calldata == NULL) | 1333 | if (calldata == NULL) |
| 1220 | goto out; | 1334 | goto out; |
| 1221 | calldata->inode = inode; | 1335 | calldata->inode = state->inode; |
| 1222 | calldata->state = state; | 1336 | calldata->state = state; |
| 1223 | calldata->arg.fh = NFS_FH(inode); | 1337 | calldata->arg.fh = NFS_FH(state->inode); |
| 1224 | calldata->arg.stateid = &state->stateid; | 1338 | calldata->arg.stateid = &state->open_stateid; |
| 1225 | /* Serialization for the sequence id */ | 1339 | /* Serialization for the sequence id */ |
| 1226 | calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); | 1340 | calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); |
| 1227 | if (calldata->arg.seqid == NULL) | 1341 | if (calldata->arg.seqid == NULL) |
| @@ -1229,36 +1343,55 @@ int nfs4_do_close(struct inode *inode, struct nfs4_state *state) | |||
| 1229 | calldata->arg.bitmask = server->attr_bitmask; | 1343 | calldata->arg.bitmask = server->attr_bitmask; |
| 1230 | calldata->res.fattr = &calldata->fattr; | 1344 | calldata->res.fattr = &calldata->fattr; |
| 1231 | calldata->res.server = server; | 1345 | calldata->res.server = server; |
| 1346 | calldata->path.mnt = mntget(path->mnt); | ||
| 1347 | calldata->path.dentry = dget(path->dentry); | ||
| 1232 | 1348 | ||
| 1233 | status = nfs4_call_async(server->client, &nfs4_close_ops, calldata); | 1349 | task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_close_ops, calldata); |
| 1234 | if (status == 0) | 1350 | if (IS_ERR(task)) |
| 1235 | goto out; | 1351 | return PTR_ERR(task); |
| 1236 | 1352 | rpc_put_task(task); | |
| 1237 | nfs_free_seqid(calldata->arg.seqid); | 1353 | return 0; |
| 1238 | out_free_calldata: | 1354 | out_free_calldata: |
| 1239 | kfree(calldata); | 1355 | kfree(calldata); |
| 1240 | out: | 1356 | out: |
| 1357 | nfs4_put_open_state(state); | ||
| 1358 | nfs4_put_state_owner(sp); | ||
| 1241 | return status; | 1359 | return status; |
| 1242 | } | 1360 | } |
| 1243 | 1361 | ||
| 1244 | static int nfs4_intent_set_file(struct nameidata *nd, struct dentry *dentry, struct nfs4_state *state) | 1362 | static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state) |
| 1245 | { | 1363 | { |
| 1246 | struct file *filp; | 1364 | struct file *filp; |
| 1365 | int ret; | ||
| 1247 | 1366 | ||
| 1248 | filp = lookup_instantiate_filp(nd, dentry, NULL); | 1367 | /* If the open_intent is for execute, we have an extra check to make */ |
| 1368 | if (nd->intent.open.flags & FMODE_EXEC) { | ||
| 1369 | ret = _nfs4_do_access(state->inode, | ||
| 1370 | state->owner->so_cred, | ||
| 1371 | nd->intent.open.flags); | ||
| 1372 | if (ret < 0) | ||
| 1373 | goto out_close; | ||
| 1374 | } | ||
| 1375 | filp = lookup_instantiate_filp(nd, path->dentry, NULL); | ||
| 1249 | if (!IS_ERR(filp)) { | 1376 | if (!IS_ERR(filp)) { |
| 1250 | struct nfs_open_context *ctx; | 1377 | struct nfs_open_context *ctx; |
| 1251 | ctx = (struct nfs_open_context *)filp->private_data; | 1378 | ctx = (struct nfs_open_context *)filp->private_data; |
| 1252 | ctx->state = state; | 1379 | ctx->state = state; |
| 1253 | return 0; | 1380 | return 0; |
| 1254 | } | 1381 | } |
| 1255 | nfs4_close_state(state, nd->intent.open.flags); | 1382 | ret = PTR_ERR(filp); |
| 1256 | return PTR_ERR(filp); | 1383 | out_close: |
| 1384 | nfs4_close_state(path, state, nd->intent.open.flags); | ||
| 1385 | return ret; | ||
| 1257 | } | 1386 | } |
| 1258 | 1387 | ||
| 1259 | struct dentry * | 1388 | struct dentry * |
| 1260 | nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | 1389 | nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) |
| 1261 | { | 1390 | { |
| 1391 | struct path path = { | ||
| 1392 | .mnt = nd->mnt, | ||
| 1393 | .dentry = dentry, | ||
| 1394 | }; | ||
| 1262 | struct iattr attr; | 1395 | struct iattr attr; |
| 1263 | struct rpc_cred *cred; | 1396 | struct rpc_cred *cred; |
| 1264 | struct nfs4_state *state; | 1397 | struct nfs4_state *state; |
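Editor's note: nfs4_do_close() now pins the vfsmount and dentry (mntget/dget) before queueing the asynchronous CLOSE, and nfs4_free_closedata() drops them (dput/mntput) once the RPC is released, so the mount cannot go away under an in-flight CLOSE. A bare sketch of that pin/unpin pairing, with a hypothetical demo_closedata standing in for the real calldata:

#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>	/* struct path in this kernel generation (assumption) */

struct demo_closedata {
	struct path path;	/* pinned copy of the caller's path */
};

static void demo_pin_path(struct demo_closedata *data, struct path *path)
{
	data->path.mnt = mntget(path->mnt);	/* hold the vfsmount */
	data->path.dentry = dget(path->dentry);	/* hold the dentry */
}

static void demo_unpin_path(struct demo_closedata *data)
{
	dput(data->path.dentry);
	mntput(data->path.mnt);
}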
| @@ -1277,7 +1410,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
| 1277 | cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); | 1410 | cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); |
| 1278 | if (IS_ERR(cred)) | 1411 | if (IS_ERR(cred)) |
| 1279 | return (struct dentry *)cred; | 1412 | return (struct dentry *)cred; |
| 1280 | state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred); | 1413 | state = nfs4_do_open(dir, &path, nd->intent.open.flags, &attr, cred); |
| 1281 | put_rpccred(cred); | 1414 | put_rpccred(cred); |
| 1282 | if (IS_ERR(state)) { | 1415 | if (IS_ERR(state)) { |
| 1283 | if (PTR_ERR(state) == -ENOENT) | 1416 | if (PTR_ERR(state) == -ENOENT) |
| @@ -1287,22 +1420,24 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
| 1287 | res = d_add_unique(dentry, igrab(state->inode)); | 1420 | res = d_add_unique(dentry, igrab(state->inode)); |
| 1288 | if (res != NULL) | 1421 | if (res != NULL) |
| 1289 | dentry = res; | 1422 | dentry = res; |
| 1290 | nfs4_intent_set_file(nd, dentry, state); | 1423 | nfs4_intent_set_file(nd, &path, state); |
| 1291 | return res; | 1424 | return res; |
| 1292 | } | 1425 | } |
| 1293 | 1426 | ||
| 1294 | int | 1427 | int |
| 1295 | nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) | 1428 | nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) |
| 1296 | { | 1429 | { |
| 1430 | struct path path = { | ||
| 1431 | .mnt = nd->mnt, | ||
| 1432 | .dentry = dentry, | ||
| 1433 | }; | ||
| 1297 | struct rpc_cred *cred; | 1434 | struct rpc_cred *cred; |
| 1298 | struct nfs4_state *state; | 1435 | struct nfs4_state *state; |
| 1299 | 1436 | ||
| 1300 | cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); | 1437 | cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); |
| 1301 | if (IS_ERR(cred)) | 1438 | if (IS_ERR(cred)) |
| 1302 | return PTR_ERR(cred); | 1439 | return PTR_ERR(cred); |
| 1303 | state = nfs4_open_delegated(dentry->d_inode, openflags, cred); | 1440 | state = nfs4_do_open(dir, &path, openflags, NULL, cred); |
| 1304 | if (IS_ERR(state)) | ||
| 1305 | state = nfs4_do_open(dir, dentry, openflags, NULL, cred); | ||
| 1306 | put_rpccred(cred); | 1441 | put_rpccred(cred); |
| 1307 | if (IS_ERR(state)) { | 1442 | if (IS_ERR(state)) { |
| 1308 | switch (PTR_ERR(state)) { | 1443 | switch (PTR_ERR(state)) { |
| @@ -1318,10 +1453,10 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st | |||
| 1318 | } | 1453 | } |
| 1319 | } | 1454 | } |
| 1320 | if (state->inode == dentry->d_inode) { | 1455 | if (state->inode == dentry->d_inode) { |
| 1321 | nfs4_intent_set_file(nd, dentry, state); | 1456 | nfs4_intent_set_file(nd, &path, state); |
| 1322 | return 1; | 1457 | return 1; |
| 1323 | } | 1458 | } |
| 1324 | nfs4_close_state(state, openflags); | 1459 | nfs4_close_state(&path, state, openflags); |
| 1325 | out_drop: | 1460 | out_drop: |
| 1326 | d_drop(dentry); | 1461 | d_drop(dentry); |
| 1327 | return 0; | 1462 | return 0; |
| @@ -1559,8 +1694,6 @@ static int _nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh, | |||
| 1559 | dprintk("NFS call lookupfh %s\n", name->name); | 1694 | dprintk("NFS call lookupfh %s\n", name->name); |
| 1560 | status = rpc_call_sync(server->client, &msg, 0); | 1695 | status = rpc_call_sync(server->client, &msg, 0); |
| 1561 | dprintk("NFS reply lookupfh: %d\n", status); | 1696 | dprintk("NFS reply lookupfh: %d\n", status); |
| 1562 | if (status == -NFS4ERR_MOVED) | ||
| 1563 | status = -EREMOTE; | ||
| 1564 | return status; | 1697 | return status; |
| 1565 | } | 1698 | } |
| 1566 | 1699 | ||
| @@ -1571,10 +1704,13 @@ static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh, | |||
| 1571 | struct nfs4_exception exception = { }; | 1704 | struct nfs4_exception exception = { }; |
| 1572 | int err; | 1705 | int err; |
| 1573 | do { | 1706 | do { |
| 1574 | err = nfs4_handle_exception(server, | 1707 | err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr); |
| 1575 | _nfs4_proc_lookupfh(server, dirfh, name, | 1708 | /* FIXME: !!!! */ |
| 1576 | fhandle, fattr), | 1709 | if (err == -NFS4ERR_MOVED) { |
| 1577 | &exception); | 1710 | err = -EREMOTE; |
| 1711 | break; | ||
| 1712 | } | ||
| 1713 | err = nfs4_handle_exception(server, err, &exception); | ||
| 1578 | } while (exception.retry); | 1714 | } while (exception.retry); |
| 1579 | return err; | 1715 | return err; |
| 1580 | } | 1716 | } |
| @@ -1582,28 +1718,10 @@ static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh, | |||
| 1582 | static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name, | 1718 | static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name, |
| 1583 | struct nfs_fh *fhandle, struct nfs_fattr *fattr) | 1719 | struct nfs_fh *fhandle, struct nfs_fattr *fattr) |
| 1584 | { | 1720 | { |
| 1585 | int status; | 1721 | int status; |
| 1586 | struct nfs_server *server = NFS_SERVER(dir); | ||
| 1587 | struct nfs4_lookup_arg args = { | ||
| 1588 | .bitmask = server->attr_bitmask, | ||
| 1589 | .dir_fh = NFS_FH(dir), | ||
| 1590 | .name = name, | ||
| 1591 | }; | ||
| 1592 | struct nfs4_lookup_res res = { | ||
| 1593 | .server = server, | ||
| 1594 | .fattr = fattr, | ||
| 1595 | .fh = fhandle, | ||
| 1596 | }; | ||
| 1597 | struct rpc_message msg = { | ||
| 1598 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], | ||
| 1599 | .rpc_argp = &args, | ||
| 1600 | .rpc_resp = &res, | ||
| 1601 | }; | ||
| 1602 | |||
| 1603 | nfs_fattr_init(fattr); | ||
| 1604 | 1722 | ||
| 1605 | dprintk("NFS call lookup %s\n", name->name); | 1723 | dprintk("NFS call lookup %s\n", name->name); |
| 1606 | status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); | 1724 | status = _nfs4_proc_lookupfh(NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr); |
| 1607 | if (status == -NFS4ERR_MOVED) | 1725 | if (status == -NFS4ERR_MOVED) |
| 1608 | status = nfs4_get_referral(dir, name, fattr, fhandle); | 1726 | status = nfs4_get_referral(dir, name, fattr, fhandle); |
| 1609 | dprintk("NFS reply lookup: %d\n", status); | 1727 | dprintk("NFS reply lookup: %d\n", status); |
| @@ -1752,6 +1870,10 @@ static int | |||
| 1752 | nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, | 1870 | nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, |
| 1753 | int flags, struct nameidata *nd) | 1871 | int flags, struct nameidata *nd) |
| 1754 | { | 1872 | { |
| 1873 | struct path path = { | ||
| 1874 | .mnt = nd->mnt, | ||
| 1875 | .dentry = dentry, | ||
| 1876 | }; | ||
| 1755 | struct nfs4_state *state; | 1877 | struct nfs4_state *state; |
| 1756 | struct rpc_cred *cred; | 1878 | struct rpc_cred *cred; |
| 1757 | int status = 0; | 1879 | int status = 0; |
| @@ -1761,7 +1883,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, | |||
| 1761 | status = PTR_ERR(cred); | 1883 | status = PTR_ERR(cred); |
| 1762 | goto out; | 1884 | goto out; |
| 1763 | } | 1885 | } |
| 1764 | state = nfs4_do_open(dir, dentry, flags, sattr, cred); | 1886 | state = nfs4_do_open(dir, &path, flags, sattr, cred); |
| 1765 | put_rpccred(cred); | 1887 | put_rpccred(cred); |
| 1766 | if (IS_ERR(state)) { | 1888 | if (IS_ERR(state)) { |
| 1767 | status = PTR_ERR(state); | 1889 | status = PTR_ERR(state); |
| @@ -1773,11 +1895,12 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, | |||
| 1773 | status = nfs4_do_setattr(state->inode, &fattr, sattr, state); | 1895 | status = nfs4_do_setattr(state->inode, &fattr, sattr, state); |
| 1774 | if (status == 0) | 1896 | if (status == 0) |
| 1775 | nfs_setattr_update_inode(state->inode, sattr); | 1897 | nfs_setattr_update_inode(state->inode, sattr); |
| 1898 | nfs_post_op_update_inode(state->inode, &fattr); | ||
| 1776 | } | 1899 | } |
| 1777 | if (status == 0 && nd != NULL && (nd->flags & LOOKUP_OPEN)) | 1900 | if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0) |
| 1778 | status = nfs4_intent_set_file(nd, dentry, state); | 1901 | status = nfs4_intent_set_file(nd, &path, state); |
| 1779 | else | 1902 | else |
| 1780 | nfs4_close_state(state, flags); | 1903 | nfs4_close_state(&path, state, flags); |
| 1781 | out: | 1904 | out: |
| 1782 | return status; | 1905 | return status; |
| 1783 | } | 1906 | } |
| @@ -3008,7 +3131,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock | |||
| 3008 | if (status != 0) | 3131 | if (status != 0) |
| 3009 | goto out; | 3132 | goto out; |
| 3010 | lsp = request->fl_u.nfs4_fl.owner; | 3133 | lsp = request->fl_u.nfs4_fl.owner; |
| 3011 | arg.lock_owner.id = lsp->ls_id; | 3134 | arg.lock_owner.id = lsp->ls_id.id; |
| 3012 | status = rpc_call_sync(server->client, &msg, 0); | 3135 | status = rpc_call_sync(server->client, &msg, 0); |
| 3013 | switch (status) { | 3136 | switch (status) { |
| 3014 | case 0: | 3137 | case 0: |
| @@ -3152,6 +3275,11 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, | |||
| 3152 | { | 3275 | { |
| 3153 | struct nfs4_unlockdata *data; | 3276 | struct nfs4_unlockdata *data; |
| 3154 | 3277 | ||
| 3278 | /* Ensure this is an unlock - when canceling a lock, the | ||
| 3279 | * canceled lock is passed in, and it won't be an unlock. | ||
| 3280 | */ | ||
| 3281 | fl->fl_type = F_UNLCK; | ||
| 3282 | |||
| 3155 | data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); | 3283 | data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); |
| 3156 | if (data == NULL) { | 3284 | if (data == NULL) { |
| 3157 | nfs_free_seqid(seqid); | 3285 | nfs_free_seqid(seqid); |
| @@ -3222,7 +3350,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, | |||
| 3222 | goto out_free; | 3350 | goto out_free; |
| 3223 | p->arg.lock_stateid = &lsp->ls_stateid; | 3351 | p->arg.lock_stateid = &lsp->ls_stateid; |
| 3224 | p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; | 3352 | p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; |
| 3225 | p->arg.lock_owner.id = lsp->ls_id; | 3353 | p->arg.lock_owner.id = lsp->ls_id.id; |
| 3226 | p->lsp = lsp; | 3354 | p->lsp = lsp; |
| 3227 | atomic_inc(&lsp->ls_count); | 3355 | atomic_inc(&lsp->ls_count); |
| 3228 | p->ctx = get_nfs_open_context(ctx); | 3356 | p->ctx = get_nfs_open_context(ctx); |
| @@ -3285,7 +3413,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) | |||
| 3285 | memcpy(data->lsp->ls_stateid.data, data->res.stateid.data, | 3413 | memcpy(data->lsp->ls_stateid.data, data->res.stateid.data, |
| 3286 | sizeof(data->lsp->ls_stateid.data)); | 3414 | sizeof(data->lsp->ls_stateid.data)); |
| 3287 | data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; | 3415 | data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; |
| 3288 | renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); | 3416 | renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp); |
| 3289 | } | 3417 | } |
| 3290 | nfs_increment_lock_seqid(data->rpc_status, data->arg.lock_seqid); | 3418 | nfs_increment_lock_seqid(data->rpc_status, data->arg.lock_seqid); |
| 3291 | out: | 3419 | out: |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 8ed79d5c54f9..e9662ba81d86 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
| @@ -38,12 +38,14 @@ | |||
| 38 | * subsequent patch. | 38 | * subsequent patch. |
| 39 | */ | 39 | */ |
| 40 | 40 | ||
| 41 | #include <linux/kernel.h> | ||
| 41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
| 42 | #include <linux/smp_lock.h> | 43 | #include <linux/smp_lock.h> |
| 43 | #include <linux/nfs_fs.h> | 44 | #include <linux/nfs_fs.h> |
| 44 | #include <linux/nfs_idmap.h> | 45 | #include <linux/nfs_idmap.h> |
| 45 | #include <linux/kthread.h> | 46 | #include <linux/kthread.h> |
| 46 | #include <linux/module.h> | 47 | #include <linux/module.h> |
| 48 | #include <linux/random.h> | ||
| 47 | #include <linux/workqueue.h> | 49 | #include <linux/workqueue.h> |
| 48 | #include <linux/bitops.h> | 50 | #include <linux/bitops.h> |
| 49 | 51 | ||
| @@ -69,33 +71,14 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred) | |||
| 69 | return status; | 71 | return status; |
| 70 | } | 72 | } |
| 71 | 73 | ||
| 72 | u32 | ||
| 73 | nfs4_alloc_lockowner_id(struct nfs_client *clp) | ||
| 74 | { | ||
| 75 | return clp->cl_lockowner_id ++; | ||
| 76 | } | ||
| 77 | |||
| 78 | static struct nfs4_state_owner * | ||
| 79 | nfs4_client_grab_unused(struct nfs_client *clp, struct rpc_cred *cred) | ||
| 80 | { | ||
| 81 | struct nfs4_state_owner *sp = NULL; | ||
| 82 | |||
| 83 | if (!list_empty(&clp->cl_unused)) { | ||
| 84 | sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list); | ||
| 85 | atomic_inc(&sp->so_count); | ||
| 86 | sp->so_cred = cred; | ||
| 87 | list_move(&sp->so_list, &clp->cl_state_owners); | ||
| 88 | clp->cl_nunused--; | ||
| 89 | } | ||
| 90 | return sp; | ||
| 91 | } | ||
| 92 | |||
| 93 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) | 74 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) |
| 94 | { | 75 | { |
| 95 | struct nfs4_state_owner *sp; | 76 | struct nfs4_state_owner *sp; |
| 77 | struct rb_node *pos; | ||
| 96 | struct rpc_cred *cred = NULL; | 78 | struct rpc_cred *cred = NULL; |
| 97 | 79 | ||
| 98 | list_for_each_entry(sp, &clp->cl_state_owners, so_list) { | 80 | for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { |
| 81 | sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); | ||
| 99 | if (list_empty(&sp->so_states)) | 82 | if (list_empty(&sp->so_states)) |
| 100 | continue; | 83 | continue; |
| 101 | cred = get_rpccred(sp->so_cred); | 84 | cred = get_rpccred(sp->so_cred); |
| @@ -107,32 +90,146 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) | |||
| 107 | static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) | 90 | static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) |
| 108 | { | 91 | { |
| 109 | struct nfs4_state_owner *sp; | 92 | struct nfs4_state_owner *sp; |
| 93 | struct rb_node *pos; | ||
| 110 | 94 | ||
| 111 | if (!list_empty(&clp->cl_state_owners)) { | 95 | pos = rb_first(&clp->cl_state_owners); |
| 112 | sp = list_entry(clp->cl_state_owners.next, | 96 | if (pos != NULL) { |
| 113 | struct nfs4_state_owner, so_list); | 97 | sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); |
| 114 | return get_rpccred(sp->so_cred); | 98 | return get_rpccred(sp->so_cred); |
| 115 | } | 99 | } |
| 116 | return NULL; | 100 | return NULL; |
| 117 | } | 101 | } |
| 118 | 102 | ||
| 103 | static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new, | ||
| 104 | __u64 minval, int maxbits) | ||
| 105 | { | ||
| 106 | struct rb_node **p, *parent; | ||
| 107 | struct nfs_unique_id *pos; | ||
| 108 | __u64 mask = ~0ULL; | ||
| 109 | |||
| 110 | if (maxbits < 64) | ||
| 111 | mask = (1ULL << maxbits) - 1ULL; | ||
| 112 | |||
| 113 | /* Ensure distribution is more or less flat */ | ||
| 114 | get_random_bytes(&new->id, sizeof(new->id)); | ||
| 115 | new->id &= mask; | ||
| 116 | if (new->id < minval) | ||
| 117 | new->id += minval; | ||
| 118 | retry: | ||
| 119 | p = &root->rb_node; | ||
| 120 | parent = NULL; | ||
| 121 | |||
| 122 | while (*p != NULL) { | ||
| 123 | parent = *p; | ||
| 124 | pos = rb_entry(parent, struct nfs_unique_id, rb_node); | ||
| 125 | |||
| 126 | if (new->id < pos->id) | ||
| 127 | p = &(*p)->rb_left; | ||
| 128 | else if (new->id > pos->id) | ||
| 129 | p = &(*p)->rb_right; | ||
| 130 | else | ||
| 131 | goto id_exists; | ||
| 132 | } | ||
| 133 | rb_link_node(&new->rb_node, parent, p); | ||
| 134 | rb_insert_color(&new->rb_node, root); | ||
| 135 | return; | ||
| 136 | id_exists: | ||
| 137 | for (;;) { | ||
| 138 | new->id++; | ||
| 139 | if (new->id < minval || (new->id & mask) != new->id) { | ||
| 140 | new->id = minval; | ||
| 141 | break; | ||
| 142 | } | ||
| 143 | parent = rb_next(parent); | ||
| 144 | if (parent == NULL) | ||
| 145 | break; | ||
| 146 | pos = rb_entry(parent, struct nfs_unique_id, rb_node); | ||
| 147 | if (new->id < pos->id) | ||
| 148 | break; | ||
| 149 | } | ||
| 150 | goto retry; | ||
| 151 | } | ||
| 152 | |||
| 153 | static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id) | ||
| 154 | { | ||
| 155 | rb_erase(&id->rb_node, root); | ||
| 156 | } | ||
| 157 | |||
| 119 | static struct nfs4_state_owner * | 158 | static struct nfs4_state_owner * |
| 120 | nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred) | 159 | nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred) |
| 121 | { | 160 | { |
| 161 | struct nfs_client *clp = server->nfs_client; | ||
| 162 | struct rb_node **p = &clp->cl_state_owners.rb_node, | ||
| 163 | *parent = NULL; | ||
| 122 | struct nfs4_state_owner *sp, *res = NULL; | 164 | struct nfs4_state_owner *sp, *res = NULL; |
| 123 | 165 | ||
| 124 | list_for_each_entry(sp, &clp->cl_state_owners, so_list) { | 166 | while (*p != NULL) { |
| 125 | if (sp->so_cred != cred) | 167 | parent = *p; |
| 168 | sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); | ||
| 169 | |||
| 170 | if (server < sp->so_server) { | ||
| 171 | p = &parent->rb_left; | ||
| 126 | continue; | 172 | continue; |
| 127 | atomic_inc(&sp->so_count); | 173 | } |
| 128 | /* Move to the head of the list */ | 174 | if (server > sp->so_server) { |
| 129 | list_move(&sp->so_list, &clp->cl_state_owners); | 175 | p = &parent->rb_right; |
| 130 | res = sp; | 176 | continue; |
| 131 | break; | 177 | } |
| 178 | if (cred < sp->so_cred) | ||
| 179 | p = &parent->rb_left; | ||
| 180 | else if (cred > sp->so_cred) | ||
| 181 | p = &parent->rb_right; | ||
| 182 | else { | ||
| 183 | atomic_inc(&sp->so_count); | ||
| 184 | res = sp; | ||
| 185 | break; | ||
| 186 | } | ||
| 132 | } | 187 | } |
| 133 | return res; | 188 | return res; |
| 134 | } | 189 | } |
| 135 | 190 | ||
| 191 | static struct nfs4_state_owner * | ||
| 192 | nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new) | ||
| 193 | { | ||
| 194 | struct rb_node **p = &clp->cl_state_owners.rb_node, | ||
| 195 | *parent = NULL; | ||
| 196 | struct nfs4_state_owner *sp; | ||
| 197 | |||
| 198 | while (*p != NULL) { | ||
| 199 | parent = *p; | ||
| 200 | sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); | ||
| 201 | |||
| 202 | if (new->so_server < sp->so_server) { | ||
| 203 | p = &parent->rb_left; | ||
| 204 | continue; | ||
| 205 | } | ||
| 206 | if (new->so_server > sp->so_server) { | ||
| 207 | p = &parent->rb_right; | ||
| 208 | continue; | ||
| 209 | } | ||
| 210 | if (new->so_cred < sp->so_cred) | ||
| 211 | p = &parent->rb_left; | ||
| 212 | else if (new->so_cred > sp->so_cred) | ||
| 213 | p = &parent->rb_right; | ||
| 214 | else { | ||
| 215 | atomic_inc(&sp->so_count); | ||
| 216 | return sp; | ||
| 217 | } | ||
| 218 | } | ||
| 219 | nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64); | ||
| 220 | rb_link_node(&new->so_client_node, parent, p); | ||
| 221 | rb_insert_color(&new->so_client_node, &clp->cl_state_owners); | ||
| 222 | return new; | ||
| 223 | } | ||
| 224 | |||
| 225 | static void | ||
| 226 | nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp) | ||
| 227 | { | ||
| 228 | if (!RB_EMPTY_NODE(&sp->so_client_node)) | ||
| 229 | rb_erase(&sp->so_client_node, &clp->cl_state_owners); | ||
| 230 | nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id); | ||
| 231 | } | ||
| 232 | |||
| 136 | /* | 233 | /* |
| 137 | * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to | 234 | * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to |
| 138 | * create a new state_owner. | 235 | * create a new state_owner. |
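Editor's note: state owners and open/lock owner ids now live in red-black trees keyed by (server, cred) and by a 64-bit id, replacing the old linear list and monotonically increasing counter. The walk used by nfs4_insert_state_owner() and nfs_alloc_unique_id() above is the standard rbtree descend-then-link pattern; here is a stripped-down sketch with a hypothetical demo_node (illustration only, not part of the patch).

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node rb;
	u64 key;
};

/* Returns the existing node on a key collision, otherwise inserts 'new'. */
static struct demo_node *demo_insert(struct rb_root *root, struct demo_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p != NULL) {
		struct demo_node *cur = rb_entry(*p, struct demo_node, rb);

		parent = *p;
		if (new->key < cur->key)
			p = &(*p)->rb_left;
		else if (new->key > cur->key)
			p = &(*p)->rb_right;
		else
			return cur;
	}
	rb_link_node(&new->rb, parent, p);
	rb_insert_color(&new->rb, root);
	return new;
}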
| @@ -160,10 +257,14 @@ nfs4_alloc_state_owner(void) | |||
| 160 | void | 257 | void |
| 161 | nfs4_drop_state_owner(struct nfs4_state_owner *sp) | 258 | nfs4_drop_state_owner(struct nfs4_state_owner *sp) |
| 162 | { | 259 | { |
| 163 | struct nfs_client *clp = sp->so_client; | 260 | if (!RB_EMPTY_NODE(&sp->so_client_node)) { |
| 164 | spin_lock(&clp->cl_lock); | 261 | struct nfs_client *clp = sp->so_client; |
| 165 | list_del_init(&sp->so_list); | 262 | |
| 166 | spin_unlock(&clp->cl_lock); | 263 | spin_lock(&clp->cl_lock); |
| 264 | rb_erase(&sp->so_client_node, &clp->cl_state_owners); | ||
| 265 | RB_CLEAR_NODE(&sp->so_client_node); | ||
| 266 | spin_unlock(&clp->cl_lock); | ||
| 267 | } | ||
| 167 | } | 268 | } |
| 168 | 269 | ||
| 169 | /* | 270 | /* |
| @@ -175,26 +276,25 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct | |||
| 175 | struct nfs_client *clp = server->nfs_client; | 276 | struct nfs_client *clp = server->nfs_client; |
| 176 | struct nfs4_state_owner *sp, *new; | 277 | struct nfs4_state_owner *sp, *new; |
| 177 | 278 | ||
| 178 | get_rpccred(cred); | ||
| 179 | new = nfs4_alloc_state_owner(); | ||
| 180 | spin_lock(&clp->cl_lock); | 279 | spin_lock(&clp->cl_lock); |
| 181 | sp = nfs4_find_state_owner(clp, cred); | 280 | sp = nfs4_find_state_owner(server, cred); |
| 182 | if (sp == NULL) | ||
| 183 | sp = nfs4_client_grab_unused(clp, cred); | ||
| 184 | if (sp == NULL && new != NULL) { | ||
| 185 | list_add(&new->so_list, &clp->cl_state_owners); | ||
| 186 | new->so_client = clp; | ||
| 187 | new->so_id = nfs4_alloc_lockowner_id(clp); | ||
| 188 | new->so_cred = cred; | ||
| 189 | sp = new; | ||
| 190 | new = NULL; | ||
| 191 | } | ||
| 192 | spin_unlock(&clp->cl_lock); | 281 | spin_unlock(&clp->cl_lock); |
| 193 | kfree(new); | ||
| 194 | if (sp != NULL) | 282 | if (sp != NULL) |
| 195 | return sp; | 283 | return sp; |
| 196 | put_rpccred(cred); | 284 | new = nfs4_alloc_state_owner(); |
| 197 | return NULL; | 285 | if (new == NULL) |
| 286 | return NULL; | ||
| 287 | new->so_client = clp; | ||
| 288 | new->so_server = server; | ||
| 289 | new->so_cred = cred; | ||
| 290 | spin_lock(&clp->cl_lock); | ||
| 291 | sp = nfs4_insert_state_owner(clp, new); | ||
| 292 | spin_unlock(&clp->cl_lock); | ||
| 293 | if (sp == new) | ||
| 294 | get_rpccred(cred); | ||
| 295 | else | ||
| 296 | kfree(new); | ||
| 297 | return sp; | ||
| 198 | } | 298 | } |
| 199 | 299 | ||
| 200 | /* | 300 | /* |
| @@ -208,18 +308,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp) | |||
| 208 | 308 | ||
| 209 | if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) | 309 | if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) |
| 210 | return; | 310 | return; |
| 211 | if (clp->cl_nunused >= OPENOWNER_POOL_SIZE) | 311 | nfs4_remove_state_owner(clp, sp); |
| 212 | goto out_free; | ||
| 213 | if (list_empty(&sp->so_list)) | ||
| 214 | goto out_free; | ||
| 215 | list_move(&sp->so_list, &clp->cl_unused); | ||
| 216 | clp->cl_nunused++; | ||
| 217 | spin_unlock(&clp->cl_lock); | ||
| 218 | put_rpccred(cred); | ||
| 219 | cred = NULL; | ||
| 220 | return; | ||
| 221 | out_free: | ||
| 222 | list_del(&sp->so_list); | ||
| 223 | spin_unlock(&clp->cl_lock); | 312 | spin_unlock(&clp->cl_lock); |
| 224 | put_rpccred(cred); | 313 | put_rpccred(cred); |
| 225 | kfree(sp); | 314 | kfree(sp); |
| @@ -236,6 +325,7 @@ nfs4_alloc_open_state(void) | |||
| 236 | atomic_set(&state->count, 1); | 325 | atomic_set(&state->count, 1); |
| 237 | INIT_LIST_HEAD(&state->lock_states); | 326 | INIT_LIST_HEAD(&state->lock_states); |
| 238 | spin_lock_init(&state->state_lock); | 327 | spin_lock_init(&state->state_lock); |
| 328 | seqlock_init(&state->seqlock); | ||
| 239 | return state; | 329 | return state; |
| 240 | } | 330 | } |
| 241 | 331 | ||
| @@ -263,13 +353,10 @@ __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner) | |||
| 263 | struct nfs4_state *state; | 353 | struct nfs4_state *state; |
| 264 | 354 | ||
| 265 | list_for_each_entry(state, &nfsi->open_states, inode_states) { | 355 | list_for_each_entry(state, &nfsi->open_states, inode_states) { |
| 266 | /* Is this in the process of being freed? */ | 356 | if (state->owner != owner) |
| 267 | if (state->state == 0) | ||
| 268 | continue; | 357 | continue; |
| 269 | if (state->owner == owner) { | 358 | if (atomic_inc_not_zero(&state->count)) |
| 270 | atomic_inc(&state->count); | ||
| 271 | return state; | 359 | return state; |
| 272 | } | ||
| 273 | } | 360 | } |
| 274 | return NULL; | 361 | return NULL; |
| 275 | } | 362 | } |
| @@ -341,16 +428,15 @@ void nfs4_put_open_state(struct nfs4_state *state) | |||
| 341 | /* | 428 | /* |
| 342 | * Close the current file. | 429 | * Close the current file. |
| 343 | */ | 430 | */ |
| 344 | void nfs4_close_state(struct nfs4_state *state, mode_t mode) | 431 | void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode) |
| 345 | { | 432 | { |
| 346 | struct inode *inode = state->inode; | ||
| 347 | struct nfs4_state_owner *owner = state->owner; | 433 | struct nfs4_state_owner *owner = state->owner; |
| 348 | int oldstate, newstate = 0; | 434 | int call_close = 0; |
| 435 | int newstate; | ||
| 349 | 436 | ||
| 350 | atomic_inc(&owner->so_count); | 437 | atomic_inc(&owner->so_count); |
| 351 | /* Protect against nfs4_find_state() */ | 438 | /* Protect against nfs4_find_state() */ |
| 352 | spin_lock(&owner->so_lock); | 439 | spin_lock(&owner->so_lock); |
| 353 | spin_lock(&inode->i_lock); | ||
| 354 | switch (mode & (FMODE_READ | FMODE_WRITE)) { | 440 | switch (mode & (FMODE_READ | FMODE_WRITE)) { |
| 355 | case FMODE_READ: | 441 | case FMODE_READ: |
| 356 | state->n_rdonly--; | 442 | state->n_rdonly--; |
| @@ -361,24 +447,29 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode) | |||
| 361 | case FMODE_READ|FMODE_WRITE: | 447 | case FMODE_READ|FMODE_WRITE: |
| 362 | state->n_rdwr--; | 448 | state->n_rdwr--; |
| 363 | } | 449 | } |
| 364 | oldstate = newstate = state->state; | 450 | newstate = FMODE_READ|FMODE_WRITE; |
| 365 | if (state->n_rdwr == 0) { | 451 | if (state->n_rdwr == 0) { |
| 366 | if (state->n_rdonly == 0) | 452 | if (state->n_rdonly == 0) { |
| 367 | newstate &= ~FMODE_READ; | 453 | newstate &= ~FMODE_READ; |
| 368 | if (state->n_wronly == 0) | 454 | call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); |
| 455 | call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); | ||
| 456 | } | ||
| 457 | if (state->n_wronly == 0) { | ||
| 369 | newstate &= ~FMODE_WRITE; | 458 | newstate &= ~FMODE_WRITE; |
| 459 | call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); | ||
| 460 | call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); | ||
| 461 | } | ||
| 462 | if (newstate == 0) | ||
| 463 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | ||
| 370 | } | 464 | } |
| 371 | if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { | 465 | nfs4_state_set_mode_locked(state, newstate); |
| 372 | nfs4_state_set_mode_locked(state, newstate); | ||
| 373 | oldstate = newstate; | ||
| 374 | } | ||
| 375 | spin_unlock(&inode->i_lock); | ||
| 376 | spin_unlock(&owner->so_lock); | 466 | spin_unlock(&owner->so_lock); |
| 377 | 467 | ||
| 378 | if (oldstate != newstate && nfs4_do_close(inode, state) == 0) | 468 | if (!call_close) { |
| 379 | return; | 469 | nfs4_put_open_state(state); |
| 380 | nfs4_put_open_state(state); | 470 | nfs4_put_state_owner(owner); |
| 381 | nfs4_put_state_owner(owner); | 471 | } else |
| 472 | nfs4_do_close(path, state); | ||
| 382 | } | 473 | } |
| 383 | 474 | ||
| 384 | /* | 475 | /* |
| @@ -415,12 +506,22 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f | |||
| 415 | atomic_set(&lsp->ls_count, 1); | 506 | atomic_set(&lsp->ls_count, 1); |
| 416 | lsp->ls_owner = fl_owner; | 507 | lsp->ls_owner = fl_owner; |
| 417 | spin_lock(&clp->cl_lock); | 508 | spin_lock(&clp->cl_lock); |
| 418 | lsp->ls_id = nfs4_alloc_lockowner_id(clp); | 509 | nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64); |
| 419 | spin_unlock(&clp->cl_lock); | 510 | spin_unlock(&clp->cl_lock); |
| 420 | INIT_LIST_HEAD(&lsp->ls_locks); | 511 | INIT_LIST_HEAD(&lsp->ls_locks); |
| 421 | return lsp; | 512 | return lsp; |
| 422 | } | 513 | } |
| 423 | 514 | ||
| 515 | static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) | ||
| 516 | { | ||
| 517 | struct nfs_client *clp = lsp->ls_state->owner->so_client; | ||
| 518 | |||
| 519 | spin_lock(&clp->cl_lock); | ||
| 520 | nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id); | ||
| 521 | spin_unlock(&clp->cl_lock); | ||
| 522 | kfree(lsp); | ||
| 523 | } | ||
| 524 | |||
| 424 | /* | 525 | /* |
| 425 | * Return a compatible lock_state. If no initialized lock_state structure | 526 | * Return a compatible lock_state. If no initialized lock_state structure |
| 426 | * exists, return an uninitialized one. | 527 | * exists, return an uninitialized one. |
| @@ -450,7 +551,8 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ | |||
| 450 | return NULL; | 551 | return NULL; |
| 451 | } | 552 | } |
| 452 | spin_unlock(&state->state_lock); | 553 | spin_unlock(&state->state_lock); |
| 453 | kfree(new); | 554 | if (new != NULL) |
| 555 | nfs4_free_lock_state(new); | ||
| 454 | return lsp; | 556 | return lsp; |
| 455 | } | 557 | } |
| 456 | 558 | ||
| @@ -471,7 +573,7 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp) | |||
| 471 | if (list_empty(&state->lock_states)) | 573 | if (list_empty(&state->lock_states)) |
| 472 | clear_bit(LK_STATE_IN_USE, &state->flags); | 574 | clear_bit(LK_STATE_IN_USE, &state->flags); |
| 473 | spin_unlock(&state->state_lock); | 575 | spin_unlock(&state->state_lock); |
| 474 | kfree(lsp); | 576 | nfs4_free_lock_state(lsp); |
| 475 | } | 577 | } |
| 476 | 578 | ||
| 477 | static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) | 579 | static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) |
| @@ -513,8 +615,12 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) | |||
| 513 | void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner) | 615 | void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner) |
| 514 | { | 616 | { |
| 515 | struct nfs4_lock_state *lsp; | 617 | struct nfs4_lock_state *lsp; |
| 618 | int seq; | ||
| 516 | 619 | ||
| 517 | memcpy(dst, &state->stateid, sizeof(*dst)); | 620 | do { |
| 621 | seq = read_seqbegin(&state->seqlock); | ||
| 622 | memcpy(dst, &state->stateid, sizeof(*dst)); | ||
| 623 | } while (read_seqretry(&state->seqlock, seq)); | ||
| 518 | if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) | 624 | if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) |
| 519 | return; | 625 | return; |
| 520 | 626 | ||
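Editor's note: nfs4_copy_stateid() now samples state->stateid under the new state->seqlock, retrying whenever a writer raced with the copy; the update side (nfs_set_open_stateid and friends, not shown in this hunk) presumably takes write_seqlock() around the store. A bare sketch of that reader/writer pairing, using a hypothetical demo_state rather than the real nfs4_state:

#include <linux/seqlock.h>
#include <linux/string.h>

struct demo_state {
	seqlock_t seqlock;
	char stateid[16];	/* NFSv4 stateids are 16 bytes */
};

static void demo_state_init(struct demo_state *st)
{
	seqlock_init(&st->seqlock);
	memset(st->stateid, 0, sizeof(st->stateid));
}

static void demo_set_stateid(struct demo_state *st, const char *src)
{
	write_seqlock(&st->seqlock);	/* bumps the sequence and takes the lock */
	memcpy(st->stateid, src, sizeof(st->stateid));
	write_sequnlock(&st->seqlock);
}

static void demo_copy_stateid(struct demo_state *st, char *dst)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&st->seqlock);
		memcpy(dst, st->stateid, sizeof(st->stateid));
	} while (read_seqretry(&st->seqlock, seq));	/* retry if a write raced */
}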
| @@ -557,12 +663,18 @@ void nfs_free_seqid(struct nfs_seqid *seqid) | |||
| 557 | * failed with a seqid incrementing error - | 663 | * failed with a seqid incrementing error - |
| 558 | * see comments nfs_fs.h:seqid_mutating_error() | 664 | * see comments nfs_fs.h:seqid_mutating_error() |
| 559 | */ | 665 | */ |
| 560 | static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | 666 | static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) |
| 561 | { | 667 | { |
| 562 | switch (status) { | 668 | switch (status) { |
| 563 | case 0: | 669 | case 0: |
| 564 | break; | 670 | break; |
| 565 | case -NFS4ERR_BAD_SEQID: | 671 | case -NFS4ERR_BAD_SEQID: |
| 672 | if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) | ||
| 673 | return; | ||
| 674 | printk(KERN_WARNING "NFS: v4 server returned a bad" | ||
| 675 | "sequence-id error on an" | ||
| 676 | "unconfirmed sequence %p!\n", | ||
| 677 | seqid->sequence); | ||
| 566 | case -NFS4ERR_STALE_CLIENTID: | 678 | case -NFS4ERR_STALE_CLIENTID: |
| 567 | case -NFS4ERR_STALE_STATEID: | 679 | case -NFS4ERR_STALE_STATEID: |
| 568 | case -NFS4ERR_BAD_STATEID: | 680 | case -NFS4ERR_BAD_STATEID: |
| @@ -586,7 +698,7 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) | |||
| 586 | struct nfs4_state_owner, so_seqid); | 698 | struct nfs4_state_owner, so_seqid); |
| 587 | nfs4_drop_state_owner(sp); | 699 | nfs4_drop_state_owner(sp); |
| 588 | } | 700 | } |
| 589 | return nfs_increment_seqid(status, seqid); | 701 | nfs_increment_seqid(status, seqid); |
| 590 | } | 702 | } |
| 591 | 703 | ||
| 592 | /* | 704 | /* |
| @@ -596,7 +708,7 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) | |||
| 596 | */ | 708 | */ |
| 597 | void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) | 709 | void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) |
| 598 | { | 710 | { |
| 599 | return nfs_increment_seqid(status, seqid); | 711 | nfs_increment_seqid(status, seqid); |
| 600 | } | 712 | } |
| 601 | 713 | ||
| 602 | int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) | 714 | int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) |
| @@ -748,15 +860,21 @@ out_err: | |||
| 748 | static void nfs4_state_mark_reclaim(struct nfs_client *clp) | 860 | static void nfs4_state_mark_reclaim(struct nfs_client *clp) |
| 749 | { | 861 | { |
| 750 | struct nfs4_state_owner *sp; | 862 | struct nfs4_state_owner *sp; |
| 863 | struct rb_node *pos; | ||
| 751 | struct nfs4_state *state; | 864 | struct nfs4_state *state; |
| 752 | struct nfs4_lock_state *lock; | 865 | struct nfs4_lock_state *lock; |
| 753 | 866 | ||
| 754 | /* Reset all sequence ids to zero */ | 867 | /* Reset all sequence ids to zero */ |
| 755 | list_for_each_entry(sp, &clp->cl_state_owners, so_list) { | 868 | for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { |
| 869 | sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); | ||
| 756 | sp->so_seqid.counter = 0; | 870 | sp->so_seqid.counter = 0; |
| 757 | sp->so_seqid.flags = 0; | 871 | sp->so_seqid.flags = 0; |
| 758 | spin_lock(&sp->so_lock); | 872 | spin_lock(&sp->so_lock); |
| 759 | list_for_each_entry(state, &sp->so_states, open_states) { | 873 | list_for_each_entry(state, &sp->so_states, open_states) { |
| 874 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | ||
| 875 | clear_bit(NFS_O_RDONLY_STATE, &state->flags); | ||
| 876 | clear_bit(NFS_O_WRONLY_STATE, &state->flags); | ||
| 877 | clear_bit(NFS_O_RDWR_STATE, &state->flags); | ||
| 760 | list_for_each_entry(lock, &state->lock_states, ls_locks) { | 878 | list_for_each_entry(lock, &state->lock_states, ls_locks) { |
| 761 | lock->ls_seqid.counter = 0; | 879 | lock->ls_seqid.counter = 0; |
| 762 | lock->ls_seqid.flags = 0; | 880 | lock->ls_seqid.flags = 0; |
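nfs4_state_mark_reclaim() now walks the client's state owners as a red-black tree (rb_first()/rb_next() plus rb_entry()) instead of a linked list, matching the new cl_state_owners representation introduced elsewhere in this series. The generic shape of that walk, with illustrative names:

#include <linux/rbtree.h>

struct demo_owner {
        struct rb_node node;            /* linkage inside the rb-tree */
        unsigned int seqid_counter;
};

static void demo_reset_owners(struct rb_root *root)
{
        struct rb_node *pos;

        for (pos = rb_first(root); pos != NULL; pos = rb_next(pos)) {
                struct demo_owner *sp = rb_entry(pos, struct demo_owner, node);

                sp->seqid_counter = 0;  /* per-owner reset, as the patch does */
        }
}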
| @@ -771,6 +889,7 @@ static int reclaimer(void *ptr) | |||
| 771 | { | 889 | { |
| 772 | struct nfs_client *clp = ptr; | 890 | struct nfs_client *clp = ptr; |
| 773 | struct nfs4_state_owner *sp; | 891 | struct nfs4_state_owner *sp; |
| 892 | struct rb_node *pos; | ||
| 774 | struct nfs4_state_recovery_ops *ops; | 893 | struct nfs4_state_recovery_ops *ops; |
| 775 | struct rpc_cred *cred; | 894 | struct rpc_cred *cred; |
| 776 | int status = 0; | 895 | int status = 0; |
| @@ -816,7 +935,8 @@ restart_loop: | |||
| 816 | /* Mark all delegations for reclaim */ | 935 | /* Mark all delegations for reclaim */ |
| 817 | nfs_delegation_mark_reclaim(clp); | 936 | nfs_delegation_mark_reclaim(clp); |
| 818 | /* Note: list is protected by exclusive lock on cl->cl_sem */ | 937 | /* Note: list is protected by exclusive lock on cl->cl_sem */ |
| 819 | list_for_each_entry(sp, &clp->cl_state_owners, so_list) { | 938 | for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { |
| 939 | sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); | ||
| 820 | status = nfs4_reclaim_open_state(ops, sp); | 940 | status = nfs4_reclaim_open_state(ops, sp); |
| 821 | if (status < 0) { | 941 | if (status < 0) { |
| 822 | if (status == -NFS4ERR_NO_GRACE) { | 942 | if (status == -NFS4ERR_NO_GRACE) { |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 8003c91ccb9a..c08738441f73 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
| @@ -68,9 +68,10 @@ static int nfs4_stat_to_errno(int); | |||
| 68 | #endif | 68 | #endif |
| 69 | 69 | ||
| 70 | /* lock,open owner id: | 70 | /* lock,open owner id: |
| 71 | * we currently use size 1 (u32) out of (NFS4_OPAQUE_LIMIT >> 2) | 71 | * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) |
| 72 | */ | 72 | */ |
| 73 | #define owner_id_maxsz (1 + 1) | 73 | #define open_owner_id_maxsz (1 + 4) |
| 74 | #define lock_owner_id_maxsz (1 + 4) | ||
| 74 | #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) | 75 | #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) |
| 75 | #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) | 76 | #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) |
| 76 | #define op_encode_hdr_maxsz (1) | 77 | #define op_encode_hdr_maxsz (1) |
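For reference, the (1 + 4) value of the new owner-id constants follows from XDR word counting; the breakdown below is a reading of the macros above, not additional kernel code:

/*
 * The owner is now sent as a 16-byte XDR opaque ("open id:" or
 * "lock id:" followed by a 64-bit counter), and the *_maxsz macros
 * count 32-bit XDR words:
 *
 *      1 word                  opaque length
 *      + XDR_QUADLEN(16) = 4   opaque body, rounded up to whole words
 *      ----------------------
 *      = 5 words, hence (1 + 4) for both open_owner_id_maxsz and
 *        lock_owner_id_maxsz
 */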
| @@ -87,9 +88,11 @@ static int nfs4_stat_to_errno(int); | |||
| 87 | #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) | 88 | #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) |
| 88 | #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) | 89 | #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) |
| 89 | #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) | 90 | #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) |
| 91 | #define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) | ||
| 92 | #define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) | ||
| 90 | /* This is based on getfattr, which uses the most attributes: */ | 93 | /* This is based on getfattr, which uses the most attributes: */ |
| 91 | #define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \ | 94 | #define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \ |
| 92 | 3 + 3 + 3 + 2 * nfs4_name_maxsz)) | 95 | 3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz)) |
| 93 | #define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \ | 96 | #define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \ |
| 94 | nfs4_fattr_value_maxsz) | 97 | nfs4_fattr_value_maxsz) |
| 95 | #define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) | 98 | #define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) |
| @@ -116,8 +119,27 @@ static int nfs4_stat_to_errno(int); | |||
| 116 | 3 + (NFS4_VERIFIER_SIZE >> 2)) | 119 | 3 + (NFS4_VERIFIER_SIZE >> 2)) |
| 117 | #define decode_setclientid_confirm_maxsz \ | 120 | #define decode_setclientid_confirm_maxsz \ |
| 118 | (op_decode_hdr_maxsz) | 121 | (op_decode_hdr_maxsz) |
| 119 | #define encode_lookup_maxsz (op_encode_hdr_maxsz + \ | 122 | #define encode_lookup_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) |
| 120 | 1 + ((3 + NFS4_FHSIZE) >> 2)) | 123 | #define decode_lookup_maxsz (op_decode_hdr_maxsz) |
| 124 | #define encode_share_access_maxsz \ | ||
| 125 | (2) | ||
| 126 | #define encode_createmode_maxsz (1 + nfs4_fattr_maxsz) | ||
| 127 | #define encode_opentype_maxsz (1 + encode_createmode_maxsz) | ||
| 128 | #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) | ||
| 129 | #define encode_open_maxsz (op_encode_hdr_maxsz + \ | ||
| 130 | 2 + encode_share_access_maxsz + 2 + \ | ||
| 131 | open_owner_id_maxsz + \ | ||
| 132 | encode_opentype_maxsz + \ | ||
| 133 | encode_claim_null_maxsz) | ||
| 134 | #define decode_ace_maxsz (3 + nfs4_owner_maxsz) | ||
| 135 | #define decode_delegation_maxsz (1 + XDR_QUADLEN(NFS4_STATEID_SIZE) + 1 + \ | ||
| 136 | decode_ace_maxsz) | ||
| 137 | #define decode_change_info_maxsz (5) | ||
| 138 | #define decode_open_maxsz (op_decode_hdr_maxsz + \ | ||
| 139 | XDR_QUADLEN(NFS4_STATEID_SIZE) + \ | ||
| 140 | decode_change_info_maxsz + 1 + \ | ||
| 141 | nfs4_fattr_bitmap_maxsz + \ | ||
| 142 | decode_delegation_maxsz) | ||
| 121 | #define encode_remove_maxsz (op_encode_hdr_maxsz + \ | 143 | #define encode_remove_maxsz (op_encode_hdr_maxsz + \ |
| 122 | nfs4_name_maxsz) | 144 | nfs4_name_maxsz) |
| 123 | #define encode_rename_maxsz (op_encode_hdr_maxsz + \ | 145 | #define encode_rename_maxsz (op_encode_hdr_maxsz + \ |
| @@ -134,9 +156,15 @@ static int nfs4_stat_to_errno(int); | |||
| 134 | #define encode_create_maxsz (op_encode_hdr_maxsz + \ | 156 | #define encode_create_maxsz (op_encode_hdr_maxsz + \ |
| 135 | 2 + nfs4_name_maxsz + \ | 157 | 2 + nfs4_name_maxsz + \ |
| 136 | nfs4_fattr_maxsz) | 158 | nfs4_fattr_maxsz) |
| 137 | #define decode_create_maxsz (op_decode_hdr_maxsz + 8) | 159 | #define decode_create_maxsz (op_decode_hdr_maxsz + \ |
| 160 | decode_change_info_maxsz + \ | ||
| 161 | nfs4_fattr_bitmap_maxsz) | ||
| 138 | #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) | 162 | #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) |
| 139 | #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) | 163 | #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) |
| 164 | #define encode_fs_locations_maxsz \ | ||
| 165 | (encode_getattr_maxsz) | ||
| 166 | #define decode_fs_locations_maxsz \ | ||
| 167 | (0) | ||
| 140 | #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */ | 168 | #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */ |
| 141 | #define NFS4_dec_compound_sz (1024) /* XXX: large enough? */ | 169 | #define NFS4_dec_compound_sz (1024) /* XXX: large enough? */ |
| 142 | #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \ | 170 | #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \ |
| @@ -174,16 +202,21 @@ static int nfs4_stat_to_errno(int); | |||
| 174 | op_decode_hdr_maxsz + 2 + \ | 202 | op_decode_hdr_maxsz + 2 + \ |
| 175 | decode_getattr_maxsz) | 203 | decode_getattr_maxsz) |
| 176 | #define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ | 204 | #define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ |
| 177 | encode_putfh_maxsz + \ | 205 | encode_putfh_maxsz + \ |
| 178 | op_encode_hdr_maxsz + \ | 206 | encode_savefh_maxsz + \ |
| 179 | 13 + 3 + 2 + 64 + \ | 207 | encode_open_maxsz + \ |
| 180 | encode_getattr_maxsz + \ | 208 | encode_getfh_maxsz + \ |
| 181 | encode_getfh_maxsz) | 209 | encode_getattr_maxsz + \ |
| 210 | encode_restorefh_maxsz + \ | ||
| 211 | encode_getattr_maxsz) | ||
| 182 | #define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \ | 212 | #define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \ |
| 183 | decode_putfh_maxsz + \ | 213 | decode_putfh_maxsz + \ |
| 184 | op_decode_hdr_maxsz + 4 + 5 + 2 + 3 + \ | 214 | decode_savefh_maxsz + \ |
| 185 | decode_getattr_maxsz + \ | 215 | decode_open_maxsz + \ |
| 186 | decode_getfh_maxsz) | 216 | decode_getfh_maxsz + \ |
| 217 | decode_getattr_maxsz + \ | ||
| 218 | decode_restorefh_maxsz + \ | ||
| 219 | decode_getattr_maxsz) | ||
| 187 | #define NFS4_enc_open_confirm_sz \ | 220 | #define NFS4_enc_open_confirm_sz \ |
| 188 | (compound_encode_hdr_maxsz + \ | 221 | (compound_encode_hdr_maxsz + \ |
| 189 | encode_putfh_maxsz + \ | 222 | encode_putfh_maxsz + \ |
| @@ -193,12 +226,12 @@ static int nfs4_stat_to_errno(int); | |||
| 193 | op_decode_hdr_maxsz + 4) | 226 | op_decode_hdr_maxsz + 4) |
| 194 | #define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \ | 227 | #define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \ |
| 195 | encode_putfh_maxsz + \ | 228 | encode_putfh_maxsz + \ |
| 196 | op_encode_hdr_maxsz + \ | 229 | encode_open_maxsz + \ |
| 197 | 11) | 230 | encode_getattr_maxsz) |
| 198 | #define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \ | 231 | #define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \ |
| 199 | decode_putfh_maxsz + \ | 232 | decode_putfh_maxsz + \ |
| 200 | op_decode_hdr_maxsz + \ | 233 | decode_open_maxsz + \ |
| 201 | 4 + 5 + 2 + 3) | 234 | decode_getattr_maxsz) |
| 202 | #define NFS4_enc_open_downgrade_sz \ | 235 | #define NFS4_enc_open_downgrade_sz \ |
| 203 | (compound_encode_hdr_maxsz + \ | 236 | (compound_encode_hdr_maxsz + \ |
| 204 | encode_putfh_maxsz + \ | 237 | encode_putfh_maxsz + \ |
| @@ -256,19 +289,19 @@ static int nfs4_stat_to_errno(int); | |||
| 256 | op_encode_hdr_maxsz + \ | 289 | op_encode_hdr_maxsz + \ |
| 257 | 1 + 1 + 2 + 2 + \ | 290 | 1 + 1 + 2 + 2 + \ |
| 258 | 1 + 4 + 1 + 2 + \ | 291 | 1 + 4 + 1 + 2 + \ |
| 259 | owner_id_maxsz) | 292 | lock_owner_id_maxsz) |
| 260 | #define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \ | 293 | #define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \ |
| 261 | decode_putfh_maxsz + \ | 294 | decode_putfh_maxsz + \ |
| 262 | decode_getattr_maxsz + \ | 295 | decode_getattr_maxsz + \ |
| 263 | op_decode_hdr_maxsz + \ | 296 | op_decode_hdr_maxsz + \ |
| 264 | 2 + 2 + 1 + 2 + \ | 297 | 2 + 2 + 1 + 2 + \ |
| 265 | owner_id_maxsz) | 298 | lock_owner_id_maxsz) |
| 266 | #define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \ | 299 | #define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \ |
| 267 | encode_putfh_maxsz + \ | 300 | encode_putfh_maxsz + \ |
| 268 | encode_getattr_maxsz + \ | 301 | encode_getattr_maxsz + \ |
| 269 | op_encode_hdr_maxsz + \ | 302 | op_encode_hdr_maxsz + \ |
| 270 | 1 + 2 + 2 + 2 + \ | 303 | 1 + 2 + 2 + 2 + \ |
| 271 | owner_id_maxsz) | 304 | lock_owner_id_maxsz) |
| 272 | #define NFS4_dec_lockt_sz (NFS4_dec_lock_sz) | 305 | #define NFS4_dec_lockt_sz (NFS4_dec_lock_sz) |
| 273 | #define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \ | 306 | #define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \ |
| 274 | encode_putfh_maxsz + \ | 307 | encode_putfh_maxsz + \ |
| @@ -298,7 +331,7 @@ static int nfs4_stat_to_errno(int); | |||
| 298 | encode_getfh_maxsz) | 331 | encode_getfh_maxsz) |
| 299 | #define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \ | 332 | #define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \ |
| 300 | decode_putfh_maxsz + \ | 333 | decode_putfh_maxsz + \ |
| 301 | op_decode_hdr_maxsz + \ | 334 | decode_lookup_maxsz + \ |
| 302 | decode_getattr_maxsz + \ | 335 | decode_getattr_maxsz + \ |
| 303 | decode_getfh_maxsz) | 336 | decode_getfh_maxsz) |
| 304 | #define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \ | 337 | #define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \ |
| @@ -417,12 +450,13 @@ static int nfs4_stat_to_errno(int); | |||
| 417 | #define NFS4_enc_fs_locations_sz \ | 450 | #define NFS4_enc_fs_locations_sz \ |
| 418 | (compound_encode_hdr_maxsz + \ | 451 | (compound_encode_hdr_maxsz + \ |
| 419 | encode_putfh_maxsz + \ | 452 | encode_putfh_maxsz + \ |
| 420 | encode_getattr_maxsz) | 453 | encode_lookup_maxsz + \ |
| 454 | encode_fs_locations_maxsz) | ||
| 421 | #define NFS4_dec_fs_locations_sz \ | 455 | #define NFS4_dec_fs_locations_sz \ |
| 422 | (compound_decode_hdr_maxsz + \ | 456 | (compound_decode_hdr_maxsz + \ |
| 423 | decode_putfh_maxsz + \ | 457 | decode_putfh_maxsz + \ |
| 424 | op_decode_hdr_maxsz + \ | 458 | decode_lookup_maxsz + \ |
| 425 | nfs4_fattr_bitmap_maxsz) | 459 | decode_fs_locations_maxsz) |
| 426 | 460 | ||
| 427 | static struct { | 461 | static struct { |
| 428 | unsigned int mode; | 462 | unsigned int mode; |
| @@ -793,13 +827,14 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args) | |||
| 793 | WRITE64(nfs4_lock_length(args->fl)); | 827 | WRITE64(nfs4_lock_length(args->fl)); |
| 794 | WRITE32(args->new_lock_owner); | 828 | WRITE32(args->new_lock_owner); |
| 795 | if (args->new_lock_owner){ | 829 | if (args->new_lock_owner){ |
| 796 | RESERVE_SPACE(4+NFS4_STATEID_SIZE+20); | 830 | RESERVE_SPACE(4+NFS4_STATEID_SIZE+32); |
| 797 | WRITE32(args->open_seqid->sequence->counter); | 831 | WRITE32(args->open_seqid->sequence->counter); |
| 798 | WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE); | 832 | WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE); |
| 799 | WRITE32(args->lock_seqid->sequence->counter); | 833 | WRITE32(args->lock_seqid->sequence->counter); |
| 800 | WRITE64(args->lock_owner.clientid); | 834 | WRITE64(args->lock_owner.clientid); |
| 801 | WRITE32(4); | 835 | WRITE32(16); |
| 802 | WRITE32(args->lock_owner.id); | 836 | WRITEMEM("lock id:", 8); |
| 837 | WRITE64(args->lock_owner.id); | ||
| 803 | } | 838 | } |
| 804 | else { | 839 | else { |
| 805 | RESERVE_SPACE(NFS4_STATEID_SIZE+4); | 840 | RESERVE_SPACE(NFS4_STATEID_SIZE+4); |
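The reservation in the new_lock_owner branch grows from 20 to 32 extra bytes because the lock owner is now a 16-byte opaque ("lock id:" plus a 64-bit id) rather than a single 32-bit word. A standalone sketch of the byte accounting, using plain byte-order helpers instead of the file's WRITE32/WRITE64/WRITEMEM macros; the function name and stateid size constant are stand-ins:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define DEMO_STATEID_SIZE 16            /* stand-in for NFS4_STATEID_SIZE */

/*
 * 4 (open seqid) + stateid + 4 (lock seqid) + 8 (clientid)
 * + 4 (opaque length) + 8 ("lock id:") + 8 (u64 id)
 * = 4 + DEMO_STATEID_SIZE + 32
 */
static size_t demo_encode_new_lock_owner(u8 *p, u32 open_seq, const u8 *stateid,
                                         u32 lock_seq, u64 clientid, u64 id)
{
        u8 *start = p;

        *(__be32 *)p = cpu_to_be32(open_seq);           p += 4;
        memcpy(p, stateid, DEMO_STATEID_SIZE);          p += DEMO_STATEID_SIZE;
        *(__be32 *)p = cpu_to_be32(lock_seq);           p += 4;
        *(__be64 *)p = cpu_to_be64(clientid);           p += 8;
        *(__be32 *)p = cpu_to_be32(16);                 p += 4; /* opaque length */
        memcpy(p, "lock id:", 8);                       p += 8;
        *(__be64 *)p = cpu_to_be64(id);                 p += 8;

        return p - start;       /* 4 + DEMO_STATEID_SIZE + 32 */
}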
| @@ -814,14 +849,15 @@ static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *arg | |||
| 814 | { | 849 | { |
| 815 | __be32 *p; | 850 | __be32 *p; |
| 816 | 851 | ||
| 817 | RESERVE_SPACE(40); | 852 | RESERVE_SPACE(52); |
| 818 | WRITE32(OP_LOCKT); | 853 | WRITE32(OP_LOCKT); |
| 819 | WRITE32(nfs4_lock_type(args->fl, 0)); | 854 | WRITE32(nfs4_lock_type(args->fl, 0)); |
| 820 | WRITE64(args->fl->fl_start); | 855 | WRITE64(args->fl->fl_start); |
| 821 | WRITE64(nfs4_lock_length(args->fl)); | 856 | WRITE64(nfs4_lock_length(args->fl)); |
| 822 | WRITE64(args->lock_owner.clientid); | 857 | WRITE64(args->lock_owner.clientid); |
| 823 | WRITE32(4); | 858 | WRITE32(16); |
| 824 | WRITE32(args->lock_owner.id); | 859 | WRITEMEM("lock id:", 8); |
| 860 | WRITE64(args->lock_owner.id); | ||
| 825 | 861 | ||
| 826 | return 0; | 862 | return 0; |
| 827 | } | 863 | } |
| @@ -886,10 +922,11 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena | |||
| 886 | WRITE32(OP_OPEN); | 922 | WRITE32(OP_OPEN); |
| 887 | WRITE32(arg->seqid->sequence->counter); | 923 | WRITE32(arg->seqid->sequence->counter); |
| 888 | encode_share_access(xdr, arg->open_flags); | 924 | encode_share_access(xdr, arg->open_flags); |
| 889 | RESERVE_SPACE(16); | 925 | RESERVE_SPACE(28); |
| 890 | WRITE64(arg->clientid); | 926 | WRITE64(arg->clientid); |
| 891 | WRITE32(4); | 927 | WRITE32(16); |
| 892 | WRITE32(arg->id); | 928 | WRITEMEM("open id:", 8); |
| 929 | WRITE64(arg->id); | ||
| 893 | } | 930 | } |
| 894 | 931 | ||
| 895 | static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) | 932 | static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) |
| @@ -1071,7 +1108,7 @@ static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args) | |||
| 1071 | 1108 | ||
| 1072 | static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req) | 1109 | static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req) |
| 1073 | { | 1110 | { |
| 1074 | struct rpc_auth *auth = req->rq_task->tk_auth; | 1111 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 1075 | uint32_t attrs[2] = { | 1112 | uint32_t attrs[2] = { |
| 1076 | FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID, | 1113 | FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID, |
| 1077 | FATTR4_WORD1_MOUNTED_ON_FILEID, | 1114 | FATTR4_WORD1_MOUNTED_ON_FILEID, |
| @@ -1117,7 +1154,7 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg | |||
| 1117 | 1154 | ||
| 1118 | static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req) | 1155 | static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req) |
| 1119 | { | 1156 | { |
| 1120 | struct rpc_auth *auth = req->rq_task->tk_auth; | 1157 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 1121 | unsigned int replen; | 1158 | unsigned int replen; |
| 1122 | __be32 *p; | 1159 | __be32 *p; |
| 1123 | 1160 | ||
| @@ -1735,7 +1772,7 @@ out: | |||
| 1735 | */ | 1772 | */ |
| 1736 | static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) | 1773 | static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) |
| 1737 | { | 1774 | { |
| 1738 | struct rpc_auth *auth = req->rq_task->tk_auth; | 1775 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 1739 | struct xdr_stream xdr; | 1776 | struct xdr_stream xdr; |
| 1740 | struct compound_hdr hdr = { | 1777 | struct compound_hdr hdr = { |
| 1741 | .nops = 2, | 1778 | .nops = 2, |
| @@ -1795,7 +1832,7 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p, | |||
| 1795 | struct nfs_getaclargs *args) | 1832 | struct nfs_getaclargs *args) |
| 1796 | { | 1833 | { |
| 1797 | struct xdr_stream xdr; | 1834 | struct xdr_stream xdr; |
| 1798 | struct rpc_auth *auth = req->rq_task->tk_auth; | 1835 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 1799 | struct compound_hdr hdr = { | 1836 | struct compound_hdr hdr = { |
| 1800 | .nops = 2, | 1837 | .nops = 2, |
| 1801 | }; | 1838 | }; |
| @@ -2030,7 +2067,7 @@ static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs | |||
| 2030 | struct compound_hdr hdr = { | 2067 | struct compound_hdr hdr = { |
| 2031 | .nops = 3, | 2068 | .nops = 3, |
| 2032 | }; | 2069 | }; |
| 2033 | struct rpc_auth *auth = req->rq_task->tk_auth; | 2070 | struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; |
| 2034 | int replen; | 2071 | int replen; |
| 2035 | int status; | 2072 | int status; |
| 2036 | 2073 | ||
| @@ -3269,7 +3306,7 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) | |||
| 3269 | static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) | 3306 | static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) |
| 3270 | { | 3307 | { |
| 3271 | __be32 *p; | 3308 | __be32 *p; |
| 3272 | uint32_t bmlen; | 3309 | uint32_t savewords, bmlen, i; |
| 3273 | int status; | 3310 | int status; |
| 3274 | 3311 | ||
| 3275 | status = decode_op_hdr(xdr, OP_OPEN); | 3312 | status = decode_op_hdr(xdr, OP_OPEN); |
| @@ -3287,7 +3324,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) | |||
| 3287 | goto xdr_error; | 3324 | goto xdr_error; |
| 3288 | 3325 | ||
| 3289 | READ_BUF(bmlen << 2); | 3326 | READ_BUF(bmlen << 2); |
| 3290 | p += bmlen; | 3327 | savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE); |
| 3328 | for (i = 0; i < savewords; ++i) | ||
| 3329 | READ32(res->attrset[i]); | ||
| 3330 | for (; i < NFS4_BITMAP_SIZE; i++) | ||
| 3331 | res->attrset[i] = 0; | ||
| 3332 | |||
| 3291 | return decode_delegation(xdr, res); | 3333 | return decode_delegation(xdr, res); |
| 3292 | xdr_error: | 3334 | xdr_error: |
| 3293 | dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen); | 3335 | dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen); |
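decode_open() used to skip the returned attribute bitmap entirely; it now copies up to NFS4_BITMAP_SIZE words into res->attrset and zeroes the remainder, so callers can see which attributes the server actually set in the reply. The clamp-and-clear pattern in isolation (the size constant and names here are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_BITMAP_SIZE 2              /* stand-in for NFS4_BITMAP_SIZE */

static void demo_save_bitmap(u32 *dst, const __be32 *src, u32 bmlen)
{
        u32 savewords = min_t(u32, bmlen, DEMO_BITMAP_SIZE);
        u32 i;

        for (i = 0; i < savewords; i++)
                dst[i] = be32_to_cpu(src[i]);   /* keep the words that fit */
        for (; i < DEMO_BITMAP_SIZE; i++)
                dst[i] = 0;                     /* never leave stale words behind */
}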
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 49d1008ce1d7..3490322d1145 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c | |||
| @@ -428,7 +428,7 @@ static int __init root_nfs_getport(int program, int version, int proto) | |||
| 428 | printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n", | 428 | printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n", |
| 429 | program, version, NIPQUAD(servaddr)); | 429 | program, version, NIPQUAD(servaddr)); |
| 430 | set_sockaddr(&sin, servaddr, 0); | 430 | set_sockaddr(&sin, servaddr, 0); |
| 431 | return rpcb_getport_external(&sin, program, version, proto); | 431 | return rpcb_getport_sync(&sin, program, version, proto); |
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | 434 | ||
| @@ -496,7 +496,8 @@ static int __init root_nfs_get_handle(void) | |||
| 496 | NFS_MNT3_VERSION : NFS_MNT_VERSION; | 496 | NFS_MNT3_VERSION : NFS_MNT_VERSION; |
| 497 | 497 | ||
| 498 | set_sockaddr(&sin, servaddr, htons(mount_port)); | 498 | set_sockaddr(&sin, servaddr, htons(mount_port)); |
| 499 | status = nfsroot_mount(&sin, nfs_path, &fh, version, protocol); | 499 | status = nfs_mount((struct sockaddr *) &sin, sizeof(sin), NULL, |
| 500 | nfs_path, version, protocol, &fh); | ||
| 500 | if (status < 0) | 501 | if (status < 0) |
| 501 | printk(KERN_ERR "Root-NFS: Server returned error %d " | 502 | printk(KERN_ERR "Root-NFS: Server returned error %d " |
| 502 | "while mounting %s\n", status, nfs_path); | 503 | "while mounting %s\n", status, nfs_path); |
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index c5bb51a29e80..f56dae5216f4 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
| @@ -85,9 +85,8 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode, | |||
| 85 | req->wb_offset = offset; | 85 | req->wb_offset = offset; |
| 86 | req->wb_pgbase = offset; | 86 | req->wb_pgbase = offset; |
| 87 | req->wb_bytes = count; | 87 | req->wb_bytes = count; |
| 88 | atomic_set(&req->wb_count, 1); | ||
| 89 | req->wb_context = get_nfs_open_context(ctx); | 88 | req->wb_context = get_nfs_open_context(ctx); |
| 90 | 89 | kref_init(&req->wb_kref); | |
| 91 | return req; | 90 | return req; |
| 92 | } | 91 | } |
| 93 | 92 | ||
| @@ -109,30 +108,31 @@ void nfs_unlock_request(struct nfs_page *req) | |||
| 109 | } | 108 | } |
| 110 | 109 | ||
| 111 | /** | 110 | /** |
| 112 | * nfs_set_page_writeback_locked - Lock a request for writeback | 111 | * nfs_set_page_tag_locked - Tag a request as locked |
| 113 | * @req: | 112 | * @req: |
| 114 | */ | 113 | */ |
| 115 | int nfs_set_page_writeback_locked(struct nfs_page *req) | 114 | static int nfs_set_page_tag_locked(struct nfs_page *req) |
| 116 | { | 115 | { |
| 117 | struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); | 116 | struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode); |
| 118 | 117 | ||
| 119 | if (!nfs_lock_request(req)) | 118 | if (!nfs_lock_request(req)) |
| 120 | return 0; | 119 | return 0; |
| 121 | radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); | 120 | radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); |
| 122 | return 1; | 121 | return 1; |
| 123 | } | 122 | } |
| 124 | 123 | ||
| 125 | /** | 124 | /** |
| 126 | * nfs_clear_page_writeback - Unlock request and wake up sleepers | 125 | * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers |
| 127 | */ | 126 | */ |
| 128 | void nfs_clear_page_writeback(struct nfs_page *req) | 127 | void nfs_clear_page_tag_locked(struct nfs_page *req) |
| 129 | { | 128 | { |
| 130 | struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); | 129 | struct inode *inode = req->wb_context->path.dentry->d_inode; |
| 130 | struct nfs_inode *nfsi = NFS_I(inode); | ||
| 131 | 131 | ||
| 132 | if (req->wb_page != NULL) { | 132 | if (req->wb_page != NULL) { |
| 133 | spin_lock(&nfsi->req_lock); | 133 | spin_lock(&inode->i_lock); |
| 134 | radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); | 134 | radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); |
| 135 | spin_unlock(&nfsi->req_lock); | 135 | spin_unlock(&inode->i_lock); |
| 136 | } | 136 | } |
| 137 | nfs_unlock_request(req); | 137 | nfs_unlock_request(req); |
| 138 | } | 138 | } |
| @@ -160,11 +160,9 @@ void nfs_clear_request(struct nfs_page *req) | |||
| 160 | * | 160 | * |
| 161 | * Note: Should never be called with the spinlock held! | 161 | * Note: Should never be called with the spinlock held! |
| 162 | */ | 162 | */ |
| 163 | void | 163 | static void nfs_free_request(struct kref *kref) |
| 164 | nfs_release_request(struct nfs_page *req) | ||
| 165 | { | 164 | { |
| 166 | if (!atomic_dec_and_test(&req->wb_count)) | 165 | struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); |
| 167 | return; | ||
| 168 | 166 | ||
| 169 | /* Release struct file or cached credential */ | 167 | /* Release struct file or cached credential */ |
| 170 | nfs_clear_request(req); | 168 | nfs_clear_request(req); |
| @@ -172,6 +170,11 @@ nfs_release_request(struct nfs_page *req) | |||
| 172 | nfs_page_free(req); | 170 | nfs_page_free(req); |
| 173 | } | 171 | } |
| 174 | 172 | ||
| 173 | void nfs_release_request(struct nfs_page *req) | ||
| 174 | { | ||
| 175 | kref_put(&req->wb_kref, nfs_free_request); | ||
| 176 | } | ||
| 177 | |||
| 175 | static int nfs_wait_bit_interruptible(void *word) | 178 | static int nfs_wait_bit_interruptible(void *word) |
| 176 | { | 179 | { |
| 177 | int ret = 0; | 180 | int ret = 0; |
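struct nfs_page reference counting is converted from a hand-rolled atomic_t to the generic kref API: kref_init() in the constructor and kref_put() with a release callback that recovers the object through container_of(). The bare pattern, with illustrative names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_req {
        struct kref ref;
        /* ... request payload ... */
};

static struct demo_req *demo_req_alloc(void)
{
        struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (req)
                kref_init(&req->ref);   /* reference count starts at 1 */
        return req;
}

static void demo_req_free(struct kref *kref)
{
        struct demo_req *req = container_of(kref, struct demo_req, ref);

        kfree(req);                     /* runs once the last reference is dropped */
}

static void demo_req_put(struct demo_req *req)
{
        kref_put(&req->ref, demo_req_free);
}

kref_put() only calls the release function when the count reaches zero, which is the check the removed atomic_dec_and_test() used to perform by hand.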
| @@ -193,7 +196,7 @@ static int nfs_wait_bit_interruptible(void *word) | |||
| 193 | int | 196 | int |
| 194 | nfs_wait_on_request(struct nfs_page *req) | 197 | nfs_wait_on_request(struct nfs_page *req) |
| 195 | { | 198 | { |
| 196 | struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode); | 199 | struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode); |
| 197 | sigset_t oldmask; | 200 | sigset_t oldmask; |
| 198 | int ret = 0; | 201 | int ret = 0; |
| 199 | 202 | ||
| @@ -379,20 +382,20 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) | |||
| 379 | /** | 382 | /** |
| 380 | * nfs_scan_list - Scan a list for matching requests | 383 | * nfs_scan_list - Scan a list for matching requests |
| 381 | * @nfsi: NFS inode | 384 | * @nfsi: NFS inode |
| 382 | * @head: One of the NFS inode request lists | ||
| 383 | * @dst: Destination list | 385 | * @dst: Destination list |
| 384 | * @idx_start: lower bound of page->index to scan | 386 | * @idx_start: lower bound of page->index to scan |
| 385 | * @npages: idx_start + npages sets the upper bound to scan. | 387 | * @npages: idx_start + npages sets the upper bound to scan. |
| 388 | * @tag: tag to scan for | ||
| 386 | * | 389 | * |
| 387 | * Moves elements from one of the inode request lists. | 390 | * Moves elements from one of the inode request lists. |
| 388 | * If the number of requests is set to 0, the entire address_space | 391 | * If the number of requests is set to 0, the entire address_space |
| 389 | * starting at index idx_start, is scanned. | 392 | * starting at index idx_start, is scanned. |
| 390 | * The requests are *not* checked to ensure that they form a contiguous set. | 393 | * The requests are *not* checked to ensure that they form a contiguous set. |
| 391 | * You must be holding the inode's req_lock when calling this function | 394 | * You must be holding the inode's i_lock when calling this function |
| 392 | */ | 395 | */ |
| 393 | int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, | 396 | int nfs_scan_list(struct nfs_inode *nfsi, |
| 394 | struct list_head *dst, pgoff_t idx_start, | 397 | struct list_head *dst, pgoff_t idx_start, |
| 395 | unsigned int npages) | 398 | unsigned int npages, int tag) |
| 396 | { | 399 | { |
| 397 | struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; | 400 | struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; |
| 398 | struct nfs_page *req; | 401 | struct nfs_page *req; |
| @@ -407,9 +410,9 @@ int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, | |||
| 407 | idx_end = idx_start + npages - 1; | 410 | idx_end = idx_start + npages - 1; |
| 408 | 411 | ||
| 409 | for (;;) { | 412 | for (;;) { |
| 410 | found = radix_tree_gang_lookup(&nfsi->nfs_page_tree, | 413 | found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, |
| 411 | (void **)&pgvec[0], idx_start, | 414 | (void **)&pgvec[0], idx_start, |
| 412 | NFS_SCAN_MAXENTRIES); | 415 | NFS_SCAN_MAXENTRIES, tag); |
| 413 | if (found <= 0) | 416 | if (found <= 0) |
| 414 | break; | 417 | break; |
| 415 | for (i = 0; i < found; i++) { | 418 | for (i = 0; i < found; i++) { |
| @@ -417,15 +420,18 @@ int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, | |||
| 417 | if (req->wb_index > idx_end) | 420 | if (req->wb_index > idx_end) |
| 418 | goto out; | 421 | goto out; |
| 419 | idx_start = req->wb_index + 1; | 422 | idx_start = req->wb_index + 1; |
| 420 | if (req->wb_list_head != head) | 423 | if (nfs_set_page_tag_locked(req)) { |
| 421 | continue; | ||
| 422 | if (nfs_set_page_writeback_locked(req)) { | ||
| 423 | nfs_list_remove_request(req); | 424 | nfs_list_remove_request(req); |
| 425 | radix_tree_tag_clear(&nfsi->nfs_page_tree, | ||
| 426 | req->wb_index, tag); | ||
| 424 | nfs_list_add_request(req, dst); | 427 | nfs_list_add_request(req, dst); |
| 425 | res++; | 428 | res++; |
| 429 | if (res == INT_MAX) | ||
| 430 | goto out; | ||
| 426 | } | 431 | } |
| 427 | } | 432 | } |
| 428 | 433 | /* for latency reduction */ | |
| 434 | cond_resched_lock(&nfsi->vfs_inode.i_lock); | ||
| 429 | } | 435 | } |
| 430 | out: | 436 | out: |
| 431 | return res; | 437 | return res; |
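nfs_scan_list() now selects requests by radix-tree tag rather than by comparing list heads: it batches lookups with radix_tree_gang_lookup_tag() and clears the tag as each request is claimed. A rough sketch of that loop shape; the tag value, batch size and item layout are placeholders, and the caller is assumed to hold the lock protecting the tree (i_lock in the patch):

#include <linux/radix-tree.h>

#define DEMO_BATCH      16
#define DEMO_TAG        0

struct demo_item {
        unsigned long index;            /* key the item was inserted under */
};

static unsigned int demo_scan_tagged(struct radix_tree_root *root,
                                     unsigned long start)
{
        struct demo_item *batch[DEMO_BATCH];
        unsigned int found, i, total = 0;

        for (;;) {
                found = radix_tree_gang_lookup_tag(root, (void **)batch, start,
                                                   DEMO_BATCH, DEMO_TAG);
                if (found == 0)
                        break;
                for (i = 0; i < found; i++) {
                        start = batch[i]->index + 1;    /* resume past this entry */
                        radix_tree_tag_clear(root, batch[i]->index, DEMO_TAG);
                        total++;
                }
        }
        return total;
}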
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 7bd7cb95c034..6ae2e58ed05a 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
| @@ -145,8 +145,8 @@ static void nfs_readpage_release(struct nfs_page *req) | |||
| 145 | unlock_page(req->wb_page); | 145 | unlock_page(req->wb_page); |
| 146 | 146 | ||
| 147 | dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", | 147 | dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", |
| 148 | req->wb_context->dentry->d_inode->i_sb->s_id, | 148 | req->wb_context->path.dentry->d_inode->i_sb->s_id, |
| 149 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 149 | (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), |
| 150 | req->wb_bytes, | 150 | req->wb_bytes, |
| 151 | (long long)req_offset(req)); | 151 | (long long)req_offset(req)); |
| 152 | nfs_clear_request(req); | 152 | nfs_clear_request(req); |
| @@ -164,7 +164,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data, | |||
| 164 | int flags; | 164 | int flags; |
| 165 | 165 | ||
| 166 | data->req = req; | 166 | data->req = req; |
| 167 | data->inode = inode = req->wb_context->dentry->d_inode; | 167 | data->inode = inode = req->wb_context->path.dentry->d_inode; |
| 168 | data->cred = req->wb_context->cred; | 168 | data->cred = req->wb_context->cred; |
| 169 | 169 | ||
| 170 | data->args.fh = NFS_FH(inode); | 170 | data->args.fh = NFS_FH(inode); |
| @@ -483,17 +483,19 @@ int nfs_readpage(struct file *file, struct page *page) | |||
| 483 | */ | 483 | */ |
| 484 | error = nfs_wb_page(inode, page); | 484 | error = nfs_wb_page(inode, page); |
| 485 | if (error) | 485 | if (error) |
| 486 | goto out_error; | 486 | goto out_unlock; |
| 487 | if (PageUptodate(page)) | ||
| 488 | goto out_unlock; | ||
| 487 | 489 | ||
| 488 | error = -ESTALE; | 490 | error = -ESTALE; |
| 489 | if (NFS_STALE(inode)) | 491 | if (NFS_STALE(inode)) |
| 490 | goto out_error; | 492 | goto out_unlock; |
| 491 | 493 | ||
| 492 | if (file == NULL) { | 494 | if (file == NULL) { |
| 493 | error = -EBADF; | 495 | error = -EBADF; |
| 494 | ctx = nfs_find_open_context(inode, NULL, FMODE_READ); | 496 | ctx = nfs_find_open_context(inode, NULL, FMODE_READ); |
| 495 | if (ctx == NULL) | 497 | if (ctx == NULL) |
| 496 | goto out_error; | 498 | goto out_unlock; |
| 497 | } else | 499 | } else |
| 498 | ctx = get_nfs_open_context((struct nfs_open_context *) | 500 | ctx = get_nfs_open_context((struct nfs_open_context *) |
| 499 | file->private_data); | 501 | file->private_data); |
| @@ -502,8 +504,7 @@ int nfs_readpage(struct file *file, struct page *page) | |||
| 502 | 504 | ||
| 503 | put_nfs_open_context(ctx); | 505 | put_nfs_open_context(ctx); |
| 504 | return error; | 506 | return error; |
| 505 | 507 | out_unlock: | |
| 506 | out_error: | ||
| 507 | unlock_page(page); | 508 | unlock_page(page); |
| 508 | return error; | 509 | return error; |
| 509 | } | 510 | } |
| @@ -520,21 +521,32 @@ readpage_async_filler(void *data, struct page *page) | |||
| 520 | struct inode *inode = page->mapping->host; | 521 | struct inode *inode = page->mapping->host; |
| 521 | struct nfs_page *new; | 522 | struct nfs_page *new; |
| 522 | unsigned int len; | 523 | unsigned int len; |
| 524 | int error; | ||
| 525 | |||
| 526 | error = nfs_wb_page(inode, page); | ||
| 527 | if (error) | ||
| 528 | goto out_unlock; | ||
| 529 | if (PageUptodate(page)) | ||
| 530 | goto out_unlock; | ||
| 523 | 531 | ||
| 524 | nfs_wb_page(inode, page); | ||
| 525 | len = nfs_page_length(page); | 532 | len = nfs_page_length(page); |
| 526 | if (len == 0) | 533 | if (len == 0) |
| 527 | return nfs_return_empty_page(page); | 534 | return nfs_return_empty_page(page); |
| 535 | |||
| 528 | new = nfs_create_request(desc->ctx, inode, page, 0, len); | 536 | new = nfs_create_request(desc->ctx, inode, page, 0, len); |
| 529 | if (IS_ERR(new)) { | 537 | if (IS_ERR(new)) |
| 530 | SetPageError(page); | 538 | goto out_error; |
| 531 | unlock_page(page); | 539 | |
| 532 | return PTR_ERR(new); | ||
| 533 | } | ||
| 534 | if (len < PAGE_CACHE_SIZE) | 540 | if (len < PAGE_CACHE_SIZE) |
| 535 | zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); | 541 | zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); |
| 536 | nfs_pageio_add_request(desc->pgio, new); | 542 | nfs_pageio_add_request(desc->pgio, new); |
| 537 | return 0; | 543 | return 0; |
| 544 | out_error: | ||
| 545 | error = PTR_ERR(new); | ||
| 546 | SetPageError(page); | ||
| 547 | out_unlock: | ||
| 548 | unlock_page(page); | ||
| 549 | return error; | ||
| 538 | } | 550 | } |
| 539 | 551 | ||
| 540 | int nfs_readpages(struct file *filp, struct address_space *mapping, | 552 | int nfs_readpages(struct file *filp, struct address_space *mapping, |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ca20d3cc2609..a2b1af89ca1a 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #include <linux/inet.h> | 45 | #include <linux/inet.h> |
| 46 | #include <linux/nfs_xdr.h> | 46 | #include <linux/nfs_xdr.h> |
| 47 | #include <linux/magic.h> | 47 | #include <linux/magic.h> |
| 48 | #include <linux/parser.h> | ||
| 48 | 49 | ||
| 49 | #include <asm/system.h> | 50 | #include <asm/system.h> |
| 50 | #include <asm/uaccess.h> | 51 | #include <asm/uaccess.h> |
| @@ -57,6 +58,167 @@ | |||
| 57 | 58 | ||
| 58 | #define NFSDBG_FACILITY NFSDBG_VFS | 59 | #define NFSDBG_FACILITY NFSDBG_VFS |
| 59 | 60 | ||
| 61 | |||
| 62 | struct nfs_parsed_mount_data { | ||
| 63 | int flags; | ||
| 64 | int rsize, wsize; | ||
| 65 | int timeo, retrans; | ||
| 66 | int acregmin, acregmax, | ||
| 67 | acdirmin, acdirmax; | ||
| 68 | int namlen; | ||
| 69 | unsigned int bsize; | ||
| 70 | unsigned int auth_flavor_len; | ||
| 71 | rpc_authflavor_t auth_flavors[1]; | ||
| 72 | char *client_address; | ||
| 73 | |||
| 74 | struct { | ||
| 75 | struct sockaddr_in address; | ||
| 76 | unsigned int program; | ||
| 77 | unsigned int version; | ||
| 78 | unsigned short port; | ||
| 79 | int protocol; | ||
| 80 | } mount_server; | ||
| 81 | |||
| 82 | struct { | ||
| 83 | struct sockaddr_in address; | ||
| 84 | char *hostname; | ||
| 85 | char *export_path; | ||
| 86 | unsigned int program; | ||
| 87 | int protocol; | ||
| 88 | } nfs_server; | ||
| 89 | }; | ||
| 90 | |||
| 91 | enum { | ||
| 92 | /* Mount options that take no arguments */ | ||
| 93 | Opt_soft, Opt_hard, | ||
| 94 | Opt_intr, Opt_nointr, | ||
| 95 | Opt_posix, Opt_noposix, | ||
| 96 | Opt_cto, Opt_nocto, | ||
| 97 | Opt_ac, Opt_noac, | ||
| 98 | Opt_lock, Opt_nolock, | ||
| 99 | Opt_v2, Opt_v3, | ||
| 100 | Opt_udp, Opt_tcp, | ||
| 101 | Opt_acl, Opt_noacl, | ||
| 102 | Opt_rdirplus, Opt_nordirplus, | ||
| 103 | Opt_sharecache, Opt_nosharecache, | ||
| 104 | |||
| 105 | /* Mount options that take integer arguments */ | ||
| 106 | Opt_port, | ||
| 107 | Opt_rsize, Opt_wsize, Opt_bsize, | ||
| 108 | Opt_timeo, Opt_retrans, | ||
| 109 | Opt_acregmin, Opt_acregmax, | ||
| 110 | Opt_acdirmin, Opt_acdirmax, | ||
| 111 | Opt_actimeo, | ||
| 112 | Opt_namelen, | ||
| 113 | Opt_mountport, | ||
| 114 | Opt_mountprog, Opt_mountvers, | ||
| 115 | Opt_nfsprog, Opt_nfsvers, | ||
| 116 | |||
| 117 | /* Mount options that take string arguments */ | ||
| 118 | Opt_sec, Opt_proto, Opt_mountproto, | ||
| 119 | Opt_addr, Opt_mounthost, Opt_clientaddr, | ||
| 120 | |||
| 121 | /* Mount options that are ignored */ | ||
| 122 | Opt_userspace, Opt_deprecated, | ||
| 123 | |||
| 124 | Opt_err | ||
| 125 | }; | ||
| 126 | |||
| 127 | static match_table_t nfs_mount_option_tokens = { | ||
| 128 | { Opt_userspace, "bg" }, | ||
| 129 | { Opt_userspace, "fg" }, | ||
| 130 | { Opt_soft, "soft" }, | ||
| 131 | { Opt_hard, "hard" }, | ||
| 132 | { Opt_intr, "intr" }, | ||
| 133 | { Opt_nointr, "nointr" }, | ||
| 134 | { Opt_posix, "posix" }, | ||
| 135 | { Opt_noposix, "noposix" }, | ||
| 136 | { Opt_cto, "cto" }, | ||
| 137 | { Opt_nocto, "nocto" }, | ||
| 138 | { Opt_ac, "ac" }, | ||
| 139 | { Opt_noac, "noac" }, | ||
| 140 | { Opt_lock, "lock" }, | ||
| 141 | { Opt_nolock, "nolock" }, | ||
| 142 | { Opt_v2, "v2" }, | ||
| 143 | { Opt_v3, "v3" }, | ||
| 144 | { Opt_udp, "udp" }, | ||
| 145 | { Opt_tcp, "tcp" }, | ||
| 146 | { Opt_acl, "acl" }, | ||
| 147 | { Opt_noacl, "noacl" }, | ||
| 148 | { Opt_rdirplus, "rdirplus" }, | ||
| 149 | { Opt_nordirplus, "nordirplus" }, | ||
| 150 | { Opt_sharecache, "sharecache" }, | ||
| 151 | { Opt_nosharecache, "nosharecache" }, | ||
| 152 | |||
| 153 | { Opt_port, "port=%u" }, | ||
| 154 | { Opt_rsize, "rsize=%u" }, | ||
| 155 | { Opt_wsize, "wsize=%u" }, | ||
| 156 | { Opt_bsize, "bsize=%u" }, | ||
| 157 | { Opt_timeo, "timeo=%u" }, | ||
| 158 | { Opt_retrans, "retrans=%u" }, | ||
| 159 | { Opt_acregmin, "acregmin=%u" }, | ||
| 160 | { Opt_acregmax, "acregmax=%u" }, | ||
| 161 | { Opt_acdirmin, "acdirmin=%u" }, | ||
| 162 | { Opt_acdirmax, "acdirmax=%u" }, | ||
| 163 | { Opt_actimeo, "actimeo=%u" }, | ||
| 164 | { Opt_userspace, "retry=%u" }, | ||
| 165 | { Opt_namelen, "namlen=%u" }, | ||
| 166 | { Opt_mountport, "mountport=%u" }, | ||
| 167 | { Opt_mountprog, "mountprog=%u" }, | ||
| 168 | { Opt_mountvers, "mountvers=%u" }, | ||
| 169 | { Opt_nfsprog, "nfsprog=%u" }, | ||
| 170 | { Opt_nfsvers, "nfsvers=%u" }, | ||
| 171 | { Opt_nfsvers, "vers=%u" }, | ||
| 172 | |||
| 173 | { Opt_sec, "sec=%s" }, | ||
| 174 | { Opt_proto, "proto=%s" }, | ||
| 175 | { Opt_mountproto, "mountproto=%s" }, | ||
| 176 | { Opt_addr, "addr=%s" }, | ||
| 177 | { Opt_clientaddr, "clientaddr=%s" }, | ||
| 178 | { Opt_mounthost, "mounthost=%s" }, | ||
| 179 | |||
| 180 | { Opt_err, NULL } | ||
| 181 | }; | ||
| 182 | |||
| 183 | enum { | ||
| 184 | Opt_xprt_udp, Opt_xprt_tcp, | ||
| 185 | |||
| 186 | Opt_xprt_err | ||
| 187 | }; | ||
| 188 | |||
| 189 | static match_table_t nfs_xprt_protocol_tokens = { | ||
| 190 | { Opt_xprt_udp, "udp" }, | ||
| 191 | { Opt_xprt_tcp, "tcp" }, | ||
| 192 | |||
| 193 | { Opt_xprt_err, NULL } | ||
| 194 | }; | ||
| 195 | |||
| 196 | enum { | ||
| 197 | Opt_sec_none, Opt_sec_sys, | ||
| 198 | Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p, | ||
| 199 | Opt_sec_lkey, Opt_sec_lkeyi, Opt_sec_lkeyp, | ||
| 200 | Opt_sec_spkm, Opt_sec_spkmi, Opt_sec_spkmp, | ||
| 201 | |||
| 202 | Opt_sec_err | ||
| 203 | }; | ||
| 204 | |||
| 205 | static match_table_t nfs_secflavor_tokens = { | ||
| 206 | { Opt_sec_none, "none" }, | ||
| 207 | { Opt_sec_none, "null" }, | ||
| 208 | { Opt_sec_sys, "sys" }, | ||
| 209 | |||
| 210 | { Opt_sec_krb5, "krb5" }, | ||
| 211 | { Opt_sec_krb5i, "krb5i" }, | ||
| 212 | { Opt_sec_krb5p, "krb5p" }, | ||
| 213 | |||
| 214 | { Opt_sec_lkey, "lkey" }, | ||
| 215 | { Opt_sec_lkeyi, "lkeyi" }, | ||
| 216 | { Opt_sec_lkeyp, "lkeyp" }, | ||
| 217 | |||
| 218 | { Opt_sec_err, NULL } | ||
| 219 | }; | ||
| 220 | |||
| 221 | |||
| 60 | static void nfs_umount_begin(struct vfsmount *, int); | 222 | static void nfs_umount_begin(struct vfsmount *, int); |
| 61 | static int nfs_statfs(struct dentry *, struct kstatfs *); | 223 | static int nfs_statfs(struct dentry *, struct kstatfs *); |
| 62 | static int nfs_show_options(struct seq_file *, struct vfsmount *); | 224 | static int nfs_show_options(struct seq_file *, struct vfsmount *); |
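The new mount option parser is built on linux/parser.h: the option string is split on commas with strsep(), each piece is classified with match_token() against a match_table_t, and arguments are extracted with match_int() or match_strdup(). A minimal, self-contained sketch of that pattern with a toy token set (not the real NFS table):

#include <linux/parser.h>
#include <linux/string.h>

enum { Demo_opt_ro, Demo_opt_size, Demo_opt_err };

static match_table_t demo_tokens = {
        { Demo_opt_ro,   "ro" },
        { Demo_opt_size, "size=%u" },
        { Demo_opt_err,  NULL }
};

static int demo_parse(char *raw, int *ro, int *size)
{
        char *p;

        while ((p = strsep(&raw, ",")) != NULL) {
                substring_t args[MAX_OPT_ARGS];
                int option;

                if (!*p)
                        continue;       /* skip empty fragments, as the patch does */

                switch (match_token(p, demo_tokens, args)) {
                case Demo_opt_ro:
                        *ro = 1;
                        break;
                case Demo_opt_size:
                        if (match_int(&args[0], &option))
                                return 0;       /* malformed integer argument */
                        *size = option;
                        break;
                default:
                        return 0;               /* unknown option string */
                }
        }
        return 1;       /* same success/failure convention as nfs_parse_mount_options() */
}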
| @@ -263,11 +425,11 @@ static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour) | |||
| 263 | { RPC_AUTH_GSS_SPKM, "spkm" }, | 425 | { RPC_AUTH_GSS_SPKM, "spkm" }, |
| 264 | { RPC_AUTH_GSS_SPKMI, "spkmi" }, | 426 | { RPC_AUTH_GSS_SPKMI, "spkmi" }, |
| 265 | { RPC_AUTH_GSS_SPKMP, "spkmp" }, | 427 | { RPC_AUTH_GSS_SPKMP, "spkmp" }, |
| 266 | { -1, "unknown" } | 428 | { UINT_MAX, "unknown" } |
| 267 | }; | 429 | }; |
| 268 | int i; | 430 | int i; |
| 269 | 431 | ||
| 270 | for (i=0; sec_flavours[i].flavour != -1; i++) { | 432 | for (i = 0; sec_flavours[i].flavour != UINT_MAX; i++) { |
| 271 | if (sec_flavours[i].flavour == flavour) | 433 | if (sec_flavours[i].flavour == flavour) |
| 272 | break; | 434 | break; |
| 273 | } | 435 | } |
| @@ -291,6 +453,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, | |||
| 291 | { NFS_MOUNT_NONLM, ",nolock", "" }, | 453 | { NFS_MOUNT_NONLM, ",nolock", "" }, |
| 292 | { NFS_MOUNT_NOACL, ",noacl", "" }, | 454 | { NFS_MOUNT_NOACL, ",noacl", "" }, |
| 293 | { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, | 455 | { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, |
| 456 | { NFS_MOUNT_UNSHARED, ",nosharecache", ""}, | ||
| 294 | { 0, NULL, NULL } | 457 | { 0, NULL, NULL } |
| 295 | }; | 458 | }; |
| 296 | const struct proc_nfs_info *nfs_infop; | 459 | const struct proc_nfs_info *nfs_infop; |
| @@ -430,87 +593,641 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt) | |||
| 430 | */ | 593 | */ |
| 431 | static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags) | 594 | static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags) |
| 432 | { | 595 | { |
| 596 | struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb); | ||
| 597 | struct rpc_clnt *rpc; | ||
| 598 | |||
| 433 | shrink_submounts(vfsmnt, &nfs_automount_list); | 599 | shrink_submounts(vfsmnt, &nfs_automount_list); |
| 600 | |||
| 601 | if (!(flags & MNT_FORCE)) | ||
| 602 | return; | ||
| 603 | /* -EIO all pending I/O */ | ||
| 604 | rpc = server->client_acl; | ||
| 605 | if (!IS_ERR(rpc)) | ||
| 606 | rpc_killall_tasks(rpc); | ||
| 607 | rpc = server->client; | ||
| 608 | if (!IS_ERR(rpc)) | ||
| 609 | rpc_killall_tasks(rpc); | ||
| 434 | } | 610 | } |
| 435 | 611 | ||
| 436 | /* | 612 | /* |
| 437 | * Validate the NFS2/NFS3 mount data | 613 | * Sanity-check a server address provided by the mount command |
| 438 | * - fills in the mount root filehandle | ||
| 439 | */ | 614 | */ |
| 440 | static int nfs_validate_mount_data(struct nfs_mount_data *data, | 615 | static int nfs_verify_server_address(struct sockaddr *addr) |
| 441 | struct nfs_fh *mntfh) | ||
| 442 | { | 616 | { |
| 443 | if (data == NULL) { | 617 | switch (addr->sa_family) { |
| 444 | dprintk("%s: missing data argument\n", __FUNCTION__); | 618 | case AF_INET: { |
| 445 | return -EINVAL; | 619 | struct sockaddr_in *sa = (struct sockaddr_in *) addr; |
| 620 | if (sa->sin_addr.s_addr != INADDR_ANY) | ||
| 621 | return 1; | ||
| 622 | break; | ||
| 623 | } | ||
| 446 | } | 624 | } |
| 447 | 625 | ||
| 448 | if (data->version <= 0 || data->version > NFS_MOUNT_VERSION) { | 626 | return 0; |
| 449 | dprintk("%s: bad mount version\n", __FUNCTION__); | 627 | } |
| 450 | return -EINVAL; | 628 | |
| 629 | /* | ||
| 630 | * Error-check and convert a string of mount options from user space into | ||
| 631 | * a data structure | ||
| 632 | */ | ||
| 633 | static int nfs_parse_mount_options(char *raw, | ||
| 634 | struct nfs_parsed_mount_data *mnt) | ||
| 635 | { | ||
| 636 | char *p, *string; | ||
| 637 | |||
| 638 | if (!raw) { | ||
| 639 | dfprintk(MOUNT, "NFS: mount options string was NULL.\n"); | ||
| 640 | return 1; | ||
| 451 | } | 641 | } |
| 642 | dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw); | ||
| 452 | 643 | ||
| 453 | switch (data->version) { | 644 | while ((p = strsep(&raw, ",")) != NULL) { |
| 454 | case 1: | 645 | substring_t args[MAX_OPT_ARGS]; |
| 455 | data->namlen = 0; | 646 | int option, token; |
| 456 | case 2: | 647 | |
| 457 | data->bsize = 0; | 648 | if (!*p) |
| 458 | case 3: | 649 | continue; |
| 459 | if (data->flags & NFS_MOUNT_VER3) { | 650 | |
| 460 | dprintk("%s: mount structure version %d does not support NFSv3\n", | 651 | dfprintk(MOUNT, "NFS: parsing nfs mount option '%s'\n", p); |
| 461 | __FUNCTION__, | 652 | |
| 462 | data->version); | 653 | token = match_token(p, nfs_mount_option_tokens, args); |
| 463 | return -EINVAL; | 654 | switch (token) { |
| 655 | case Opt_soft: | ||
| 656 | mnt->flags |= NFS_MOUNT_SOFT; | ||
| 657 | break; | ||
| 658 | case Opt_hard: | ||
| 659 | mnt->flags &= ~NFS_MOUNT_SOFT; | ||
| 660 | break; | ||
| 661 | case Opt_intr: | ||
| 662 | mnt->flags |= NFS_MOUNT_INTR; | ||
| 663 | break; | ||
| 664 | case Opt_nointr: | ||
| 665 | mnt->flags &= ~NFS_MOUNT_INTR; | ||
| 666 | break; | ||
| 667 | case Opt_posix: | ||
| 668 | mnt->flags |= NFS_MOUNT_POSIX; | ||
| 669 | break; | ||
| 670 | case Opt_noposix: | ||
| 671 | mnt->flags &= ~NFS_MOUNT_POSIX; | ||
| 672 | break; | ||
| 673 | case Opt_cto: | ||
| 674 | mnt->flags &= ~NFS_MOUNT_NOCTO; | ||
| 675 | break; | ||
| 676 | case Opt_nocto: | ||
| 677 | mnt->flags |= NFS_MOUNT_NOCTO; | ||
| 678 | break; | ||
| 679 | case Opt_ac: | ||
| 680 | mnt->flags &= ~NFS_MOUNT_NOAC; | ||
| 681 | break; | ||
| 682 | case Opt_noac: | ||
| 683 | mnt->flags |= NFS_MOUNT_NOAC; | ||
| 684 | break; | ||
| 685 | case Opt_lock: | ||
| 686 | mnt->flags &= ~NFS_MOUNT_NONLM; | ||
| 687 | break; | ||
| 688 | case Opt_nolock: | ||
| 689 | mnt->flags |= NFS_MOUNT_NONLM; | ||
| 690 | break; | ||
| 691 | case Opt_v2: | ||
| 692 | mnt->flags &= ~NFS_MOUNT_VER3; | ||
| 693 | break; | ||
| 694 | case Opt_v3: | ||
| 695 | mnt->flags |= NFS_MOUNT_VER3; | ||
| 696 | break; | ||
| 697 | case Opt_udp: | ||
| 698 | mnt->flags &= ~NFS_MOUNT_TCP; | ||
| 699 | mnt->nfs_server.protocol = IPPROTO_UDP; | ||
| 700 | mnt->timeo = 7; | ||
| 701 | mnt->retrans = 5; | ||
| 702 | break; | ||
| 703 | case Opt_tcp: | ||
| 704 | mnt->flags |= NFS_MOUNT_TCP; | ||
| 705 | mnt->nfs_server.protocol = IPPROTO_TCP; | ||
| 706 | mnt->timeo = 600; | ||
| 707 | mnt->retrans = 2; | ||
| 708 | break; | ||
| 709 | case Opt_acl: | ||
| 710 | mnt->flags &= ~NFS_MOUNT_NOACL; | ||
| 711 | break; | ||
| 712 | case Opt_noacl: | ||
| 713 | mnt->flags |= NFS_MOUNT_NOACL; | ||
| 714 | break; | ||
| 715 | case Opt_rdirplus: | ||
| 716 | mnt->flags &= ~NFS_MOUNT_NORDIRPLUS; | ||
| 717 | break; | ||
| 718 | case Opt_nordirplus: | ||
| 719 | mnt->flags |= NFS_MOUNT_NORDIRPLUS; | ||
| 720 | break; | ||
| 721 | case Opt_sharecache: | ||
| 722 | mnt->flags &= ~NFS_MOUNT_UNSHARED; | ||
| 723 | break; | ||
| 724 | case Opt_nosharecache: | ||
| 725 | mnt->flags |= NFS_MOUNT_UNSHARED; | ||
| 726 | break; | ||
| 727 | |||
| 728 | case Opt_port: | ||
| 729 | if (match_int(args, &option)) | ||
| 730 | return 0; | ||
| 731 | if (option < 0 || option > 65535) | ||
| 732 | return 0; | ||
| 733 | mnt->nfs_server.address.sin_port = htons(option); | ||
| 734 | break; | ||
| 735 | case Opt_rsize: | ||
| 736 | if (match_int(args, &mnt->rsize)) | ||
| 737 | return 0; | ||
| 738 | break; | ||
| 739 | case Opt_wsize: | ||
| 740 | if (match_int(args, &mnt->wsize)) | ||
| 741 | return 0; | ||
| 742 | break; | ||
| 743 | case Opt_bsize: | ||
| 744 | if (match_int(args, &option)) | ||
| 745 | return 0; | ||
| 746 | if (option < 0) | ||
| 747 | return 0; | ||
| 748 | mnt->bsize = option; | ||
| 749 | break; | ||
| 750 | case Opt_timeo: | ||
| 751 | if (match_int(args, &mnt->timeo)) | ||
| 752 | return 0; | ||
| 753 | break; | ||
| 754 | case Opt_retrans: | ||
| 755 | if (match_int(args, &mnt->retrans)) | ||
| 756 | return 0; | ||
| 757 | break; | ||
| 758 | case Opt_acregmin: | ||
| 759 | if (match_int(args, &mnt->acregmin)) | ||
| 760 | return 0; | ||
| 761 | break; | ||
| 762 | case Opt_acregmax: | ||
| 763 | if (match_int(args, &mnt->acregmax)) | ||
| 764 | return 0; | ||
| 765 | break; | ||
| 766 | case Opt_acdirmin: | ||
| 767 | if (match_int(args, &mnt->acdirmin)) | ||
| 768 | return 0; | ||
| 769 | break; | ||
| 770 | case Opt_acdirmax: | ||
| 771 | if (match_int(args, &mnt->acdirmax)) | ||
| 772 | return 0; | ||
| 773 | break; | ||
| 774 | case Opt_actimeo: | ||
| 775 | if (match_int(args, &option)) | ||
| 776 | return 0; | ||
| 777 | if (option < 0) | ||
| 778 | return 0; | ||
| 779 | mnt->acregmin = | ||
| 780 | mnt->acregmax = | ||
| 781 | mnt->acdirmin = | ||
| 782 | mnt->acdirmax = option; | ||
| 783 | break; | ||
| 784 | case Opt_namelen: | ||
| 785 | if (match_int(args, &mnt->namlen)) | ||
| 786 | return 0; | ||
| 787 | break; | ||
| 788 | case Opt_mountport: | ||
| 789 | if (match_int(args, &option)) | ||
| 790 | return 0; | ||
| 791 | if (option < 0 || option > 65535) | ||
| 792 | return 0; | ||
| 793 | mnt->mount_server.port = option; | ||
| 794 | break; | ||
| 795 | case Opt_mountprog: | ||
| 796 | if (match_int(args, &option)) | ||
| 797 | return 0; | ||
| 798 | if (option < 0) | ||
| 799 | return 0; | ||
| 800 | mnt->mount_server.program = option; | ||
| 801 | break; | ||
| 802 | case Opt_mountvers: | ||
| 803 | if (match_int(args, &option)) | ||
| 804 | return 0; | ||
| 805 | if (option < 0) | ||
| 806 | return 0; | ||
| 807 | mnt->mount_server.version = option; | ||
| 808 | break; | ||
| 809 | case Opt_nfsprog: | ||
| 810 | if (match_int(args, &option)) | ||
| 811 | return 0; | ||
| 812 | if (option < 0) | ||
| 813 | return 0; | ||
| 814 | mnt->nfs_server.program = option; | ||
| 815 | break; | ||
| 816 | case Opt_nfsvers: | ||
| 817 | if (match_int(args, &option)) | ||
| 818 | return 0; | ||
| 819 | switch (option) { | ||
| 820 | case 2: | ||
| 821 | mnt->flags &= ~NFS_MOUNT_VER3; | ||
| 822 | break; | ||
| 823 | case 3: | ||
| 824 | mnt->flags |= NFS_MOUNT_VER3; | ||
| 825 | break; | ||
| 826 | default: | ||
| 827 | goto out_unrec_vers; | ||
| 464 | } | 828 | } |
| 465 | data->root.size = NFS2_FHSIZE; | 829 | break; |
| 466 | memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); | 830 | |
| 467 | case 4: | 831 | case Opt_sec: |
| 468 | if (data->flags & NFS_MOUNT_SECFLAVOUR) { | 832 | string = match_strdup(args); |
| 469 | dprintk("%s: mount structure version %d does not support strong security\n", | 833 | if (string == NULL) |
| 470 | __FUNCTION__, | 834 | goto out_nomem; |
| 471 | data->version); | 835 | token = match_token(string, nfs_secflavor_tokens, args); |
| 472 | return -EINVAL; | 836 | kfree(string); |
| 837 | |||
| 838 | /* | ||
| 839 | * The flags setting is for v2/v3. The flavor_len | ||
| 840 | * setting is for v4. v2/v3 also need to know the | ||
| 841 | * difference between NULL and UNIX. | ||
| 842 | */ | ||
| 843 | switch (token) { | ||
| 844 | case Opt_sec_none: | ||
| 845 | mnt->flags &= ~NFS_MOUNT_SECFLAVOUR; | ||
| 846 | mnt->auth_flavor_len = 0; | ||
| 847 | mnt->auth_flavors[0] = RPC_AUTH_NULL; | ||
| 848 | break; | ||
| 849 | case Opt_sec_sys: | ||
| 850 | mnt->flags &= ~NFS_MOUNT_SECFLAVOUR; | ||
| 851 | mnt->auth_flavor_len = 0; | ||
| 852 | mnt->auth_flavors[0] = RPC_AUTH_UNIX; | ||
| 853 | break; | ||
| 854 | case Opt_sec_krb5: | ||
| 855 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 856 | mnt->auth_flavor_len = 1; | ||
| 857 | mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5; | ||
| 858 | break; | ||
| 859 | case Opt_sec_krb5i: | ||
| 860 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 861 | mnt->auth_flavor_len = 1; | ||
| 862 | mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5I; | ||
| 863 | break; | ||
| 864 | case Opt_sec_krb5p: | ||
| 865 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 866 | mnt->auth_flavor_len = 1; | ||
| 867 | mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5P; | ||
| 868 | break; | ||
| 869 | case Opt_sec_lkey: | ||
| 870 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 871 | mnt->auth_flavor_len = 1; | ||
| 872 | mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEY; | ||
| 873 | break; | ||
| 874 | case Opt_sec_lkeyi: | ||
| 875 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 876 | mnt->auth_flavor_len = 1; | ||
| 877 | mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYI; | ||
| 878 | break; | ||
| 879 | case Opt_sec_lkeyp: | ||
| 880 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 881 | mnt->auth_flavor_len = 1; | ||
| 882 | mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYP; | ||
| 883 | break; | ||
| 884 | case Opt_sec_spkm: | ||
| 885 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 886 | mnt->auth_flavor_len = 1; | ||
| 887 | mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKM; | ||
| 888 | break; | ||
| 889 | case Opt_sec_spkmi: | ||
| 890 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 891 | mnt->auth_flavor_len = 1; | ||
| 892 | mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMI; | ||
| 893 | break; | ||
| 894 | case Opt_sec_spkmp: | ||
| 895 | mnt->flags |= NFS_MOUNT_SECFLAVOUR; | ||
| 896 | mnt->auth_flavor_len = 1; | ||
| 897 | mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMP; | ||
| 898 | break; | ||
| 899 | default: | ||
| 900 | goto out_unrec_sec; | ||
| 473 | } | 901 | } |
| 474 | case 5: | 902 | break; |
| 475 | memset(data->context, 0, sizeof(data->context)); | 903 | case Opt_proto: |
| 476 | } | 904 | string = match_strdup(args); |
| 905 | if (string == NULL) | ||
| 906 | goto out_nomem; | ||
| 907 | token = match_token(string, | ||
| 908 | nfs_xprt_protocol_tokens, args); | ||
| 909 | kfree(string); | ||
| 910 | |||
| 911 | switch (token) { | ||
| 912 | case Opt_udp: | ||
| 913 | mnt->flags &= ~NFS_MOUNT_TCP; | ||
| 914 | mnt->nfs_server.protocol = IPPROTO_UDP; | ||
| 915 | mnt->timeo = 7; | ||
| 916 | mnt->retrans = 5; | ||
| 917 | break; | ||
| 918 | case Opt_tcp: | ||
| 919 | mnt->flags |= NFS_MOUNT_TCP; | ||
| 920 | mnt->nfs_server.protocol = IPPROTO_TCP; | ||
| 921 | mnt->timeo = 600; | ||
| 922 | mnt->retrans = 2; | ||
| 923 | break; | ||
| 924 | default: | ||
| 925 | goto out_unrec_xprt; | ||
| 926 | } | ||
| 927 | break; | ||
| 928 | case Opt_mountproto: | ||
| 929 | string = match_strdup(args); | ||
| 930 | if (string == NULL) | ||
| 931 | goto out_nomem; | ||
| 932 | token = match_token(string, | ||
| 933 | nfs_xprt_protocol_tokens, args); | ||
| 934 | kfree(string); | ||
| 935 | |||
| 936 | switch (token) { | ||
| 937 | case Opt_udp: | ||
| 938 | mnt->mount_server.protocol = IPPROTO_UDP; | ||
| 939 | break; | ||
| 940 | case Opt_tcp: | ||
| 941 | mnt->mount_server.protocol = IPPROTO_TCP; | ||
| 942 | break; | ||
| 943 | default: | ||
| 944 | goto out_unrec_xprt; | ||
| 945 | } | ||
| 946 | break; | ||
| 947 | case Opt_addr: | ||
| 948 | string = match_strdup(args); | ||
| 949 | if (string == NULL) | ||
| 950 | goto out_nomem; | ||
| 951 | mnt->nfs_server.address.sin_family = AF_INET; | ||
| 952 | mnt->nfs_server.address.sin_addr.s_addr = | ||
| 953 | in_aton(string); | ||
| 954 | kfree(string); | ||
| 955 | break; | ||
| 956 | case Opt_clientaddr: | ||
| 957 | string = match_strdup(args); | ||
| 958 | if (string == NULL) | ||
| 959 | goto out_nomem; | ||
| 960 | mnt->client_address = string; | ||
| 961 | break; | ||
| 962 | case Opt_mounthost: | ||
| 963 | string = match_strdup(args); | ||
| 964 | if (string == NULL) | ||
| 965 | goto out_nomem; | ||
| 966 | mnt->mount_server.address.sin_family = AF_INET; | ||
| 967 | mnt->mount_server.address.sin_addr.s_addr = | ||
| 968 | in_aton(string); | ||
| 969 | kfree(string); | ||
| 970 | break; | ||
| 477 | 971 | ||
| 478 | /* Set the pseudoflavor */ | 972 | case Opt_userspace: |
| 479 | if (!(data->flags & NFS_MOUNT_SECFLAVOUR)) | 973 | case Opt_deprecated: |
| 480 | data->pseudoflavor = RPC_AUTH_UNIX; | 974 | break; |
| 481 | 975 | ||
| 482 | #ifndef CONFIG_NFS_V3 | 976 | default: |
| 483 | /* If NFSv3 is not compiled in, return -EPROTONOSUPPORT */ | 977 | goto out_unknown; |
| 484 | if (data->flags & NFS_MOUNT_VER3) { | 978 | } |
| 485 | dprintk("%s: NFSv3 not compiled into kernel\n", __FUNCTION__); | ||
| 486 | return -EPROTONOSUPPORT; | ||
| 487 | } | 979 | } |
| 488 | #endif /* CONFIG_NFS_V3 */ | ||
| 489 | 980 | ||
| 490 | /* We now require that the mount process passes the remote address */ | 981 | return 1; |
| 491 | if (data->addr.sin_addr.s_addr == INADDR_ANY) { | 982 | |
| 492 | dprintk("%s: mount program didn't pass remote address!\n", | 983 | out_nomem: |
| 493 | __FUNCTION__); | 984 | printk(KERN_INFO "NFS: not enough memory to parse option\n"); |
| 494 | return -EINVAL; | 985 | return 0; |
| 986 | |||
| 987 | out_unrec_vers: | ||
| 988 | printk(KERN_INFO "NFS: unrecognized NFS version number\n"); | ||
| 989 | return 0; | ||
| 990 | |||
| 991 | out_unrec_xprt: | ||
| 992 | printk(KERN_INFO "NFS: unrecognized transport protocol\n"); | ||
| 993 | return 0; | ||
| 994 | |||
| 995 | out_unrec_sec: | ||
| 996 | printk(KERN_INFO "NFS: unrecognized security flavor\n"); | ||
| 997 | return 0; | ||
| 998 | |||
| 999 | out_unknown: | ||
| 1000 | printk(KERN_INFO "NFS: unknown mount option: %s\n", p); | ||
| 1001 | return 0; | ||
| 1002 | } | ||
| 1003 | |||
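
The new parser above walks a comma-separated option string with match_strdup()/match_token() and dispatches on the resulting token; anything it does not recognize falls through to the printk'd error labels. A rough userspace sketch of the same pattern using plain libc follows; the token table, parse_opts() and the option names are invented for illustration and are not the fs/nfs/super.c code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum { Opt_udp, Opt_tcp, Opt_port, Opt_err };

    static const struct { int token; const char *name; } tokens[] = {
        { Opt_udp,  "udp"  },
        { Opt_tcp,  "tcp"  },
        { Opt_port, "port" },
        { Opt_err,  NULL   },
    };

    static int lookup_token(const char *name)
    {
        int i;

        for (i = 0; tokens[i].name != NULL; i++)
            if (strcmp(name, tokens[i].name) == 0)
                return tokens[i].token;
        return Opt_err;
    }

    /* Returns 1 on success, 0 on failure -- the same convention the
     * kernel parser uses. */
    static int parse_opts(char *raw)
    {
        char *p;

        for (p = strtok(raw, ","); p != NULL; p = strtok(NULL, ",")) {
            char *val = strchr(p, '=');

            if (val != NULL)
                *val++ = '\0';              /* split "key=value" */

            switch (lookup_token(p)) {
            case Opt_udp:
                printf("transport: udp\n");
                break;
            case Opt_tcp:
                printf("transport: tcp\n");
                break;
            case Opt_port:
                printf("port: %ld\n", val ? strtol(val, NULL, 10) : 0L);
                break;
            default:
                fprintf(stderr, "unknown option: %s\n", p);
                return 0;
            }
        }
        return 1;
    }

    int main(void)
    {
        char opts[] = "tcp,port=2049";

        return parse_opts(opts) ? 0 : 1;
    }
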
| 1004 | /* | ||
| 1005 | * Use the remote server's MOUNT service to request the NFS file handle | ||
| 1006 | * corresponding to the provided path. | ||
| 1007 | */ | ||
| 1008 | static int nfs_try_mount(struct nfs_parsed_mount_data *args, | ||
| 1009 | struct nfs_fh *root_fh) | ||
| 1010 | { | ||
| 1011 | struct sockaddr_in sin; | ||
| 1012 | int status; | ||
| 1013 | |||
| 1014 | if (args->mount_server.version == 0) { | ||
| 1015 | if (args->flags & NFS_MOUNT_VER3) | ||
| 1016 | args->mount_server.version = NFS_MNT3_VERSION; | ||
| 1017 | else | ||
| 1018 | args->mount_server.version = NFS_MNT_VERSION; | ||
| 495 | } | 1019 | } |
| 496 | 1020 | ||
| 497 | /* Prepare the root filehandle */ | 1021 | /* |
| 498 | if (data->flags & NFS_MOUNT_VER3) | 1022 | * Construct the mount server's address. |
| 499 | mntfh->size = data->root.size; | 1023 | */ |
| 1024 | if (args->mount_server.address.sin_addr.s_addr != INADDR_ANY) | ||
| 1025 | sin = args->mount_server.address; | ||
| 500 | else | 1026 | else |
| 501 | mntfh->size = NFS2_FHSIZE; | 1027 | sin = args->nfs_server.address; |
| 1028 | if (args->mount_server.port == 0) { | ||
| 1029 | status = rpcb_getport_sync(&sin, | ||
| 1030 | args->mount_server.program, | ||
| 1031 | args->mount_server.version, | ||
| 1032 | args->mount_server.protocol); | ||
| 1033 | if (status < 0) | ||
| 1034 | goto out_err; | ||
| 1035 | sin.sin_port = htons(status); | ||
| 1036 | } else | ||
| 1037 | sin.sin_port = htons(args->mount_server.port); | ||
| 1038 | |||
| 1039 | /* | ||
| 1040 | * Now ask the mount server to map our export path | ||
| 1041 | * to a file handle. | ||
| 1042 | */ | ||
| 1043 | status = nfs_mount((struct sockaddr *) &sin, | ||
| 1044 | sizeof(sin), | ||
| 1045 | args->nfs_server.hostname, | ||
| 1046 | args->nfs_server.export_path, | ||
| 1047 | args->mount_server.version, | ||
| 1048 | args->mount_server.protocol, | ||
| 1049 | root_fh); | ||
| 1050 | if (status < 0) | ||
| 1051 | goto out_err; | ||
| 1052 | |||
| 1053 | return status; | ||
| 502 | 1054 | ||
| 503 | if (mntfh->size > sizeof(mntfh->data)) { | 1055 | out_err: |
| 504 | dprintk("%s: invalid root filehandle\n", __FUNCTION__); | 1056 | dfprintk(MOUNT, "NFS: unable to contact server on host " |
| 505 | return -EINVAL; | 1057 | NIPQUAD_FMT "\n", NIPQUAD(sin.sin_addr.s_addr)); |
| 1058 | return status; | ||
| 1059 | } | ||
| 1060 | |||
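
nfs_try_mount() above picks the MOUNT server address (an explicit mounthost= if one was given, otherwise the NFS server itself), queries rpcbind only when no mount port was specified, and then asks mountd for the root file handle. The same control flow, compressed into a userspace sketch; rpcbind_lookup() and mount_call() are invented stand-ins for rpcb_getport_sync() and nfs_mount(), not real APIs.

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Placeholder stubs so the sketch compiles; the real code calls
     * rpcb_getport_sync() and nfs_mount(). */
    static int rpcbind_lookup(const struct sockaddr_in *sin, unsigned long prog,
                              unsigned long vers, int proto)
    {
        (void)sin; (void)prog; (void)vers; (void)proto;
        return 635;                     /* pretend mountd answers here */
    }

    static int mount_call(const struct sockaddr_in *sin, const char *host,
                          const char *path, int vers, int proto)
    {
        (void)sin; (void)host; (void)path; (void)vers; (void)proto;
        return 0;                       /* pretend the MOUNT call succeeded */
    }

    static int try_mount(struct sockaddr_in nfs_addr, struct sockaddr_in mnt_addr,
                         unsigned short mnt_port, const char *host,
                         const char *path, int vers, int proto)
    {
        struct sockaddr_in sin;
        int status;

        /* Prefer an explicitly configured mount server, otherwise reuse
         * the NFS server's address. */
        sin = (mnt_addr.sin_addr.s_addr != INADDR_ANY) ? mnt_addr : nfs_addr;

        if (mnt_port == 0) {
            status = rpcbind_lookup(&sin, 100005 /* MOUNT program */,
                                    vers, proto);
            if (status < 0)
                return status;
            sin.sin_port = htons((unsigned short)status);
        } else {
            sin.sin_port = htons(mnt_port);
        }

        /* Ask the mount daemon to map the export path to a file handle. */
        return mount_call(&sin, host, path, vers, proto);
    }

    int main(void)
    {
        struct sockaddr_in nfs = { .sin_family = AF_INET }, mnt = { 0 };

        nfs.sin_addr.s_addr = inet_addr("192.0.2.1");   /* test address */
        return try_mount(nfs, mnt, 0, "server.example.net", "/export",
                         3, IPPROTO_UDP) ? 1 : 0;
    }
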
| 1061 | /* | ||
| 1062 | * Validate the NFS2/NFS3 mount data | ||
| 1063 | * - fills in the mount root filehandle | ||
| 1064 | * | ||
| 1065 | * For option strings, user space handles the following behaviors: | ||
| 1066 | * | ||
| 1067 | * + DNS: mapping server host name to IP address ("addr=" option) | ||
| 1068 | * | ||
| 1069 | * + failure mode: how to behave if a mount request can't be handled | ||
| 1070 | * immediately ("fg/bg" option) | ||
| 1071 | * | ||
| 1072 | * + retry: how often to retry a mount request ("retry=" option) | ||
| 1073 | * | ||
| 1074 | * + breaking back: trying proto=udp after proto=tcp, v2 after v3, | ||
| 1075 | * mountproto=tcp after mountproto=udp, and so on | ||
| 1076 | * | ||
| 1077 | * XXX: as far as I can tell, changing the NFS program number is not | ||
| 1078 | * supported in the NFS client. | ||
| 1079 | */ | ||
| 1080 | static int nfs_validate_mount_data(struct nfs_mount_data **options, | ||
| 1081 | struct nfs_fh *mntfh, | ||
| 1082 | const char *dev_name) | ||
| 1083 | { | ||
| 1084 | struct nfs_mount_data *data = *options; | ||
| 1085 | |||
| 1086 | if (data == NULL) | ||
| 1087 | goto out_no_data; | ||
| 1088 | |||
| 1089 | switch (data->version) { | ||
| 1090 | case 1: | ||
| 1091 | data->namlen = 0; | ||
| 1092 | case 2: | ||
| 1093 | data->bsize = 0; | ||
| 1094 | case 3: | ||
| 1095 | if (data->flags & NFS_MOUNT_VER3) | ||
| 1096 | goto out_no_v3; | ||
| 1097 | data->root.size = NFS2_FHSIZE; | ||
| 1098 | memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); | ||
| 1099 | case 4: | ||
| 1100 | if (data->flags & NFS_MOUNT_SECFLAVOUR) | ||
| 1101 | goto out_no_sec; | ||
| 1102 | case 5: | ||
| 1103 | memset(data->context, 0, sizeof(data->context)); | ||
| 1104 | case 6: | ||
| 1105 | if (data->flags & NFS_MOUNT_VER3) | ||
| 1106 | mntfh->size = data->root.size; | ||
| 1107 | else | ||
| 1108 | mntfh->size = NFS2_FHSIZE; | ||
| 1109 | |||
| 1110 | if (mntfh->size > sizeof(mntfh->data)) | ||
| 1111 | goto out_invalid_fh; | ||
| 1112 | |||
| 1113 | memcpy(mntfh->data, data->root.data, mntfh->size); | ||
| 1114 | if (mntfh->size < sizeof(mntfh->data)) | ||
| 1115 | memset(mntfh->data + mntfh->size, 0, | ||
| 1116 | sizeof(mntfh->data) - mntfh->size); | ||
| 1117 | break; | ||
| 1118 | default: { | ||
| 1119 | unsigned int len; | ||
| 1120 | char *c; | ||
| 1121 | int status; | ||
| 1122 | struct nfs_parsed_mount_data args = { | ||
| 1123 | .flags = (NFS_MOUNT_VER3 | NFS_MOUNT_TCP), | ||
| 1124 | .rsize = NFS_MAX_FILE_IO_SIZE, | ||
| 1125 | .wsize = NFS_MAX_FILE_IO_SIZE, | ||
| 1126 | .timeo = 600, | ||
| 1127 | .retrans = 2, | ||
| 1128 | .acregmin = 3, | ||
| 1129 | .acregmax = 60, | ||
| 1130 | .acdirmin = 30, | ||
| 1131 | .acdirmax = 60, | ||
| 1132 | .mount_server.protocol = IPPROTO_UDP, | ||
| 1133 | .mount_server.program = NFS_MNT_PROGRAM, | ||
| 1134 | .nfs_server.protocol = IPPROTO_TCP, | ||
| 1135 | .nfs_server.program = NFS_PROGRAM, | ||
| 1136 | }; | ||
| 1137 | |||
| 1138 | if (nfs_parse_mount_options((char *) *options, &args) == 0) | ||
| 1139 | return -EINVAL; | ||
| 1140 | |||
| 1141 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
| 1142 | if (data == NULL) | ||
| 1143 | return -ENOMEM; | ||
| 1144 | |||
| 1145 | /* | ||
| 1146 | * NB: after this point, caller will free "data" | ||
| 1147 | * if we return an error | ||
| 1148 | */ | ||
| 1149 | *options = data; | ||
| 1150 | |||
| 1151 | c = strchr(dev_name, ':'); | ||
| 1152 | if (c == NULL) | ||
| 1153 | return -EINVAL; | ||
| 1154 | len = c - dev_name - 1; | ||
| 1155 | if (len > sizeof(data->hostname)) | ||
| 1156 | return -EINVAL; | ||
| 1157 | strncpy(data->hostname, dev_name, len); | ||
| 1158 | args.nfs_server.hostname = data->hostname; | ||
| 1159 | |||
| 1160 | c++; | ||
| 1161 | if (strlen(c) > NFS_MAXPATHLEN) | ||
| 1162 | return -EINVAL; | ||
| 1163 | args.nfs_server.export_path = c; | ||
| 1164 | |||
| 1165 | status = nfs_try_mount(&args, mntfh); | ||
| 1166 | if (status) | ||
| 1167 | return -EINVAL; | ||
| 1168 | |||
| 1169 | /* | ||
| 1170 | * Translate to nfs_mount_data, which nfs_fill_super | ||
| 1171 | * can deal with. | ||
| 1172 | */ | ||
| 1173 | data->version = 6; | ||
| 1174 | data->flags = args.flags; | ||
| 1175 | data->rsize = args.rsize; | ||
| 1176 | data->wsize = args.wsize; | ||
| 1177 | data->timeo = args.timeo; | ||
| 1178 | data->retrans = args.retrans; | ||
| 1179 | data->acregmin = args.acregmin; | ||
| 1180 | data->acregmax = args.acregmax; | ||
| 1181 | data->acdirmin = args.acdirmin; | ||
| 1182 | data->acdirmax = args.acdirmax; | ||
| 1183 | data->addr = args.nfs_server.address; | ||
| 1184 | data->namlen = args.namlen; | ||
| 1185 | data->bsize = args.bsize; | ||
| 1186 | data->pseudoflavor = args.auth_flavors[0]; | ||
| 1187 | |||
| 1188 | break; | ||
| 1189 | } | ||
| 506 | } | 1190 | } |
| 507 | 1191 | ||
| 508 | memcpy(mntfh->data, data->root.data, mntfh->size); | 1192 | if (!(data->flags & NFS_MOUNT_SECFLAVOUR)) |
| 509 | if (mntfh->size < sizeof(mntfh->data)) | 1193 | data->pseudoflavor = RPC_AUTH_UNIX; |
| 510 | memset(mntfh->data + mntfh->size, 0, | 1194 | |
| 511 | sizeof(mntfh->data) - mntfh->size); | 1195 | #ifndef CONFIG_NFS_V3 |
| 1196 | if (data->flags & NFS_MOUNT_VER3) | ||
| 1197 | goto out_v3_not_compiled; | ||
| 1198 | #endif /* !CONFIG_NFS_V3 */ | ||
| 1199 | |||
| 1200 | if (!nfs_verify_server_address((struct sockaddr *) &data->addr)) | ||
| 1201 | goto out_no_address; | ||
| 512 | 1202 | ||
| 513 | return 0; | 1203 | return 0; |
| 1204 | |||
| 1205 | out_no_data: | ||
| 1206 | dfprintk(MOUNT, "NFS: mount program didn't pass any mount data\n"); | ||
| 1207 | return -EINVAL; | ||
| 1208 | |||
| 1209 | out_no_v3: | ||
| 1210 | dfprintk(MOUNT, "NFS: nfs_mount_data version %d does not support v3\n", | ||
| 1211 | data->version); | ||
| 1212 | return -EINVAL; | ||
| 1213 | |||
| 1214 | out_no_sec: | ||
| 1215 | dfprintk(MOUNT, "NFS: nfs_mount_data version supports only AUTH_SYS\n"); | ||
| 1216 | return -EINVAL; | ||
| 1217 | |||
| 1218 | #ifndef CONFIG_NFS_V3 | ||
| 1219 | out_v3_not_compiled: | ||
| 1220 | dfprintk(MOUNT, "NFS: NFSv3 is not compiled into kernel\n"); | ||
| 1221 | return -EPROTONOSUPPORT; | ||
| 1222 | #endif /* !CONFIG_NFS_V3 */ | ||
| 1223 | |||
| 1224 | out_no_address: | ||
| 1225 | dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n"); | ||
| 1226 | return -EINVAL; | ||
| 1227 | |||
| 1228 | out_invalid_fh: | ||
| 1229 | dfprintk(MOUNT, "NFS: invalid root filehandle\n"); | ||
| 1230 | return -EINVAL; | ||
| 514 | } | 1231 | } |
| 515 | 1232 | ||
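
The switch on data->version above deliberately falls through, so a version-1 structure is upgraded step by step into the newest layout and each case only fills in the fields its version lacked. The idiom in isolation, with an invented struct legacy_opts and made-up version history:

    #include <stdio.h>
    #include <string.h>

    struct legacy_opts {
        int  version;
        int  namlen;            /* added in v2 */
        int  bsize;             /* added in v3 */
        char context[16];       /* added in v5, zeroed for older callers */
    };

    static int normalize(struct legacy_opts *o)
    {
        switch (o->version) {
        case 1:
            o->namlen = 0;      /* v1 callers never set this */
            /* fall through */
        case 2:
            o->bsize = 0;       /* v2 callers never set this */
            /* fall through */
        case 3:
        case 4:
            memset(o->context, 0, sizeof(o->context));
            /* fall through */
        case 5:
            return 0;           /* now a well-formed v5 structure */
        default:
            return -1;          /* unknown or newer version */
        }
    }

    int main(void)
    {
        struct legacy_opts o = { .version = 1 };

        if (normalize(&o) != 0)
            return 1;
        printf("namlen=%d bsize=%d\n", o.namlen, o.bsize);
        return 0;
    }
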
| 516 | /* | 1233 | /* |
| @@ -600,13 +1317,51 @@ static int nfs_compare_super(struct super_block *sb, void *data) | |||
| 600 | { | 1317 | { |
| 601 | struct nfs_server *server = data, *old = NFS_SB(sb); | 1318 | struct nfs_server *server = data, *old = NFS_SB(sb); |
| 602 | 1319 | ||
| 603 | if (old->nfs_client != server->nfs_client) | 1320 | if (memcmp(&old->nfs_client->cl_addr, |
| 1321 | &server->nfs_client->cl_addr, | ||
| 1322 | sizeof(old->nfs_client->cl_addr)) != 0) | ||
| 1323 | return 0; | ||
| 1324 | /* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */ | ||
| 1325 | if (old->flags & NFS_MOUNT_UNSHARED) | ||
| 604 | return 0; | 1326 | return 0; |
| 605 | if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0) | 1327 | if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0) |
| 606 | return 0; | 1328 | return 0; |
| 607 | return 1; | 1329 | return 1; |
| 608 | } | 1330 | } |
| 609 | 1331 | ||
| 1332 | #define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS) | ||
| 1333 | |||
| 1334 | static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags) | ||
| 1335 | { | ||
| 1336 | const struct nfs_server *a = s->s_fs_info; | ||
| 1337 | const struct rpc_clnt *clnt_a = a->client; | ||
| 1338 | const struct rpc_clnt *clnt_b = b->client; | ||
| 1339 | |||
| 1340 | if ((s->s_flags & NFS_MS_MASK) != (flags & NFS_MS_MASK)) | ||
| 1341 | goto Ebusy; | ||
| 1342 | if (a->nfs_client != b->nfs_client) | ||
| 1343 | goto Ebusy; | ||
| 1344 | if (a->flags != b->flags) | ||
| 1345 | goto Ebusy; | ||
| 1346 | if (a->wsize != b->wsize) | ||
| 1347 | goto Ebusy; | ||
| 1348 | if (a->rsize != b->rsize) | ||
| 1349 | goto Ebusy; | ||
| 1350 | if (a->acregmin != b->acregmin) | ||
| 1351 | goto Ebusy; | ||
| 1352 | if (a->acregmax != b->acregmax) | ||
| 1353 | goto Ebusy; | ||
| 1354 | if (a->acdirmin != b->acdirmin) | ||
| 1355 | goto Ebusy; | ||
| 1356 | if (a->acdirmax != b->acdirmax) | ||
| 1357 | goto Ebusy; | ||
| 1358 | if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor) | ||
| 1359 | goto Ebusy; | ||
| 1360 | return 0; | ||
| 1361 | Ebusy: | ||
| 1362 | return -EBUSY; | ||
| 1363 | } | ||
| 1364 | |||
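
nfs_compare_super() decides whether sget() may hand back an existing superblock, nfs_compare_mount_options() then rejects reuse when the options conflict, and passing a NULL comparator (the unshared case in nfs_get_sb() just below) forces a brand-new superblock. The lookup-or-create shape of sget(), reduced to a toy sketch; fake_sb, sb_cache and lookup_or_create() are invented, not kernel code.

    #include <stdlib.h>
    #include <string.h>

    struct fake_sb {
        char            tag[32];
        struct fake_sb  *next;
    };

    static struct fake_sb *sb_cache;

    typedef int (*compare_fn)(const struct fake_sb *sb, const void *key);

    static int tag_matches(const struct fake_sb *sb, const void *key)
    {
        return strcmp(sb->tag, key) == 0;
    }

    static struct fake_sb *lookup_or_create(compare_fn cmp, const char *key)
    {
        struct fake_sb *sb;

        if (cmp != NULL)                        /* NULL means "never share" */
            for (sb = sb_cache; sb != NULL; sb = sb->next)
                if (cmp(sb, key))
                    return sb;                  /* reuse the existing one */

        sb = calloc(1, sizeof(*sb));
        if (sb == NULL)
            return NULL;
        strncpy(sb->tag, key, sizeof(sb->tag) - 1);
        sb->next = sb_cache;
        sb_cache = sb;
        return sb;
    }

    int main(void)
    {
        struct fake_sb *a = lookup_or_create(tag_matches, "srv:/export");
        struct fake_sb *b = lookup_or_create(tag_matches, "srv:/export");
        struct fake_sb *c = lookup_or_create(NULL, "srv:/export");

        return (a && a == b && b != c) ? 0 : 1; /* shared, then forced-new */
    }

With tag_matches two identical mounts share one fake_sb; with a NULL comparator, as on the NFS_MOUNT_UNSHARED path, every call allocates a fresh one.
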
| 610 | static int nfs_get_sb(struct file_system_type *fs_type, | 1365 | static int nfs_get_sb(struct file_system_type *fs_type, |
| 611 | int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt) | 1366 | int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt) |
| 612 | { | 1367 | { |
| @@ -615,30 +1370,37 @@ static int nfs_get_sb(struct file_system_type *fs_type, | |||
| 615 | struct nfs_fh mntfh; | 1370 | struct nfs_fh mntfh; |
| 616 | struct nfs_mount_data *data = raw_data; | 1371 | struct nfs_mount_data *data = raw_data; |
| 617 | struct dentry *mntroot; | 1372 | struct dentry *mntroot; |
| 1373 | int (*compare_super)(struct super_block *, void *) = nfs_compare_super; | ||
| 618 | int error; | 1374 | int error; |
| 619 | 1375 | ||
| 620 | /* Validate the mount data */ | 1376 | /* Validate the mount data */ |
| 621 | error = nfs_validate_mount_data(data, &mntfh); | 1377 | error = nfs_validate_mount_data(&data, &mntfh, dev_name); |
| 622 | if (error < 0) | 1378 | if (error < 0) |
| 623 | return error; | 1379 | goto out; |
| 624 | 1380 | ||
| 625 | /* Get a volume representation */ | 1381 | /* Get a volume representation */ |
| 626 | server = nfs_create_server(data, &mntfh); | 1382 | server = nfs_create_server(data, &mntfh); |
| 627 | if (IS_ERR(server)) { | 1383 | if (IS_ERR(server)) { |
| 628 | error = PTR_ERR(server); | 1384 | error = PTR_ERR(server); |
| 629 | goto out_err_noserver; | 1385 | goto out; |
| 630 | } | 1386 | } |
| 631 | 1387 | ||
| 1388 | if (server->flags & NFS_MOUNT_UNSHARED) | ||
| 1389 | compare_super = NULL; | ||
| 1390 | |||
| 632 | /* Get a superblock - note that we may end up sharing one that already exists */ | 1391 | /* Get a superblock - note that we may end up sharing one that already exists */ |
| 633 | s = sget(fs_type, nfs_compare_super, nfs_set_super, server); | 1392 | s = sget(fs_type, compare_super, nfs_set_super, server); |
| 634 | if (IS_ERR(s)) { | 1393 | if (IS_ERR(s)) { |
| 635 | error = PTR_ERR(s); | 1394 | error = PTR_ERR(s); |
| 636 | goto out_err_nosb; | 1395 | goto out_err_nosb; |
| 637 | } | 1396 | } |
| 638 | 1397 | ||
| 639 | if (s->s_fs_info != server) { | 1398 | if (s->s_fs_info != server) { |
| 1399 | error = nfs_compare_mount_options(s, server, flags); | ||
| 640 | nfs_free_server(server); | 1400 | nfs_free_server(server); |
| 641 | server = NULL; | 1401 | server = NULL; |
| 1402 | if (error < 0) | ||
| 1403 | goto error_splat_super; | ||
| 642 | } | 1404 | } |
| 643 | 1405 | ||
| 644 | if (!s->s_root) { | 1406 | if (!s->s_root) { |
| @@ -656,17 +1418,21 @@ static int nfs_get_sb(struct file_system_type *fs_type, | |||
| 656 | s->s_flags |= MS_ACTIVE; | 1418 | s->s_flags |= MS_ACTIVE; |
| 657 | mnt->mnt_sb = s; | 1419 | mnt->mnt_sb = s; |
| 658 | mnt->mnt_root = mntroot; | 1420 | mnt->mnt_root = mntroot; |
| 659 | return 0; | 1421 | error = 0; |
| 1422 | |||
| 1423 | out: | ||
| 1424 | if (data != raw_data) | ||
| 1425 | kfree(data); | ||
| 1426 | return error; | ||
| 660 | 1427 | ||
| 661 | out_err_nosb: | 1428 | out_err_nosb: |
| 662 | nfs_free_server(server); | 1429 | nfs_free_server(server); |
| 663 | out_err_noserver: | 1430 | goto out; |
| 664 | return error; | ||
| 665 | 1431 | ||
| 666 | error_splat_super: | 1432 | error_splat_super: |
| 667 | up_write(&s->s_umount); | 1433 | up_write(&s->s_umount); |
| 668 | deactivate_super(s); | 1434 | deactivate_super(s); |
| 669 | return error; | 1435 | goto out; |
| 670 | } | 1436 | } |
| 671 | 1437 | ||
| 672 | /* | 1438 | /* |
| @@ -691,6 +1457,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, | |||
| 691 | struct super_block *s; | 1457 | struct super_block *s; |
| 692 | struct nfs_server *server; | 1458 | struct nfs_server *server; |
| 693 | struct dentry *mntroot; | 1459 | struct dentry *mntroot; |
| 1460 | int (*compare_super)(struct super_block *, void *) = nfs_compare_super; | ||
| 694 | int error; | 1461 | int error; |
| 695 | 1462 | ||
| 696 | dprintk("--> nfs_xdev_get_sb()\n"); | 1463 | dprintk("--> nfs_xdev_get_sb()\n"); |
| @@ -702,16 +1469,22 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, | |||
| 702 | goto out_err_noserver; | 1469 | goto out_err_noserver; |
| 703 | } | 1470 | } |
| 704 | 1471 | ||
| 1472 | if (server->flags & NFS_MOUNT_UNSHARED) | ||
| 1473 | compare_super = NULL; | ||
| 1474 | |||
| 705 | /* Get a superblock - note that we may end up sharing one that already exists */ | 1475 | /* Get a superblock - note that we may end up sharing one that already exists */ |
| 706 | s = sget(&nfs_fs_type, nfs_compare_super, nfs_set_super, server); | 1476 | s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); |
| 707 | if (IS_ERR(s)) { | 1477 | if (IS_ERR(s)) { |
| 708 | error = PTR_ERR(s); | 1478 | error = PTR_ERR(s); |
| 709 | goto out_err_nosb; | 1479 | goto out_err_nosb; |
| 710 | } | 1480 | } |
| 711 | 1481 | ||
| 712 | if (s->s_fs_info != server) { | 1482 | if (s->s_fs_info != server) { |
| 1483 | error = nfs_compare_mount_options(s, server, flags); | ||
| 713 | nfs_free_server(server); | 1484 | nfs_free_server(server); |
| 714 | server = NULL; | 1485 | server = NULL; |
| 1486 | if (error < 0) | ||
| 1487 | goto error_splat_super; | ||
| 715 | } | 1488 | } |
| 716 | 1489 | ||
| 717 | if (!s->s_root) { | 1490 | if (!s->s_root) { |
| @@ -772,25 +1545,164 @@ static void nfs4_fill_super(struct super_block *sb) | |||
| 772 | nfs_initialise_sb(sb); | 1545 | nfs_initialise_sb(sb); |
| 773 | } | 1546 | } |
| 774 | 1547 | ||
| 775 | static void *nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen) | 1548 | /* |
| 1549 | * Validate NFSv4 mount options | ||
| 1550 | */ | ||
| 1551 | static int nfs4_validate_mount_data(struct nfs4_mount_data **options, | ||
| 1552 | const char *dev_name, | ||
| 1553 | struct sockaddr_in *addr, | ||
| 1554 | rpc_authflavor_t *authflavour, | ||
| 1555 | char **hostname, | ||
| 1556 | char **mntpath, | ||
| 1557 | char **ip_addr) | ||
| 776 | { | 1558 | { |
| 777 | void *p = NULL; | 1559 | struct nfs4_mount_data *data = *options; |
| 778 | 1560 | char *c; | |
| 779 | if (!src->len) | 1561 | |
| 780 | return ERR_PTR(-EINVAL); | 1562 | if (data == NULL) |
| 781 | if (src->len < maxlen) | 1563 | goto out_no_data; |
| 782 | maxlen = src->len; | 1564 | |
| 783 | if (dst == NULL) { | 1565 | switch (data->version) { |
| 784 | p = dst = kmalloc(maxlen + 1, GFP_KERNEL); | 1566 | case 1: |
| 785 | if (p == NULL) | 1567 | if (data->host_addrlen != sizeof(*addr)) |
| 786 | return ERR_PTR(-ENOMEM); | 1568 | goto out_no_address; |
| 787 | } | 1569 | if (copy_from_user(addr, data->host_addr, sizeof(*addr))) |
| 788 | if (copy_from_user(dst, src->data, maxlen)) { | 1570 | return -EFAULT; |
| 789 | kfree(p); | 1571 | if (addr->sin_port == 0) |
| 790 | return ERR_PTR(-EFAULT); | 1572 | addr->sin_port = htons(NFS_PORT); |
| 1573 | if (!nfs_verify_server_address((struct sockaddr *) addr)) | ||
| 1574 | goto out_no_address; | ||
| 1575 | |||
| 1576 | switch (data->auth_flavourlen) { | ||
| 1577 | case 0: | ||
| 1578 | *authflavour = RPC_AUTH_UNIX; | ||
| 1579 | break; | ||
| 1580 | case 1: | ||
| 1581 | if (copy_from_user(authflavour, data->auth_flavours, | ||
| 1582 | sizeof(*authflavour))) | ||
| 1583 | return -EFAULT; | ||
| 1584 | break; | ||
| 1585 | default: | ||
| 1586 | goto out_inval_auth; | ||
| 1587 | } | ||
| 1588 | |||
| 1589 | c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN); | ||
| 1590 | if (IS_ERR(c)) | ||
| 1591 | return PTR_ERR(c); | ||
| 1592 | *hostname = c; | ||
| 1593 | |||
| 1594 | c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN); | ||
| 1595 | if (IS_ERR(c)) | ||
| 1596 | return PTR_ERR(c); | ||
| 1597 | *mntpath = c; | ||
| 1598 | dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *mntpath); | ||
| 1599 | |||
| 1600 | c = strndup_user(data->client_addr.data, 16); | ||
| 1601 | if (IS_ERR(c)) | ||
| 1602 | return PTR_ERR(c); | ||
| 1603 | *ip_addr = c; | ||
| 1604 | |||
| 1605 | break; | ||
| 1606 | default: { | ||
| 1607 | unsigned int len; | ||
| 1608 | struct nfs_parsed_mount_data args = { | ||
| 1609 | .rsize = NFS_MAX_FILE_IO_SIZE, | ||
| 1610 | .wsize = NFS_MAX_FILE_IO_SIZE, | ||
| 1611 | .timeo = 600, | ||
| 1612 | .retrans = 2, | ||
| 1613 | .acregmin = 3, | ||
| 1614 | .acregmax = 60, | ||
| 1615 | .acdirmin = 30, | ||
| 1616 | .acdirmax = 60, | ||
| 1617 | .nfs_server.protocol = IPPROTO_TCP, | ||
| 1618 | }; | ||
| 1619 | |||
| 1620 | if (nfs_parse_mount_options((char *) *options, &args) == 0) | ||
| 1621 | return -EINVAL; | ||
| 1622 | |||
| 1623 | if (!nfs_verify_server_address((struct sockaddr *) | ||
| 1624 | &args.nfs_server.address)) | ||
| 1625 | return -EINVAL; | ||
| 1626 | *addr = args.nfs_server.address; | ||
| 1627 | |||
| 1628 | switch (args.auth_flavor_len) { | ||
| 1629 | case 0: | ||
| 1630 | *authflavour = RPC_AUTH_UNIX; | ||
| 1631 | break; | ||
| 1632 | case 1: | ||
| 1633 | *authflavour = (rpc_authflavor_t) args.auth_flavors[0]; | ||
| 1634 | break; | ||
| 1635 | default: | ||
| 1636 | goto out_inval_auth; | ||
| 1637 | } | ||
| 1638 | |||
| 1639 | /* | ||
| 1640 | * Translate to nfs4_mount_data, which nfs4_fill_super | ||
| 1641 | * can deal with. | ||
| 1642 | */ | ||
| 1643 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
| 1644 | if (data == NULL) | ||
| 1645 | return -ENOMEM; | ||
| 1646 | *options = data; | ||
| 1647 | |||
| 1648 | data->version = 1; | ||
| 1649 | data->flags = args.flags & NFS4_MOUNT_FLAGMASK; | ||
| 1650 | data->rsize = args.rsize; | ||
| 1651 | data->wsize = args.wsize; | ||
| 1652 | data->timeo = args.timeo; | ||
| 1653 | data->retrans = args.retrans; | ||
| 1654 | data->acregmin = args.acregmin; | ||
| 1655 | data->acregmax = args.acregmax; | ||
| 1656 | data->acdirmin = args.acdirmin; | ||
| 1657 | data->acdirmax = args.acdirmax; | ||
| 1658 | data->proto = args.nfs_server.protocol; | ||
| 1659 | |||
| 1660 | /* | ||
| 1661 | * Split "dev_name" into "hostname:mntpath". | ||
| 1662 | */ | ||
| 1663 | c = strchr(dev_name, ':'); | ||
| 1664 | if (c == NULL) | ||
| 1665 | return -EINVAL; | ||
| 1666 | /* while calculating len, pretend ':' is '\0' */ | ||
| 1667 | len = c - dev_name; | ||
| 1668 | if (len > NFS4_MAXNAMLEN) | ||
| 1669 | return -EINVAL; | ||
| 1670 | *hostname = kzalloc(len, GFP_KERNEL); | ||
| 1671 | if (*hostname == NULL) | ||
| 1672 | return -ENOMEM; | ||
| 1673 | strncpy(*hostname, dev_name, len - 1); | ||
| 1674 | |||
| 1675 | c++; /* step over the ':' */ | ||
| 1676 | len = strlen(c); | ||
| 1677 | if (len > NFS4_MAXPATHLEN) | ||
| 1678 | return -EINVAL; | ||
| 1679 | *mntpath = kzalloc(len + 1, GFP_KERNEL); | ||
| 1680 | if (*mntpath == NULL) | ||
| 1681 | return -ENOMEM; | ||
| 1682 | strncpy(*mntpath, c, len); | ||
| 1683 | |||
| 1684 | dprintk("MNTPATH: %s\n", *mntpath); | ||
| 1685 | |||
| 1686 | *ip_addr = args.client_address; | ||
| 1687 | |||
| 1688 | break; | ||
| 1689 | } | ||
| 791 | } | 1690 | } |
| 792 | dst[maxlen] = '\0'; | 1691 | |
| 793 | return dst; | 1692 | return 0; |
| 1693 | |||
| 1694 | out_no_data: | ||
| 1695 | dfprintk(MOUNT, "NFS4: mount program didn't pass any mount data\n"); | ||
| 1696 | return -EINVAL; | ||
| 1697 | |||
| 1698 | out_inval_auth: | ||
| 1699 | dfprintk(MOUNT, "NFS4: Invalid number of RPC auth flavours %d\n", | ||
| 1700 | data->auth_flavourlen); | ||
| 1701 | return -EINVAL; | ||
| 1702 | |||
| 1703 | out_no_address: | ||
| 1704 | dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n"); | ||
| 1705 | return -EINVAL; | ||
| 794 | } | 1706 | } |
| 795 | 1707 | ||
| 796 | /* | 1708 | /* |
| @@ -806,81 +1718,29 @@ static int nfs4_get_sb(struct file_system_type *fs_type, | |||
| 806 | rpc_authflavor_t authflavour; | 1718 | rpc_authflavor_t authflavour; |
| 807 | struct nfs_fh mntfh; | 1719 | struct nfs_fh mntfh; |
| 808 | struct dentry *mntroot; | 1720 | struct dentry *mntroot; |
| 809 | char *mntpath = NULL, *hostname = NULL, ip_addr[16]; | 1721 | char *mntpath = NULL, *hostname = NULL, *ip_addr = NULL; |
| 810 | void *p; | 1722 | int (*compare_super)(struct super_block *, void *) = nfs_compare_super; |
| 811 | int error; | 1723 | int error; |
| 812 | 1724 | ||
| 813 | if (data == NULL) { | 1725 | /* Validate the mount data */ |
| 814 | dprintk("%s: missing data argument\n", __FUNCTION__); | 1726 | error = nfs4_validate_mount_data(&data, dev_name, &addr, &authflavour, |
| 815 | return -EINVAL; | 1727 | &hostname, &mntpath, &ip_addr); |
| 816 | } | 1728 | if (error < 0) |
| 817 | if (data->version <= 0 || data->version > NFS4_MOUNT_VERSION) { | 1729 | goto out; |
| 818 | dprintk("%s: bad mount version\n", __FUNCTION__); | ||
| 819 | return -EINVAL; | ||
| 820 | } | ||
| 821 | |||
| 822 | /* We now require that the mount process passes the remote address */ | ||
| 823 | if (data->host_addrlen != sizeof(addr)) | ||
| 824 | return -EINVAL; | ||
| 825 | |||
| 826 | if (copy_from_user(&addr, data->host_addr, sizeof(addr))) | ||
| 827 | return -EFAULT; | ||
| 828 | |||
| 829 | if (addr.sin_family != AF_INET || | ||
| 830 | addr.sin_addr.s_addr == INADDR_ANY | ||
| 831 | ) { | ||
| 832 | dprintk("%s: mount program didn't pass remote IP address!\n", | ||
| 833 | __FUNCTION__); | ||
| 834 | return -EINVAL; | ||
| 835 | } | ||
| 836 | /* RFC3530: The default port for NFS is 2049 */ | ||
| 837 | if (addr.sin_port == 0) | ||
| 838 | addr.sin_port = htons(NFS_PORT); | ||
| 839 | |||
| 840 | /* Grab the authentication type */ | ||
| 841 | authflavour = RPC_AUTH_UNIX; | ||
| 842 | if (data->auth_flavourlen != 0) { | ||
| 843 | if (data->auth_flavourlen != 1) { | ||
| 844 | dprintk("%s: Invalid number of RPC auth flavours %d.\n", | ||
| 845 | __FUNCTION__, data->auth_flavourlen); | ||
| 846 | error = -EINVAL; | ||
| 847 | goto out_err_noserver; | ||
| 848 | } | ||
| 849 | |||
| 850 | if (copy_from_user(&authflavour, data->auth_flavours, | ||
| 851 | sizeof(authflavour))) { | ||
| 852 | error = -EFAULT; | ||
| 853 | goto out_err_noserver; | ||
| 854 | } | ||
| 855 | } | ||
| 856 | |||
| 857 | p = nfs_copy_user_string(NULL, &data->hostname, 256); | ||
| 858 | if (IS_ERR(p)) | ||
| 859 | goto out_err; | ||
| 860 | hostname = p; | ||
| 861 | |||
| 862 | p = nfs_copy_user_string(NULL, &data->mnt_path, 1024); | ||
| 863 | if (IS_ERR(p)) | ||
| 864 | goto out_err; | ||
| 865 | mntpath = p; | ||
| 866 | |||
| 867 | dprintk("MNTPATH: %s\n", mntpath); | ||
| 868 | |||
| 869 | p = nfs_copy_user_string(ip_addr, &data->client_addr, | ||
| 870 | sizeof(ip_addr) - 1); | ||
| 871 | if (IS_ERR(p)) | ||
| 872 | goto out_err; | ||
| 873 | 1730 | ||
| 874 | /* Get a volume representation */ | 1731 | /* Get a volume representation */ |
| 875 | server = nfs4_create_server(data, hostname, &addr, mntpath, ip_addr, | 1732 | server = nfs4_create_server(data, hostname, &addr, mntpath, ip_addr, |
| 876 | authflavour, &mntfh); | 1733 | authflavour, &mntfh); |
| 877 | if (IS_ERR(server)) { | 1734 | if (IS_ERR(server)) { |
| 878 | error = PTR_ERR(server); | 1735 | error = PTR_ERR(server); |
| 879 | goto out_err_noserver; | 1736 | goto out; |
| 880 | } | 1737 | } |
| 881 | 1738 | ||
| 1739 | if (server->flags & NFS4_MOUNT_UNSHARED) | ||
| 1740 | compare_super = NULL; | ||
| 1741 | |||
| 882 | /* Get a superblock - note that we may end up sharing one that already exists */ | 1742 | /* Get a superblock - note that we may end up sharing one that already exists */ |
| 883 | s = sget(fs_type, nfs_compare_super, nfs_set_super, server); | 1743 | s = sget(fs_type, compare_super, nfs_set_super, server); |
| 884 | if (IS_ERR(s)) { | 1744 | if (IS_ERR(s)) { |
| 885 | error = PTR_ERR(s); | 1745 | error = PTR_ERR(s); |
| 886 | goto out_free; | 1746 | goto out_free; |
| @@ -906,25 +1766,22 @@ static int nfs4_get_sb(struct file_system_type *fs_type, | |||
| 906 | s->s_flags |= MS_ACTIVE; | 1766 | s->s_flags |= MS_ACTIVE; |
| 907 | mnt->mnt_sb = s; | 1767 | mnt->mnt_sb = s; |
| 908 | mnt->mnt_root = mntroot; | 1768 | mnt->mnt_root = mntroot; |
| 1769 | error = 0; | ||
| 1770 | |||
| 1771 | out: | ||
| 1772 | kfree(ip_addr); | ||
| 909 | kfree(mntpath); | 1773 | kfree(mntpath); |
| 910 | kfree(hostname); | 1774 | kfree(hostname); |
| 911 | return 0; | 1775 | return error; |
| 912 | |||
| 913 | out_err: | ||
| 914 | error = PTR_ERR(p); | ||
| 915 | goto out_err_noserver; | ||
| 916 | 1776 | ||
| 917 | out_free: | 1777 | out_free: |
| 918 | nfs_free_server(server); | 1778 | nfs_free_server(server); |
| 919 | out_err_noserver: | 1779 | goto out; |
| 920 | kfree(mntpath); | ||
| 921 | kfree(hostname); | ||
| 922 | return error; | ||
| 923 | 1780 | ||
| 924 | error_splat_super: | 1781 | error_splat_super: |
| 925 | up_write(&s->s_umount); | 1782 | up_write(&s->s_umount); |
| 926 | deactivate_super(s); | 1783 | deactivate_super(s); |
| 927 | goto out_err_noserver; | 1784 | goto out; |
| 928 | } | 1785 | } |
| 929 | 1786 | ||
| 930 | static void nfs4_kill_super(struct super_block *sb) | 1787 | static void nfs4_kill_super(struct super_block *sb) |
| @@ -949,6 +1806,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags, | |||
| 949 | struct super_block *s; | 1806 | struct super_block *s; |
| 950 | struct nfs_server *server; | 1807 | struct nfs_server *server; |
| 951 | struct dentry *mntroot; | 1808 | struct dentry *mntroot; |
| 1809 | int (*compare_super)(struct super_block *, void *) = nfs_compare_super; | ||
| 952 | int error; | 1810 | int error; |
| 953 | 1811 | ||
| 954 | dprintk("--> nfs4_xdev_get_sb()\n"); | 1812 | dprintk("--> nfs4_xdev_get_sb()\n"); |
| @@ -960,8 +1818,11 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags, | |||
| 960 | goto out_err_noserver; | 1818 | goto out_err_noserver; |
| 961 | } | 1819 | } |
| 962 | 1820 | ||
| 1821 | if (server->flags & NFS4_MOUNT_UNSHARED) | ||
| 1822 | compare_super = NULL; | ||
| 1823 | |||
| 963 | /* Get a superblock - note that we may end up sharing one that already exists */ | 1824 | /* Get a superblock - note that we may end up sharing one that already exists */ |
| 964 | s = sget(&nfs_fs_type, nfs_compare_super, nfs_set_super, server); | 1825 | s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); |
| 965 | if (IS_ERR(s)) { | 1826 | if (IS_ERR(s)) { |
| 966 | error = PTR_ERR(s); | 1827 | error = PTR_ERR(s); |
| 967 | goto out_err_nosb; | 1828 | goto out_err_nosb; |
| @@ -1016,6 +1877,7 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags, | |||
| 1016 | struct nfs_server *server; | 1877 | struct nfs_server *server; |
| 1017 | struct dentry *mntroot; | 1878 | struct dentry *mntroot; |
| 1018 | struct nfs_fh mntfh; | 1879 | struct nfs_fh mntfh; |
| 1880 | int (*compare_super)(struct super_block *, void *) = nfs_compare_super; | ||
| 1019 | int error; | 1881 | int error; |
| 1020 | 1882 | ||
| 1021 | dprintk("--> nfs4_referral_get_sb()\n"); | 1883 | dprintk("--> nfs4_referral_get_sb()\n"); |
| @@ -1027,8 +1889,11 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags, | |||
| 1027 | goto out_err_noserver; | 1889 | goto out_err_noserver; |
| 1028 | } | 1890 | } |
| 1029 | 1891 | ||
| 1892 | if (server->flags & NFS4_MOUNT_UNSHARED) | ||
| 1893 | compare_super = NULL; | ||
| 1894 | |||
| 1030 | /* Get a superblock - note that we may end up sharing one that already exists */ | 1895 | /* Get a superblock - note that we may end up sharing one that already exists */ |
| 1031 | s = sget(&nfs_fs_type, nfs_compare_super, nfs_set_super, server); | 1896 | s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); |
| 1032 | if (IS_ERR(s)) { | 1897 | if (IS_ERR(s)) { |
| 1033 | error = PTR_ERR(s); | 1898 | error = PTR_ERR(s); |
| 1034 | goto out_err_nosb; | 1899 | goto out_err_nosb; |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af344a158e01..73ac992ece85 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af344a158e01..73ac992ece85 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
| @@ -117,19 +117,19 @@ static struct nfs_page *nfs_page_find_request_locked(struct page *page) | |||
| 117 | if (PagePrivate(page)) { | 117 | if (PagePrivate(page)) { |
| 118 | req = (struct nfs_page *)page_private(page); | 118 | req = (struct nfs_page *)page_private(page); |
| 119 | if (req != NULL) | 119 | if (req != NULL) |
| 120 | atomic_inc(&req->wb_count); | 120 | kref_get(&req->wb_kref); |
| 121 | } | 121 | } |
| 122 | return req; | 122 | return req; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static struct nfs_page *nfs_page_find_request(struct page *page) | 125 | static struct nfs_page *nfs_page_find_request(struct page *page) |
| 126 | { | 126 | { |
| 127 | struct inode *inode = page->mapping->host; | ||
| 127 | struct nfs_page *req = NULL; | 128 | struct nfs_page *req = NULL; |
| 128 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; | ||
| 129 | 129 | ||
| 130 | spin_lock(req_lock); | 130 | spin_lock(&inode->i_lock); |
| 131 | req = nfs_page_find_request_locked(page); | 131 | req = nfs_page_find_request_locked(page); |
| 132 | spin_unlock(req_lock); | 132 | spin_unlock(&inode->i_lock); |
| 133 | return req; | 133 | return req; |
| 134 | } | 134 | } |
| 135 | 135 | ||
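
From here on the write path drops the open-coded atomic_inc(&req->wb_count) in favour of a struct kref, so the final put is what triggers the release function, and the per-nfs_inode req_lock gives way to the generic inode->i_lock. A schematic kernel-style fragment of the kref pattern; struct foo and its helpers are invented, while kref_init/kref_get/kref_put and container_of are the real <linux/kref.h> API.

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct foo {
        struct kref refcount;
        /* ... payload ... */
    };

    static struct foo *foo_alloc(void)
    {
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
            kref_init(&f->refcount);            /* count starts at 1 */
        return f;
    }

    /* Called exactly once, by the final kref_put(). */
    static void foo_release(struct kref *kref)
    {
        struct foo *f = container_of(kref, struct foo, refcount);

        kfree(f);
    }

    static void foo_get(struct foo *f)
    {
        kref_get(&f->refcount);
    }

    static void foo_put(struct foo *f)
    {
        kref_put(&f->refcount, foo_release);    /* frees on the last put */
    }
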
| @@ -191,8 +191,6 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, | |||
| 191 | } | 191 | } |
| 192 | /* Update file length */ | 192 | /* Update file length */ |
| 193 | nfs_grow_file(page, offset, count); | 193 | nfs_grow_file(page, offset, count); |
| 194 | /* Set the PG_uptodate flag? */ | ||
| 195 | nfs_mark_uptodate(page, offset, count); | ||
| 196 | nfs_unlock_request(req); | 194 | nfs_unlock_request(req); |
| 197 | return 0; | 195 | return 0; |
| 198 | } | 196 | } |
| @@ -253,16 +251,16 @@ static void nfs_end_page_writeback(struct page *page) | |||
| 253 | static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, | 251 | static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, |
| 254 | struct page *page) | 252 | struct page *page) |
| 255 | { | 253 | { |
| 254 | struct inode *inode = page->mapping->host; | ||
| 255 | struct nfs_inode *nfsi = NFS_I(inode); | ||
| 256 | struct nfs_page *req; | 256 | struct nfs_page *req; |
| 257 | struct nfs_inode *nfsi = NFS_I(page->mapping->host); | ||
| 258 | spinlock_t *req_lock = &nfsi->req_lock; | ||
| 259 | int ret; | 257 | int ret; |
| 260 | 258 | ||
| 261 | spin_lock(req_lock); | 259 | spin_lock(&inode->i_lock); |
| 262 | for(;;) { | 260 | for(;;) { |
| 263 | req = nfs_page_find_request_locked(page); | 261 | req = nfs_page_find_request_locked(page); |
| 264 | if (req == NULL) { | 262 | if (req == NULL) { |
| 265 | spin_unlock(req_lock); | 263 | spin_unlock(&inode->i_lock); |
| 266 | return 1; | 264 | return 1; |
| 267 | } | 265 | } |
| 268 | if (nfs_lock_request_dontget(req)) | 266 | if (nfs_lock_request_dontget(req)) |
| @@ -272,28 +270,28 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, | |||
| 272 | * succeed provided that someone hasn't already marked the | 270 | * succeed provided that someone hasn't already marked the |
| 273 | * request as dirty (in which case we don't care). | 271 | * request as dirty (in which case we don't care). |
| 274 | */ | 272 | */ |
| 275 | spin_unlock(req_lock); | 273 | spin_unlock(&inode->i_lock); |
| 276 | ret = nfs_wait_on_request(req); | 274 | ret = nfs_wait_on_request(req); |
| 277 | nfs_release_request(req); | 275 | nfs_release_request(req); |
| 278 | if (ret != 0) | 276 | if (ret != 0) |
| 279 | return ret; | 277 | return ret; |
| 280 | spin_lock(req_lock); | 278 | spin_lock(&inode->i_lock); |
| 281 | } | 279 | } |
| 282 | if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { | 280 | if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { |
| 283 | /* This request is marked for commit */ | 281 | /* This request is marked for commit */ |
| 284 | spin_unlock(req_lock); | 282 | spin_unlock(&inode->i_lock); |
| 285 | nfs_unlock_request(req); | 283 | nfs_unlock_request(req); |
| 286 | nfs_pageio_complete(pgio); | 284 | nfs_pageio_complete(pgio); |
| 287 | return 1; | 285 | return 1; |
| 288 | } | 286 | } |
| 289 | if (nfs_set_page_writeback(page) != 0) { | 287 | if (nfs_set_page_writeback(page) != 0) { |
| 290 | spin_unlock(req_lock); | 288 | spin_unlock(&inode->i_lock); |
| 291 | BUG(); | 289 | BUG(); |
| 292 | } | 290 | } |
| 293 | radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, | 291 | radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, |
| 294 | NFS_PAGE_TAG_WRITEBACK); | 292 | NFS_PAGE_TAG_LOCKED); |
| 295 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); | 293 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); |
| 296 | spin_unlock(req_lock); | 294 | spin_unlock(&inode->i_lock); |
| 297 | nfs_pageio_add_request(pgio, req); | 295 | nfs_pageio_add_request(pgio, req); |
| 298 | return ret; | 296 | return ret; |
| 299 | } | 297 | } |
| @@ -400,7 +398,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) | |||
| 400 | if (PageDirty(req->wb_page)) | 398 | if (PageDirty(req->wb_page)) |
| 401 | set_bit(PG_NEED_FLUSH, &req->wb_flags); | 399 | set_bit(PG_NEED_FLUSH, &req->wb_flags); |
| 402 | nfsi->npages++; | 400 | nfsi->npages++; |
| 403 | atomic_inc(&req->wb_count); | 401 | kref_get(&req->wb_kref); |
| 404 | return 0; | 402 | return 0; |
| 405 | } | 403 | } |
| 406 | 404 | ||
| @@ -409,12 +407,12 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) | |||
| 409 | */ | 407 | */ |
| 410 | static void nfs_inode_remove_request(struct nfs_page *req) | 408 | static void nfs_inode_remove_request(struct nfs_page *req) |
| 411 | { | 409 | { |
| 412 | struct inode *inode = req->wb_context->dentry->d_inode; | 410 | struct inode *inode = req->wb_context->path.dentry->d_inode; |
| 413 | struct nfs_inode *nfsi = NFS_I(inode); | 411 | struct nfs_inode *nfsi = NFS_I(inode); |
| 414 | 412 | ||
| 415 | BUG_ON (!NFS_WBACK_BUSY(req)); | 413 | BUG_ON (!NFS_WBACK_BUSY(req)); |
| 416 | 414 | ||
| 417 | spin_lock(&nfsi->req_lock); | 415 | spin_lock(&inode->i_lock); |
| 418 | set_page_private(req->wb_page, 0); | 416 | set_page_private(req->wb_page, 0); |
| 419 | ClearPagePrivate(req->wb_page); | 417 | ClearPagePrivate(req->wb_page); |
| 420 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); | 418 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); |
| @@ -422,11 +420,11 @@ static void nfs_inode_remove_request(struct nfs_page *req) | |||
| 422 | __set_page_dirty_nobuffers(req->wb_page); | 420 | __set_page_dirty_nobuffers(req->wb_page); |
| 423 | nfsi->npages--; | 421 | nfsi->npages--; |
| 424 | if (!nfsi->npages) { | 422 | if (!nfsi->npages) { |
| 425 | spin_unlock(&nfsi->req_lock); | 423 | spin_unlock(&inode->i_lock); |
| 426 | nfs_end_data_update(inode); | 424 | nfs_end_data_update(inode); |
| 427 | iput(inode); | 425 | iput(inode); |
| 428 | } else | 426 | } else |
| 429 | spin_unlock(&nfsi->req_lock); | 427 | spin_unlock(&inode->i_lock); |
| 430 | nfs_clear_request(req); | 428 | nfs_clear_request(req); |
| 431 | nfs_release_request(req); | 429 | nfs_release_request(req); |
| 432 | } | 430 | } |
| @@ -457,14 +455,16 @@ nfs_dirty_request(struct nfs_page *req) | |||
| 457 | static void | 455 | static void |
| 458 | nfs_mark_request_commit(struct nfs_page *req) | 456 | nfs_mark_request_commit(struct nfs_page *req) |
| 459 | { | 457 | { |
| 460 | struct inode *inode = req->wb_context->dentry->d_inode; | 458 | struct inode *inode = req->wb_context->path.dentry->d_inode; |
| 461 | struct nfs_inode *nfsi = NFS_I(inode); | 459 | struct nfs_inode *nfsi = NFS_I(inode); |
| 462 | 460 | ||
| 463 | spin_lock(&nfsi->req_lock); | 461 | spin_lock(&inode->i_lock); |
| 464 | nfs_list_add_request(req, &nfsi->commit); | ||
| 465 | nfsi->ncommit++; | 462 | nfsi->ncommit++; |
| 466 | set_bit(PG_NEED_COMMIT, &(req)->wb_flags); | 463 | set_bit(PG_NEED_COMMIT, &(req)->wb_flags); |
| 467 | spin_unlock(&nfsi->req_lock); | 464 | radix_tree_tag_set(&nfsi->nfs_page_tree, |
| 465 | req->wb_index, | ||
| 466 | NFS_PAGE_TAG_COMMIT); | ||
| 467 | spin_unlock(&inode->i_lock); | ||
| 468 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 468 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
| 469 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | 469 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
| 470 | } | 470 | } |
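
Instead of keeping commit-pending requests on a separate nfsi->commit list, the patch leaves them in the per-inode radix tree and tags them, so nfs_scan_commit() can later harvest them with a tagged gang lookup. A minimal kernel-style sketch of tagging and scanning; the tree, the tag value and the helpers are invented, the radix_tree_* calls are the era's real API, and callers must still serialize with a lock such as inode->i_lock.

    #include <linux/radix-tree.h>

    #define MY_TAG_COMMIT   1       /* analogous to NFS_PAGE_TAG_COMMIT */

    static RADIX_TREE(my_tree, GFP_ATOMIC);

    /* Tag an item that is already inserted at @index; the caller holds
     * the lock protecting the tree (i_lock in the NFS case). */
    static void mark_for_commit(unsigned long index)
    {
        radix_tree_tag_set(&my_tree, index, MY_TAG_COMMIT);
    }

    /* Gather up to @max tagged items starting at @start; returns how
     * many were found. */
    static unsigned int scan_commit(void **results, unsigned long start,
                                    unsigned int max)
    {
        return radix_tree_gang_lookup_tag(&my_tree, results, start, max,
                                          MY_TAG_COMMIT);
    }
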
| @@ -526,18 +526,18 @@ static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, u | |||
| 526 | idx_end = idx_start + npages - 1; | 526 | idx_end = idx_start + npages - 1; |
| 527 | 527 | ||
| 528 | next = idx_start; | 528 | next = idx_start; |
| 529 | while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) { | 529 | while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) { |
| 530 | if (req->wb_index > idx_end) | 530 | if (req->wb_index > idx_end) |
| 531 | break; | 531 | break; |
| 532 | 532 | ||
| 533 | next = req->wb_index + 1; | 533 | next = req->wb_index + 1; |
| 534 | BUG_ON(!NFS_WBACK_BUSY(req)); | 534 | BUG_ON(!NFS_WBACK_BUSY(req)); |
| 535 | 535 | ||
| 536 | atomic_inc(&req->wb_count); | 536 | kref_get(&req->wb_kref); |
| 537 | spin_unlock(&nfsi->req_lock); | 537 | spin_unlock(&inode->i_lock); |
| 538 | error = nfs_wait_on_request(req); | 538 | error = nfs_wait_on_request(req); |
| 539 | nfs_release_request(req); | 539 | nfs_release_request(req); |
| 540 | spin_lock(&nfsi->req_lock); | 540 | spin_lock(&inode->i_lock); |
| 541 | if (error < 0) | 541 | if (error < 0) |
| 542 | return error; | 542 | return error; |
| 543 | res++; | 543 | res++; |
| @@ -577,10 +577,9 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u | |||
| 577 | int res = 0; | 577 | int res = 0; |
| 578 | 578 | ||
| 579 | if (nfsi->ncommit != 0) { | 579 | if (nfsi->ncommit != 0) { |
| 580 | res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages); | 580 | res = nfs_scan_list(nfsi, dst, idx_start, npages, |
| 581 | NFS_PAGE_TAG_COMMIT); | ||
| 581 | nfsi->ncommit -= res; | 582 | nfsi->ncommit -= res; |
| 582 | if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit)) | ||
| 583 | printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n"); | ||
| 584 | } | 583 | } |
| 585 | return res; | 584 | return res; |
| 586 | } | 585 | } |
| @@ -603,7 +602,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
| 603 | { | 602 | { |
| 604 | struct address_space *mapping = page->mapping; | 603 | struct address_space *mapping = page->mapping; |
| 605 | struct inode *inode = mapping->host; | 604 | struct inode *inode = mapping->host; |
| 606 | struct nfs_inode *nfsi = NFS_I(inode); | ||
| 607 | struct nfs_page *req, *new = NULL; | 605 | struct nfs_page *req, *new = NULL; |
| 608 | pgoff_t rqend, end; | 606 | pgoff_t rqend, end; |
| 609 | 607 | ||
| @@ -613,13 +611,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
| 613 | /* Loop over all inode entries and see if we find | 611 | /* Loop over all inode entries and see if we find |
| 614 | * A request for the page we wish to update | 612 | * A request for the page we wish to update |
| 615 | */ | 613 | */ |
| 616 | spin_lock(&nfsi->req_lock); | 614 | spin_lock(&inode->i_lock); |
| 617 | req = nfs_page_find_request_locked(page); | 615 | req = nfs_page_find_request_locked(page); |
| 618 | if (req) { | 616 | if (req) { |
| 619 | if (!nfs_lock_request_dontget(req)) { | 617 | if (!nfs_lock_request_dontget(req)) { |
| 620 | int error; | 618 | int error; |
| 621 | 619 | ||
| 622 | spin_unlock(&nfsi->req_lock); | 620 | spin_unlock(&inode->i_lock); |
| 623 | error = nfs_wait_on_request(req); | 621 | error = nfs_wait_on_request(req); |
| 624 | nfs_release_request(req); | 622 | nfs_release_request(req); |
| 625 | if (error < 0) { | 623 | if (error < 0) { |
| @@ -629,7 +627,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
| 629 | } | 627 | } |
| 630 | continue; | 628 | continue; |
| 631 | } | 629 | } |
| 632 | spin_unlock(&nfsi->req_lock); | 630 | spin_unlock(&inode->i_lock); |
| 633 | if (new) | 631 | if (new) |
| 634 | nfs_release_request(new); | 632 | nfs_release_request(new); |
| 635 | break; | 633 | break; |
| @@ -640,14 +638,14 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
| 640 | nfs_lock_request_dontget(new); | 638 | nfs_lock_request_dontget(new); |
| 641 | error = nfs_inode_add_request(inode, new); | 639 | error = nfs_inode_add_request(inode, new); |
| 642 | if (error) { | 640 | if (error) { |
| 643 | spin_unlock(&nfsi->req_lock); | 641 | spin_unlock(&inode->i_lock); |
| 644 | nfs_unlock_request(new); | 642 | nfs_unlock_request(new); |
| 645 | return ERR_PTR(error); | 643 | return ERR_PTR(error); |
| 646 | } | 644 | } |
| 647 | spin_unlock(&nfsi->req_lock); | 645 | spin_unlock(&inode->i_lock); |
| 648 | return new; | 646 | return new; |
| 649 | } | 647 | } |
| 650 | spin_unlock(&nfsi->req_lock); | 648 | spin_unlock(&inode->i_lock); |
| 651 | 649 | ||
| 652 | new = nfs_create_request(ctx, inode, page, offset, bytes); | 650 | new = nfs_create_request(ctx, inode, page, offset, bytes); |
| 653 | if (IS_ERR(new)) | 651 | if (IS_ERR(new)) |
| @@ -751,12 +749,17 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
| 751 | static void nfs_writepage_release(struct nfs_page *req) | 749 | static void nfs_writepage_release(struct nfs_page *req) |
| 752 | { | 750 | { |
| 753 | 751 | ||
| 754 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { | 752 | if (PageError(req->wb_page)) { |
| 753 | nfs_end_page_writeback(req->wb_page); | ||
| 754 | nfs_inode_remove_request(req); | ||
| 755 | } else if (!nfs_reschedule_unstable_write(req)) { | ||
| 756 | /* Set the PG_uptodate flag */ | ||
| 757 | nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes); | ||
| 755 | nfs_end_page_writeback(req->wb_page); | 758 | nfs_end_page_writeback(req->wb_page); |
| 756 | nfs_inode_remove_request(req); | 759 | nfs_inode_remove_request(req); |
| 757 | } else | 760 | } else |
| 758 | nfs_end_page_writeback(req->wb_page); | 761 | nfs_end_page_writeback(req->wb_page); |
| 759 | nfs_clear_page_writeback(req); | 762 | nfs_clear_page_tag_locked(req); |
| 760 | } | 763 | } |
| 761 | 764 | ||
| 762 | static inline int flush_task_priority(int how) | 765 | static inline int flush_task_priority(int how) |
| @@ -786,7 +789,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req, | |||
| 786 | * NB: take care not to mess about with data->commit et al. */ | 789 | * NB: take care not to mess about with data->commit et al. */ |
| 787 | 790 | ||
| 788 | data->req = req; | 791 | data->req = req; |
| 789 | data->inode = inode = req->wb_context->dentry->d_inode; | 792 | data->inode = inode = req->wb_context->path.dentry->d_inode; |
| 790 | data->cred = req->wb_context->cred; | 793 | data->cred = req->wb_context->cred; |
| 791 | 794 | ||
| 792 | data->args.fh = NFS_FH(inode); | 795 | data->args.fh = NFS_FH(inode); |
| @@ -885,7 +888,7 @@ out_bad: | |||
| 885 | } | 888 | } |
| 886 | nfs_redirty_request(req); | 889 | nfs_redirty_request(req); |
| 887 | nfs_end_page_writeback(req->wb_page); | 890 | nfs_end_page_writeback(req->wb_page); |
| 888 | nfs_clear_page_writeback(req); | 891 | nfs_clear_page_tag_locked(req); |
| 889 | return -ENOMEM; | 892 | return -ENOMEM; |
| 890 | } | 893 | } |
| 891 | 894 | ||
| @@ -928,7 +931,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i | |||
| 928 | nfs_list_remove_request(req); | 931 | nfs_list_remove_request(req); |
| 929 | nfs_redirty_request(req); | 932 | nfs_redirty_request(req); |
| 930 | nfs_end_page_writeback(req->wb_page); | 933 | nfs_end_page_writeback(req->wb_page); |
| 931 | nfs_clear_page_writeback(req); | 934 | nfs_clear_page_tag_locked(req); |
| 932 | } | 935 | } |
| 933 | return -ENOMEM; | 936 | return -ENOMEM; |
| 934 | } | 937 | } |
| @@ -954,8 +957,8 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) | |||
| 954 | struct page *page = req->wb_page; | 957 | struct page *page = req->wb_page; |
| 955 | 958 | ||
| 956 | dprintk("NFS: write (%s/%Ld %d@%Ld)", | 959 | dprintk("NFS: write (%s/%Ld %d@%Ld)", |
| 957 | req->wb_context->dentry->d_inode->i_sb->s_id, | 960 | req->wb_context->path.dentry->d_inode->i_sb->s_id, |
| 958 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 961 | (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), |
| 959 | req->wb_bytes, | 962 | req->wb_bytes, |
| 960 | (long long)req_offset(req)); | 963 | (long long)req_offset(req)); |
| 961 | 964 | ||
| @@ -970,9 +973,9 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) | |||
| 970 | } | 973 | } |
| 971 | 974 | ||
| 972 | if (nfs_write_need_commit(data)) { | 975 | if (nfs_write_need_commit(data)) { |
| 973 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; | 976 | struct inode *inode = page->mapping->host; |
| 974 | 977 | ||
| 975 | spin_lock(req_lock); | 978 | spin_lock(&inode->i_lock); |
| 976 | if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { | 979 | if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { |
| 977 | /* Do nothing we need to resend the writes */ | 980 | /* Do nothing we need to resend the writes */ |
| 978 | } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) { | 981 | } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) { |
| @@ -983,7 +986,7 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) | |||
| 983 | clear_bit(PG_NEED_COMMIT, &req->wb_flags); | 986 | clear_bit(PG_NEED_COMMIT, &req->wb_flags); |
| 984 | dprintk(" server reboot detected\n"); | 987 | dprintk(" server reboot detected\n"); |
| 985 | } | 988 | } |
| 986 | spin_unlock(req_lock); | 989 | spin_unlock(&inode->i_lock); |
| 987 | } else | 990 | } else |
| 988 | dprintk(" OK\n"); | 991 | dprintk(" OK\n"); |
| 989 | 992 | ||
| @@ -1020,8 +1023,8 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) | |||
| 1020 | page = req->wb_page; | 1023 | page = req->wb_page; |
| 1021 | 1024 | ||
| 1022 | dprintk("NFS: write (%s/%Ld %d@%Ld)", | 1025 | dprintk("NFS: write (%s/%Ld %d@%Ld)", |
| 1023 | req->wb_context->dentry->d_inode->i_sb->s_id, | 1026 | req->wb_context->path.dentry->d_inode->i_sb->s_id, |
| 1024 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 1027 | (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), |
| 1025 | req->wb_bytes, | 1028 | req->wb_bytes, |
| 1026 | (long long)req_offset(req)); | 1029 | (long long)req_offset(req)); |
| 1027 | 1030 | ||
| @@ -1039,12 +1042,14 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) | |||
| 1039 | dprintk(" marked for commit\n"); | 1042 | dprintk(" marked for commit\n"); |
| 1040 | goto next; | 1043 | goto next; |
| 1041 | } | 1044 | } |
| 1045 | /* Set the PG_uptodate flag? */ | ||
| 1046 | nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); | ||
| 1042 | dprintk(" OK\n"); | 1047 | dprintk(" OK\n"); |
| 1043 | remove_request: | 1048 | remove_request: |
| 1044 | nfs_end_page_writeback(page); | 1049 | nfs_end_page_writeback(page); |
| 1045 | nfs_inode_remove_request(req); | 1050 | nfs_inode_remove_request(req); |
| 1046 | next: | 1051 | next: |
| 1047 | nfs_clear_page_writeback(req); | 1052 | nfs_clear_page_tag_locked(req); |
| 1048 | } | 1053 | } |
| 1049 | } | 1054 | } |
| 1050 | 1055 | ||
| @@ -1157,7 +1162,7 @@ static void nfs_commit_rpcsetup(struct list_head *head, | |||
| 1157 | 1162 | ||
| 1158 | list_splice_init(head, &data->pages); | 1163 | list_splice_init(head, &data->pages); |
| 1159 | first = nfs_list_entry(data->pages.next); | 1164 | first = nfs_list_entry(data->pages.next); |
| 1160 | inode = first->wb_context->dentry->d_inode; | 1165 | inode = first->wb_context->path.dentry->d_inode; |
| 1161 | 1166 | ||
| 1162 | data->inode = inode; | 1167 | data->inode = inode; |
| 1163 | data->cred = first->wb_context->cred; | 1168 | data->cred = first->wb_context->cred; |
| @@ -1207,7 +1212,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how) | |||
| 1207 | nfs_list_remove_request(req); | 1212 | nfs_list_remove_request(req); |
| 1208 | nfs_mark_request_commit(req); | 1213 | nfs_mark_request_commit(req); |
| 1209 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 1214 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
| 1210 | nfs_clear_page_writeback(req); | 1215 | nfs_clear_page_tag_locked(req); |
| 1211 | } | 1216 | } |
| 1212 | return -ENOMEM; | 1217 | return -ENOMEM; |
| 1213 | } | 1218 | } |
| @@ -1234,8 +1239,8 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) | |||
| 1234 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 1239 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
| 1235 | 1240 | ||
| 1236 | dprintk("NFS: commit (%s/%Ld %d@%Ld)", | 1241 | dprintk("NFS: commit (%s/%Ld %d@%Ld)", |
| 1237 | req->wb_context->dentry->d_inode->i_sb->s_id, | 1242 | req->wb_context->path.dentry->d_inode->i_sb->s_id, |
| 1238 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 1243 | (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), |
| 1239 | req->wb_bytes, | 1244 | req->wb_bytes, |
| 1240 | (long long)req_offset(req)); | 1245 | (long long)req_offset(req)); |
| 1241 | if (task->tk_status < 0) { | 1246 | if (task->tk_status < 0) { |
| @@ -1249,6 +1254,9 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) | |||
| 1249 | * returned by the server against all stored verfs. */ | 1254 | * returned by the server against all stored verfs. */ |
| 1250 | if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) { | 1255 | if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) { |
| 1251 | /* We have a match */ | 1256 | /* We have a match */ |
| 1257 | /* Set the PG_uptodate flag */ | ||
| 1258 | nfs_mark_uptodate(req->wb_page, req->wb_pgbase, | ||
| 1259 | req->wb_bytes); | ||
| 1252 | nfs_inode_remove_request(req); | 1260 | nfs_inode_remove_request(req); |
| 1253 | dprintk(" OK\n"); | 1261 | dprintk(" OK\n"); |
| 1254 | goto next; | 1262 | goto next; |
| @@ -1257,7 +1265,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) | |||
| 1257 | dprintk(" mismatch\n"); | 1265 | dprintk(" mismatch\n"); |
| 1258 | nfs_redirty_request(req); | 1266 | nfs_redirty_request(req); |
| 1259 | next: | 1267 | next: |
| 1260 | nfs_clear_page_writeback(req); | 1268 | nfs_clear_page_tag_locked(req); |
| 1261 | } | 1269 | } |
| 1262 | } | 1270 | } |
| 1263 | 1271 | ||
| @@ -1268,13 +1276,12 @@ static const struct rpc_call_ops nfs_commit_ops = { | |||
| 1268 | 1276 | ||
| 1269 | int nfs_commit_inode(struct inode *inode, int how) | 1277 | int nfs_commit_inode(struct inode *inode, int how) |
| 1270 | { | 1278 | { |
| 1271 | struct nfs_inode *nfsi = NFS_I(inode); | ||
| 1272 | LIST_HEAD(head); | 1279 | LIST_HEAD(head); |
| 1273 | int res; | 1280 | int res; |
| 1274 | 1281 | ||
| 1275 | spin_lock(&nfsi->req_lock); | 1282 | spin_lock(&inode->i_lock); |
| 1276 | res = nfs_scan_commit(inode, &head, 0, 0); | 1283 | res = nfs_scan_commit(inode, &head, 0, 0); |
| 1277 | spin_unlock(&nfsi->req_lock); | 1284 | spin_unlock(&inode->i_lock); |
| 1278 | if (res) { | 1285 | if (res) { |
| 1279 | int error = nfs_commit_list(inode, &head, how); | 1286 | int error = nfs_commit_list(inode, &head, how); |
| 1280 | if (error < 0) | 1287 | if (error < 0) |
| @@ -1292,7 +1299,6 @@ static inline int nfs_commit_list(struct inode *inode, struct list_head *head, i | |||
| 1292 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) | 1299 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) |
| 1293 | { | 1300 | { |
| 1294 | struct inode *inode = mapping->host; | 1301 | struct inode *inode = mapping->host; |
| 1295 | struct nfs_inode *nfsi = NFS_I(inode); | ||
| 1296 | pgoff_t idx_start, idx_end; | 1302 | pgoff_t idx_start, idx_end; |
| 1297 | unsigned int npages = 0; | 1303 | unsigned int npages = 0; |
| 1298 | LIST_HEAD(head); | 1304 | LIST_HEAD(head); |
| @@ -1314,7 +1320,7 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr | |||
| 1314 | } | 1320 | } |
| 1315 | } | 1321 | } |
| 1316 | how &= ~FLUSH_NOCOMMIT; | 1322 | how &= ~FLUSH_NOCOMMIT; |
| 1317 | spin_lock(&nfsi->req_lock); | 1323 | spin_lock(&inode->i_lock); |
| 1318 | do { | 1324 | do { |
| 1319 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); | 1325 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); |
| 1320 | if (ret != 0) | 1326 | if (ret != 0) |
| @@ -1325,18 +1331,19 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr | |||
| 1325 | if (pages == 0) | 1331 | if (pages == 0) |
| 1326 | break; | 1332 | break; |
| 1327 | if (how & FLUSH_INVALIDATE) { | 1333 | if (how & FLUSH_INVALIDATE) { |
| 1328 | spin_unlock(&nfsi->req_lock); | 1334 | spin_unlock(&inode->i_lock); |
| 1329 | nfs_cancel_commit_list(&head); | 1335 | nfs_cancel_commit_list(&head); |
| 1330 | ret = pages; | 1336 | ret = pages; |
| 1331 | spin_lock(&nfsi->req_lock); | 1337 | spin_lock(&inode->i_lock); |
| 1332 | continue; | 1338 | continue; |
| 1333 | } | 1339 | } |
| 1334 | pages += nfs_scan_commit(inode, &head, 0, 0); | 1340 | pages += nfs_scan_commit(inode, &head, 0, 0); |
| 1335 | spin_unlock(&nfsi->req_lock); | 1341 | spin_unlock(&inode->i_lock); |
| 1336 | ret = nfs_commit_list(inode, &head, how); | 1342 | ret = nfs_commit_list(inode, &head, how); |
| 1337 | spin_lock(&nfsi->req_lock); | 1343 | spin_lock(&inode->i_lock); |
| 1344 | |||
| 1338 | } while (ret >= 0); | 1345 | } while (ret >= 0); |
| 1339 | spin_unlock(&nfsi->req_lock); | 1346 | spin_unlock(&inode->i_lock); |
| 1340 | return ret; | 1347 | return ret; |
| 1341 | } | 1348 | } |
| 1342 | 1349 | ||
| @@ -1430,7 +1437,6 @@ int nfs_set_page_dirty(struct page *page) | |||
| 1430 | { | 1437 | { |
| 1431 | struct address_space *mapping = page->mapping; | 1438 | struct address_space *mapping = page->mapping; |
| 1432 | struct inode *inode; | 1439 | struct inode *inode; |
| 1433 | spinlock_t *req_lock; | ||
| 1434 | struct nfs_page *req; | 1440 | struct nfs_page *req; |
| 1435 | int ret; | 1441 | int ret; |
| 1436 | 1442 | ||
| @@ -1439,18 +1445,17 @@ int nfs_set_page_dirty(struct page *page) | |||
| 1439 | inode = mapping->host; | 1445 | inode = mapping->host; |
| 1440 | if (!inode) | 1446 | if (!inode) |
| 1441 | goto out_raced; | 1447 | goto out_raced; |
| 1442 | req_lock = &NFS_I(inode)->req_lock; | 1448 | spin_lock(&inode->i_lock); |
| 1443 | spin_lock(req_lock); | ||
| 1444 | req = nfs_page_find_request_locked(page); | 1449 | req = nfs_page_find_request_locked(page); |
| 1445 | if (req != NULL) { | 1450 | if (req != NULL) { |
| 1446 | /* Mark any existing write requests for flushing */ | 1451 | /* Mark any existing write requests for flushing */ |
| 1447 | ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags); | 1452 | ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags); |
| 1448 | spin_unlock(req_lock); | 1453 | spin_unlock(&inode->i_lock); |
| 1449 | nfs_release_request(req); | 1454 | nfs_release_request(req); |
| 1450 | return ret; | 1455 | return ret; |
| 1451 | } | 1456 | } |
| 1452 | ret = __set_page_dirty_nobuffers(page); | 1457 | ret = __set_page_dirty_nobuffers(page); |
| 1453 | spin_unlock(req_lock); | 1458 | spin_unlock(&inode->i_lock); |
| 1454 | return ret; | 1459 | return ret; |
| 1455 | out_raced: | 1460 | out_raced: |
| 1456 | return !TestSetPageDirty(page); | 1461 | return !TestSetPageDirty(page); |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 864090edc28b..5443c52b57aa 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
| @@ -394,7 +394,6 @@ nfsd4_probe_callback(struct nfs4_client *clp) | |||
| 394 | .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], | 394 | .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], |
| 395 | .rpc_argp = clp, | 395 | .rpc_argp = clp, |
| 396 | }; | 396 | }; |
| 397 | char clientname[16]; | ||
| 398 | int status; | 397 | int status; |
| 399 | 398 | ||
| 400 | if (atomic_read(&cb->cb_set)) | 399 | if (atomic_read(&cb->cb_set)) |
| @@ -417,11 +416,6 @@ nfsd4_probe_callback(struct nfs4_client *clp) | |||
| 417 | memset(program->stats, 0, sizeof(cb->cb_stat)); | 416 | memset(program->stats, 0, sizeof(cb->cb_stat)); |
| 418 | program->stats->program = program; | 417 | program->stats->program = program; |
| 419 | 418 | ||
| 420 | /* Just here to make some printk's more useful: */ | ||
| 421 | snprintf(clientname, sizeof(clientname), | ||
| 422 | "%u.%u.%u.%u", NIPQUAD(addr.sin_addr)); | ||
| 423 | args.servername = clientname; | ||
| 424 | |||
| 425 | /* Create RPC client */ | 419 | /* Create RPC client */ |
| 426 | cb->cb_client = rpc_create(&args); | 420 | cb->cb_client = rpc_create(&args); |
| 427 | if (IS_ERR(cb->cb_client)) { | 421 | if (IS_ERR(cb->cb_client)) { |
| @@ -429,29 +423,23 @@ nfsd4_probe_callback(struct nfs4_client *clp) | |||
| 429 | goto out_err; | 423 | goto out_err; |
| 430 | } | 424 | } |
| 431 | 425 | ||
| 432 | /* Kick rpciod, put the call on the wire. */ | ||
| 433 | if (rpciod_up() != 0) | ||
| 434 | goto out_clnt; | ||
| 435 | |||
| 436 | /* the task holds a reference to the nfs4_client struct */ | 426 | /* the task holds a reference to the nfs4_client struct */ |
| 437 | atomic_inc(&clp->cl_count); | 427 | atomic_inc(&clp->cl_count); |
| 438 | 428 | ||
| 439 | msg.rpc_cred = nfsd4_lookupcred(clp,0); | 429 | msg.rpc_cred = nfsd4_lookupcred(clp,0); |
| 440 | if (IS_ERR(msg.rpc_cred)) | 430 | if (IS_ERR(msg.rpc_cred)) |
| 441 | goto out_rpciod; | 431 | goto out_release_clp; |
| 442 | status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL); | 432 | status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL); |
| 443 | put_rpccred(msg.rpc_cred); | 433 | put_rpccred(msg.rpc_cred); |
| 444 | 434 | ||
| 445 | if (status != 0) { | 435 | if (status != 0) { |
| 446 | dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n"); | 436 | dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n"); |
| 447 | goto out_rpciod; | 437 | goto out_release_clp; |
| 448 | } | 438 | } |
| 449 | return; | 439 | return; |
| 450 | 440 | ||
| 451 | out_rpciod: | 441 | out_release_clp: |
| 452 | atomic_dec(&clp->cl_count); | 442 | atomic_dec(&clp->cl_count); |
| 453 | rpciod_down(); | ||
| 454 | out_clnt: | ||
| 455 | rpc_shutdown_client(cb->cb_client); | 443 | rpc_shutdown_client(cb->cb_client); |
| 456 | out_err: | 444 | out_err: |
| 457 | cb->cb_client = NULL; | 445 | cb->cb_client = NULL; |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3cc8ce422ab1..8c52913d7cb6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -378,7 +378,6 @@ shutdown_callback_client(struct nfs4_client *clp) | |||
| 378 | if (clnt) { | 378 | if (clnt) { |
| 379 | clp->cl_callback.cb_client = NULL; | 379 | clp->cl_callback.cb_client = NULL; |
| 380 | rpc_shutdown_client(clnt); | 380 | rpc_shutdown_client(clnt); |
| 381 | rpciod_down(); | ||
| 382 | } | 381 | } |
| 383 | } | 382 | } |
| 384 | 383 | ||
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 05707e2fccae..e2d1ce36b367 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | struct nlm_host { | 39 | struct nlm_host { |
| 40 | struct hlist_node h_hash; /* doubly linked list */ | 40 | struct hlist_node h_hash; /* doubly linked list */ |
| 41 | struct sockaddr_in h_addr; /* peer address */ | 41 | struct sockaddr_in h_addr; /* peer address */ |
| 42 | struct sockaddr_in h_saddr; /* our address (optional) */ | ||
| 42 | struct rpc_clnt * h_rpcclnt; /* RPC client to talk to peer */ | 43 | struct rpc_clnt * h_rpcclnt; /* RPC client to talk to peer */ |
| 43 | char * h_name; /* remote hostname */ | 44 | char * h_name; /* remote hostname */ |
| 44 | u32 h_version; /* interface version */ | 45 | u32 h_version; /* interface version */ |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 7e7f33a38fc0..8726491de154 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 17 | 17 | ||
| 18 | #define NFS4_BITMAP_SIZE 2 | ||
| 18 | #define NFS4_VERIFIER_SIZE 8 | 19 | #define NFS4_VERIFIER_SIZE 8 |
| 19 | #define NFS4_STATEID_SIZE 16 | 20 | #define NFS4_STATEID_SIZE 16 |
| 20 | #define NFS4_FHSIZE 128 | 21 | #define NFS4_FHSIZE 128 |
diff --git a/include/linux/nfs4_mount.h b/include/linux/nfs4_mount.h index 26b4c83f831d..a0dcf6655657 100644 --- a/include/linux/nfs4_mount.h +++ b/include/linux/nfs4_mount.h | |||
| @@ -65,6 +65,7 @@ struct nfs4_mount_data { | |||
| 65 | #define NFS4_MOUNT_NOCTO 0x0010 /* 1 */ | 65 | #define NFS4_MOUNT_NOCTO 0x0010 /* 1 */ |
| 66 | #define NFS4_MOUNT_NOAC 0x0020 /* 1 */ | 66 | #define NFS4_MOUNT_NOAC 0x0020 /* 1 */ |
| 67 | #define NFS4_MOUNT_STRICTLOCK 0x1000 /* 1 */ | 67 | #define NFS4_MOUNT_STRICTLOCK 0x1000 /* 1 */ |
| 68 | #define NFS4_MOUNT_FLAGMASK 0xFFFF | 68 | #define NFS4_MOUNT_UNSHARED 0x8000 /* 1 */ |
| 69 | #define NFS4_MOUNT_FLAGMASK 0x9033 | ||
| 69 | 70 | ||
| 70 | #endif | 71 | #endif |
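Tightening NFS4_MOUNT_FLAGMASK from 0xFFFF to the OR of the defined NFS4_MOUNT_* bits means undefined bits passed in from user space are dropped instead of silently accepted. A trivial, hypothetical sketch of applying the mask in a mount path:

#include <linux/nfs4_mount.h>

/* Sketch: keep only the flag bits the kernel actually defines. */
static inline unsigned int sketch_sanitize_nfs4_flags(unsigned int user_flags)
{
    return user_flags & NFS4_MOUNT_FLAGMASK;
}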
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 0543439a97af..c098ae194f79 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
| @@ -30,7 +30,9 @@ | |||
| 30 | #ifdef __KERNEL__ | 30 | #ifdef __KERNEL__ |
| 31 | 31 | ||
| 32 | #include <linux/in.h> | 32 | #include <linux/in.h> |
| 33 | #include <linux/kref.h> | ||
| 33 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
| 35 | #include <linux/namei.h> | ||
| 34 | #include <linux/pagemap.h> | 36 | #include <linux/pagemap.h> |
| 35 | #include <linux/rbtree.h> | 37 | #include <linux/rbtree.h> |
| 36 | #include <linux/rwsem.h> | 38 | #include <linux/rwsem.h> |
| @@ -69,9 +71,8 @@ struct nfs_access_entry { | |||
| 69 | 71 | ||
| 70 | struct nfs4_state; | 72 | struct nfs4_state; |
| 71 | struct nfs_open_context { | 73 | struct nfs_open_context { |
| 72 | atomic_t count; | 74 | struct kref kref; |
| 73 | struct vfsmount *vfsmnt; | 75 | struct path path; |
| 74 | struct dentry *dentry; | ||
| 75 | struct rpc_cred *cred; | 76 | struct rpc_cred *cred; |
| 76 | struct nfs4_state *state; | 77 | struct nfs4_state *state; |
| 77 | fl_owner_t lockowner; | 78 | fl_owner_t lockowner; |
| @@ -155,13 +156,9 @@ struct nfs_inode { | |||
| 155 | /* | 156 | /* |
| 156 | * This is the list of dirty unwritten pages. | 157 | * This is the list of dirty unwritten pages. |
| 157 | */ | 158 | */ |
| 158 | spinlock_t req_lock; | ||
| 159 | struct list_head dirty; | ||
| 160 | struct list_head commit; | ||
| 161 | struct radix_tree_root nfs_page_tree; | 159 | struct radix_tree_root nfs_page_tree; |
| 162 | 160 | ||
| 163 | unsigned int ndirty, | 161 | unsigned long ncommit, |
| 164 | ncommit, | ||
| 165 | npages; | 162 | npages; |
| 166 | 163 | ||
| 167 | /* Open contexts for shared mmap writes */ | 164 | /* Open contexts for shared mmap writes */ |
| @@ -187,6 +184,7 @@ struct nfs_inode { | |||
| 187 | #define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ | 184 | #define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ |
| 188 | #define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ | 185 | #define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ |
| 189 | #define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ | 186 | #define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ |
| 187 | #define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */ | ||
| 190 | 188 | ||
| 191 | /* | 189 | /* |
| 192 | * Bit offsets in flags field | 190 | * Bit offsets in flags field |
| @@ -496,21 +494,18 @@ static inline void nfs3_forget_cached_acls(struct inode *inode) | |||
| 496 | 494 | ||
| 497 | /* | 495 | /* |
| 498 | * linux/fs/mount_clnt.c | 496 | * linux/fs/mount_clnt.c |
| 499 | * (Used only by nfsroot module) | ||
| 500 | */ | 497 | */ |
| 501 | extern int nfsroot_mount(struct sockaddr_in *, char *, struct nfs_fh *, | 498 | extern int nfs_mount(struct sockaddr *, size_t, char *, char *, |
| 502 | int, int); | 499 | int, int, struct nfs_fh *); |
| 503 | 500 | ||
| 504 | /* | 501 | /* |
| 505 | * inline functions | 502 | * inline functions |
| 506 | */ | 503 | */ |
| 507 | 504 | ||
| 508 | static inline loff_t | 505 | static inline loff_t nfs_size_to_loff_t(__u64 size) |
| 509 | nfs_size_to_loff_t(__u64 size) | ||
| 510 | { | 506 | { |
| 511 | loff_t maxsz = (((loff_t) ULONG_MAX) << PAGE_CACHE_SHIFT) + PAGE_CACHE_SIZE - 1; | 507 | if (size > (__u64) OFFSET_MAX - 1) |
| 512 | if (size > maxsz) | 508 | return OFFSET_MAX - 1; |
| 513 | return maxsz; | ||
| 514 | return (loff_t) size; | 509 | return (loff_t) size; |
| 515 | } | 510 | } |
| 516 | 511 | ||
| @@ -557,6 +552,7 @@ extern void * nfs_root_data(void); | |||
| 557 | #define NFSDBG_ROOT 0x0080 | 552 | #define NFSDBG_ROOT 0x0080 |
| 558 | #define NFSDBG_CALLBACK 0x0100 | 553 | #define NFSDBG_CALLBACK 0x0100 |
| 559 | #define NFSDBG_CLIENT 0x0200 | 554 | #define NFSDBG_CLIENT 0x0200 |
| 555 | #define NFSDBG_MOUNT 0x0400 | ||
| 560 | #define NFSDBG_ALL 0xFFFF | 556 | #define NFSDBG_ALL 0xFFFF |
| 561 | 557 | ||
| 562 | #ifdef __KERNEL__ | 558 | #ifdef __KERNEL__ |
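nfs_open_context now carries a struct kref instead of a hand-rolled atomic_t, and its vfsmnt/dentry pair collapses into a struct path. A minimal sketch of the resulting get/put idiom; the helper names are made up and the release is deliberately simplified (the real put path also drops the path, cred and NFSv4 state references).

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>

static void sketch_ctx_release(struct kref *kref)
{
    struct nfs_open_context *ctx =
            container_of(kref, struct nfs_open_context, kref);

    kfree(ctx);    /* simplified: real code releases path/cred/state first */
}

static struct nfs_open_context *sketch_get_ctx(struct nfs_open_context *ctx)
{
    kref_get(&ctx->kref);
    return ctx;
}

static void sketch_put_ctx(struct nfs_open_context *ctx)
{
    kref_put(&ctx->kref, sketch_ctx_release);
}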
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 52b4378311c8..0cac49bc0955 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
| @@ -16,7 +16,6 @@ struct nfs_client { | |||
| 16 | #define NFS_CS_INITING 1 /* busy initialising */ | 16 | #define NFS_CS_INITING 1 /* busy initialising */ |
| 17 | int cl_nfsversion; /* NFS protocol version */ | 17 | int cl_nfsversion; /* NFS protocol version */ |
| 18 | unsigned long cl_res_state; /* NFS resources state */ | 18 | unsigned long cl_res_state; /* NFS resources state */ |
| 19 | #define NFS_CS_RPCIOD 0 /* - rpciod started */ | ||
| 20 | #define NFS_CS_CALLBACK 1 /* - callback started */ | 19 | #define NFS_CS_CALLBACK 1 /* - callback started */ |
| 21 | #define NFS_CS_IDMAP 2 /* - idmap started */ | 20 | #define NFS_CS_IDMAP 2 /* - idmap started */ |
| 22 | #define NFS_CS_RENEWD 3 /* - renewd started */ | 21 | #define NFS_CS_RENEWD 3 /* - renewd started */ |
| @@ -35,7 +34,8 @@ struct nfs_client { | |||
| 35 | nfs4_verifier cl_confirm; | 34 | nfs4_verifier cl_confirm; |
| 36 | unsigned long cl_state; | 35 | unsigned long cl_state; |
| 37 | 36 | ||
| 38 | u32 cl_lockowner_id; | 37 | struct rb_root cl_openowner_id; |
| 38 | struct rb_root cl_lockowner_id; | ||
| 39 | 39 | ||
| 40 | /* | 40 | /* |
| 41 | * The following rwsem ensures exclusive access to the server | 41 | * The following rwsem ensures exclusive access to the server |
| @@ -44,9 +44,7 @@ struct nfs_client { | |||
| 44 | struct rw_semaphore cl_sem; | 44 | struct rw_semaphore cl_sem; |
| 45 | 45 | ||
| 46 | struct list_head cl_delegations; | 46 | struct list_head cl_delegations; |
| 47 | struct list_head cl_state_owners; | 47 | struct rb_root cl_state_owners; |
| 48 | struct list_head cl_unused; | ||
| 49 | int cl_nunused; | ||
| 50 | spinlock_t cl_lock; | 48 | spinlock_t cl_lock; |
| 51 | 49 | ||
| 52 | unsigned long cl_lease_time; | 50 | unsigned long cl_lease_time; |
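cl_state_owners and the open/lock owner id allocators switch from linked lists to red-black trees, so owner lookup by id scales with the number of owners. Below is a sketch of the standard rbtree insertion idiom such a tree relies on; the struct and field names are hypothetical, not the real nfs4_state_owner, and duplicate keys are not handled.

#include <linux/rbtree.h>
#include <linux/types.h>

struct sketch_owner {
    struct rb_node  node;
    u64             id;
};

static void sketch_insert_owner(struct rb_root *root, struct sketch_owner *new)
{
    struct rb_node **p = &root->rb_node, *parent = NULL;

    while (*p != NULL) {
        struct sketch_owner *cur;

        parent = *p;
        cur = rb_entry(parent, struct sketch_owner, node);
        if (new->id < cur->id)
            p = &parent->rb_left;
        else
            p = &parent->rb_right;
    }
    rb_link_node(&new->node, parent, p);
    rb_insert_color(&new->node, root);
}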
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index cc8b9c59acb8..a3ade89a64d2 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h | |||
| @@ -37,7 +37,7 @@ struct nfs_mount_data { | |||
| 37 | int acdirmin; /* 1 */ | 37 | int acdirmin; /* 1 */ |
| 38 | int acdirmax; /* 1 */ | 38 | int acdirmax; /* 1 */ |
| 39 | struct sockaddr_in addr; /* 1 */ | 39 | struct sockaddr_in addr; /* 1 */ |
| 40 | char hostname[256]; /* 1 */ | 40 | char hostname[NFS_MAXNAMLEN + 1]; /* 1 */ |
| 41 | int namlen; /* 2 */ | 41 | int namlen; /* 2 */ |
| 42 | unsigned int bsize; /* 3 */ | 42 | unsigned int bsize; /* 3 */ |
| 43 | struct nfs3_fh root; /* 4 */ | 43 | struct nfs3_fh root; /* 4 */ |
| @@ -62,6 +62,7 @@ struct nfs_mount_data { | |||
| 62 | #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ | 62 | #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ |
| 63 | #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ | 63 | #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ |
| 64 | #define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */ | 64 | #define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */ |
| 65 | #define NFS_MOUNT_UNSHARED 0x8000 /* 5 */ | ||
| 65 | #define NFS_MOUNT_FLAGMASK 0xFFFF | 66 | #define NFS_MOUNT_FLAGMASK 0xFFFF |
| 66 | 67 | ||
| 67 | #endif | 68 | #endif |
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index bd193af80162..78e60798d10e 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
| @@ -16,12 +16,13 @@ | |||
| 16 | #include <linux/sunrpc/auth.h> | 16 | #include <linux/sunrpc/auth.h> |
| 17 | #include <linux/nfs_xdr.h> | 17 | #include <linux/nfs_xdr.h> |
| 18 | 18 | ||
| 19 | #include <asm/atomic.h> | 19 | #include <linux/kref.h> |
| 20 | 20 | ||
| 21 | /* | 21 | /* |
| 22 | * Valid flags for the radix tree | 22 | * Valid flags for the radix tree |
| 23 | */ | 23 | */ |
| 24 | #define NFS_PAGE_TAG_WRITEBACK 0 | 24 | #define NFS_PAGE_TAG_LOCKED 0 |
| 25 | #define NFS_PAGE_TAG_COMMIT 1 | ||
| 25 | 26 | ||
| 26 | /* | 27 | /* |
| 27 | * Valid flags for a dirty buffer | 28 | * Valid flags for a dirty buffer |
| @@ -33,8 +34,7 @@ | |||
| 33 | 34 | ||
| 34 | struct nfs_inode; | 35 | struct nfs_inode; |
| 35 | struct nfs_page { | 36 | struct nfs_page { |
| 36 | struct list_head wb_list, /* Defines state of page: */ | 37 | struct list_head wb_list; /* Defines state of page: */ |
| 37 | *wb_list_head; /* read/write/commit */ | ||
| 38 | struct page *wb_page; /* page to read in/write out */ | 38 | struct page *wb_page; /* page to read in/write out */ |
| 39 | struct nfs_open_context *wb_context; /* File state context info */ | 39 | struct nfs_open_context *wb_context; /* File state context info */ |
| 40 | atomic_t wb_complete; /* i/os we're waiting for */ | 40 | atomic_t wb_complete; /* i/os we're waiting for */ |
| @@ -42,7 +42,7 @@ struct nfs_page { | |||
| 42 | unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ | 42 | unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ |
| 43 | wb_pgbase, /* Start of page data */ | 43 | wb_pgbase, /* Start of page data */ |
| 44 | wb_bytes; /* Length of request */ | 44 | wb_bytes; /* Length of request */ |
| 45 | atomic_t wb_count; /* reference count */ | 45 | struct kref wb_kref; /* reference count */ |
| 46 | unsigned long wb_flags; | 46 | unsigned long wb_flags; |
| 47 | struct nfs_writeverf wb_verf; /* Commit cookie */ | 47 | struct nfs_writeverf wb_verf; /* Commit cookie */ |
| 48 | }; | 48 | }; |
| @@ -71,8 +71,8 @@ extern void nfs_clear_request(struct nfs_page *req); | |||
| 71 | extern void nfs_release_request(struct nfs_page *req); | 71 | extern void nfs_release_request(struct nfs_page *req); |
| 72 | 72 | ||
| 73 | 73 | ||
| 74 | extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst, | 74 | extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *dst, |
| 75 | pgoff_t idx_start, unsigned int npages); | 75 | pgoff_t idx_start, unsigned int npages, int tag); |
| 76 | extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, | 76 | extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, |
| 77 | struct inode *inode, | 77 | struct inode *inode, |
| 78 | int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), | 78 | int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), |
| @@ -84,12 +84,11 @@ extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); | |||
| 84 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); | 84 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); |
| 85 | extern int nfs_wait_on_request(struct nfs_page *); | 85 | extern int nfs_wait_on_request(struct nfs_page *); |
| 86 | extern void nfs_unlock_request(struct nfs_page *req); | 86 | extern void nfs_unlock_request(struct nfs_page *req); |
| 87 | extern int nfs_set_page_writeback_locked(struct nfs_page *req); | 87 | extern void nfs_clear_page_tag_locked(struct nfs_page *req); |
| 88 | extern void nfs_clear_page_writeback(struct nfs_page *req); | ||
| 89 | 88 | ||
| 90 | 89 | ||
| 91 | /* | 90 | /* |
| 92 | * Lock the page of an asynchronous request without incrementing the wb_count | 91 | * Lock the page of an asynchronous request without getting a new reference |
| 93 | */ | 92 | */ |
| 94 | static inline int | 93 | static inline int |
| 95 | nfs_lock_request_dontget(struct nfs_page *req) | 94 | nfs_lock_request_dontget(struct nfs_page *req) |
| @@ -98,14 +97,14 @@ nfs_lock_request_dontget(struct nfs_page *req) | |||
| 98 | } | 97 | } |
| 99 | 98 | ||
| 100 | /* | 99 | /* |
| 101 | * Lock the page of an asynchronous request | 100 | * Lock the page of an asynchronous request and take a reference |
| 102 | */ | 101 | */ |
| 103 | static inline int | 102 | static inline int |
| 104 | nfs_lock_request(struct nfs_page *req) | 103 | nfs_lock_request(struct nfs_page *req) |
| 105 | { | 104 | { |
| 106 | if (test_and_set_bit(PG_BUSY, &req->wb_flags)) | 105 | if (test_and_set_bit(PG_BUSY, &req->wb_flags)) |
| 107 | return 0; | 106 | return 0; |
| 108 | atomic_inc(&req->wb_count); | 107 | kref_get(&req->wb_kref); |
| 109 | return 1; | 108 | return 1; |
| 110 | } | 109 | } |
| 111 | 110 | ||
| @@ -118,7 +117,6 @@ static inline void | |||
| 118 | nfs_list_add_request(struct nfs_page *req, struct list_head *head) | 117 | nfs_list_add_request(struct nfs_page *req, struct list_head *head) |
| 119 | { | 118 | { |
| 120 | list_add_tail(&req->wb_list, head); | 119 | list_add_tail(&req->wb_list, head); |
| 121 | req->wb_list_head = head; | ||
| 122 | } | 120 | } |
| 123 | 121 | ||
| 124 | 122 | ||
| @@ -132,7 +130,6 @@ nfs_list_remove_request(struct nfs_page *req) | |||
| 132 | if (list_empty(&req->wb_list)) | 130 | if (list_empty(&req->wb_list)) |
| 133 | return; | 131 | return; |
| 134 | list_del_init(&req->wb_list); | 132 | list_del_init(&req->wb_list); |
| 135 | req->wb_list_head = NULL; | ||
| 136 | } | 133 | } |
| 137 | 134 | ||
| 138 | static inline struct nfs_page * | 135 | static inline struct nfs_page * |
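Dropping wb_list_head and the old WRITEBACK tag in favour of NFS_PAGE_TAG_LOCKED plus the new NFS_PAGE_TAG_COMMIT lets the commit code find candidate requests with a tagged radix-tree lookup rather than a private per-inode list. A sketch of tagging a request for commit; the helper is hypothetical and assumes inode->i_lock protects the tree, as in the write.c changes above.

#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/radix-tree.h>

static void sketch_mark_for_commit(struct inode *inode, pgoff_t index)
{
    struct nfs_inode *nfsi = NFS_I(inode);

    spin_lock(&inode->i_lock);
    radix_tree_tag_set(&nfsi->nfs_page_tree, index, NFS_PAGE_TAG_COMMIT);
    nfsi->ncommit++;
    spin_unlock(&inode->i_lock);
}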
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 10c26ed0db71..38d77681cf27 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -119,7 +119,7 @@ struct nfs_openargs { | |||
| 119 | struct nfs_seqid * seqid; | 119 | struct nfs_seqid * seqid; |
| 120 | int open_flags; | 120 | int open_flags; |
| 121 | __u64 clientid; | 121 | __u64 clientid; |
| 122 | __u32 id; | 122 | __u64 id; |
| 123 | union { | 123 | union { |
| 124 | struct iattr * attrs; /* UNCHECKED, GUARDED */ | 124 | struct iattr * attrs; /* UNCHECKED, GUARDED */ |
| 125 | nfs4_verifier verifier; /* EXCLUSIVE */ | 125 | nfs4_verifier verifier; /* EXCLUSIVE */ |
| @@ -144,6 +144,7 @@ struct nfs_openres { | |||
| 144 | nfs4_stateid delegation; | 144 | nfs4_stateid delegation; |
| 145 | __u32 do_recall; | 145 | __u32 do_recall; |
| 146 | __u64 maxsize; | 146 | __u64 maxsize; |
| 147 | __u32 attrset[NFS4_BITMAP_SIZE]; | ||
| 147 | }; | 148 | }; |
| 148 | 149 | ||
| 149 | /* | 150 | /* |
| @@ -180,7 +181,7 @@ struct nfs_closeres { | |||
| 180 | * */ | 181 | * */ |
| 181 | struct nfs_lowner { | 182 | struct nfs_lowner { |
| 182 | __u64 clientid; | 183 | __u64 clientid; |
| 183 | u32 id; | 184 | __u64 id; |
| 184 | }; | 185 | }; |
| 185 | 186 | ||
| 186 | struct nfs_lock_args { | 187 | struct nfs_lock_args { |
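The open result gains an attrset[] bitmap (sized by the new NFS4_BITMAP_SIZE) recording which attributes the server actually applied, which matters after an EXCLUSIVE create where the verifier clobbers some requested attributes. A hypothetical check against a word-0 attribute bit; FATTR4_WORD0_SIZE is the standard NFSv4 bitmap constant from nfs4.h and is assumed here rather than shown in this patch.

#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>

static inline int sketch_server_set_size(const struct nfs_openres *res)
{
    return (res->attrset[0] & FATTR4_WORD0_SIZE) != 0;
}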
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 534cdc7be58d..7a69ca3bebaf 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/sunrpc/xdr.h> | 16 | #include <linux/sunrpc/xdr.h> |
| 17 | 17 | ||
| 18 | #include <asm/atomic.h> | 18 | #include <asm/atomic.h> |
| 19 | #include <linux/rcupdate.h> | ||
| 19 | 20 | ||
| 20 | /* size of the nodename buffer */ | 21 | /* size of the nodename buffer */ |
| 21 | #define UNX_MAXNODENAME 32 | 22 | #define UNX_MAXNODENAME 32 |
| @@ -30,22 +31,28 @@ struct auth_cred { | |||
| 30 | /* | 31 | /* |
| 31 | * Client user credentials | 32 | * Client user credentials |
| 32 | */ | 33 | */ |
| 34 | struct rpc_auth; | ||
| 35 | struct rpc_credops; | ||
| 33 | struct rpc_cred { | 36 | struct rpc_cred { |
| 34 | struct hlist_node cr_hash; /* hash chain */ | 37 | struct hlist_node cr_hash; /* hash chain */ |
| 35 | struct rpc_credops * cr_ops; | 38 | struct list_head cr_lru; /* lru garbage collection */ |
| 36 | unsigned long cr_expire; /* when to gc */ | 39 | struct rcu_head cr_rcu; |
| 37 | atomic_t cr_count; /* ref count */ | 40 | struct rpc_auth * cr_auth; |
| 38 | unsigned short cr_flags; /* various flags */ | 41 | const struct rpc_credops *cr_ops; |
| 39 | #ifdef RPC_DEBUG | 42 | #ifdef RPC_DEBUG |
| 40 | unsigned long cr_magic; /* 0x0f4aa4f0 */ | 43 | unsigned long cr_magic; /* 0x0f4aa4f0 */ |
| 41 | #endif | 44 | #endif |
| 45 | unsigned long cr_expire; /* when to gc */ | ||
| 46 | unsigned long cr_flags; /* various flags */ | ||
| 47 | atomic_t cr_count; /* ref count */ | ||
| 42 | 48 | ||
| 43 | uid_t cr_uid; | 49 | uid_t cr_uid; |
| 44 | 50 | ||
| 45 | /* per-flavor data */ | 51 | /* per-flavor data */ |
| 46 | }; | 52 | }; |
| 47 | #define RPCAUTH_CRED_NEW 0x0001 | 53 | #define RPCAUTH_CRED_NEW 0 |
| 48 | #define RPCAUTH_CRED_UPTODATE 0x0002 | 54 | #define RPCAUTH_CRED_UPTODATE 1 |
| 55 | #define RPCAUTH_CRED_HASHED 2 | ||
| 49 | 56 | ||
| 50 | #define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 | 57 | #define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 |
| 51 | 58 | ||
| @@ -56,10 +63,10 @@ struct rpc_cred { | |||
| 56 | #define RPC_CREDCACHE_MASK (RPC_CREDCACHE_NR - 1) | 63 | #define RPC_CREDCACHE_MASK (RPC_CREDCACHE_NR - 1) |
| 57 | struct rpc_cred_cache { | 64 | struct rpc_cred_cache { |
| 58 | struct hlist_head hashtable[RPC_CREDCACHE_NR]; | 65 | struct hlist_head hashtable[RPC_CREDCACHE_NR]; |
| 59 | unsigned long nextgc; /* next garbage collection */ | 66 | spinlock_t lock; |
| 60 | unsigned long expire; /* cache expiry interval */ | ||
| 61 | }; | 67 | }; |
| 62 | 68 | ||
| 69 | struct rpc_authops; | ||
| 63 | struct rpc_auth { | 70 | struct rpc_auth { |
| 64 | unsigned int au_cslack; /* call cred size estimate */ | 71 | unsigned int au_cslack; /* call cred size estimate */ |
| 65 | /* guess at number of u32's auth adds before | 72 | /* guess at number of u32's auth adds before |
| @@ -69,7 +76,7 @@ struct rpc_auth { | |||
| 69 | unsigned int au_verfsize; | 76 | unsigned int au_verfsize; |
| 70 | 77 | ||
| 71 | unsigned int au_flags; /* various flags */ | 78 | unsigned int au_flags; /* various flags */ |
| 72 | struct rpc_authops * au_ops; /* operations */ | 79 | const struct rpc_authops *au_ops; /* operations */ |
| 73 | rpc_authflavor_t au_flavor; /* pseudoflavor (note may | 80 | rpc_authflavor_t au_flavor; /* pseudoflavor (note may |
| 74 | * differ from the flavor in | 81 | * differ from the flavor in |
| 75 | * au_ops->au_flavor in gss | 82 | * au_ops->au_flavor in gss |
| @@ -115,17 +122,19 @@ struct rpc_credops { | |||
| 115 | void *, __be32 *, void *); | 122 | void *, __be32 *, void *); |
| 116 | }; | 123 | }; |
| 117 | 124 | ||
| 118 | extern struct rpc_authops authunix_ops; | 125 | extern const struct rpc_authops authunix_ops; |
| 119 | extern struct rpc_authops authnull_ops; | 126 | extern const struct rpc_authops authnull_ops; |
| 120 | #ifdef CONFIG_SUNRPC_SECURE | 127 | |
| 121 | extern struct rpc_authops authdes_ops; | 128 | void __init rpc_init_authunix(void); |
| 122 | #endif | 129 | void __init rpcauth_init_module(void); |
| 130 | void __exit rpcauth_remove_module(void); | ||
| 123 | 131 | ||
| 124 | int rpcauth_register(struct rpc_authops *); | 132 | int rpcauth_register(const struct rpc_authops *); |
| 125 | int rpcauth_unregister(struct rpc_authops *); | 133 | int rpcauth_unregister(const struct rpc_authops *); |
| 126 | struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *); | 134 | struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *); |
| 127 | void rpcauth_destroy(struct rpc_auth *); | 135 | void rpcauth_release(struct rpc_auth *); |
| 128 | struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); | 136 | struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); |
| 137 | void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); | ||
| 129 | struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); | 138 | struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); |
| 130 | struct rpc_cred * rpcauth_bindcred(struct rpc_task *); | 139 | struct rpc_cred * rpcauth_bindcred(struct rpc_task *); |
| 131 | void rpcauth_holdcred(struct rpc_task *); | 140 | void rpcauth_holdcred(struct rpc_task *); |
| @@ -138,8 +147,9 @@ int rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
| 138 | int rpcauth_refreshcred(struct rpc_task *); | 147 | int rpcauth_refreshcred(struct rpc_task *); |
| 139 | void rpcauth_invalcred(struct rpc_task *); | 148 | void rpcauth_invalcred(struct rpc_task *); |
| 140 | int rpcauth_uptodatecred(struct rpc_task *); | 149 | int rpcauth_uptodatecred(struct rpc_task *); |
| 141 | int rpcauth_init_credcache(struct rpc_auth *, unsigned long); | 150 | int rpcauth_init_credcache(struct rpc_auth *); |
| 142 | void rpcauth_free_credcache(struct rpc_auth *); | 151 | void rpcauth_destroy_credcache(struct rpc_auth *); |
| 152 | void rpcauth_clear_credcache(struct rpc_cred_cache *); | ||
| 143 | 153 | ||
| 144 | static inline | 154 | static inline |
| 145 | struct rpc_cred * get_rpccred(struct rpc_cred *cred) | 155 | struct rpc_cred * get_rpccred(struct rpc_cred *cred) |
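cr_flags becomes an unsigned long holding bit numbers (RPCAUTH_CRED_NEW/UPTODATE/HASHED) instead of a short of mask values, so credential state changes go through the atomic bitops, matching the test_bit()/set_bit() usage in the auth.c hunks further down. Hypothetical helpers showing the idiom:

#include <linux/bitops.h>
#include <linux/sunrpc/auth.h>

static inline void sketch_cred_mark_uptodate(struct rpc_cred *cred)
{
    set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
}

static inline int sketch_cred_is_new(struct rpc_cred *cred)
{
    return test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}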
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index 2db2fbf34947..67658e17a375 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h | |||
| @@ -75,6 +75,7 @@ struct gss_cl_ctx { | |||
| 75 | struct xdr_netobj gc_wire_ctx; | 75 | struct xdr_netobj gc_wire_ctx; |
| 76 | u32 gc_win; | 76 | u32 gc_win; |
| 77 | unsigned long gc_expiry; | 77 | unsigned long gc_expiry; |
| 78 | struct rcu_head gc_rcu; | ||
| 78 | }; | 79 | }; |
| 79 | 80 | ||
| 80 | struct gss_upcall_msg; | 81 | struct gss_upcall_msg; |
| @@ -85,11 +86,6 @@ struct gss_cred { | |||
| 85 | struct gss_upcall_msg *gc_upcall; | 86 | struct gss_upcall_msg *gc_upcall; |
| 86 | }; | 87 | }; |
| 87 | 88 | ||
| 88 | #define gc_uid gc_base.cr_uid | ||
| 89 | #define gc_count gc_base.cr_count | ||
| 90 | #define gc_flags gc_base.cr_flags | ||
| 91 | #define gc_expire gc_base.cr_expire | ||
| 92 | |||
| 93 | #endif /* __KERNEL__ */ | 89 | #endif /* __KERNEL__ */ |
| 94 | #endif /* _LINUX_SUNRPC_AUTH_GSS_H */ | 90 | #endif /* _LINUX_SUNRPC_AUTH_GSS_H */ |
| 95 | 91 | ||
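The new gc_rcu head in gss_cl_ctx allows the context to be freed only after an RCU grace period, so lookups that found it under rcu_read_lock() remain safe. A simplified sketch of the deferred-free idiom; the helpers are hypothetical and the real release also frees the wire context data.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sunrpc/auth_gss.h>

static void sketch_gss_ctx_free_rcu(struct rcu_head *head)
{
    struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);

    kfree(ctx);    /* simplified */
}

static void sketch_gss_ctx_put(struct gss_cl_ctx *ctx)
{
    call_rcu(&ctx->gc_rcu, sketch_gss_ctx_free_rcu);
}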
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 66611423c8ee..c0d9d14983b3 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
| @@ -24,8 +24,10 @@ struct rpc_inode; | |||
| 24 | * The high-level client handle | 24 | * The high-level client handle |
| 25 | */ | 25 | */ |
| 26 | struct rpc_clnt { | 26 | struct rpc_clnt { |
| 27 | atomic_t cl_count; /* Number of clones */ | 27 | struct kref cl_kref; /* Number of references */ |
| 28 | atomic_t cl_users; /* number of references */ | 28 | struct list_head cl_clients; /* Global list of clients */ |
| 29 | struct list_head cl_tasks; /* List of tasks */ | ||
| 30 | spinlock_t cl_lock; /* spinlock */ | ||
| 29 | struct rpc_xprt * cl_xprt; /* transport */ | 31 | struct rpc_xprt * cl_xprt; /* transport */ |
| 30 | struct rpc_procinfo * cl_procinfo; /* procedure info */ | 32 | struct rpc_procinfo * cl_procinfo; /* procedure info */ |
| 31 | u32 cl_prog, /* RPC program number */ | 33 | u32 cl_prog, /* RPC program number */ |
| @@ -41,9 +43,7 @@ struct rpc_clnt { | |||
| 41 | unsigned int cl_softrtry : 1,/* soft timeouts */ | 43 | unsigned int cl_softrtry : 1,/* soft timeouts */ |
| 42 | cl_intr : 1,/* interruptible */ | 44 | cl_intr : 1,/* interruptible */ |
| 43 | cl_discrtry : 1,/* disconnect before retry */ | 45 | cl_discrtry : 1,/* disconnect before retry */ |
| 44 | cl_autobind : 1,/* use getport() */ | 46 | cl_autobind : 1;/* use getport() */ |
| 45 | cl_oneshot : 1,/* dispose after use */ | ||
| 46 | cl_dead : 1;/* abandoned */ | ||
| 47 | 47 | ||
| 48 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ | 48 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ |
| 49 | 49 | ||
| @@ -98,6 +98,7 @@ struct rpc_create_args { | |||
| 98 | int protocol; | 98 | int protocol; |
| 99 | struct sockaddr *address; | 99 | struct sockaddr *address; |
| 100 | size_t addrsize; | 100 | size_t addrsize; |
| 101 | struct sockaddr *saddress; | ||
| 101 | struct rpc_timeout *timeout; | 102 | struct rpc_timeout *timeout; |
| 102 | char *servername; | 103 | char *servername; |
| 103 | struct rpc_program *program; | 104 | struct rpc_program *program; |
| @@ -110,20 +111,20 @@ struct rpc_create_args { | |||
| 110 | #define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) | 111 | #define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) |
| 111 | #define RPC_CLNT_CREATE_INTR (1UL << 1) | 112 | #define RPC_CLNT_CREATE_INTR (1UL << 1) |
| 112 | #define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) | 113 | #define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) |
| 113 | #define RPC_CLNT_CREATE_ONESHOT (1UL << 3) | 114 | #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) |
| 114 | #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 4) | 115 | #define RPC_CLNT_CREATE_NOPING (1UL << 4) |
| 115 | #define RPC_CLNT_CREATE_NOPING (1UL << 5) | 116 | #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) |
| 116 | #define RPC_CLNT_CREATE_DISCRTRY (1UL << 6) | ||
| 117 | 117 | ||
| 118 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); | 118 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); |
| 119 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, | 119 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, |
| 120 | struct rpc_program *, int); | 120 | struct rpc_program *, int); |
| 121 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); | 121 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); |
| 122 | int rpc_shutdown_client(struct rpc_clnt *); | 122 | void rpc_shutdown_client(struct rpc_clnt *); |
| 123 | int rpc_destroy_client(struct rpc_clnt *); | ||
| 124 | void rpc_release_client(struct rpc_clnt *); | 123 | void rpc_release_client(struct rpc_clnt *); |
| 124 | |||
| 125 | int rpcb_register(u32, u32, int, unsigned short, int *); | 125 | int rpcb_register(u32, u32, int, unsigned short, int *); |
| 126 | void rpcb_getport(struct rpc_task *); | 126 | int rpcb_getport_sync(struct sockaddr_in *, __u32, __u32, int); |
| 127 | void rpcb_getport_async(struct rpc_task *); | ||
| 127 | 128 | ||
| 128 | void rpc_call_setup(struct rpc_task *, struct rpc_message *, int); | 129 | void rpc_call_setup(struct rpc_task *, struct rpc_message *, int); |
| 129 | 130 | ||
| @@ -132,20 +133,16 @@ int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, | |||
| 132 | void *calldata); | 133 | void *calldata); |
| 133 | int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, | 134 | int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, |
| 134 | int flags); | 135 | int flags); |
| 136 | struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, | ||
| 137 | int flags); | ||
| 135 | void rpc_restart_call(struct rpc_task *); | 138 | void rpc_restart_call(struct rpc_task *); |
| 136 | void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset); | 139 | void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset); |
| 137 | void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset); | 140 | void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset); |
| 138 | void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); | 141 | void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); |
| 139 | size_t rpc_max_payload(struct rpc_clnt *); | 142 | size_t rpc_max_payload(struct rpc_clnt *); |
| 140 | void rpc_force_rebind(struct rpc_clnt *); | 143 | void rpc_force_rebind(struct rpc_clnt *); |
| 141 | int rpc_ping(struct rpc_clnt *clnt, int flags); | ||
| 142 | size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); | 144 | size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); |
| 143 | char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); | 145 | char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); |
| 144 | 146 | ||
| 145 | /* | ||
| 146 | * Helper function for NFSroot support | ||
| 147 | */ | ||
| 148 | int rpcb_getport_external(struct sockaddr_in *, __u32, __u32, int); | ||
| 149 | |||
| 150 | #endif /* __KERNEL__ */ | 147 | #endif /* __KERNEL__ */ |
| 151 | #endif /* _LINUX_SUNRPC_CLNT_H */ | 148 | #endif /* _LINUX_SUNRPC_CLNT_H */ |
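rpc_create_args gains a saddress field so a client can be bound to a chosen source address, rpc_shutdown_client() now returns void, and the cl_oneshot/cl_dead flags give way to plain reference counting via cl_kref. A sketch of creating such a client; the helper and its parameters are illustrative, and the version/authflavor fields are assumed from the existing rpc_create_args layout rather than shown in this hunk.

#include <linux/in.h>
#include <linux/types.h>
#include <linux/sunrpc/clnt.h>

static struct rpc_clnt *sketch_create_bound_client(struct sockaddr *dst,
                                                   size_t dstlen,
                                                   struct sockaddr *src,
                                                   char *servername,
                                                   struct rpc_program *prog,
                                                   u32 version)
{
    struct rpc_create_args args = {
        .protocol   = IPPROTO_TCP,
        .address    = dst,
        .addrsize   = dstlen,
        .saddress   = src,          /* optional source address */
        .servername = servername,
        .program    = prog,
        .version    = version,
        .authflavor = RPC_AUTH_UNIX,
    };

    return rpc_create(&args);
}

Tearing the client down is now a plain rpc_shutdown_client(clnt); there is no return value to check and no need to mark the client one-shot or dead.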
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 5eca9e442051..bbac101ac372 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h | |||
| @@ -77,7 +77,7 @@ struct gss_api_mech { | |||
| 77 | struct module *gm_owner; | 77 | struct module *gm_owner; |
| 78 | struct xdr_netobj gm_oid; | 78 | struct xdr_netobj gm_oid; |
| 79 | char *gm_name; | 79 | char *gm_name; |
| 80 | struct gss_api_ops *gm_ops; | 80 | const struct gss_api_ops *gm_ops; |
| 81 | /* pseudoflavors supported by this mechanism: */ | 81 | /* pseudoflavors supported by this mechanism: */ |
| 82 | int gm_pf_num; | 82 | int gm_pf_num; |
| 83 | struct pf_desc * gm_pfs; | 83 | struct pf_desc * gm_pfs; |
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index ad293760f6eb..51b977a4ca20 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h | |||
| @@ -23,9 +23,11 @@ struct rpc_inode { | |||
| 23 | void *private; | 23 | void *private; |
| 24 | struct list_head pipe; | 24 | struct list_head pipe; |
| 25 | struct list_head in_upcall; | 25 | struct list_head in_upcall; |
| 26 | struct list_head in_downcall; | ||
| 26 | int pipelen; | 27 | int pipelen; |
| 27 | int nreaders; | 28 | int nreaders; |
| 28 | int nwriters; | 29 | int nwriters; |
| 30 | int nkern_readwriters; | ||
| 29 | wait_queue_head_t waitq; | 31 | wait_queue_head_t waitq; |
| 30 | #define RPC_PIPE_WAIT_FOR_OPEN 1 | 32 | #define RPC_PIPE_WAIT_FOR_OPEN 1 |
| 31 | int flags; | 33 | int flags; |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 2047fb202a13..8ea077db0099 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
| @@ -98,7 +98,6 @@ struct rpc_task { | |||
| 98 | unsigned short tk_pid; /* debugging aid */ | 98 | unsigned short tk_pid; /* debugging aid */ |
| 99 | #endif | 99 | #endif |
| 100 | }; | 100 | }; |
| 101 | #define tk_auth tk_client->cl_auth | ||
| 102 | #define tk_xprt tk_client->cl_xprt | 101 | #define tk_xprt tk_client->cl_xprt |
| 103 | 102 | ||
| 104 | /* support walking a list of tasks on a wait queue */ | 103 | /* support walking a list of tasks on a wait queue */ |
| @@ -110,11 +109,6 @@ struct rpc_task { | |||
| 110 | if (!list_empty(head) && \ | 109 | if (!list_empty(head) && \ |
| 111 | ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1)) | 110 | ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1)) |
| 112 | 111 | ||
| 113 | /* .. and walking list of all tasks */ | ||
| 114 | #define alltask_for_each(task, pos, head) \ | ||
| 115 | list_for_each(pos, head) \ | ||
| 116 | if ((task=list_entry(pos, struct rpc_task, tk_task)),1) | ||
| 117 | |||
| 118 | typedef void (*rpc_action)(struct rpc_task *); | 112 | typedef void (*rpc_action)(struct rpc_task *); |
| 119 | 113 | ||
| 120 | struct rpc_call_ops { | 114 | struct rpc_call_ops { |
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index e21dd93ac4b7..a53e0fa855d2 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h | |||
| @@ -59,6 +59,7 @@ struct svc_sock { | |||
| 59 | /* cache of various info for TCP sockets */ | 59 | /* cache of various info for TCP sockets */ |
| 60 | void *sk_info_authunix; | 60 | void *sk_info_authunix; |
| 61 | 61 | ||
| 62 | struct sockaddr_storage sk_local; /* local address */ | ||
| 62 | struct sockaddr_storage sk_remote; /* remote peer's address */ | 63 | struct sockaddr_storage sk_remote; /* remote peer's address */ |
| 63 | int sk_remotelen; /* length of address */ | 64 | int sk_remotelen; /* length of address */ |
| 64 | }; | 65 | }; |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 34f7590506fa..d11cedd14f0f 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | #include <linux/sunrpc/xdr.h> | 17 | #include <linux/sunrpc/xdr.h> |
| 18 | #include <linux/sunrpc/msg_prot.h> | 18 | #include <linux/sunrpc/msg_prot.h> |
| 19 | 19 | ||
| 20 | #ifdef __KERNEL__ | ||
| 21 | |||
| 20 | extern unsigned int xprt_udp_slot_table_entries; | 22 | extern unsigned int xprt_udp_slot_table_entries; |
| 21 | extern unsigned int xprt_tcp_slot_table_entries; | 23 | extern unsigned int xprt_tcp_slot_table_entries; |
| 22 | 24 | ||
| @@ -194,7 +196,13 @@ struct rpc_xprt { | |||
| 194 | char * address_strings[RPC_DISPLAY_MAX]; | 196 | char * address_strings[RPC_DISPLAY_MAX]; |
| 195 | }; | 197 | }; |
| 196 | 198 | ||
| 197 | #ifdef __KERNEL__ | 199 | struct rpc_xprtsock_create { |
| 200 | int proto; /* IPPROTO_UDP or IPPROTO_TCP */ | ||
| 201 | struct sockaddr * srcaddr; /* optional local address */ | ||
| 202 | struct sockaddr * dstaddr; /* remote peer address */ | ||
| 203 | size_t addrlen; | ||
| 204 | struct rpc_timeout * timeout; /* optional timeout parameters */ | ||
| 205 | }; | ||
| 198 | 206 | ||
| 199 | /* | 207 | /* |
| 200 | * Transport operations used by ULPs | 208 | * Transport operations used by ULPs |
| @@ -204,7 +212,7 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long | |||
| 204 | /* | 212 | /* |
| 205 | * Generic internal transport functions | 213 | * Generic internal transport functions |
| 206 | */ | 214 | */ |
| 207 | struct rpc_xprt * xprt_create_transport(int proto, struct sockaddr *addr, size_t size, struct rpc_timeout *toparms); | 215 | struct rpc_xprt * xprt_create_transport(struct rpc_xprtsock_create *args); |
| 208 | void xprt_connect(struct rpc_task *task); | 216 | void xprt_connect(struct rpc_task *task); |
| 209 | void xprt_reserve(struct rpc_task *task); | 217 | void xprt_reserve(struct rpc_task *task); |
| 210 | int xprt_reserve_xprt(struct rpc_task *task); | 218 | int xprt_reserve_xprt(struct rpc_task *task); |
| @@ -242,8 +250,8 @@ void xprt_disconnect(struct rpc_xprt *xprt); | |||
| 242 | /* | 250 | /* |
| 243 | * Socket transport setup operations | 251 | * Socket transport setup operations |
| 244 | */ | 252 | */ |
| 245 | struct rpc_xprt * xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); | 253 | struct rpc_xprt * xs_setup_udp(struct rpc_xprtsock_create *args); |
| 246 | struct rpc_xprt * xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); | 254 | struct rpc_xprt * xs_setup_tcp(struct rpc_xprtsock_create *args); |
| 247 | int init_socket_xprt(void); | 255 | int init_socket_xprt(void); |
| 248 | void cleanup_socket_xprt(void); | 256 | void cleanup_socket_xprt(void); |
| 249 | 257 | ||
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 9527f2bb1744..aa55d0a03e6f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
| @@ -13,17 +13,22 @@ | |||
| 13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
| 14 | #include <linux/sunrpc/clnt.h> | 14 | #include <linux/sunrpc/clnt.h> |
| 15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
| 16 | #include <linux/smp_lock.h> | ||
| 16 | 17 | ||
| 17 | #ifdef RPC_DEBUG | 18 | #ifdef RPC_DEBUG |
| 18 | # define RPCDBG_FACILITY RPCDBG_AUTH | 19 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 19 | #endif | 20 | #endif |
| 20 | 21 | ||
| 21 | static struct rpc_authops * auth_flavors[RPC_AUTH_MAXFLAVOR] = { | 22 | static DEFINE_SPINLOCK(rpc_authflavor_lock); |
| 23 | static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { | ||
| 22 | &authnull_ops, /* AUTH_NULL */ | 24 | &authnull_ops, /* AUTH_NULL */ |
| 23 | &authunix_ops, /* AUTH_UNIX */ | 25 | &authunix_ops, /* AUTH_UNIX */ |
| 24 | NULL, /* others can be loadable modules */ | 26 | NULL, /* others can be loadable modules */ |
| 25 | }; | 27 | }; |
| 26 | 28 | ||
| 29 | static LIST_HEAD(cred_unused); | ||
| 30 | static unsigned long number_cred_unused; | ||
| 31 | |||
| 27 | static u32 | 32 | static u32 |
| 28 | pseudoflavor_to_flavor(u32 flavor) { | 33 | pseudoflavor_to_flavor(u32 flavor) { |
| 29 | if (flavor >= RPC_AUTH_MAXFLAVOR) | 34 | if (flavor >= RPC_AUTH_MAXFLAVOR) |
| @@ -32,55 +37,67 @@ pseudoflavor_to_flavor(u32 flavor) { | |||
| 32 | } | 37 | } |
| 33 | 38 | ||
| 34 | int | 39 | int |
| 35 | rpcauth_register(struct rpc_authops *ops) | 40 | rpcauth_register(const struct rpc_authops *ops) |
| 36 | { | 41 | { |
| 37 | rpc_authflavor_t flavor; | 42 | rpc_authflavor_t flavor; |
| 43 | int ret = -EPERM; | ||
| 38 | 44 | ||
| 39 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) | 45 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) |
| 40 | return -EINVAL; | 46 | return -EINVAL; |
| 41 | if (auth_flavors[flavor] != NULL) | 47 | spin_lock(&rpc_authflavor_lock); |
| 42 | return -EPERM; /* what else? */ | 48 | if (auth_flavors[flavor] == NULL) { |
| 43 | auth_flavors[flavor] = ops; | 49 | auth_flavors[flavor] = ops; |
| 44 | return 0; | 50 | ret = 0; |
| 51 | } | ||
| 52 | spin_unlock(&rpc_authflavor_lock); | ||
| 53 | return ret; | ||
| 45 | } | 54 | } |
| 46 | 55 | ||
| 47 | int | 56 | int |
| 48 | rpcauth_unregister(struct rpc_authops *ops) | 57 | rpcauth_unregister(const struct rpc_authops *ops) |
| 49 | { | 58 | { |
| 50 | rpc_authflavor_t flavor; | 59 | rpc_authflavor_t flavor; |
| 60 | int ret = -EPERM; | ||
| 51 | 61 | ||
| 52 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) | 62 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) |
| 53 | return -EINVAL; | 63 | return -EINVAL; |
| 54 | if (auth_flavors[flavor] != ops) | 64 | spin_lock(&rpc_authflavor_lock); |
| 55 | return -EPERM; /* what else? */ | 65 | if (auth_flavors[flavor] == ops) { |
| 56 | auth_flavors[flavor] = NULL; | 66 | auth_flavors[flavor] = NULL; |
| 57 | return 0; | 67 | ret = 0; |
| 68 | } | ||
| 69 | spin_unlock(&rpc_authflavor_lock); | ||
| 70 | return ret; | ||
| 58 | } | 71 | } |
| 59 | 72 | ||
| 60 | struct rpc_auth * | 73 | struct rpc_auth * |
| 61 | rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) | 74 | rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) |
| 62 | { | 75 | { |
| 63 | struct rpc_auth *auth; | 76 | struct rpc_auth *auth; |
| 64 | struct rpc_authops *ops; | 77 | const struct rpc_authops *ops; |
| 65 | u32 flavor = pseudoflavor_to_flavor(pseudoflavor); | 78 | u32 flavor = pseudoflavor_to_flavor(pseudoflavor); |
| 66 | 79 | ||
| 67 | auth = ERR_PTR(-EINVAL); | 80 | auth = ERR_PTR(-EINVAL); |
| 68 | if (flavor >= RPC_AUTH_MAXFLAVOR) | 81 | if (flavor >= RPC_AUTH_MAXFLAVOR) |
| 69 | goto out; | 82 | goto out; |
| 70 | 83 | ||
| 71 | /* FIXME - auth_flavors[] really needs an rw lock, | ||
| 72 | * and module refcounting. */ | ||
| 73 | #ifdef CONFIG_KMOD | 84 | #ifdef CONFIG_KMOD |
| 74 | if ((ops = auth_flavors[flavor]) == NULL) | 85 | if ((ops = auth_flavors[flavor]) == NULL) |
| 75 | request_module("rpc-auth-%u", flavor); | 86 | request_module("rpc-auth-%u", flavor); |
| 76 | #endif | 87 | #endif |
| 77 | if ((ops = auth_flavors[flavor]) == NULL) | 88 | spin_lock(&rpc_authflavor_lock); |
| 89 | ops = auth_flavors[flavor]; | ||
| 90 | if (ops == NULL || !try_module_get(ops->owner)) { | ||
| 91 | spin_unlock(&rpc_authflavor_lock); | ||
| 78 | goto out; | 92 | goto out; |
| 93 | } | ||
| 94 | spin_unlock(&rpc_authflavor_lock); | ||
| 79 | auth = ops->create(clnt, pseudoflavor); | 95 | auth = ops->create(clnt, pseudoflavor); |
| 96 | module_put(ops->owner); | ||
| 80 | if (IS_ERR(auth)) | 97 | if (IS_ERR(auth)) |
| 81 | return auth; | 98 | return auth; |
| 82 | if (clnt->cl_auth) | 99 | if (clnt->cl_auth) |
| 83 | rpcauth_destroy(clnt->cl_auth); | 100 | rpcauth_release(clnt->cl_auth); |
| 84 | clnt->cl_auth = auth; | 101 | clnt->cl_auth = auth; |
| 85 | 102 | ||
| 86 | out: | 103 | out: |
| @@ -88,7 +105,7 @@ out: | |||
| 88 | } | 105 | } |
| 89 | 106 | ||
| 90 | void | 107 | void |
| 91 | rpcauth_destroy(struct rpc_auth *auth) | 108 | rpcauth_release(struct rpc_auth *auth) |
| 92 | { | 109 | { |
| 93 | if (!atomic_dec_and_test(&auth->au_count)) | 110 | if (!atomic_dec_and_test(&auth->au_count)) |
| 94 | return; | 111 | return; |
| @@ -97,11 +114,31 @@ rpcauth_destroy(struct rpc_auth *auth) | |||
| 97 | 114 | ||
| 98 | static DEFINE_SPINLOCK(rpc_credcache_lock); | 115 | static DEFINE_SPINLOCK(rpc_credcache_lock); |
| 99 | 116 | ||
| 117 | static void | ||
| 118 | rpcauth_unhash_cred_locked(struct rpc_cred *cred) | ||
| 119 | { | ||
| 120 | hlist_del_rcu(&cred->cr_hash); | ||
| 121 | smp_mb__before_clear_bit(); | ||
| 122 | clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); | ||
| 123 | } | ||
| 124 | |||
| 125 | static void | ||
| 126 | rpcauth_unhash_cred(struct rpc_cred *cred) | ||
| 127 | { | ||
| 128 | spinlock_t *cache_lock; | ||
| 129 | |||
| 130 | cache_lock = &cred->cr_auth->au_credcache->lock; | ||
| 131 | spin_lock(cache_lock); | ||
| 132 | if (atomic_read(&cred->cr_count) == 0) | ||
| 133 | rpcauth_unhash_cred_locked(cred); | ||
| 134 | spin_unlock(cache_lock); | ||
| 135 | } | ||
| 136 | |||
| 100 | /* | 137 | /* |
| 101 | * Initialize RPC credential cache | 138 | * Initialize RPC credential cache |
| 102 | */ | 139 | */ |
| 103 | int | 140 | int |
| 104 | rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) | 141 | rpcauth_init_credcache(struct rpc_auth *auth) |
| 105 | { | 142 | { |
| 106 | struct rpc_cred_cache *new; | 143 | struct rpc_cred_cache *new; |
| 107 | int i; | 144 | int i; |
| @@ -111,8 +148,7 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) | |||
| 111 | return -ENOMEM; | 148 | return -ENOMEM; |
| 112 | for (i = 0; i < RPC_CREDCACHE_NR; i++) | 149 | for (i = 0; i < RPC_CREDCACHE_NR; i++) |
| 113 | INIT_HLIST_HEAD(&new->hashtable[i]); | 150 | INIT_HLIST_HEAD(&new->hashtable[i]); |
| 114 | new->expire = expire; | 151 | spin_lock_init(&new->lock); |
| 115 | new->nextgc = jiffies + (expire >> 1); | ||
| 116 | auth->au_credcache = new; | 152 | auth->au_credcache = new; |
| 117 | return 0; | 153 | return 0; |
| 118 | } | 154 | } |
| @@ -121,13 +157,13 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) | |||
| 121 | * Destroy a list of credentials | 157 | * Destroy a list of credentials |
| 122 | */ | 158 | */ |
| 123 | static inline | 159 | static inline |
| 124 | void rpcauth_destroy_credlist(struct hlist_head *head) | 160 | void rpcauth_destroy_credlist(struct list_head *head) |
| 125 | { | 161 | { |
| 126 | struct rpc_cred *cred; | 162 | struct rpc_cred *cred; |
| 127 | 163 | ||
| 128 | while (!hlist_empty(head)) { | 164 | while (!list_empty(head)) { |
| 129 | cred = hlist_entry(head->first, struct rpc_cred, cr_hash); | 165 | cred = list_entry(head->next, struct rpc_cred, cr_lru); |
| 130 | hlist_del_init(&cred->cr_hash); | 166 | list_del_init(&cred->cr_lru); |
| 131 | put_rpccred(cred); | 167 | put_rpccred(cred); |
| 132 | } | 168 | } |
| 133 | } | 169 | } |
| @@ -137,58 +173,95 @@ void rpcauth_destroy_credlist(struct hlist_head *head) | |||
| 137 | * that are not referenced. | 173 | * that are not referenced. |
| 138 | */ | 174 | */ |
| 139 | void | 175 | void |
| 140 | rpcauth_free_credcache(struct rpc_auth *auth) | 176 | rpcauth_clear_credcache(struct rpc_cred_cache *cache) |
| 141 | { | 177 | { |
| 142 | struct rpc_cred_cache *cache = auth->au_credcache; | 178 | LIST_HEAD(free); |
| 143 | HLIST_HEAD(free); | 179 | struct hlist_head *head; |
| 144 | struct hlist_node *pos, *next; | ||
| 145 | struct rpc_cred *cred; | 180 | struct rpc_cred *cred; |
| 146 | int i; | 181 | int i; |
| 147 | 182 | ||
| 148 | spin_lock(&rpc_credcache_lock); | 183 | spin_lock(&rpc_credcache_lock); |
| 184 | spin_lock(&cache->lock); | ||
| 149 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { | 185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { |
| 150 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { | 186 | head = &cache->hashtable[i]; |
| 151 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); | 187 | while (!hlist_empty(head)) { |
| 152 | __hlist_del(&cred->cr_hash); | 188 | cred = hlist_entry(head->first, struct rpc_cred, cr_hash); |
| 153 | hlist_add_head(&cred->cr_hash, &free); | 189 | get_rpccred(cred); |
| 190 | if (!list_empty(&cred->cr_lru)) { | ||
| 191 | list_del(&cred->cr_lru); | ||
| 192 | number_cred_unused--; | ||
| 193 | } | ||
| 194 | list_add_tail(&cred->cr_lru, &free); | ||
| 195 | rpcauth_unhash_cred_locked(cred); | ||
| 154 | } | 196 | } |
| 155 | } | 197 | } |
| 198 | spin_unlock(&cache->lock); | ||
| 156 | spin_unlock(&rpc_credcache_lock); | 199 | spin_unlock(&rpc_credcache_lock); |
| 157 | rpcauth_destroy_credlist(&free); | 200 | rpcauth_destroy_credlist(&free); |
| 158 | } | 201 | } |
| 159 | 202 | ||
| 160 | static void | 203 | /* |
| 161 | rpcauth_prune_expired(struct rpc_auth *auth, struct rpc_cred *cred, struct hlist_head *free) | 204 | * Destroy the RPC credential cache |
| 205 | */ | ||
| 206 | void | ||
| 207 | rpcauth_destroy_credcache(struct rpc_auth *auth) | ||
| 162 | { | 208 | { |
| 163 | if (atomic_read(&cred->cr_count) != 1) | 209 | struct rpc_cred_cache *cache = auth->au_credcache; |
| 164 | return; | 210 | |
| 165 | if (time_after(jiffies, cred->cr_expire + auth->au_credcache->expire)) | 211 | if (cache) { |
| 166 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 212 | auth->au_credcache = NULL; |
| 167 | if (!(cred->cr_flags & RPCAUTH_CRED_UPTODATE)) { | 213 | rpcauth_clear_credcache(cache); |
| 168 | __hlist_del(&cred->cr_hash); | 214 | kfree(cache); |
| 169 | hlist_add_head(&cred->cr_hash, free); | ||
| 170 | } | 215 | } |
| 171 | } | 216 | } |
| 172 | 217 | ||
| 173 | /* | 218 | /* |
| 174 | * Remove stale credentials. Avoid sleeping inside the loop. | 219 | * Remove stale credentials. Avoid sleeping inside the loop. |
| 175 | */ | 220 | */ |
| 176 | static void | 221 | static int |
| 177 | rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) | 222 | rpcauth_prune_expired(struct list_head *free, int nr_to_scan) |
| 178 | { | 223 | { |
| 179 | struct rpc_cred_cache *cache = auth->au_credcache; | 224 | spinlock_t *cache_lock; |
| 180 | struct hlist_node *pos, *next; | 225 | struct rpc_cred *cred; |
| 181 | struct rpc_cred *cred; | ||
| 182 | int i; | ||
| 183 | 226 | ||
| 184 | dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); | 227 | while (!list_empty(&cred_unused)) { |
| 185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { | 228 | cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru); |
| 186 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { | 229 | list_del_init(&cred->cr_lru); |
| 187 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); | 230 | number_cred_unused--; |
| 188 | rpcauth_prune_expired(auth, cred, free); | 231 | if (atomic_read(&cred->cr_count) != 0) |
| 232 | continue; | ||
| 233 | cache_lock = &cred->cr_auth->au_credcache->lock; | ||
| 234 | spin_lock(cache_lock); | ||
| 235 | if (atomic_read(&cred->cr_count) == 0) { | ||
| 236 | get_rpccred(cred); | ||
| 237 | list_add_tail(&cred->cr_lru, free); | ||
| 238 | rpcauth_unhash_cred_locked(cred); | ||
| 239 | nr_to_scan--; | ||
| 189 | } | 240 | } |
| 241 | spin_unlock(cache_lock); | ||
| 242 | if (nr_to_scan == 0) | ||
| 243 | break; | ||
| 190 | } | 244 | } |
| 191 | cache->nextgc = jiffies + cache->expire; | 245 | return nr_to_scan; |
| 246 | } | ||
| 247 | |||
| 248 | /* | ||
| 249 | * Run memory cache shrinker. | ||
| 250 | */ | ||
| 251 | static int | ||
| 252 | rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) | ||
| 253 | { | ||
| 254 | LIST_HEAD(free); | ||
| 255 | int res; | ||
| 256 | |||
| 257 | if (list_empty(&cred_unused)) | ||
| 258 | return 0; | ||
| 259 | spin_lock(&rpc_credcache_lock); | ||
| 260 | nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan); | ||
| 261 | res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure; | ||
| 262 | spin_unlock(&rpc_credcache_lock); | ||
| 263 | rpcauth_destroy_credlist(&free); | ||
| 264 | return res; | ||
| 192 | } | 265 | } |
| 193 | 266 | ||
| 194 | /* | 267 | /* |
| @@ -198,53 +271,56 @@ struct rpc_cred * | |||
| 198 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | 271 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, |
| 199 | int flags) | 272 | int flags) |
| 200 | { | 273 | { |
| 274 | LIST_HEAD(free); | ||
| 201 | struct rpc_cred_cache *cache = auth->au_credcache; | 275 | struct rpc_cred_cache *cache = auth->au_credcache; |
| 202 | HLIST_HEAD(free); | 276 | struct hlist_node *pos; |
| 203 | struct hlist_node *pos, *next; | 277 | struct rpc_cred *cred = NULL, |
| 204 | struct rpc_cred *new = NULL, | 278 | *entry, *new; |
| 205 | *cred = NULL; | ||
| 206 | int nr = 0; | 279 | int nr = 0; |
| 207 | 280 | ||
| 208 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) | 281 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) |
| 209 | nr = acred->uid & RPC_CREDCACHE_MASK; | 282 | nr = acred->uid & RPC_CREDCACHE_MASK; |
| 210 | retry: | 283 | |
| 211 | spin_lock(&rpc_credcache_lock); | 284 | rcu_read_lock(); |
| 212 | if (time_before(cache->nextgc, jiffies)) | 285 | hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { |
| 213 | rpcauth_gc_credcache(auth, &free); | 286 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
| 214 | hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { | 287 | continue; |
| 215 | struct rpc_cred *entry; | 288 | spin_lock(&cache->lock); |
| 216 | entry = hlist_entry(pos, struct rpc_cred, cr_hash); | 289 | if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) { |
| 217 | if (entry->cr_ops->crmatch(acred, entry, flags)) { | 290 | spin_unlock(&cache->lock); |
| 218 | hlist_del(&entry->cr_hash); | 291 | continue; |
| 219 | cred = entry; | ||
| 220 | break; | ||
| 221 | } | 292 | } |
| 222 | rpcauth_prune_expired(auth, entry, &free); | 293 | cred = get_rpccred(entry); |
| 294 | spin_unlock(&cache->lock); | ||
| 295 | break; | ||
| 223 | } | 296 | } |
| 224 | if (new) { | 297 | rcu_read_unlock(); |
| 225 | if (cred) | ||
| 226 | hlist_add_head(&new->cr_hash, &free); | ||
| 227 | else | ||
| 228 | cred = new; | ||
| 229 | } | ||
| 230 | if (cred) { | ||
| 231 | hlist_add_head(&cred->cr_hash, &cache->hashtable[nr]); | ||
| 232 | get_rpccred(cred); | ||
| 233 | } | ||
| 234 | spin_unlock(&rpc_credcache_lock); | ||
| 235 | 298 | ||
| 236 | rpcauth_destroy_credlist(&free); | 299 | if (cred != NULL) |
| 300 | goto found; | ||
| 237 | 301 | ||
| 238 | if (!cred) { | 302 | new = auth->au_ops->crcreate(auth, acred, flags); |
| 239 | new = auth->au_ops->crcreate(auth, acred, flags); | 303 | if (IS_ERR(new)) { |
| 240 | if (!IS_ERR(new)) { | 304 | cred = new; |
| 241 | #ifdef RPC_DEBUG | 305 | goto out; |
| 242 | new->cr_magic = RPCAUTH_CRED_MAGIC; | 306 | } |
| 243 | #endif | 307 | |
| 244 | goto retry; | 308 | spin_lock(&cache->lock); |
| 245 | } else | 309 | hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) { |
| 246 | cred = new; | 310 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
| 247 | } else if ((cred->cr_flags & RPCAUTH_CRED_NEW) | 311 | continue; |
| 312 | cred = get_rpccred(entry); | ||
| 313 | break; | ||
| 314 | } | ||
| 315 | if (cred == NULL) { | ||
| 316 | cred = new; | ||
| 317 | set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); | ||
| 318 | hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]); | ||
| 319 | } else | ||
| 320 | list_add_tail(&new->cr_lru, &free); | ||
| 321 | spin_unlock(&cache->lock); | ||
| 322 | found: | ||
| 323 | if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) | ||
| 248 | && cred->cr_ops->cr_init != NULL | 324 | && cred->cr_ops->cr_init != NULL |
| 249 | && !(flags & RPCAUTH_LOOKUP_NEW)) { | 325 | && !(flags & RPCAUTH_LOOKUP_NEW)) { |
| 250 | int res = cred->cr_ops->cr_init(auth, cred); | 326 | int res = cred->cr_ops->cr_init(auth, cred); |
| @@ -253,8 +329,9 @@ retry: | |||
| 253 | cred = ERR_PTR(res); | 329 | cred = ERR_PTR(res); |
| 254 | } | 330 | } |
| 255 | } | 331 | } |
| 256 | 332 | rpcauth_destroy_credlist(&free); | |
| 257 | return (struct rpc_cred *) cred; | 333 | out: |
| 334 | return cred; | ||
| 258 | } | 335 | } |
| 259 | 336 | ||
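The rewritten rpcauth_lookup_credcache() above is the standard lockless-lookup shape: walk the hash chain under rcu_read_lock(), take the per-cache spinlock only to pin a match, and if nothing was found, allocate a new entry and re-scan under the lock so a racing inserter wins cleanly. A condensed, self-contained sketch of that shape using the four-argument hlist_for_each_entry_rcu() of this tree; all demo_* names are illustrative, not from the patch:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_entry {
        struct hlist_node hash;
        unsigned int      key;
        atomic_t          count;
};

static HLIST_HEAD(demo_chain);
static DEFINE_SPINLOCK(demo_lock);

static struct demo_entry *demo_lookup(unsigned int key)
{
        struct demo_entry *entry, *found = NULL, *new;
        struct hlist_node *pos;

        /* Lockless pass: only the spinlock pins a candidate we matched. */
        rcu_read_lock();
        hlist_for_each_entry_rcu(entry, pos, &demo_chain, hash) {
                if (entry->key != key)
                        continue;
                spin_lock(&demo_lock);
                if (hlist_unhashed(&entry->hash)) {     /* raced with removal */
                        spin_unlock(&demo_lock);
                        continue;
                }
                atomic_inc(&entry->count);
                spin_unlock(&demo_lock);
                found = entry;
                break;
        }
        rcu_read_unlock();
        if (found != NULL)
                return found;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (new == NULL)
                return NULL;
        new->key = key;
        atomic_set(&new->count, 1);

        spin_lock(&demo_lock);
        /* Re-scan under the lock: another caller may have inserted this key. */
        hlist_for_each_entry(entry, pos, &demo_chain, hash) {
                if (entry->key != key)
                        continue;
                atomic_inc(&entry->count);
                found = entry;
                break;
        }
        if (found == NULL) {
                hlist_add_head_rcu(&new->hash, &demo_chain);
                found = new;
        }
        spin_unlock(&demo_lock);
        if (found != new)
                kfree(new);     /* lost the race; never published, safe to free */
        return found;
}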
| 260 | struct rpc_cred * | 337 | struct rpc_cred * |
| @@ -275,10 +352,27 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags) | |||
| 275 | return ret; | 352 | return ret; |
| 276 | } | 353 | } |
| 277 | 354 | ||
| 355 | void | ||
| 356 | rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, | ||
| 357 | struct rpc_auth *auth, const struct rpc_credops *ops) | ||
| 358 | { | ||
| 359 | INIT_HLIST_NODE(&cred->cr_hash); | ||
| 360 | INIT_LIST_HEAD(&cred->cr_lru); | ||
| 361 | atomic_set(&cred->cr_count, 1); | ||
| 362 | cred->cr_auth = auth; | ||
| 363 | cred->cr_ops = ops; | ||
| 364 | cred->cr_expire = jiffies; | ||
| 365 | #ifdef RPC_DEBUG | ||
| 366 | cred->cr_magic = RPCAUTH_CRED_MAGIC; | ||
| 367 | #endif | ||
| 368 | cred->cr_uid = acred->uid; | ||
| 369 | } | ||
| 370 | EXPORT_SYMBOL(rpcauth_init_cred); | ||
| 371 | |||
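The exported rpcauth_init_cred() above factors the common field setup out of the per-flavor ->crcreate() routines; the GSS code later in this patch calls it and then only fills in gc_service. A hedged sketch of what a minimal flavor-specific crcreate might look like after this change — the demo_* type and ops table are hypothetical, and a real ops table needs crdestroy/crmatch/crmarshal and friends:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sunrpc/auth.h>

struct demo_cred {
        struct rpc_cred dc_base;        /* common part first, for container_of() */
        unsigned int    dc_private;     /* flavor-specific state */
};

static const struct rpc_credops demo_credops = {
        .cr_name = "AUTH_DEMO",         /* real ops omitted in this sketch */
};

static struct rpc_cred *
demo_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
        struct demo_cred *cred;

        cred = kzalloc(sizeof(*cred), GFP_KERNEL);
        if (cred == NULL)
                return ERR_PTR(-ENOMEM);

        /* Sets hash/LRU links, refcount, cr_auth, cr_ops, cr_expire, cr_uid. */
        rpcauth_init_cred(&cred->dc_base, acred, auth, &demo_credops);

        cred->dc_private = 0;
        set_bit(RPCAUTH_CRED_UPTODATE, &cred->dc_base.cr_flags);
        return &cred->dc_base;
}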
| 278 | struct rpc_cred * | 372 | struct rpc_cred * |
| 279 | rpcauth_bindcred(struct rpc_task *task) | 373 | rpcauth_bindcred(struct rpc_task *task) |
| 280 | { | 374 | { |
| 281 | struct rpc_auth *auth = task->tk_auth; | 375 | struct rpc_auth *auth = task->tk_client->cl_auth; |
| 282 | struct auth_cred acred = { | 376 | struct auth_cred acred = { |
| 283 | .uid = current->fsuid, | 377 | .uid = current->fsuid, |
| 284 | .gid = current->fsgid, | 378 | .gid = current->fsgid, |
| @@ -288,7 +382,7 @@ rpcauth_bindcred(struct rpc_task *task) | |||
| 288 | int flags = 0; | 382 | int flags = 0; |
| 289 | 383 | ||
| 290 | dprintk("RPC: %5u looking up %s cred\n", | 384 | dprintk("RPC: %5u looking up %s cred\n", |
| 291 | task->tk_pid, task->tk_auth->au_ops->au_name); | 385 | task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); |
| 292 | get_group_info(acred.group_info); | 386 | get_group_info(acred.group_info); |
| 293 | if (task->tk_flags & RPC_TASK_ROOTCREDS) | 387 | if (task->tk_flags & RPC_TASK_ROOTCREDS) |
| 294 | flags |= RPCAUTH_LOOKUP_ROOTCREDS; | 388 | flags |= RPCAUTH_LOOKUP_ROOTCREDS; |
| @@ -304,19 +398,42 @@ rpcauth_bindcred(struct rpc_task *task) | |||
| 304 | void | 398 | void |
| 305 | rpcauth_holdcred(struct rpc_task *task) | 399 | rpcauth_holdcred(struct rpc_task *task) |
| 306 | { | 400 | { |
| 307 | dprintk("RPC: %5u holding %s cred %p\n", | 401 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 308 | task->tk_pid, task->tk_auth->au_ops->au_name, | 402 | if (cred != NULL) { |
| 309 | task->tk_msg.rpc_cred); | 403 | get_rpccred(cred); |
| 310 | if (task->tk_msg.rpc_cred) | 404 | dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, |
| 311 | get_rpccred(task->tk_msg.rpc_cred); | 405 | cred->cr_auth->au_ops->au_name, cred); |
| 406 | } | ||
| 312 | } | 407 | } |
| 313 | 408 | ||
| 314 | void | 409 | void |
| 315 | put_rpccred(struct rpc_cred *cred) | 410 | put_rpccred(struct rpc_cred *cred) |
| 316 | { | 411 | { |
| 317 | cred->cr_expire = jiffies; | 412 | /* Fast path for unhashed credentials */ |
| 413 | if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) | ||
| 414 | goto need_lock; | ||
| 415 | |||
| 318 | if (!atomic_dec_and_test(&cred->cr_count)) | 416 | if (!atomic_dec_and_test(&cred->cr_count)) |
| 319 | return; | 417 | return; |
| 418 | goto out_destroy; | ||
| 419 | need_lock: | ||
| 420 | if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) | ||
| 421 | return; | ||
| 422 | if (!list_empty(&cred->cr_lru)) { | ||
| 423 | number_cred_unused--; | ||
| 424 | list_del_init(&cred->cr_lru); | ||
| 425 | } | ||
| 426 | if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) | ||
| 427 | rpcauth_unhash_cred(cred); | ||
| 428 | else if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { | ||
| 429 | cred->cr_expire = jiffies; | ||
| 430 | list_add_tail(&cred->cr_lru, &cred_unused); | ||
| 431 | number_cred_unused++; | ||
| 432 | spin_unlock(&rpc_credcache_lock); | ||
| 433 | return; | ||
| 434 | } | ||
| 435 | spin_unlock(&rpc_credcache_lock); | ||
| 436 | out_destroy: | ||
| 320 | cred->cr_ops->crdestroy(cred); | 437 | cred->cr_ops->crdestroy(cred); |
| 321 | } | 438 | } |
| 322 | 439 | ||
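The new put_rpccred() above hinges on atomic_dec_and_lock(): rpc_credcache_lock is taken only when the reference count actually drops to zero, so the common put on a still-shared credential stays lock-free. The primitive's contract in isolation, with hypothetical demo_* names (the patch itself does not always free — it may park the cred on the cred_unused LRU instead):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
        atomic_t         refcount;
        struct list_head lru;
};

static DEFINE_SPINLOCK(demo_table_lock);

static void demo_put(struct demo_obj *obj)
{
        if (!atomic_dec_and_lock(&obj->refcount, &demo_table_lock))
                return;                 /* count still positive: lock never taken */
        /* Count reached zero and we now hold demo_table_lock. */
        list_del(&obj->lru);
        spin_unlock(&demo_table_lock);
        kfree(obj);
}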
| @@ -326,7 +443,7 @@ rpcauth_unbindcred(struct rpc_task *task) | |||
| 326 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 443 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 327 | 444 | ||
| 328 | dprintk("RPC: %5u releasing %s cred %p\n", | 445 | dprintk("RPC: %5u releasing %s cred %p\n", |
| 329 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 446 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
| 330 | 447 | ||
| 331 | put_rpccred(cred); | 448 | put_rpccred(cred); |
| 332 | task->tk_msg.rpc_cred = NULL; | 449 | task->tk_msg.rpc_cred = NULL; |
| @@ -338,7 +455,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p) | |||
| 338 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 455 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 339 | 456 | ||
| 340 | dprintk("RPC: %5u marshaling %s cred %p\n", | 457 | dprintk("RPC: %5u marshaling %s cred %p\n", |
| 341 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 458 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
| 342 | 459 | ||
| 343 | return cred->cr_ops->crmarshal(task, p); | 460 | return cred->cr_ops->crmarshal(task, p); |
| 344 | } | 461 | } |
| @@ -349,7 +466,7 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p) | |||
| 349 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 466 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 350 | 467 | ||
| 351 | dprintk("RPC: %5u validating %s cred %p\n", | 468 | dprintk("RPC: %5u validating %s cred %p\n", |
| 352 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 469 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
| 353 | 470 | ||
| 354 | return cred->cr_ops->crvalidate(task, p); | 471 | return cred->cr_ops->crvalidate(task, p); |
| 355 | } | 472 | } |
| @@ -359,13 +476,17 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, | |||
| 359 | __be32 *data, void *obj) | 476 | __be32 *data, void *obj) |
| 360 | { | 477 | { |
| 361 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 478 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 479 | int ret; | ||
| 362 | 480 | ||
| 363 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", | 481 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", |
| 364 | task->tk_pid, cred->cr_ops->cr_name, cred); | 482 | task->tk_pid, cred->cr_ops->cr_name, cred); |
| 365 | if (cred->cr_ops->crwrap_req) | 483 | if (cred->cr_ops->crwrap_req) |
| 366 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); | 484 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); |
| 367 | /* By default, we encode the arguments normally. */ | 485 | /* By default, we encode the arguments normally. */ |
| 368 | return encode(rqstp, data, obj); | 486 | lock_kernel(); |
| 487 | ret = encode(rqstp, data, obj); | ||
| 488 | unlock_kernel(); | ||
| 489 | return ret; | ||
| 369 | } | 490 | } |
| 370 | 491 | ||
| 371 | int | 492 | int |
| @@ -373,6 +494,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
| 373 | __be32 *data, void *obj) | 494 | __be32 *data, void *obj) |
| 374 | { | 495 | { |
| 375 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 496 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 497 | int ret; | ||
| 376 | 498 | ||
| 377 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", | 499 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", |
| 378 | task->tk_pid, cred->cr_ops->cr_name, cred); | 500 | task->tk_pid, cred->cr_ops->cr_name, cred); |
| @@ -380,7 +502,10 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
| 380 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, | 502 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, |
| 381 | data, obj); | 503 | data, obj); |
| 382 | /* By default, we decode the arguments normally. */ | 504 | /* By default, we decode the arguments normally. */ |
| 383 | return decode(rqstp, data, obj); | 505 | lock_kernel(); |
| 506 | ret = decode(rqstp, data, obj); | ||
| 507 | unlock_kernel(); | ||
| 508 | return ret; | ||
| 384 | } | 509 | } |
| 385 | 510 | ||
| 386 | int | 511 | int |
| @@ -390,7 +515,7 @@ rpcauth_refreshcred(struct rpc_task *task) | |||
| 390 | int err; | 515 | int err; |
| 391 | 516 | ||
| 392 | dprintk("RPC: %5u refreshing %s cred %p\n", | 517 | dprintk("RPC: %5u refreshing %s cred %p\n", |
| 393 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 518 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
| 394 | 519 | ||
| 395 | err = cred->cr_ops->crrefresh(task); | 520 | err = cred->cr_ops->crrefresh(task); |
| 396 | if (err < 0) | 521 | if (err < 0) |
| @@ -401,17 +526,34 @@ rpcauth_refreshcred(struct rpc_task *task) | |||
| 401 | void | 526 | void |
| 402 | rpcauth_invalcred(struct rpc_task *task) | 527 | rpcauth_invalcred(struct rpc_task *task) |
| 403 | { | 528 | { |
| 529 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | ||
| 530 | |||
| 404 | dprintk("RPC: %5u invalidating %s cred %p\n", | 531 | dprintk("RPC: %5u invalidating %s cred %p\n", |
| 405 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); | 532 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
| 406 | spin_lock(&rpc_credcache_lock); | 533 | if (cred) |
| 407 | if (task->tk_msg.rpc_cred) | 534 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 408 | task->tk_msg.rpc_cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | ||
| 409 | spin_unlock(&rpc_credcache_lock); | ||
| 410 | } | 535 | } |
| 411 | 536 | ||
| 412 | int | 537 | int |
| 413 | rpcauth_uptodatecred(struct rpc_task *task) | 538 | rpcauth_uptodatecred(struct rpc_task *task) |
| 414 | { | 539 | { |
| 415 | return !(task->tk_msg.rpc_cred) || | 540 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 416 | (task->tk_msg.rpc_cred->cr_flags & RPCAUTH_CRED_UPTODATE); | 541 | |
| 542 | return cred == NULL || | ||
| 543 | test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; | ||
| 544 | } | ||
| 545 | |||
| 546 | |||
| 547 | static struct shrinker *rpc_cred_shrinker; | ||
| 548 | |||
| 549 | void __init rpcauth_init_module(void) | ||
| 550 | { | ||
| 551 | rpc_init_authunix(); | ||
| 552 | rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker); | ||
| 553 | } | ||
| 554 | |||
| 555 | void __exit rpcauth_remove_module(void) | ||
| 556 | { | ||
| 557 | if (rpc_cred_shrinker != NULL) | ||
| 558 | remove_shrinker(rpc_cred_shrinker); | ||
| 417 | } | 559 | } |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 4e4ccc5b6fea..baf4096d52d4 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -54,9 +54,10 @@ | |||
| 54 | #include <linux/sunrpc/gss_api.h> | 54 | #include <linux/sunrpc/gss_api.h> |
| 55 | #include <asm/uaccess.h> | 55 | #include <asm/uaccess.h> |
| 56 | 56 | ||
| 57 | static struct rpc_authops authgss_ops; | 57 | static const struct rpc_authops authgss_ops; |
| 58 | 58 | ||
| 59 | static struct rpc_credops gss_credops; | 59 | static const struct rpc_credops gss_credops; |
| 60 | static const struct rpc_credops gss_nullops; | ||
| 60 | 61 | ||
| 61 | #ifdef RPC_DEBUG | 62 | #ifdef RPC_DEBUG |
| 62 | # define RPCDBG_FACILITY RPCDBG_AUTH | 63 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| @@ -64,7 +65,6 @@ static struct rpc_credops gss_credops; | |||
| 64 | 65 | ||
| 65 | #define NFS_NGROUPS 16 | 66 | #define NFS_NGROUPS 16 |
| 66 | 67 | ||
| 67 | #define GSS_CRED_EXPIRE (60 * HZ) /* XXX: reasonable? */ | ||
| 68 | #define GSS_CRED_SLACK 1024 /* XXX: unused */ | 68 | #define GSS_CRED_SLACK 1024 /* XXX: unused */ |
| 69 | /* length of a krb5 verifier (48), plus data added before arguments when | 69 | /* length of a krb5 verifier (48), plus data added before arguments when |
| 70 | * using integrity (two 4-byte integers): */ | 70 | * using integrity (two 4-byte integers): */ |
| @@ -79,19 +79,16 @@ static struct rpc_credops gss_credops; | |||
| 79 | /* dump the buffer in `emacs-hexl' style */ | 79 | /* dump the buffer in `emacs-hexl' style */ |
| 80 | #define isprint(c) ((c > 0x1f) && (c < 0x7f)) | 80 | #define isprint(c) ((c > 0x1f) && (c < 0x7f)) |
| 81 | 81 | ||
| 82 | static DEFINE_RWLOCK(gss_ctx_lock); | ||
| 83 | |||
| 84 | struct gss_auth { | 82 | struct gss_auth { |
| 83 | struct kref kref; | ||
| 85 | struct rpc_auth rpc_auth; | 84 | struct rpc_auth rpc_auth; |
| 86 | struct gss_api_mech *mech; | 85 | struct gss_api_mech *mech; |
| 87 | enum rpc_gss_svc service; | 86 | enum rpc_gss_svc service; |
| 88 | struct list_head upcalls; | ||
| 89 | struct rpc_clnt *client; | 87 | struct rpc_clnt *client; |
| 90 | struct dentry *dentry; | 88 | struct dentry *dentry; |
| 91 | spinlock_t lock; | ||
| 92 | }; | 89 | }; |
| 93 | 90 | ||
| 94 | static void gss_destroy_ctx(struct gss_cl_ctx *); | 91 | static void gss_free_ctx(struct gss_cl_ctx *); |
| 95 | static struct rpc_pipe_ops gss_upcall_ops; | 92 | static struct rpc_pipe_ops gss_upcall_ops; |
| 96 | 93 | ||
| 97 | static inline struct gss_cl_ctx * | 94 | static inline struct gss_cl_ctx * |
| @@ -105,20 +102,24 @@ static inline void | |||
| 105 | gss_put_ctx(struct gss_cl_ctx *ctx) | 102 | gss_put_ctx(struct gss_cl_ctx *ctx) |
| 106 | { | 103 | { |
| 107 | if (atomic_dec_and_test(&ctx->count)) | 104 | if (atomic_dec_and_test(&ctx->count)) |
| 108 | gss_destroy_ctx(ctx); | 105 | gss_free_ctx(ctx); |
| 109 | } | 106 | } |
| 110 | 107 | ||
| 108 | /* gss_cred_set_ctx: | ||
| 109 | * called by gss_upcall_callback and gss_create_upcall in order | ||
| 110 | * to set the gss context. The actual exchange of an old context | ||
| 111 | * and a new one is protected by the inode->i_lock. | ||
| 112 | */ | ||
| 111 | static void | 113 | static void |
| 112 | gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) | 114 | gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) |
| 113 | { | 115 | { |
| 114 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 116 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
| 115 | struct gss_cl_ctx *old; | 117 | struct gss_cl_ctx *old; |
| 116 | write_lock(&gss_ctx_lock); | 118 | |
| 117 | old = gss_cred->gc_ctx; | 119 | old = gss_cred->gc_ctx; |
| 118 | gss_cred->gc_ctx = ctx; | 120 | rcu_assign_pointer(gss_cred->gc_ctx, ctx); |
| 119 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 121 | set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 120 | cred->cr_flags &= ~RPCAUTH_CRED_NEW; | 122 | clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); |
| 121 | write_unlock(&gss_ctx_lock); | ||
| 122 | if (old) | 123 | if (old) |
| 123 | gss_put_ctx(old); | 124 | gss_put_ctx(old); |
| 124 | } | 125 | } |
| @@ -129,10 +130,10 @@ gss_cred_is_uptodate_ctx(struct rpc_cred *cred) | |||
| 129 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 130 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
| 130 | int res = 0; | 131 | int res = 0; |
| 131 | 132 | ||
| 132 | read_lock(&gss_ctx_lock); | 133 | rcu_read_lock(); |
| 133 | if ((cred->cr_flags & RPCAUTH_CRED_UPTODATE) && gss_cred->gc_ctx) | 134 | if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx) |
| 134 | res = 1; | 135 | res = 1; |
| 135 | read_unlock(&gss_ctx_lock); | 136 | rcu_read_unlock(); |
| 136 | return res; | 137 | return res; |
| 137 | } | 138 | } |
| 138 | 139 | ||
| @@ -171,10 +172,10 @@ gss_cred_get_ctx(struct rpc_cred *cred) | |||
| 171 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 172 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
| 172 | struct gss_cl_ctx *ctx = NULL; | 173 | struct gss_cl_ctx *ctx = NULL; |
| 173 | 174 | ||
| 174 | read_lock(&gss_ctx_lock); | 175 | rcu_read_lock(); |
| 175 | if (gss_cred->gc_ctx) | 176 | if (gss_cred->gc_ctx) |
| 176 | ctx = gss_get_ctx(gss_cred->gc_ctx); | 177 | ctx = gss_get_ctx(gss_cred->gc_ctx); |
| 177 | read_unlock(&gss_ctx_lock); | 178 | rcu_read_unlock(); |
| 178 | return ctx; | 179 | return ctx; |
| 179 | } | 180 | } |
| 180 | 181 | ||
| @@ -269,10 +270,10 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) | |||
| 269 | } | 270 | } |
| 270 | 271 | ||
| 271 | static struct gss_upcall_msg * | 272 | static struct gss_upcall_msg * |
| 272 | __gss_find_upcall(struct gss_auth *gss_auth, uid_t uid) | 273 | __gss_find_upcall(struct rpc_inode *rpci, uid_t uid) |
| 273 | { | 274 | { |
| 274 | struct gss_upcall_msg *pos; | 275 | struct gss_upcall_msg *pos; |
| 275 | list_for_each_entry(pos, &gss_auth->upcalls, list) { | 276 | list_for_each_entry(pos, &rpci->in_downcall, list) { |
| 276 | if (pos->uid != uid) | 277 | if (pos->uid != uid) |
| 277 | continue; | 278 | continue; |
| 278 | atomic_inc(&pos->count); | 279 | atomic_inc(&pos->count); |
| @@ -290,24 +291,24 @@ __gss_find_upcall(struct gss_auth *gss_auth, uid_t uid) | |||
| 290 | static inline struct gss_upcall_msg * | 291 | static inline struct gss_upcall_msg * |
| 291 | gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) | 292 | gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) |
| 292 | { | 293 | { |
| 294 | struct inode *inode = gss_auth->dentry->d_inode; | ||
| 295 | struct rpc_inode *rpci = RPC_I(inode); | ||
| 293 | struct gss_upcall_msg *old; | 296 | struct gss_upcall_msg *old; |
| 294 | 297 | ||
| 295 | spin_lock(&gss_auth->lock); | 298 | spin_lock(&inode->i_lock); |
| 296 | old = __gss_find_upcall(gss_auth, gss_msg->uid); | 299 | old = __gss_find_upcall(rpci, gss_msg->uid); |
| 297 | if (old == NULL) { | 300 | if (old == NULL) { |
| 298 | atomic_inc(&gss_msg->count); | 301 | atomic_inc(&gss_msg->count); |
| 299 | list_add(&gss_msg->list, &gss_auth->upcalls); | 302 | list_add(&gss_msg->list, &rpci->in_downcall); |
| 300 | } else | 303 | } else |
| 301 | gss_msg = old; | 304 | gss_msg = old; |
| 302 | spin_unlock(&gss_auth->lock); | 305 | spin_unlock(&inode->i_lock); |
| 303 | return gss_msg; | 306 | return gss_msg; |
| 304 | } | 307 | } |
| 305 | 308 | ||
| 306 | static void | 309 | static void |
| 307 | __gss_unhash_msg(struct gss_upcall_msg *gss_msg) | 310 | __gss_unhash_msg(struct gss_upcall_msg *gss_msg) |
| 308 | { | 311 | { |
| 309 | if (list_empty(&gss_msg->list)) | ||
| 310 | return; | ||
| 311 | list_del_init(&gss_msg->list); | 312 | list_del_init(&gss_msg->list); |
| 312 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); | 313 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); |
| 313 | wake_up_all(&gss_msg->waitqueue); | 314 | wake_up_all(&gss_msg->waitqueue); |
| @@ -318,10 +319,14 @@ static void | |||
| 318 | gss_unhash_msg(struct gss_upcall_msg *gss_msg) | 319 | gss_unhash_msg(struct gss_upcall_msg *gss_msg) |
| 319 | { | 320 | { |
| 320 | struct gss_auth *gss_auth = gss_msg->auth; | 321 | struct gss_auth *gss_auth = gss_msg->auth; |
| 322 | struct inode *inode = gss_auth->dentry->d_inode; | ||
| 321 | 323 | ||
| 322 | spin_lock(&gss_auth->lock); | 324 | if (list_empty(&gss_msg->list)) |
| 323 | __gss_unhash_msg(gss_msg); | 325 | return; |
| 324 | spin_unlock(&gss_auth->lock); | 326 | spin_lock(&inode->i_lock); |
| 327 | if (!list_empty(&gss_msg->list)) | ||
| 328 | __gss_unhash_msg(gss_msg); | ||
| 329 | spin_unlock(&inode->i_lock); | ||
| 325 | } | 330 | } |
| 326 | 331 | ||
| 327 | static void | 332 | static void |
| @@ -330,16 +335,16 @@ gss_upcall_callback(struct rpc_task *task) | |||
| 330 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, | 335 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, |
| 331 | struct gss_cred, gc_base); | 336 | struct gss_cred, gc_base); |
| 332 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; | 337 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; |
| 338 | struct inode *inode = gss_msg->auth->dentry->d_inode; | ||
| 333 | 339 | ||
| 334 | BUG_ON(gss_msg == NULL); | 340 | spin_lock(&inode->i_lock); |
| 335 | if (gss_msg->ctx) | 341 | if (gss_msg->ctx) |
| 336 | gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); | 342 | gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); |
| 337 | else | 343 | else |
| 338 | task->tk_status = gss_msg->msg.errno; | 344 | task->tk_status = gss_msg->msg.errno; |
| 339 | spin_lock(&gss_msg->auth->lock); | ||
| 340 | gss_cred->gc_upcall = NULL; | 345 | gss_cred->gc_upcall = NULL; |
| 341 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); | 346 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); |
| 342 | spin_unlock(&gss_msg->auth->lock); | 347 | spin_unlock(&inode->i_lock); |
| 343 | gss_release_msg(gss_msg); | 348 | gss_release_msg(gss_msg); |
| 344 | } | 349 | } |
| 345 | 350 | ||
| @@ -386,11 +391,12 @@ static inline int | |||
| 386 | gss_refresh_upcall(struct rpc_task *task) | 391 | gss_refresh_upcall(struct rpc_task *task) |
| 387 | { | 392 | { |
| 388 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 393 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
| 389 | struct gss_auth *gss_auth = container_of(task->tk_client->cl_auth, | 394 | struct gss_auth *gss_auth = container_of(cred->cr_auth, |
| 390 | struct gss_auth, rpc_auth); | 395 | struct gss_auth, rpc_auth); |
| 391 | struct gss_cred *gss_cred = container_of(cred, | 396 | struct gss_cred *gss_cred = container_of(cred, |
| 392 | struct gss_cred, gc_base); | 397 | struct gss_cred, gc_base); |
| 393 | struct gss_upcall_msg *gss_msg; | 398 | struct gss_upcall_msg *gss_msg; |
| 399 | struct inode *inode = gss_auth->dentry->d_inode; | ||
| 394 | int err = 0; | 400 | int err = 0; |
| 395 | 401 | ||
| 396 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, | 402 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, |
| @@ -400,7 +406,7 @@ gss_refresh_upcall(struct rpc_task *task) | |||
| 400 | err = PTR_ERR(gss_msg); | 406 | err = PTR_ERR(gss_msg); |
| 401 | goto out; | 407 | goto out; |
| 402 | } | 408 | } |
| 403 | spin_lock(&gss_auth->lock); | 409 | spin_lock(&inode->i_lock); |
| 404 | if (gss_cred->gc_upcall != NULL) | 410 | if (gss_cred->gc_upcall != NULL) |
| 405 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); | 411 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); |
| 406 | else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { | 412 | else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { |
| @@ -411,7 +417,7 @@ gss_refresh_upcall(struct rpc_task *task) | |||
| 411 | rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); | 417 | rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); |
| 412 | } else | 418 | } else |
| 413 | err = gss_msg->msg.errno; | 419 | err = gss_msg->msg.errno; |
| 414 | spin_unlock(&gss_auth->lock); | 420 | spin_unlock(&inode->i_lock); |
| 415 | gss_release_msg(gss_msg); | 421 | gss_release_msg(gss_msg); |
| 416 | out: | 422 | out: |
| 417 | dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", | 423 | dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", |
| @@ -422,6 +428,7 @@ out: | |||
| 422 | static inline int | 428 | static inline int |
| 423 | gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | 429 | gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) |
| 424 | { | 430 | { |
| 431 | struct inode *inode = gss_auth->dentry->d_inode; | ||
| 425 | struct rpc_cred *cred = &gss_cred->gc_base; | 432 | struct rpc_cred *cred = &gss_cred->gc_base; |
| 426 | struct gss_upcall_msg *gss_msg; | 433 | struct gss_upcall_msg *gss_msg; |
| 427 | DEFINE_WAIT(wait); | 434 | DEFINE_WAIT(wait); |
| @@ -435,12 +442,11 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | |||
| 435 | } | 442 | } |
| 436 | for (;;) { | 443 | for (;;) { |
| 437 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); | 444 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); |
| 438 | spin_lock(&gss_auth->lock); | 445 | spin_lock(&inode->i_lock); |
| 439 | if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { | 446 | if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { |
| 440 | spin_unlock(&gss_auth->lock); | ||
| 441 | break; | 447 | break; |
| 442 | } | 448 | } |
| 443 | spin_unlock(&gss_auth->lock); | 449 | spin_unlock(&inode->i_lock); |
| 444 | if (signalled()) { | 450 | if (signalled()) { |
| 445 | err = -ERESTARTSYS; | 451 | err = -ERESTARTSYS; |
| 446 | goto out_intr; | 452 | goto out_intr; |
| @@ -451,6 +457,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | |||
| 451 | gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); | 457 | gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); |
| 452 | else | 458 | else |
| 453 | err = gss_msg->msg.errno; | 459 | err = gss_msg->msg.errno; |
| 460 | spin_unlock(&inode->i_lock); | ||
| 454 | out_intr: | 461 | out_intr: |
| 455 | finish_wait(&gss_msg->waitqueue, &wait); | 462 | finish_wait(&gss_msg->waitqueue, &wait); |
| 456 | gss_release_msg(gss_msg); | 463 | gss_release_msg(gss_msg); |
| @@ -489,12 +496,11 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
| 489 | const void *p, *end; | 496 | const void *p, *end; |
| 490 | void *buf; | 497 | void *buf; |
| 491 | struct rpc_clnt *clnt; | 498 | struct rpc_clnt *clnt; |
| 492 | struct gss_auth *gss_auth; | ||
| 493 | struct rpc_cred *cred; | ||
| 494 | struct gss_upcall_msg *gss_msg; | 499 | struct gss_upcall_msg *gss_msg; |
| 500 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
| 495 | struct gss_cl_ctx *ctx; | 501 | struct gss_cl_ctx *ctx; |
| 496 | uid_t uid; | 502 | uid_t uid; |
| 497 | int err = -EFBIG; | 503 | ssize_t err = -EFBIG; |
| 498 | 504 | ||
| 499 | if (mlen > MSG_BUF_MAXSIZE) | 505 | if (mlen > MSG_BUF_MAXSIZE) |
| 500 | goto out; | 506 | goto out; |
| @@ -503,7 +509,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
| 503 | if (!buf) | 509 | if (!buf) |
| 504 | goto out; | 510 | goto out; |
| 505 | 511 | ||
| 506 | clnt = RPC_I(filp->f_path.dentry->d_inode)->private; | 512 | clnt = RPC_I(inode)->private; |
| 507 | err = -EFAULT; | 513 | err = -EFAULT; |
| 508 | if (copy_from_user(buf, src, mlen)) | 514 | if (copy_from_user(buf, src, mlen)) |
| 509 | goto err; | 515 | goto err; |
| @@ -519,43 +525,38 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
| 519 | ctx = gss_alloc_context(); | 525 | ctx = gss_alloc_context(); |
| 520 | if (ctx == NULL) | 526 | if (ctx == NULL) |
| 521 | goto err; | 527 | goto err; |
| 522 | err = 0; | 528 | |
| 523 | gss_auth = container_of(clnt->cl_auth, struct gss_auth, rpc_auth); | 529 | err = -ENOENT; |
| 524 | p = gss_fill_context(p, end, ctx, gss_auth->mech); | 530 | /* Find a matching upcall */ |
| 531 | spin_lock(&inode->i_lock); | ||
| 532 | gss_msg = __gss_find_upcall(RPC_I(inode), uid); | ||
| 533 | if (gss_msg == NULL) { | ||
| 534 | spin_unlock(&inode->i_lock); | ||
| 535 | goto err_put_ctx; | ||
| 536 | } | ||
| 537 | list_del_init(&gss_msg->list); | ||
| 538 | spin_unlock(&inode->i_lock); | ||
| 539 | |||
| 540 | p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); | ||
| 525 | if (IS_ERR(p)) { | 541 | if (IS_ERR(p)) { |
| 526 | err = PTR_ERR(p); | 542 | err = PTR_ERR(p); |
| 527 | if (err != -EACCES) | 543 | gss_msg->msg.errno = (err == -EACCES) ? -EACCES : -EAGAIN; |
| 528 | goto err_put_ctx; | 544 | goto err_release_msg; |
| 529 | } | ||
| 530 | spin_lock(&gss_auth->lock); | ||
| 531 | gss_msg = __gss_find_upcall(gss_auth, uid); | ||
| 532 | if (gss_msg) { | ||
| 533 | if (err == 0 && gss_msg->ctx == NULL) | ||
| 534 | gss_msg->ctx = gss_get_ctx(ctx); | ||
| 535 | gss_msg->msg.errno = err; | ||
| 536 | __gss_unhash_msg(gss_msg); | ||
| 537 | spin_unlock(&gss_auth->lock); | ||
| 538 | gss_release_msg(gss_msg); | ||
| 539 | } else { | ||
| 540 | struct auth_cred acred = { .uid = uid }; | ||
| 541 | spin_unlock(&gss_auth->lock); | ||
| 542 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW); | ||
| 543 | if (IS_ERR(cred)) { | ||
| 544 | err = PTR_ERR(cred); | ||
| 545 | goto err_put_ctx; | ||
| 546 | } | ||
| 547 | gss_cred_set_ctx(cred, gss_get_ctx(ctx)); | ||
| 548 | } | 545 | } |
| 549 | gss_put_ctx(ctx); | 546 | gss_msg->ctx = gss_get_ctx(ctx); |
| 550 | kfree(buf); | 547 | err = mlen; |
| 551 | dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen); | 548 | |
| 552 | return mlen; | 549 | err_release_msg: |
| 550 | spin_lock(&inode->i_lock); | ||
| 551 | __gss_unhash_msg(gss_msg); | ||
| 552 | spin_unlock(&inode->i_lock); | ||
| 553 | gss_release_msg(gss_msg); | ||
| 553 | err_put_ctx: | 554 | err_put_ctx: |
| 554 | gss_put_ctx(ctx); | 555 | gss_put_ctx(ctx); |
| 555 | err: | 556 | err: |
| 556 | kfree(buf); | 557 | kfree(buf); |
| 557 | out: | 558 | out: |
| 558 | dprintk("RPC: gss_pipe_downcall returning %d\n", err); | 559 | dprintk("RPC: gss_pipe_downcall returning %Zd\n", err); |
| 559 | return err; | 560 | return err; |
| 560 | } | 561 | } |
| 561 | 562 | ||
| @@ -563,27 +564,21 @@ static void | |||
| 563 | gss_pipe_release(struct inode *inode) | 564 | gss_pipe_release(struct inode *inode) |
| 564 | { | 565 | { |
| 565 | struct rpc_inode *rpci = RPC_I(inode); | 566 | struct rpc_inode *rpci = RPC_I(inode); |
| 566 | struct rpc_clnt *clnt; | 567 | struct gss_upcall_msg *gss_msg; |
| 567 | struct rpc_auth *auth; | ||
| 568 | struct gss_auth *gss_auth; | ||
| 569 | 568 | ||
| 570 | clnt = rpci->private; | 569 | spin_lock(&inode->i_lock); |
| 571 | auth = clnt->cl_auth; | 570 | while (!list_empty(&rpci->in_downcall)) { |
| 572 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); | ||
| 573 | spin_lock(&gss_auth->lock); | ||
| 574 | while (!list_empty(&gss_auth->upcalls)) { | ||
| 575 | struct gss_upcall_msg *gss_msg; | ||
| 576 | 571 | ||
| 577 | gss_msg = list_entry(gss_auth->upcalls.next, | 572 | gss_msg = list_entry(rpci->in_downcall.next, |
| 578 | struct gss_upcall_msg, list); | 573 | struct gss_upcall_msg, list); |
| 579 | gss_msg->msg.errno = -EPIPE; | 574 | gss_msg->msg.errno = -EPIPE; |
| 580 | atomic_inc(&gss_msg->count); | 575 | atomic_inc(&gss_msg->count); |
| 581 | __gss_unhash_msg(gss_msg); | 576 | __gss_unhash_msg(gss_msg); |
| 582 | spin_unlock(&gss_auth->lock); | 577 | spin_unlock(&inode->i_lock); |
| 583 | gss_release_msg(gss_msg); | 578 | gss_release_msg(gss_msg); |
| 584 | spin_lock(&gss_auth->lock); | 579 | spin_lock(&inode->i_lock); |
| 585 | } | 580 | } |
| 586 | spin_unlock(&gss_auth->lock); | 581 | spin_unlock(&inode->i_lock); |
| 587 | } | 582 | } |
| 588 | 583 | ||
| 589 | static void | 584 | static void |
| @@ -637,18 +632,13 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
| 637 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); | 632 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); |
| 638 | if (gss_auth->service == 0) | 633 | if (gss_auth->service == 0) |
| 639 | goto err_put_mech; | 634 | goto err_put_mech; |
| 640 | INIT_LIST_HEAD(&gss_auth->upcalls); | ||
| 641 | spin_lock_init(&gss_auth->lock); | ||
| 642 | auth = &gss_auth->rpc_auth; | 635 | auth = &gss_auth->rpc_auth; |
| 643 | auth->au_cslack = GSS_CRED_SLACK >> 2; | 636 | auth->au_cslack = GSS_CRED_SLACK >> 2; |
| 644 | auth->au_rslack = GSS_VERF_SLACK >> 2; | 637 | auth->au_rslack = GSS_VERF_SLACK >> 2; |
| 645 | auth->au_ops = &authgss_ops; | 638 | auth->au_ops = &authgss_ops; |
| 646 | auth->au_flavor = flavor; | 639 | auth->au_flavor = flavor; |
| 647 | atomic_set(&auth->au_count, 1); | 640 | atomic_set(&auth->au_count, 1); |
| 648 | 641 | kref_init(&gss_auth->kref); | |
| 649 | err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE); | ||
| 650 | if (err) | ||
| 651 | goto err_put_mech; | ||
| 652 | 642 | ||
| 653 | gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, | 643 | gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, |
| 654 | clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); | 644 | clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); |
| @@ -657,7 +647,13 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
| 657 | goto err_put_mech; | 647 | goto err_put_mech; |
| 658 | } | 648 | } |
| 659 | 649 | ||
| 650 | err = rpcauth_init_credcache(auth); | ||
| 651 | if (err) | ||
| 652 | goto err_unlink_pipe; | ||
| 653 | |||
| 660 | return auth; | 654 | return auth; |
| 655 | err_unlink_pipe: | ||
| 656 | rpc_unlink(gss_auth->dentry); | ||
| 661 | err_put_mech: | 657 | err_put_mech: |
| 662 | gss_mech_put(gss_auth->mech); | 658 | gss_mech_put(gss_auth->mech); |
| 663 | err_free: | 659 | err_free: |
| @@ -668,6 +664,25 @@ out_dec: | |||
| 668 | } | 664 | } |
| 669 | 665 | ||
| 670 | static void | 666 | static void |
| 667 | gss_free(struct gss_auth *gss_auth) | ||
| 668 | { | ||
| 669 | rpc_unlink(gss_auth->dentry); | ||
| 670 | gss_auth->dentry = NULL; | ||
| 671 | gss_mech_put(gss_auth->mech); | ||
| 672 | |||
| 673 | kfree(gss_auth); | ||
| 674 | module_put(THIS_MODULE); | ||
| 675 | } | ||
| 676 | |||
| 677 | static void | ||
| 678 | gss_free_callback(struct kref *kref) | ||
| 679 | { | ||
| 680 | struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref); | ||
| 681 | |||
| 682 | gss_free(gss_auth); | ||
| 683 | } | ||
| 684 | |||
| 685 | static void | ||
| 671 | gss_destroy(struct rpc_auth *auth) | 686 | gss_destroy(struct rpc_auth *auth) |
| 672 | { | 687 | { |
| 673 | struct gss_auth *gss_auth; | 688 | struct gss_auth *gss_auth; |
| @@ -675,23 +690,51 @@ gss_destroy(struct rpc_auth *auth) | |||
| 675 | dprintk("RPC: destroying GSS authenticator %p flavor %d\n", | 690 | dprintk("RPC: destroying GSS authenticator %p flavor %d\n", |
| 676 | auth, auth->au_flavor); | 691 | auth, auth->au_flavor); |
| 677 | 692 | ||
| 693 | rpcauth_destroy_credcache(auth); | ||
| 694 | |||
| 678 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); | 695 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); |
| 679 | rpc_unlink(gss_auth->dentry); | 696 | kref_put(&gss_auth->kref, gss_free_callback); |
| 680 | gss_auth->dentry = NULL; | 697 | } |
| 681 | gss_mech_put(gss_auth->mech); | ||
| 682 | 698 | ||
| 683 | rpcauth_free_credcache(auth); | 699 | /* |
| 684 | kfree(gss_auth); | 700 | * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call |
| 685 | module_put(THIS_MODULE); | 701 | * to the server with the GSS control procedure field set to |
| 702 | * RPC_GSS_PROC_DESTROY. This should normally cause the server to release | ||
| 703 | * all RPCSEC_GSS state associated with that context. | ||
| 704 | */ | ||
| 705 | static int | ||
| 706 | gss_destroying_context(struct rpc_cred *cred) | ||
| 707 | { | ||
| 708 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | ||
| 709 | struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); | ||
| 710 | struct rpc_task *task; | ||
| 711 | |||
| 712 | if (gss_cred->gc_ctx == NULL || | ||
| 713 | gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY) | ||
| 714 | return 0; | ||
| 715 | |||
| 716 | gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; | ||
| 717 | cred->cr_ops = &gss_nullops; | ||
| 718 | |||
| 719 | /* Take a reference to ensure the cred will be destroyed either | ||
| 720 | * by the RPC call or by the put_rpccred() below */ | ||
| 721 | get_rpccred(cred); | ||
| 722 | |||
| 723 | task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC); | ||
| 724 | if (!IS_ERR(task)) | ||
| 725 | rpc_put_task(task); | ||
| 726 | |||
| 727 | put_rpccred(cred); | ||
| 728 | return 1; | ||
| 686 | } | 729 | } |
| 687 | 730 | ||
| 688 | /* gss_destroy_cred (and gss_destroy_ctx) are used to clean up after failure | 731 | /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure |
| 689 | * to create a new cred or context, so they check that things have been | 732 | * to create a new cred or context, so they check that things have been |
| 690 | * allocated before freeing them. */ | 733 | * allocated before freeing them. */ |
| 691 | static void | 734 | static void |
| 692 | gss_destroy_ctx(struct gss_cl_ctx *ctx) | 735 | gss_do_free_ctx(struct gss_cl_ctx *ctx) |
| 693 | { | 736 | { |
| 694 | dprintk("RPC: gss_destroy_ctx\n"); | 737 | dprintk("RPC: gss_free_ctx\n"); |
| 695 | 738 | ||
| 696 | if (ctx->gc_gss_ctx) | 739 | if (ctx->gc_gss_ctx) |
| 697 | gss_delete_sec_context(&ctx->gc_gss_ctx); | 740 | gss_delete_sec_context(&ctx->gc_gss_ctx); |
| @@ -701,15 +744,46 @@ gss_destroy_ctx(struct gss_cl_ctx *ctx) | |||
| 701 | } | 744 | } |
| 702 | 745 | ||
| 703 | static void | 746 | static void |
| 704 | gss_destroy_cred(struct rpc_cred *rc) | 747 | gss_free_ctx_callback(struct rcu_head *head) |
| 705 | { | 748 | { |
| 706 | struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base); | 749 | struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu); |
| 750 | gss_do_free_ctx(ctx); | ||
| 751 | } | ||
| 707 | 752 | ||
| 708 | dprintk("RPC: gss_destroy_cred \n"); | 753 | static void |
| 754 | gss_free_ctx(struct gss_cl_ctx *ctx) | ||
| 755 | { | ||
| 756 | call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); | ||
| 757 | } | ||
| 709 | 758 | ||
| 710 | if (cred->gc_ctx) | 759 | static void |
| 711 | gss_put_ctx(cred->gc_ctx); | 760 | gss_free_cred(struct gss_cred *gss_cred) |
| 712 | kfree(cred); | 761 | { |
| 762 | dprintk("RPC: gss_free_cred %p\n", gss_cred); | ||
| 763 | kfree(gss_cred); | ||
| 764 | } | ||
| 765 | |||
| 766 | static void | ||
| 767 | gss_free_cred_callback(struct rcu_head *head) | ||
| 768 | { | ||
| 769 | struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu); | ||
| 770 | gss_free_cred(gss_cred); | ||
| 771 | } | ||
| 772 | |||
| 773 | static void | ||
| 774 | gss_destroy_cred(struct rpc_cred *cred) | ||
| 775 | { | ||
| 776 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | ||
| 777 | struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); | ||
| 778 | struct gss_cl_ctx *ctx = gss_cred->gc_ctx; | ||
| 779 | |||
| 780 | if (gss_destroying_context(cred)) | ||
| 781 | return; | ||
| 782 | rcu_assign_pointer(gss_cred->gc_ctx, NULL); | ||
| 783 | call_rcu(&cred->cr_rcu, gss_free_cred_callback); | ||
| 784 | if (ctx) | ||
| 785 | gss_put_ctx(ctx); | ||
| 786 | kref_put(&gss_auth->kref, gss_free_callback); | ||
| 713 | } | 787 | } |
| 714 | 788 | ||
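Both the GSS context and the credential are now freed through call_rcu(), since readers may still be dereferencing them under rcu_read_lock() when the last reference is dropped; the actual kfree() runs only after a grace period. The pattern in isolation, with an illustrative type:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
        struct rcu_head rcu;            /* embedded callback head */
        int             payload;
};

static void demo_free_rcu(struct rcu_head *head)
{
        struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

        kfree(obj);                     /* safe: all pre-existing RCU readers are done */
}

static void demo_release(struct demo_obj *obj)
{
        /* Unpublish first (rcu_assign_pointer(..., NULL), list_del_rcu(), ...),
         * then defer the free past the grace period. */
        call_rcu(&obj->rcu, demo_free_rcu);
}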
| 715 | /* | 789 | /* |
| @@ -734,16 +808,14 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 734 | if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) | 808 | if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) |
| 735 | goto out_err; | 809 | goto out_err; |
| 736 | 810 | ||
| 737 | atomic_set(&cred->gc_count, 1); | 811 | rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops); |
| 738 | cred->gc_uid = acred->uid; | ||
| 739 | /* | 812 | /* |
| 740 | * Note: in order to force a call to call_refresh(), we deliberately | 813 | * Note: in order to force a call to call_refresh(), we deliberately |
| 741 | * fail to flag the credential as RPCAUTH_CRED_UPTODATE. | 814 | * fail to flag the credential as RPCAUTH_CRED_UPTODATE. |
| 742 | */ | 815 | */ |
| 743 | cred->gc_flags = 0; | 816 | cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW; |
| 744 | cred->gc_base.cr_ops = &gss_credops; | ||
| 745 | cred->gc_base.cr_flags = RPCAUTH_CRED_NEW; | ||
| 746 | cred->gc_service = gss_auth->service; | 817 | cred->gc_service = gss_auth->service; |
| 818 | kref_get(&gss_auth->kref); | ||
| 747 | return &cred->gc_base; | 819 | return &cred->gc_base; |
| 748 | 820 | ||
| 749 | out_err: | 821 | out_err: |
| @@ -774,7 +846,7 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags) | |||
| 774 | * we don't really care if the credential has expired or not, | 846 | * we don't really care if the credential has expired or not, |
| 775 | * since the caller should be prepared to reinitialise it. | 847 | * since the caller should be prepared to reinitialise it. |
| 776 | */ | 848 | */ |
| 777 | if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW)) | 849 | if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags)) |
| 778 | goto out; | 850 | goto out; |
| 779 | /* Don't match with creds that have expired. */ | 851 | /* Don't match with creds that have expired. */ |
| 780 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) | 852 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) |
| @@ -830,7 +902,7 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
| 830 | mic.data = (u8 *)(p + 1); | 902 | mic.data = (u8 *)(p + 1); |
| 831 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); | 903 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
| 832 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { | 904 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { |
| 833 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 905 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 834 | } else if (maj_stat != 0) { | 906 | } else if (maj_stat != 0) { |
| 835 | printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); | 907 | printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); |
| 836 | goto out_put_ctx; | 908 | goto out_put_ctx; |
| @@ -855,6 +927,13 @@ gss_refresh(struct rpc_task *task) | |||
| 855 | return 0; | 927 | return 0; |
| 856 | } | 928 | } |
| 857 | 929 | ||
| 930 | /* Dummy refresh routine: used only when destroying the context */ | ||
| 931 | static int | ||
| 932 | gss_refresh_null(struct rpc_task *task) | ||
| 933 | { | ||
| 934 | return -EACCES; | ||
| 935 | } | ||
| 936 | |||
| 858 | static __be32 * | 937 | static __be32 * |
| 859 | gss_validate(struct rpc_task *task, __be32 *p) | 938 | gss_validate(struct rpc_task *task, __be32 *p) |
| 860 | { | 939 | { |
| @@ -883,12 +962,15 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
| 883 | 962 | ||
| 884 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); | 963 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
| 885 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 964 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
| 886 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 965 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 887 | if (maj_stat) | 966 | if (maj_stat) { |
| 967 | dprintk("RPC: %5u gss_validate: gss_verify_mic returned" | ||
| 968 | "error 0x%08x\n", task->tk_pid, maj_stat); | ||
| 888 | goto out_bad; | 969 | goto out_bad; |
| 970 | } | ||
| 889 | /* We leave it to unwrap to calculate au_rslack. For now we just | 971 | /* We leave it to unwrap to calculate au_rslack. For now we just |
| 890 | * calculate the length of the verifier: */ | 972 | * calculate the length of the verifier: */ |
| 891 | task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; | 973 | cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; |
| 892 | gss_put_ctx(ctx); | 974 | gss_put_ctx(ctx); |
| 893 | dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", | 975 | dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", |
| 894 | task->tk_pid); | 976 | task->tk_pid); |
| @@ -917,7 +999,9 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 917 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 999 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
| 918 | *p++ = htonl(rqstp->rq_seqno); | 1000 | *p++ = htonl(rqstp->rq_seqno); |
| 919 | 1001 | ||
| 1002 | lock_kernel(); | ||
| 920 | status = encode(rqstp, p, obj); | 1003 | status = encode(rqstp, p, obj); |
| 1004 | unlock_kernel(); | ||
| 921 | if (status) | 1005 | if (status) |
| 922 | return status; | 1006 | return status; |
| 923 | 1007 | ||
| @@ -937,7 +1021,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 937 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); | 1021 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); |
| 938 | status = -EIO; /* XXX? */ | 1022 | status = -EIO; /* XXX? */ |
| 939 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1023 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
| 940 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1024 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 941 | else if (maj_stat) | 1025 | else if (maj_stat) |
| 942 | return status; | 1026 | return status; |
| 943 | q = xdr_encode_opaque(p, NULL, mic.len); | 1027 | q = xdr_encode_opaque(p, NULL, mic.len); |
| @@ -1011,7 +1095,9 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 1011 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 1095 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
| 1012 | *p++ = htonl(rqstp->rq_seqno); | 1096 | *p++ = htonl(rqstp->rq_seqno); |
| 1013 | 1097 | ||
| 1098 | lock_kernel(); | ||
| 1014 | status = encode(rqstp, p, obj); | 1099 | status = encode(rqstp, p, obj); |
| 1100 | unlock_kernel(); | ||
| 1015 | if (status) | 1101 | if (status) |
| 1016 | return status; | 1102 | return status; |
| 1017 | 1103 | ||
| @@ -1036,7 +1122,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 1036 | /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was | 1122 | /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was |
| 1037 | * done anyway, so it's safe to put the request on the wire: */ | 1123 | * done anyway, so it's safe to put the request on the wire: */ |
| 1038 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1124 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
| 1039 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1125 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 1040 | else if (maj_stat) | 1126 | else if (maj_stat) |
| 1041 | return status; | 1127 | return status; |
| 1042 | 1128 | ||
| @@ -1070,12 +1156,16 @@ gss_wrap_req(struct rpc_task *task, | |||
| 1070 | /* The spec seems a little ambiguous here, but I think that not | 1156 | /* The spec seems a little ambiguous here, but I think that not |
| 1071 | * wrapping context destruction requests makes the most sense. | 1157 | * wrapping context destruction requests makes the most sense. |
| 1072 | */ | 1158 | */ |
| 1159 | lock_kernel(); | ||
| 1073 | status = encode(rqstp, p, obj); | 1160 | status = encode(rqstp, p, obj); |
| 1161 | unlock_kernel(); | ||
| 1074 | goto out; | 1162 | goto out; |
| 1075 | } | 1163 | } |
| 1076 | switch (gss_cred->gc_service) { | 1164 | switch (gss_cred->gc_service) { |
| 1077 | case RPC_GSS_SVC_NONE: | 1165 | case RPC_GSS_SVC_NONE: |
| 1166 | lock_kernel(); | ||
| 1078 | status = encode(rqstp, p, obj); | 1167 | status = encode(rqstp, p, obj); |
| 1168 | unlock_kernel(); | ||
| 1079 | break; | 1169 | break; |
| 1080 | case RPC_GSS_SVC_INTEGRITY: | 1170 | case RPC_GSS_SVC_INTEGRITY: |
| 1081 | status = gss_wrap_req_integ(cred, ctx, encode, | 1171 | status = gss_wrap_req_integ(cred, ctx, encode, |
| @@ -1123,7 +1213,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 1123 | 1213 | ||
| 1124 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); | 1214 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); |
| 1125 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1215 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
| 1126 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1216 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 1127 | if (maj_stat != GSS_S_COMPLETE) | 1217 | if (maj_stat != GSS_S_COMPLETE) |
| 1128 | return status; | 1218 | return status; |
| 1129 | return 0; | 1219 | return 0; |
| @@ -1148,7 +1238,7 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 1148 | 1238 | ||
| 1149 | maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); | 1239 | maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); |
| 1150 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1240 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
| 1151 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1241 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 1152 | if (maj_stat != GSS_S_COMPLETE) | 1242 | if (maj_stat != GSS_S_COMPLETE) |
| 1153 | return status; | 1243 | return status; |
| 1154 | if (ntohl(*(*p)++) != rqstp->rq_seqno) | 1244 | if (ntohl(*(*p)++) != rqstp->rq_seqno) |
| @@ -1188,10 +1278,12 @@ gss_unwrap_resp(struct rpc_task *task, | |||
| 1188 | break; | 1278 | break; |
| 1189 | } | 1279 | } |
| 1190 | /* take into account extra slack for integrity and privacy cases: */ | 1280 | /* take into account extra slack for integrity and privacy cases: */ |
| 1191 | task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) | 1281 | cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) |
| 1192 | + (savedlen - head->iov_len); | 1282 | + (savedlen - head->iov_len); |
| 1193 | out_decode: | 1283 | out_decode: |
| 1284 | lock_kernel(); | ||
| 1194 | status = decode(rqstp, p, obj); | 1285 | status = decode(rqstp, p, obj); |
| 1286 | unlock_kernel(); | ||
| 1195 | out: | 1287 | out: |
| 1196 | gss_put_ctx(ctx); | 1288 | gss_put_ctx(ctx); |
| 1197 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, | 1289 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, |
| @@ -1199,7 +1291,7 @@ out: | |||
| 1199 | return status; | 1291 | return status; |
| 1200 | } | 1292 | } |
| 1201 | 1293 | ||
| 1202 | static struct rpc_authops authgss_ops = { | 1294 | static const struct rpc_authops authgss_ops = { |
| 1203 | .owner = THIS_MODULE, | 1295 | .owner = THIS_MODULE, |
| 1204 | .au_flavor = RPC_AUTH_GSS, | 1296 | .au_flavor = RPC_AUTH_GSS, |
| 1205 | #ifdef RPC_DEBUG | 1297 | #ifdef RPC_DEBUG |
| @@ -1211,7 +1303,7 @@ static struct rpc_authops authgss_ops = { | |||
| 1211 | .crcreate = gss_create_cred | 1303 | .crcreate = gss_create_cred |
| 1212 | }; | 1304 | }; |
| 1213 | 1305 | ||
| 1214 | static struct rpc_credops gss_credops = { | 1306 | static const struct rpc_credops gss_credops = { |
| 1215 | .cr_name = "AUTH_GSS", | 1307 | .cr_name = "AUTH_GSS", |
| 1216 | .crdestroy = gss_destroy_cred, | 1308 | .crdestroy = gss_destroy_cred, |
| 1217 | .cr_init = gss_cred_init, | 1309 | .cr_init = gss_cred_init, |
| @@ -1223,6 +1315,17 @@ static struct rpc_credops gss_credops = { | |||
| 1223 | .crunwrap_resp = gss_unwrap_resp, | 1315 | .crunwrap_resp = gss_unwrap_resp, |
| 1224 | }; | 1316 | }; |
| 1225 | 1317 | ||
| 1318 | static const struct rpc_credops gss_nullops = { | ||
| 1319 | .cr_name = "AUTH_GSS", | ||
| 1320 | .crdestroy = gss_destroy_cred, | ||
| 1321 | .crmatch = gss_match, | ||
| 1322 | .crmarshal = gss_marshal, | ||
| 1323 | .crrefresh = gss_refresh_null, | ||
| 1324 | .crvalidate = gss_validate, | ||
| 1325 | .crwrap_req = gss_wrap_req, | ||
| 1326 | .crunwrap_resp = gss_unwrap_resp, | ||
| 1327 | }; | ||
| 1328 | |||
| 1226 | static struct rpc_pipe_ops gss_upcall_ops = { | 1329 | static struct rpc_pipe_ops gss_upcall_ops = { |
| 1227 | .upcall = gss_pipe_upcall, | 1330 | .upcall = gss_pipe_upcall, |
| 1228 | .downcall = gss_pipe_downcall, | 1331 | .downcall = gss_pipe_downcall, |
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 7b1943217053..71b9daefdff3 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
| @@ -201,7 +201,7 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { | |||
| 201 | kfree(kctx); | 201 | kfree(kctx); |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | static struct gss_api_ops gss_kerberos_ops = { | 204 | static const struct gss_api_ops gss_kerberos_ops = { |
| 205 | .gss_import_sec_context = gss_import_sec_context_kerberos, | 205 | .gss_import_sec_context = gss_import_sec_context_kerberos, |
| 206 | .gss_get_mic = gss_get_mic_kerberos, | 206 | .gss_get_mic = gss_get_mic_kerberos, |
| 207 | .gss_verify_mic = gss_verify_mic_kerberos, | 207 | .gss_verify_mic = gss_verify_mic_kerberos, |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 7e15aa68ae64..577d590e755f 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
| @@ -202,7 +202,7 @@ gss_get_mic_spkm3(struct gss_ctx *ctx, | |||
| 202 | return err; | 202 | return err; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | static struct gss_api_ops gss_spkm3_ops = { | 205 | static const struct gss_api_ops gss_spkm3_ops = { |
| 206 | .gss_import_sec_context = gss_import_sec_context_spkm3, | 206 | .gss_import_sec_context = gss_import_sec_context_spkm3, |
| 207 | .gss_get_mic = gss_get_mic_spkm3, | 207 | .gss_get_mic = gss_get_mic_spkm3, |
| 208 | .gss_verify_mic = gss_verify_mic_spkm3, | 208 | .gss_verify_mic = gss_verify_mic_spkm3, |
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 3df9fccab2f8..537d0e8589dd 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c | |||
| @@ -76,7 +76,7 @@ nul_marshal(struct rpc_task *task, __be32 *p) | |||
| 76 | static int | 76 | static int |
| 77 | nul_refresh(struct rpc_task *task) | 77 | nul_refresh(struct rpc_task *task) |
| 78 | { | 78 | { |
| 79 | task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 79 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); |
| 80 | return 0; | 80 | return 0; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| @@ -101,7 +101,7 @@ nul_validate(struct rpc_task *task, __be32 *p) | |||
| 101 | return p; | 101 | return p; |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | struct rpc_authops authnull_ops = { | 104 | const struct rpc_authops authnull_ops = { |
| 105 | .owner = THIS_MODULE, | 105 | .owner = THIS_MODULE, |
| 106 | .au_flavor = RPC_AUTH_NULL, | 106 | .au_flavor = RPC_AUTH_NULL, |
| 107 | #ifdef RPC_DEBUG | 107 | #ifdef RPC_DEBUG |
| @@ -122,7 +122,7 @@ struct rpc_auth null_auth = { | |||
| 122 | }; | 122 | }; |
| 123 | 123 | ||
| 124 | static | 124 | static |
| 125 | struct rpc_credops null_credops = { | 125 | const struct rpc_credops null_credops = { |
| 126 | .cr_name = "AUTH_NULL", | 126 | .cr_name = "AUTH_NULL", |
| 127 | .crdestroy = nul_destroy_cred, | 127 | .crdestroy = nul_destroy_cred, |
| 128 | .crmatch = nul_match, | 128 | .crmatch = nul_match, |
| @@ -133,9 +133,11 @@ struct rpc_credops null_credops = { | |||
| 133 | 133 | ||
| 134 | static | 134 | static |
| 135 | struct rpc_cred null_cred = { | 135 | struct rpc_cred null_cred = { |
| 136 | .cr_lru = LIST_HEAD_INIT(null_cred.cr_lru), | ||
| 137 | .cr_auth = &null_auth, | ||
| 136 | .cr_ops = &null_credops, | 138 | .cr_ops = &null_credops, |
| 137 | .cr_count = ATOMIC_INIT(1), | 139 | .cr_count = ATOMIC_INIT(1), |
| 138 | .cr_flags = RPCAUTH_CRED_UPTODATE, | 140 | .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, |
| 139 | #ifdef RPC_DEBUG | 141 | #ifdef RPC_DEBUG |
| 140 | .cr_magic = RPCAUTH_CRED_MAGIC, | 142 | .cr_magic = RPCAUTH_CRED_MAGIC, |
| 141 | #endif | 143 | #endif |
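In the auth_null.c hunks above, cr_flags stops being a mask and becomes an unsigned long bit field: run-time code now uses set_bit() with RPCAUTH_CRED_UPTODATE as a bit number, while the static null_cred initializer has to spell out the mask as 1UL << RPCAUTH_CRED_UPTODATE. A small stand-alone sketch of that convention, with invented demo_* names:

#include <linux/bitops.h>

#define DEMO_CRED_UPTODATE	0		/* bit number, not a mask */

struct demo_cred {
	unsigned long flags;
};

/* Static initializers cannot call set_bit(), so they shift by hand. */
static struct demo_cred demo_boot_cred = {
	.flags = 1UL << DEMO_CRED_UPTODATE,
};

static void demo_mark_uptodate(struct demo_cred *cred)
{
	set_bit(DEMO_CRED_UPTODATE, &cred->flags);	/* atomic read-modify-write */
}

static int demo_is_uptodate(struct demo_cred *cred)
{
	return test_bit(DEMO_CRED_UPTODATE, &cred->flags);
}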
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 4e7733aee36e..5ed91e5bcee4 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
| @@ -20,11 +20,6 @@ struct unx_cred { | |||
| 20 | gid_t uc_gids[NFS_NGROUPS]; | 20 | gid_t uc_gids[NFS_NGROUPS]; |
| 21 | }; | 21 | }; |
| 22 | #define uc_uid uc_base.cr_uid | 22 | #define uc_uid uc_base.cr_uid |
| 23 | #define uc_count uc_base.cr_count | ||
| 24 | #define uc_flags uc_base.cr_flags | ||
| 25 | #define uc_expire uc_base.cr_expire | ||
| 26 | |||
| 27 | #define UNX_CRED_EXPIRE (60 * HZ) | ||
| 28 | 23 | ||
| 29 | #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) | 24 | #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) |
| 30 | 25 | ||
| @@ -34,15 +29,14 @@ struct unx_cred { | |||
| 34 | 29 | ||
| 35 | static struct rpc_auth unix_auth; | 30 | static struct rpc_auth unix_auth; |
| 36 | static struct rpc_cred_cache unix_cred_cache; | 31 | static struct rpc_cred_cache unix_cred_cache; |
| 37 | static struct rpc_credops unix_credops; | 32 | static const struct rpc_credops unix_credops; |
| 38 | 33 | ||
| 39 | static struct rpc_auth * | 34 | static struct rpc_auth * |
| 40 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | 35 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) |
| 41 | { | 36 | { |
| 42 | dprintk("RPC: creating UNIX authenticator for client %p\n", | 37 | dprintk("RPC: creating UNIX authenticator for client %p\n", |
| 43 | clnt); | 38 | clnt); |
| 44 | if (atomic_inc_return(&unix_auth.au_count) == 0) | 39 | atomic_inc(&unix_auth.au_count); |
| 45 | unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); | ||
| 46 | return &unix_auth; | 40 | return &unix_auth; |
| 47 | } | 41 | } |
| 48 | 42 | ||
| @@ -50,7 +44,7 @@ static void | |||
| 50 | unx_destroy(struct rpc_auth *auth) | 44 | unx_destroy(struct rpc_auth *auth) |
| 51 | { | 45 | { |
| 52 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); | 46 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); |
| 53 | rpcauth_free_credcache(auth); | 47 | rpcauth_clear_credcache(auth->au_credcache); |
| 54 | } | 48 | } |
| 55 | 49 | ||
| 56 | /* | 50 | /* |
| @@ -74,8 +68,8 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 74 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) | 68 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) |
| 75 | return ERR_PTR(-ENOMEM); | 69 | return ERR_PTR(-ENOMEM); |
| 76 | 70 | ||
| 77 | atomic_set(&cred->uc_count, 1); | 71 | rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops); |
| 78 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; | 72 | cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; |
| 79 | if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { | 73 | if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { |
| 80 | cred->uc_uid = 0; | 74 | cred->uc_uid = 0; |
| 81 | cred->uc_gid = 0; | 75 | cred->uc_gid = 0; |
| @@ -85,22 +79,34 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 85 | if (groups > NFS_NGROUPS) | 79 | if (groups > NFS_NGROUPS) |
| 86 | groups = NFS_NGROUPS; | 80 | groups = NFS_NGROUPS; |
| 87 | 81 | ||
| 88 | cred->uc_uid = acred->uid; | ||
| 89 | cred->uc_gid = acred->gid; | 82 | cred->uc_gid = acred->gid; |
| 90 | for (i = 0; i < groups; i++) | 83 | for (i = 0; i < groups; i++) |
| 91 | cred->uc_gids[i] = GROUP_AT(acred->group_info, i); | 84 | cred->uc_gids[i] = GROUP_AT(acred->group_info, i); |
| 92 | if (i < NFS_NGROUPS) | 85 | if (i < NFS_NGROUPS) |
| 93 | cred->uc_gids[i] = NOGROUP; | 86 | cred->uc_gids[i] = NOGROUP; |
| 94 | } | 87 | } |
| 95 | cred->uc_base.cr_ops = &unix_credops; | ||
| 96 | 88 | ||
| 97 | return (struct rpc_cred *) cred; | 89 | return &cred->uc_base; |
| 90 | } | ||
| 91 | |||
| 92 | static void | ||
| 93 | unx_free_cred(struct unx_cred *unx_cred) | ||
| 94 | { | ||
| 95 | dprintk("RPC: unx_free_cred %p\n", unx_cred); | ||
| 96 | kfree(unx_cred); | ||
| 97 | } | ||
| 98 | |||
| 99 | static void | ||
| 100 | unx_free_cred_callback(struct rcu_head *head) | ||
| 101 | { | ||
| 102 | struct unx_cred *unx_cred = container_of(head, struct unx_cred, uc_base.cr_rcu); | ||
| 103 | unx_free_cred(unx_cred); | ||
| 98 | } | 104 | } |
| 99 | 105 | ||
| 100 | static void | 106 | static void |
| 101 | unx_destroy_cred(struct rpc_cred *cred) | 107 | unx_destroy_cred(struct rpc_cred *cred) |
| 102 | { | 108 | { |
| 103 | kfree(cred); | 109 | call_rcu(&cred->cr_rcu, unx_free_cred_callback); |
| 104 | } | 110 | } |
| 105 | 111 | ||
| 106 | /* | 112 | /* |
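unx_destroy_cred above now defers the kfree() behind an RCU grace period: call_rcu() queues a callback that recovers the cred with container_of() and frees it once pre-existing lockless readers are done. The same idiom in isolation, with invented demo_* names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	int value;
	struct rcu_head rcu;	/* embedded callback handle */
};

static void demo_free_rcu(struct rcu_head *head)
{
	struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

	kfree(obj);	/* runs only after the RCU grace period elapses */
}

static void demo_destroy(struct demo_obj *obj)
{
	call_rcu(&obj->rcu, demo_free_rcu);
}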
| @@ -111,7 +117,7 @@ unx_destroy_cred(struct rpc_cred *cred) | |||
| 111 | static int | 117 | static int |
| 112 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) | 118 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) |
| 113 | { | 119 | { |
| 114 | struct unx_cred *cred = (struct unx_cred *) rcred; | 120 | struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base); |
| 115 | int i; | 121 | int i; |
| 116 | 122 | ||
| 117 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { | 123 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { |
| @@ -142,7 +148,7 @@ static __be32 * | |||
| 142 | unx_marshal(struct rpc_task *task, __be32 *p) | 148 | unx_marshal(struct rpc_task *task, __be32 *p) |
| 143 | { | 149 | { |
| 144 | struct rpc_clnt *clnt = task->tk_client; | 150 | struct rpc_clnt *clnt = task->tk_client; |
| 145 | struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred; | 151 | struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base); |
| 146 | __be32 *base, *hold; | 152 | __be32 *base, *hold; |
| 147 | int i; | 153 | int i; |
| 148 | 154 | ||
| @@ -175,7 +181,7 @@ unx_marshal(struct rpc_task *task, __be32 *p) | |||
| 175 | static int | 181 | static int |
| 176 | unx_refresh(struct rpc_task *task) | 182 | unx_refresh(struct rpc_task *task) |
| 177 | { | 183 | { |
| 178 | task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 184 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); |
| 179 | return 0; | 185 | return 0; |
| 180 | } | 186 | } |
| 181 | 187 | ||
| @@ -198,13 +204,18 @@ unx_validate(struct rpc_task *task, __be32 *p) | |||
| 198 | printk("RPC: giant verf size: %u\n", size); | 204 | printk("RPC: giant verf size: %u\n", size); |
| 199 | return NULL; | 205 | return NULL; |
| 200 | } | 206 | } |
| 201 | task->tk_auth->au_rslack = (size >> 2) + 2; | 207 | task->tk_msg.rpc_cred->cr_auth->au_rslack = (size >> 2) + 2; |
| 202 | p += (size >> 2); | 208 | p += (size >> 2); |
| 203 | 209 | ||
| 204 | return p; | 210 | return p; |
| 205 | } | 211 | } |
| 206 | 212 | ||
| 207 | struct rpc_authops authunix_ops = { | 213 | void __init rpc_init_authunix(void) |
| 214 | { | ||
| 215 | spin_lock_init(&unix_cred_cache.lock); | ||
| 216 | } | ||
| 217 | |||
| 218 | const struct rpc_authops authunix_ops = { | ||
| 208 | .owner = THIS_MODULE, | 219 | .owner = THIS_MODULE, |
| 209 | .au_flavor = RPC_AUTH_UNIX, | 220 | .au_flavor = RPC_AUTH_UNIX, |
| 210 | #ifdef RPC_DEBUG | 221 | #ifdef RPC_DEBUG |
| @@ -218,7 +229,6 @@ struct rpc_authops authunix_ops = { | |||
| 218 | 229 | ||
| 219 | static | 230 | static |
| 220 | struct rpc_cred_cache unix_cred_cache = { | 231 | struct rpc_cred_cache unix_cred_cache = { |
| 221 | .expire = UNX_CRED_EXPIRE, | ||
| 222 | }; | 232 | }; |
| 223 | 233 | ||
| 224 | static | 234 | static |
| @@ -232,7 +242,7 @@ struct rpc_auth unix_auth = { | |||
| 232 | }; | 242 | }; |
| 233 | 243 | ||
| 234 | static | 244 | static |
| 235 | struct rpc_credops unix_credops = { | 245 | const struct rpc_credops unix_credops = { |
| 236 | .cr_name = "AUTH_UNIX", | 246 | .cr_name = "AUTH_UNIX", |
| 237 | .crdestroy = unx_destroy_cred, | 247 | .crdestroy = unx_destroy_cred, |
| 238 | .crmatch = unx_match, | 248 | .crmatch = unx_match, |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d8fbee40a19c..52429b1ffcc1 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -44,6 +44,12 @@ | |||
| 44 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ | 44 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ |
| 45 | __FUNCTION__, t->tk_status) | 45 | __FUNCTION__, t->tk_status) |
| 46 | 46 | ||
| 47 | /* | ||
| 48 | * All RPC clients are linked into this list | ||
| 49 | */ | ||
| 50 | static LIST_HEAD(all_clients); | ||
| 51 | static DEFINE_SPINLOCK(rpc_client_lock); | ||
| 52 | |||
| 47 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); | 53 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); |
| 48 | 54 | ||
| 49 | 55 | ||
| @@ -66,6 +72,21 @@ static void call_connect_status(struct rpc_task *task); | |||
| 66 | static __be32 * call_header(struct rpc_task *task); | 72 | static __be32 * call_header(struct rpc_task *task); |
| 67 | static __be32 * call_verify(struct rpc_task *task); | 73 | static __be32 * call_verify(struct rpc_task *task); |
| 68 | 74 | ||
| 75 | static int rpc_ping(struct rpc_clnt *clnt, int flags); | ||
| 76 | |||
| 77 | static void rpc_register_client(struct rpc_clnt *clnt) | ||
| 78 | { | ||
| 79 | spin_lock(&rpc_client_lock); | ||
| 80 | list_add(&clnt->cl_clients, &all_clients); | ||
| 81 | spin_unlock(&rpc_client_lock); | ||
| 82 | } | ||
| 83 | |||
| 84 | static void rpc_unregister_client(struct rpc_clnt *clnt) | ||
| 85 | { | ||
| 86 | spin_lock(&rpc_client_lock); | ||
| 87 | list_del(&clnt->cl_clients); | ||
| 88 | spin_unlock(&rpc_client_lock); | ||
| 89 | } | ||
| 69 | 90 | ||
| 70 | static int | 91 | static int |
| 71 | rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) | 92 | rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) |
| @@ -111,6 +132,9 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
| 111 | dprintk("RPC: creating %s client for %s (xprt %p)\n", | 132 | dprintk("RPC: creating %s client for %s (xprt %p)\n", |
| 112 | program->name, servname, xprt); | 133 | program->name, servname, xprt); |
| 113 | 134 | ||
| 135 | err = rpciod_up(); | ||
| 136 | if (err) | ||
| 137 | goto out_no_rpciod; | ||
| 114 | err = -EINVAL; | 138 | err = -EINVAL; |
| 115 | if (!xprt) | 139 | if (!xprt) |
| 116 | goto out_no_xprt; | 140 | goto out_no_xprt; |
| @@ -121,8 +145,6 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
| 121 | clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); | 145 | clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); |
| 122 | if (!clnt) | 146 | if (!clnt) |
| 123 | goto out_err; | 147 | goto out_err; |
| 124 | atomic_set(&clnt->cl_users, 0); | ||
| 125 | atomic_set(&clnt->cl_count, 1); | ||
| 126 | clnt->cl_parent = clnt; | 148 | clnt->cl_parent = clnt; |
| 127 | 149 | ||
| 128 | clnt->cl_server = clnt->cl_inline_name; | 150 | clnt->cl_server = clnt->cl_inline_name; |
| @@ -148,6 +170,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
| 148 | if (clnt->cl_metrics == NULL) | 170 | if (clnt->cl_metrics == NULL) |
| 149 | goto out_no_stats; | 171 | goto out_no_stats; |
| 150 | clnt->cl_program = program; | 172 | clnt->cl_program = program; |
| 173 | INIT_LIST_HEAD(&clnt->cl_tasks); | ||
| 174 | spin_lock_init(&clnt->cl_lock); | ||
| 151 | 175 | ||
| 152 | if (!xprt_bound(clnt->cl_xprt)) | 176 | if (!xprt_bound(clnt->cl_xprt)) |
| 153 | clnt->cl_autobind = 1; | 177 | clnt->cl_autobind = 1; |
| @@ -155,6 +179,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
| 155 | clnt->cl_rtt = &clnt->cl_rtt_default; | 179 | clnt->cl_rtt = &clnt->cl_rtt_default; |
| 156 | rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); | 180 | rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); |
| 157 | 181 | ||
| 182 | kref_init(&clnt->cl_kref); | ||
| 183 | |||
| 158 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); | 184 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); |
| 159 | if (err < 0) | 185 | if (err < 0) |
| 160 | goto out_no_path; | 186 | goto out_no_path; |
| @@ -172,6 +198,7 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
| 172 | if (clnt->cl_nodelen > UNX_MAXNODENAME) | 198 | if (clnt->cl_nodelen > UNX_MAXNODENAME) |
| 173 | clnt->cl_nodelen = UNX_MAXNODENAME; | 199 | clnt->cl_nodelen = UNX_MAXNODENAME; |
| 174 | memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); | 200 | memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); |
| 201 | rpc_register_client(clnt); | ||
| 175 | return clnt; | 202 | return clnt; |
| 176 | 203 | ||
| 177 | out_no_auth: | 204 | out_no_auth: |
| @@ -188,6 +215,8 @@ out_no_stats: | |||
| 188 | out_err: | 215 | out_err: |
| 189 | xprt_put(xprt); | 216 | xprt_put(xprt); |
| 190 | out_no_xprt: | 217 | out_no_xprt: |
| 218 | rpciod_down(); | ||
| 219 | out_no_rpciod: | ||
| 191 | return ERR_PTR(err); | 220 | return ERR_PTR(err); |
| 192 | } | 221 | } |
| 193 | 222 | ||
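rpc_new_client now brings rpciod up before anything else and drops it last on the error path (out_no_rpciod), keeping acquisition and release strictly nested. A stand-alone sketch of that goto-unwind ordering; every demo_* function below is a stub invented for the example, with demo_take_transport forced to fail so the error path is exercised.

#include <linux/errno.h>

static int demo_take_workqueue(void)  { return 0; }		/* stands in for rpciod_up() */
static void demo_drop_workqueue(void) { }			/* stands in for rpciod_down() */
static int demo_take_transport(void)  { return -EINVAL; }	/* forces the error path */

static int demo_new_client(void)
{
	int err;

	err = demo_take_workqueue();
	if (err)
		goto out_no_workqueue;
	err = demo_take_transport();
	if (err)
		goto out_no_transport;
	return 0;

out_no_transport:
	demo_drop_workqueue();		/* release in reverse order of acquisition */
out_no_workqueue:
	return err;
}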
| @@ -205,13 +234,32 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
| 205 | { | 234 | { |
| 206 | struct rpc_xprt *xprt; | 235 | struct rpc_xprt *xprt; |
| 207 | struct rpc_clnt *clnt; | 236 | struct rpc_clnt *clnt; |
| 237 | struct rpc_xprtsock_create xprtargs = { | ||
| 238 | .proto = args->protocol, | ||
| 239 | .srcaddr = args->saddress, | ||
| 240 | .dstaddr = args->address, | ||
| 241 | .addrlen = args->addrsize, | ||
| 242 | .timeout = args->timeout | ||
| 243 | }; | ||
| 244 | char servername[20]; | ||
| 208 | 245 | ||
| 209 | xprt = xprt_create_transport(args->protocol, args->address, | 246 | xprt = xprt_create_transport(&xprtargs); |
| 210 | args->addrsize, args->timeout); | ||
| 211 | if (IS_ERR(xprt)) | 247 | if (IS_ERR(xprt)) |
| 212 | return (struct rpc_clnt *)xprt; | 248 | return (struct rpc_clnt *)xprt; |
| 213 | 249 | ||
| 214 | /* | 250 | /* |
| 251 | * If the caller chooses not to specify a hostname, whip | ||
| 252 | * up a string representation of the passed-in address. | ||
| 253 | */ | ||
| 254 | if (args->servername == NULL) { | ||
| 255 | struct sockaddr_in *addr = | ||
| 256 | (struct sockaddr_in *) &args->address; | ||
| 257 | snprintf(servername, sizeof(servername), NIPQUAD_FMT, | ||
| 258 | NIPQUAD(addr->sin_addr.s_addr)); | ||
| 259 | args->servername = servername; | ||
| 260 | } | ||
| 261 | |||
| 262 | /* | ||
| 215 | * By default, kernel RPC client connects from a reserved port. | 263 | * By default, kernel RPC client connects from a reserved port. |
| 216 | * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, | 264 | * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, |
| 217 | * but it is always enabled for rpciod, which handles the connect | 265 | * but it is always enabled for rpciod, which handles the connect |
| @@ -245,8 +293,6 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
| 245 | clnt->cl_intr = 1; | 293 | clnt->cl_intr = 1; |
| 246 | if (args->flags & RPC_CLNT_CREATE_AUTOBIND) | 294 | if (args->flags & RPC_CLNT_CREATE_AUTOBIND) |
| 247 | clnt->cl_autobind = 1; | 295 | clnt->cl_autobind = 1; |
| 248 | if (args->flags & RPC_CLNT_CREATE_ONESHOT) | ||
| 249 | clnt->cl_oneshot = 1; | ||
| 250 | if (args->flags & RPC_CLNT_CREATE_DISCRTRY) | 296 | if (args->flags & RPC_CLNT_CREATE_DISCRTRY) |
| 251 | clnt->cl_discrtry = 1; | 297 | clnt->cl_discrtry = 1; |
| 252 | 298 | ||
| @@ -268,24 +314,25 @@ rpc_clone_client(struct rpc_clnt *clnt) | |||
| 268 | new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); | 314 | new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); |
| 269 | if (!new) | 315 | if (!new) |
| 270 | goto out_no_clnt; | 316 | goto out_no_clnt; |
| 271 | atomic_set(&new->cl_count, 1); | 317 | new->cl_parent = clnt; |
| 272 | atomic_set(&new->cl_users, 0); | 318 | /* Turn off autobind on clones */ |
| 319 | new->cl_autobind = 0; | ||
| 320 | INIT_LIST_HEAD(&new->cl_tasks); | ||
| 321 | spin_lock_init(&new->cl_lock); | ||
| 322 | rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); | ||
| 273 | new->cl_metrics = rpc_alloc_iostats(clnt); | 323 | new->cl_metrics = rpc_alloc_iostats(clnt); |
| 274 | if (new->cl_metrics == NULL) | 324 | if (new->cl_metrics == NULL) |
| 275 | goto out_no_stats; | 325 | goto out_no_stats; |
| 326 | kref_init(&new->cl_kref); | ||
| 276 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); | 327 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); |
| 277 | if (err != 0) | 328 | if (err != 0) |
| 278 | goto out_no_path; | 329 | goto out_no_path; |
| 279 | new->cl_parent = clnt; | ||
| 280 | atomic_inc(&clnt->cl_count); | ||
| 281 | new->cl_xprt = xprt_get(clnt->cl_xprt); | ||
| 282 | /* Turn off autobind on clones */ | ||
| 283 | new->cl_autobind = 0; | ||
| 284 | new->cl_oneshot = 0; | ||
| 285 | new->cl_dead = 0; | ||
| 286 | rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); | ||
| 287 | if (new->cl_auth) | 330 | if (new->cl_auth) |
| 288 | atomic_inc(&new->cl_auth->au_count); | 331 | atomic_inc(&new->cl_auth->au_count); |
| 332 | xprt_get(clnt->cl_xprt); | ||
| 333 | kref_get(&clnt->cl_kref); | ||
| 334 | rpc_register_client(new); | ||
| 335 | rpciod_up(); | ||
| 289 | return new; | 336 | return new; |
| 290 | out_no_path: | 337 | out_no_path: |
| 291 | rpc_free_iostats(new->cl_metrics); | 338 | rpc_free_iostats(new->cl_metrics); |
| @@ -298,86 +345,86 @@ out_no_clnt: | |||
| 298 | 345 | ||
| 299 | /* | 346 | /* |
| 300 | * Properly shut down an RPC client, terminating all outstanding | 347 | * Properly shut down an RPC client, terminating all outstanding |
| 301 | * requests. Note that we must be certain that cl_oneshot and | 348 | * requests. |
| 302 | * cl_dead are cleared, or else the client would be destroyed | ||
| 303 | * when the last task releases it. | ||
| 304 | */ | 349 | */ |
| 305 | int | 350 | void rpc_shutdown_client(struct rpc_clnt *clnt) |
| 306 | rpc_shutdown_client(struct rpc_clnt *clnt) | ||
| 307 | { | 351 | { |
| 308 | dprintk("RPC: shutting down %s client for %s, tasks=%d\n", | 352 | dprintk("RPC: shutting down %s client for %s\n", |
| 309 | clnt->cl_protname, clnt->cl_server, | 353 | clnt->cl_protname, clnt->cl_server); |
| 310 | atomic_read(&clnt->cl_users)); | 354 | |
| 311 | 355 | while (!list_empty(&clnt->cl_tasks)) { | |
| 312 | while (atomic_read(&clnt->cl_users) > 0) { | ||
| 313 | /* Don't let rpc_release_client destroy us */ | ||
| 314 | clnt->cl_oneshot = 0; | ||
| 315 | clnt->cl_dead = 0; | ||
| 316 | rpc_killall_tasks(clnt); | 356 | rpc_killall_tasks(clnt); |
| 317 | wait_event_timeout(destroy_wait, | 357 | wait_event_timeout(destroy_wait, |
| 318 | !atomic_read(&clnt->cl_users), 1*HZ); | 358 | list_empty(&clnt->cl_tasks), 1*HZ); |
| 319 | } | ||
| 320 | |||
| 321 | if (atomic_read(&clnt->cl_users) < 0) { | ||
| 322 | printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n", | ||
| 323 | clnt, atomic_read(&clnt->cl_users)); | ||
| 324 | #ifdef RPC_DEBUG | ||
| 325 | rpc_show_tasks(); | ||
| 326 | #endif | ||
| 327 | BUG(); | ||
| 328 | } | 359 | } |
| 329 | 360 | ||
| 330 | return rpc_destroy_client(clnt); | 361 | rpc_release_client(clnt); |
| 331 | } | 362 | } |
| 332 | 363 | ||
| 333 | /* | 364 | /* |
| 334 | * Delete an RPC client | 365 | * Free an RPC client |
| 335 | */ | 366 | */ |
| 336 | int | 367 | static void |
| 337 | rpc_destroy_client(struct rpc_clnt *clnt) | 368 | rpc_free_client(struct kref *kref) |
| 338 | { | 369 | { |
| 339 | if (!atomic_dec_and_test(&clnt->cl_count)) | 370 | struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); |
| 340 | return 1; | ||
| 341 | BUG_ON(atomic_read(&clnt->cl_users) != 0); | ||
| 342 | 371 | ||
| 343 | dprintk("RPC: destroying %s client for %s\n", | 372 | dprintk("RPC: destroying %s client for %s\n", |
| 344 | clnt->cl_protname, clnt->cl_server); | 373 | clnt->cl_protname, clnt->cl_server); |
| 345 | if (clnt->cl_auth) { | ||
| 346 | rpcauth_destroy(clnt->cl_auth); | ||
| 347 | clnt->cl_auth = NULL; | ||
| 348 | } | ||
| 349 | if (!IS_ERR(clnt->cl_dentry)) { | 374 | if (!IS_ERR(clnt->cl_dentry)) { |
| 350 | rpc_rmdir(clnt->cl_dentry); | 375 | rpc_rmdir(clnt->cl_dentry); |
| 351 | rpc_put_mount(); | 376 | rpc_put_mount(); |
| 352 | } | 377 | } |
| 353 | if (clnt->cl_parent != clnt) { | 378 | if (clnt->cl_parent != clnt) { |
| 354 | rpc_destroy_client(clnt->cl_parent); | 379 | rpc_release_client(clnt->cl_parent); |
| 355 | goto out_free; | 380 | goto out_free; |
| 356 | } | 381 | } |
| 357 | if (clnt->cl_server != clnt->cl_inline_name) | 382 | if (clnt->cl_server != clnt->cl_inline_name) |
| 358 | kfree(clnt->cl_server); | 383 | kfree(clnt->cl_server); |
| 359 | out_free: | 384 | out_free: |
| 385 | rpc_unregister_client(clnt); | ||
| 360 | rpc_free_iostats(clnt->cl_metrics); | 386 | rpc_free_iostats(clnt->cl_metrics); |
| 361 | clnt->cl_metrics = NULL; | 387 | clnt->cl_metrics = NULL; |
| 362 | xprt_put(clnt->cl_xprt); | 388 | xprt_put(clnt->cl_xprt); |
| 389 | rpciod_down(); | ||
| 363 | kfree(clnt); | 390 | kfree(clnt); |
| 364 | return 0; | ||
| 365 | } | 391 | } |
| 366 | 392 | ||
| 367 | /* | 393 | /* |
| 368 | * Release an RPC client | 394 | * Free an RPC client |
| 395 | */ | ||
| 396 | static void | ||
| 397 | rpc_free_auth(struct kref *kref) | ||
| 398 | { | ||
| 399 | struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); | ||
| 400 | |||
| 401 | if (clnt->cl_auth == NULL) { | ||
| 402 | rpc_free_client(kref); | ||
| 403 | return; | ||
| 404 | } | ||
| 405 | |||
| 406 | /* | ||
| 407 | * Note: RPCSEC_GSS may need to send NULL RPC calls in order to | ||
| 408 | * release remaining GSS contexts. This mechanism ensures | ||
| 409 | * that it can do so safely. | ||
| 410 | */ | ||
| 411 | kref_init(kref); | ||
| 412 | rpcauth_release(clnt->cl_auth); | ||
| 413 | clnt->cl_auth = NULL; | ||
| 414 | kref_put(kref, rpc_free_client); | ||
| 415 | } | ||
| 416 | |||
| 417 | /* | ||
| 418 | * Release reference to the RPC client | ||
| 369 | */ | 419 | */ |
| 370 | void | 420 | void |
| 371 | rpc_release_client(struct rpc_clnt *clnt) | 421 | rpc_release_client(struct rpc_clnt *clnt) |
| 372 | { | 422 | { |
| 373 | dprintk("RPC: rpc_release_client(%p, %d)\n", | 423 | dprintk("RPC: rpc_release_client(%p)\n", clnt); |
| 374 | clnt, atomic_read(&clnt->cl_users)); | ||
| 375 | 424 | ||
| 376 | if (!atomic_dec_and_test(&clnt->cl_users)) | 425 | if (list_empty(&clnt->cl_tasks)) |
| 377 | return; | 426 | wake_up(&destroy_wait); |
| 378 | wake_up(&destroy_wait); | 427 | kref_put(&clnt->cl_kref, rpc_free_auth); |
| 379 | if (clnt->cl_oneshot || clnt->cl_dead) | ||
| 380 | rpc_destroy_client(clnt); | ||
| 381 | } | 428 | } |
| 382 | 429 | ||
| 383 | /** | 430 | /** |
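The hunk above replaces the cl_count/cl_users pair with a single kref: rpc_release_client drops a reference, rpc_free_auth runs on the final put, and it re-arms the kref with kref_init() so that auth teardown, which per the patch comment may still send NULL RPCs over the same client, can take and drop temporary references before rpc_free_client runs. The bare kref pattern in isolation; demo_client and its helpers are invented for illustration.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_client {
	struct kref refcount;
	/* ... payload ... */
};

static void demo_client_release(struct kref *kref)
{
	struct demo_client *clnt = container_of(kref, struct demo_client, refcount);

	kfree(clnt);
}

static struct demo_client *demo_client_get(struct demo_client *clnt)
{
	kref_get(&clnt->refcount);
	return clnt;
}

static void demo_client_put(struct demo_client *clnt)
{
	/* Invokes demo_client_release() only when the last reference drops. */
	kref_put(&clnt->refcount, demo_client_release);
}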
| @@ -468,82 +515,96 @@ void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) | |||
| 468 | rpc_restore_sigmask(oldset); | 515 | rpc_restore_sigmask(oldset); |
| 469 | } | 516 | } |
| 470 | 517 | ||
| 471 | /* | 518 | static |
| 472 | * New rpc_call implementation | 519 | struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt, |
| 520 | struct rpc_message *msg, | ||
| 521 | int flags, | ||
| 522 | const struct rpc_call_ops *ops, | ||
| 523 | void *data) | ||
| 524 | { | ||
| 525 | struct rpc_task *task, *ret; | ||
| 526 | sigset_t oldset; | ||
| 527 | |||
| 528 | task = rpc_new_task(clnt, flags, ops, data); | ||
| 529 | if (task == NULL) { | ||
| 530 | rpc_release_calldata(ops, data); | ||
| 531 | return ERR_PTR(-ENOMEM); | ||
| 532 | } | ||
| 533 | |||
| 534 | /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */ | ||
| 535 | rpc_task_sigmask(task, &oldset); | ||
| 536 | if (msg != NULL) { | ||
| 537 | rpc_call_setup(task, msg, 0); | ||
| 538 | if (task->tk_status != 0) { | ||
| 539 | ret = ERR_PTR(task->tk_status); | ||
| 540 | rpc_put_task(task); | ||
| 541 | goto out; | ||
| 542 | } | ||
| 543 | } | ||
| 544 | atomic_inc(&task->tk_count); | ||
| 545 | rpc_execute(task); | ||
| 546 | ret = task; | ||
| 547 | out: | ||
| 548 | rpc_restore_sigmask(&oldset); | ||
| 549 | return ret; | ||
| 550 | } | ||
| 551 | |||
| 552 | /** | ||
| 553 | * rpc_call_sync - Perform a synchronous RPC call | ||
| 554 | * @clnt: pointer to RPC client | ||
| 555 | * @msg: RPC call parameters | ||
| 556 | * @flags: RPC call flags | ||
| 473 | */ | 557 | */ |
| 474 | int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | 558 | int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) |
| 475 | { | 559 | { |
| 476 | struct rpc_task *task; | 560 | struct rpc_task *task; |
| 477 | sigset_t oldset; | 561 | int status; |
| 478 | int status; | ||
| 479 | |||
| 480 | /* If this client is slain all further I/O fails */ | ||
| 481 | if (clnt->cl_dead) | ||
| 482 | return -EIO; | ||
| 483 | 562 | ||
| 484 | BUG_ON(flags & RPC_TASK_ASYNC); | 563 | BUG_ON(flags & RPC_TASK_ASYNC); |
| 485 | 564 | ||
| 486 | task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); | 565 | task = rpc_do_run_task(clnt, msg, flags, &rpc_default_ops, NULL); |
| 487 | if (task == NULL) | 566 | if (IS_ERR(task)) |
| 488 | return -ENOMEM; | 567 | return PTR_ERR(task); |
| 489 | |||
| 490 | /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ | ||
| 491 | rpc_task_sigmask(task, &oldset); | ||
| 492 | |||
| 493 | /* Set up the call info struct and execute the task */ | ||
| 494 | rpc_call_setup(task, msg, 0); | ||
| 495 | if (task->tk_status == 0) { | ||
| 496 | atomic_inc(&task->tk_count); | ||
| 497 | rpc_execute(task); | ||
| 498 | } | ||
| 499 | status = task->tk_status; | 568 | status = task->tk_status; |
| 500 | rpc_put_task(task); | 569 | rpc_put_task(task); |
| 501 | rpc_restore_sigmask(&oldset); | ||
| 502 | return status; | 570 | return status; |
| 503 | } | 571 | } |
| 504 | 572 | ||
| 505 | /* | 573 | /** |
| 506 | * New rpc_call implementation | 574 | * rpc_call_async - Perform an asynchronous RPC call |
| 575 | * @clnt: pointer to RPC client | ||
| 576 | * @msg: RPC call parameters | ||
| 577 | * @flags: RPC call flags | ||
| 578 | * @ops: RPC call ops | ||
| 579 | * @data: user call data | ||
| 507 | */ | 580 | */ |
| 508 | int | 581 | int |
| 509 | rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, | 582 | rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, |
| 510 | const struct rpc_call_ops *tk_ops, void *data) | 583 | const struct rpc_call_ops *tk_ops, void *data) |
| 511 | { | 584 | { |
| 512 | struct rpc_task *task; | 585 | struct rpc_task *task; |
| 513 | sigset_t oldset; | ||
| 514 | int status; | ||
| 515 | |||
| 516 | /* If this client is slain all further I/O fails */ | ||
| 517 | status = -EIO; | ||
| 518 | if (clnt->cl_dead) | ||
| 519 | goto out_release; | ||
| 520 | |||
| 521 | flags |= RPC_TASK_ASYNC; | ||
| 522 | |||
| 523 | /* Create/initialize a new RPC task */ | ||
| 524 | status = -ENOMEM; | ||
| 525 | if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) | ||
| 526 | goto out_release; | ||
| 527 | |||
| 528 | /* Mask signals on GSS_AUTH upcalls */ | ||
| 529 | rpc_task_sigmask(task, &oldset); | ||
| 530 | 586 | ||
| 531 | rpc_call_setup(task, msg, 0); | 587 | task = rpc_do_run_task(clnt, msg, flags|RPC_TASK_ASYNC, tk_ops, data); |
| 532 | 588 | if (IS_ERR(task)) | |
| 533 | /* Set up the call info struct and execute the task */ | 589 | return PTR_ERR(task); |
| 534 | status = task->tk_status; | 590 | rpc_put_task(task); |
| 535 | if (status == 0) | 591 | return 0; |
| 536 | rpc_execute(task); | ||
| 537 | else | ||
| 538 | rpc_put_task(task); | ||
| 539 | |||
| 540 | rpc_restore_sigmask(&oldset); | ||
| 541 | return status; | ||
| 542 | out_release: | ||
| 543 | rpc_release_calldata(tk_ops, data); | ||
| 544 | return status; | ||
| 545 | } | 592 | } |
| 546 | 593 | ||
| 594 | /** | ||
| 595 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | ||
| 596 | * @clnt: pointer to RPC client | ||
| 597 | * @flags: RPC flags | ||
| 598 | * @ops: RPC call ops | ||
| 599 | * @data: user call data | ||
| 600 | */ | ||
| 601 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | ||
| 602 | const struct rpc_call_ops *tk_ops, | ||
| 603 | void *data) | ||
| 604 | { | ||
| 605 | return rpc_do_run_task(clnt, NULL, flags, tk_ops, data); | ||
| 606 | } | ||
| 607 | EXPORT_SYMBOL(rpc_run_task); | ||
| 547 | 608 | ||
| 548 | void | 609 | void |
| 549 | rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) | 610 | rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) |
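With rpc_do_run_task factored out above, rpc_call_sync and rpc_call_async become thin wrappers around it. A hedged caller-side sketch for an argument-free procedure such as the NULL ping; the demo_* names are assumptions, not part of this patch.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void demo_call_done(struct rpc_task *task, void *calldata)
{
	/* task->tk_status holds the final result of the async call. */
}

static const struct rpc_call_ops demo_call_ops = {
	.rpc_call_done	= demo_call_done,
};

static int demo_ping_sync(struct rpc_clnt *clnt, struct rpc_procinfo *proc)
{
	struct rpc_message msg = { .rpc_proc = proc };

	/* Blocks until the task completes; returns its tk_status. */
	return rpc_call_sync(clnt, &msg, 0);
}

static int demo_ping_async(struct rpc_clnt *clnt, struct rpc_procinfo *proc)
{
	struct rpc_message msg = { .rpc_proc = proc };

	/* Returns 0 once the task is queued; completion lands in demo_call_done(). */
	return rpc_call_async(clnt, &msg, 0, &demo_call_ops, NULL);
}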
| @@ -745,7 +806,7 @@ call_reserveresult(struct rpc_task *task) | |||
| 745 | static void | 806 | static void |
| 746 | call_allocate(struct rpc_task *task) | 807 | call_allocate(struct rpc_task *task) |
| 747 | { | 808 | { |
| 748 | unsigned int slack = task->tk_auth->au_cslack; | 809 | unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack; |
| 749 | struct rpc_rqst *req = task->tk_rqstp; | 810 | struct rpc_rqst *req = task->tk_rqstp; |
| 750 | struct rpc_xprt *xprt = task->tk_xprt; | 811 | struct rpc_xprt *xprt = task->tk_xprt; |
| 751 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; | 812 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; |
| @@ -843,10 +904,8 @@ call_encode(struct rpc_task *task) | |||
| 843 | if (encode == NULL) | 904 | if (encode == NULL) |
| 844 | return; | 905 | return; |
| 845 | 906 | ||
| 846 | lock_kernel(); | ||
| 847 | task->tk_status = rpcauth_wrap_req(task, encode, req, p, | 907 | task->tk_status = rpcauth_wrap_req(task, encode, req, p, |
| 848 | task->tk_msg.rpc_argp); | 908 | task->tk_msg.rpc_argp); |
| 849 | unlock_kernel(); | ||
| 850 | if (task->tk_status == -ENOMEM) { | 909 | if (task->tk_status == -ENOMEM) { |
| 851 | /* XXX: Is this sane? */ | 910 | /* XXX: Is this sane? */ |
| 852 | rpc_delay(task, 3*HZ); | 911 | rpc_delay(task, 3*HZ); |
| @@ -1177,10 +1236,8 @@ call_decode(struct rpc_task *task) | |||
| 1177 | task->tk_action = rpc_exit_task; | 1236 | task->tk_action = rpc_exit_task; |
| 1178 | 1237 | ||
| 1179 | if (decode) { | 1238 | if (decode) { |
| 1180 | lock_kernel(); | ||
| 1181 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, | 1239 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, |
| 1182 | task->tk_msg.rpc_resp); | 1240 | task->tk_msg.rpc_resp); |
| 1183 | unlock_kernel(); | ||
| 1184 | } | 1241 | } |
| 1185 | dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, | 1242 | dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, |
| 1186 | task->tk_status); | 1243 | task->tk_status); |
| @@ -1273,9 +1330,9 @@ call_verify(struct rpc_task *task) | |||
| 1273 | * - if it isn't pointer subtraction in the NFS client may give | 1330 | * - if it isn't pointer subtraction in the NFS client may give |
| 1274 | * undefined results | 1331 | * undefined results |
| 1275 | */ | 1332 | */ |
| 1276 | printk(KERN_WARNING | 1333 | dprintk("RPC: %5u %s: XDR representation not a multiple of" |
| 1277 | "call_verify: XDR representation not a multiple of" | 1334 | " 4 bytes: 0x%x\n", task->tk_pid, __FUNCTION__, |
| 1278 | " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len); | 1335 | task->tk_rqstp->rq_rcv_buf.len); |
| 1279 | goto out_eio; | 1336 | goto out_eio; |
| 1280 | } | 1337 | } |
| 1281 | if ((len -= 3) < 0) | 1338 | if ((len -= 3) < 0) |
| @@ -1283,7 +1340,8 @@ call_verify(struct rpc_task *task) | |||
| 1283 | p += 1; /* skip XID */ | 1340 | p += 1; /* skip XID */ |
| 1284 | 1341 | ||
| 1285 | if ((n = ntohl(*p++)) != RPC_REPLY) { | 1342 | if ((n = ntohl(*p++)) != RPC_REPLY) { |
| 1286 | printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); | 1343 | dprintk("RPC: %5u %s: not an RPC reply: %x\n", |
| 1344 | task->tk_pid, __FUNCTION__, n); | ||
| 1287 | goto out_garbage; | 1345 | goto out_garbage; |
| 1288 | } | 1346 | } |
| 1289 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { | 1347 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { |
| @@ -1334,7 +1392,8 @@ call_verify(struct rpc_task *task) | |||
| 1334 | "authentication.\n", task->tk_client->cl_server); | 1392 | "authentication.\n", task->tk_client->cl_server); |
| 1335 | break; | 1393 | break; |
| 1336 | default: | 1394 | default: |
| 1337 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); | 1395 | dprintk("RPC: %5u %s: unknown auth error: %x\n", |
| 1396 | task->tk_pid, __FUNCTION__, n); | ||
| 1338 | error = -EIO; | 1397 | error = -EIO; |
| 1339 | } | 1398 | } |
| 1340 | dprintk("RPC: %5u %s: call rejected %d\n", | 1399 | dprintk("RPC: %5u %s: call rejected %d\n", |
| @@ -1342,7 +1401,8 @@ call_verify(struct rpc_task *task) | |||
| 1342 | goto out_err; | 1401 | goto out_err; |
| 1343 | } | 1402 | } |
| 1344 | if (!(p = rpcauth_checkverf(task, p))) { | 1403 | if (!(p = rpcauth_checkverf(task, p))) { |
| 1345 | printk(KERN_WARNING "call_verify: auth check failed\n"); | 1404 | dprintk("RPC: %5u %s: auth check failed\n", |
| 1405 | task->tk_pid, __FUNCTION__); | ||
| 1346 | goto out_garbage; /* bad verifier, retry */ | 1406 | goto out_garbage; /* bad verifier, retry */ |
| 1347 | } | 1407 | } |
| 1348 | len = p - (__be32 *)iov->iov_base - 1; | 1408 | len = p - (__be32 *)iov->iov_base - 1; |
| @@ -1381,7 +1441,8 @@ call_verify(struct rpc_task *task) | |||
| 1381 | task->tk_pid, __FUNCTION__); | 1441 | task->tk_pid, __FUNCTION__); |
| 1382 | break; /* retry */ | 1442 | break; /* retry */ |
| 1383 | default: | 1443 | default: |
| 1384 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); | 1444 | dprintk("RPC: %5u %s: server accept status: %x\n", |
| 1445 | task->tk_pid, __FUNCTION__, n); | ||
| 1385 | /* Also retry */ | 1446 | /* Also retry */ |
| 1386 | } | 1447 | } |
| 1387 | 1448 | ||
| @@ -1395,14 +1456,16 @@ out_garbage: | |||
| 1395 | out_retry: | 1456 | out_retry: |
| 1396 | return ERR_PTR(-EAGAIN); | 1457 | return ERR_PTR(-EAGAIN); |
| 1397 | } | 1458 | } |
| 1398 | printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); | ||
| 1399 | out_eio: | 1459 | out_eio: |
| 1400 | error = -EIO; | 1460 | error = -EIO; |
| 1401 | out_err: | 1461 | out_err: |
| 1402 | rpc_exit(task, error); | 1462 | rpc_exit(task, error); |
| 1463 | dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, | ||
| 1464 | __FUNCTION__, error); | ||
| 1403 | return ERR_PTR(error); | 1465 | return ERR_PTR(error); |
| 1404 | out_overflow: | 1466 | out_overflow: |
| 1405 | printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); | 1467 | dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, |
| 1468 | __FUNCTION__); | ||
| 1406 | goto out_garbage; | 1469 | goto out_garbage; |
| 1407 | } | 1470 | } |
| 1408 | 1471 | ||
| @@ -1421,7 +1484,7 @@ static struct rpc_procinfo rpcproc_null = { | |||
| 1421 | .p_decode = rpcproc_decode_null, | 1484 | .p_decode = rpcproc_decode_null, |
| 1422 | }; | 1485 | }; |
| 1423 | 1486 | ||
| 1424 | int rpc_ping(struct rpc_clnt *clnt, int flags) | 1487 | static int rpc_ping(struct rpc_clnt *clnt, int flags) |
| 1425 | { | 1488 | { |
| 1426 | struct rpc_message msg = { | 1489 | struct rpc_message msg = { |
| 1427 | .rpc_proc = &rpcproc_null, | 1490 | .rpc_proc = &rpcproc_null, |
| @@ -1432,3 +1495,51 @@ int rpc_ping(struct rpc_clnt *clnt, int flags) | |||
| 1432 | put_rpccred(msg.rpc_cred); | 1495 | put_rpccred(msg.rpc_cred); |
| 1433 | return err; | 1496 | return err; |
| 1434 | } | 1497 | } |
| 1498 | |||
| 1499 | struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) | ||
| 1500 | { | ||
| 1501 | struct rpc_message msg = { | ||
| 1502 | .rpc_proc = &rpcproc_null, | ||
| 1503 | .rpc_cred = cred, | ||
| 1504 | }; | ||
| 1505 | return rpc_do_run_task(clnt, &msg, flags, &rpc_default_ops, NULL); | ||
| 1506 | } | ||
| 1507 | EXPORT_SYMBOL(rpc_call_null); | ||
| 1508 | |||
| 1509 | #ifdef RPC_DEBUG | ||
| 1510 | void rpc_show_tasks(void) | ||
| 1511 | { | ||
| 1512 | struct rpc_clnt *clnt; | ||
| 1513 | struct rpc_task *t; | ||
| 1514 | |||
| 1515 | spin_lock(&rpc_client_lock); | ||
| 1516 | if (list_empty(&all_clients)) | ||
| 1517 | goto out; | ||
| 1518 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " | ||
| 1519 | "-rpcwait -action- ---ops--\n"); | ||
| 1520 | list_for_each_entry(clnt, &all_clients, cl_clients) { | ||
| 1521 | if (list_empty(&clnt->cl_tasks)) | ||
| 1522 | continue; | ||
| 1523 | spin_lock(&clnt->cl_lock); | ||
| 1524 | list_for_each_entry(t, &clnt->cl_tasks, tk_task) { | ||
| 1525 | const char *rpc_waitq = "none"; | ||
| 1526 | |||
| 1527 | if (RPC_IS_QUEUED(t)) | ||
| 1528 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | ||
| 1529 | |||
| 1530 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", | ||
| 1531 | t->tk_pid, | ||
| 1532 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | ||
| 1533 | t->tk_flags, t->tk_status, | ||
| 1534 | t->tk_client, | ||
| 1535 | (t->tk_client ? t->tk_client->cl_prog : 0), | ||
| 1536 | t->tk_rqstp, t->tk_timeout, | ||
| 1537 | rpc_waitq, | ||
| 1538 | t->tk_action, t->tk_ops); | ||
| 1539 | } | ||
| 1540 | spin_unlock(&clnt->cl_lock); | ||
| 1541 | } | ||
| 1542 | out: | ||
| 1543 | spin_unlock(&rpc_client_lock); | ||
| 1544 | } | ||
| 1545 | #endif | ||
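rpc_call_null, added and exported above, runs the NULL procedure under a caller-chosen credential and hands back the running task. A hedged usage sketch; the surrounding demo_* function is invented, and the caller only needs to drop its task reference.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void demo_send_null_call(struct rpc_clnt *clnt, struct rpc_cred *cred)
{
	struct rpc_task *task;

	task = rpc_call_null(clnt, cred, RPC_TASK_ASYNC);
	if (IS_ERR(task))
		return;		/* task creation or setup failed */
	rpc_put_task(task);	/* the call keeps running under rpciod */
}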
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 5887457dc936..e787b6a43eee 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -344,7 +344,7 @@ rpc_info_open(struct inode *inode, struct file *file) | |||
| 344 | mutex_lock(&inode->i_mutex); | 344 | mutex_lock(&inode->i_mutex); |
| 345 | clnt = RPC_I(inode)->private; | 345 | clnt = RPC_I(inode)->private; |
| 346 | if (clnt) { | 346 | if (clnt) { |
| 347 | atomic_inc(&clnt->cl_users); | 347 | kref_get(&clnt->cl_kref); |
| 348 | m->private = clnt; | 348 | m->private = clnt; |
| 349 | } else { | 349 | } else { |
| 350 | single_release(inode, file); | 350 | single_release(inode, file); |
| @@ -448,6 +448,15 @@ void rpc_put_mount(void) | |||
| 448 | simple_release_fs(&rpc_mount, &rpc_mount_count); | 448 | simple_release_fs(&rpc_mount, &rpc_mount_count); |
| 449 | } | 449 | } |
| 450 | 450 | ||
| 451 | static int rpc_delete_dentry(struct dentry *dentry) | ||
| 452 | { | ||
| 453 | return 1; | ||
| 454 | } | ||
| 455 | |||
| 456 | static struct dentry_operations rpc_dentry_operations = { | ||
| 457 | .d_delete = rpc_delete_dentry, | ||
| 458 | }; | ||
| 459 | |||
| 451 | static int | 460 | static int |
| 452 | rpc_lookup_parent(char *path, struct nameidata *nd) | 461 | rpc_lookup_parent(char *path, struct nameidata *nd) |
| 453 | { | 462 | { |
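The rpc_dentry_operations added above hook .d_delete with a function that always returns 1, which tells the dcache to drop an rpc_pipefs dentry as soon as its last reference goes away instead of caching it. The same idiom in isolation, with invented demo names:

#include <linux/dcache.h>

static int demo_delete_dentry(struct dentry *dentry)
{
	return 1;	/* do not keep unused dentries in the dcache */
}

static struct dentry_operations demo_dentry_ops = {
	.d_delete = demo_delete_dentry,
};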
| @@ -506,7 +515,7 @@ rpc_get_inode(struct super_block *sb, int mode) | |||
| 506 | * FIXME: This probably has races. | 515 | * FIXME: This probably has races. |
| 507 | */ | 516 | */ |
| 508 | static void | 517 | static void |
| 509 | rpc_depopulate(struct dentry *parent) | 518 | rpc_depopulate(struct dentry *parent, int start, int eof) |
| 510 | { | 519 | { |
| 511 | struct inode *dir = parent->d_inode; | 520 | struct inode *dir = parent->d_inode; |
| 512 | struct list_head *pos, *next; | 521 | struct list_head *pos, *next; |
| @@ -518,6 +527,10 @@ repeat: | |||
| 518 | spin_lock(&dcache_lock); | 527 | spin_lock(&dcache_lock); |
| 519 | list_for_each_safe(pos, next, &parent->d_subdirs) { | 528 | list_for_each_safe(pos, next, &parent->d_subdirs) { |
| 520 | dentry = list_entry(pos, struct dentry, d_u.d_child); | 529 | dentry = list_entry(pos, struct dentry, d_u.d_child); |
| 530 | if (!dentry->d_inode || | ||
| 531 | dentry->d_inode->i_ino < start || | ||
| 532 | dentry->d_inode->i_ino >= eof) | ||
| 533 | continue; | ||
| 521 | spin_lock(&dentry->d_lock); | 534 | spin_lock(&dentry->d_lock); |
| 522 | if (!d_unhashed(dentry)) { | 535 | if (!d_unhashed(dentry)) { |
| 523 | dget_locked(dentry); | 536 | dget_locked(dentry); |
| @@ -533,11 +546,11 @@ repeat: | |||
| 533 | if (n) { | 546 | if (n) { |
| 534 | do { | 547 | do { |
| 535 | dentry = dvec[--n]; | 548 | dentry = dvec[--n]; |
| 536 | if (dentry->d_inode) { | 549 | if (S_ISREG(dentry->d_inode->i_mode)) |
| 537 | rpc_close_pipes(dentry->d_inode); | ||
| 538 | simple_unlink(dir, dentry); | 550 | simple_unlink(dir, dentry); |
| 539 | } | 551 | else if (S_ISDIR(dentry->d_inode->i_mode)) |
| 540 | inode_dir_notify(dir, DN_DELETE); | 552 | simple_rmdir(dir, dentry); |
| 553 | d_delete(dentry); | ||
| 541 | dput(dentry); | 554 | dput(dentry); |
| 542 | } while (n); | 555 | } while (n); |
| 543 | goto repeat; | 556 | goto repeat; |
| @@ -560,6 +573,7 @@ rpc_populate(struct dentry *parent, | |||
| 560 | dentry = d_alloc_name(parent, files[i].name); | 573 | dentry = d_alloc_name(parent, files[i].name); |
| 561 | if (!dentry) | 574 | if (!dentry) |
| 562 | goto out_bad; | 575 | goto out_bad; |
| 576 | dentry->d_op = &rpc_dentry_operations; | ||
| 563 | mode = files[i].mode; | 577 | mode = files[i].mode; |
| 564 | inode = rpc_get_inode(dir->i_sb, mode); | 578 | inode = rpc_get_inode(dir->i_sb, mode); |
| 565 | if (!inode) { | 579 | if (!inode) { |
| @@ -607,21 +621,14 @@ static int | |||
| 607 | __rpc_rmdir(struct inode *dir, struct dentry *dentry) | 621 | __rpc_rmdir(struct inode *dir, struct dentry *dentry) |
| 608 | { | 622 | { |
| 609 | int error; | 623 | int error; |
| 610 | 624 | error = simple_rmdir(dir, dentry); | |
| 611 | shrink_dcache_parent(dentry); | 625 | if (!error) |
| 612 | if (d_unhashed(dentry)) | 626 | d_delete(dentry); |
| 613 | return 0; | 627 | return error; |
| 614 | if ((error = simple_rmdir(dir, dentry)) != 0) | ||
| 615 | return error; | ||
| 616 | if (!error) { | ||
| 617 | inode_dir_notify(dir, DN_DELETE); | ||
| 618 | d_drop(dentry); | ||
| 619 | } | ||
| 620 | return 0; | ||
| 621 | } | 628 | } |
| 622 | 629 | ||
| 623 | static struct dentry * | 630 | static struct dentry * |
| 624 | rpc_lookup_create(struct dentry *parent, const char *name, int len) | 631 | rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive) |
| 625 | { | 632 | { |
| 626 | struct inode *dir = parent->d_inode; | 633 | struct inode *dir = parent->d_inode; |
| 627 | struct dentry *dentry; | 634 | struct dentry *dentry; |
| @@ -630,7 +637,9 @@ rpc_lookup_create(struct dentry *parent, const char *name, int len) | |||
| 630 | dentry = lookup_one_len(name, parent, len); | 637 | dentry = lookup_one_len(name, parent, len); |
| 631 | if (IS_ERR(dentry)) | 638 | if (IS_ERR(dentry)) |
| 632 | goto out_err; | 639 | goto out_err; |
| 633 | if (dentry->d_inode) { | 640 | if (!dentry->d_inode) |
| 641 | dentry->d_op = &rpc_dentry_operations; | ||
| 642 | else if (exclusive) { | ||
| 634 | dput(dentry); | 643 | dput(dentry); |
| 635 | dentry = ERR_PTR(-EEXIST); | 644 | dentry = ERR_PTR(-EEXIST); |
| 636 | goto out_err; | 645 | goto out_err; |
| @@ -649,7 +658,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
| 649 | 658 | ||
| 650 | if ((error = rpc_lookup_parent(path, nd)) != 0) | 659 | if ((error = rpc_lookup_parent(path, nd)) != 0) |
| 651 | return ERR_PTR(error); | 660 | return ERR_PTR(error); |
| 652 | dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len); | 661 | dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1); |
| 653 | if (IS_ERR(dentry)) | 662 | if (IS_ERR(dentry)) |
| 654 | rpc_release_path(nd); | 663 | rpc_release_path(nd); |
| 655 | return dentry; | 664 | return dentry; |
| @@ -681,7 +690,7 @@ out: | |||
| 681 | rpc_release_path(&nd); | 690 | rpc_release_path(&nd); |
| 682 | return dentry; | 691 | return dentry; |
| 683 | err_depopulate: | 692 | err_depopulate: |
| 684 | rpc_depopulate(dentry); | 693 | rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); |
| 685 | __rpc_rmdir(dir, dentry); | 694 | __rpc_rmdir(dir, dentry); |
| 686 | err_dput: | 695 | err_dput: |
| 687 | dput(dentry); | 696 | dput(dentry); |
| @@ -701,7 +710,7 @@ rpc_rmdir(struct dentry *dentry) | |||
| 701 | parent = dget_parent(dentry); | 710 | parent = dget_parent(dentry); |
| 702 | dir = parent->d_inode; | 711 | dir = parent->d_inode; |
| 703 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); | 712 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); |
| 704 | rpc_depopulate(dentry); | 713 | rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); |
| 705 | error = __rpc_rmdir(dir, dentry); | 714 | error = __rpc_rmdir(dir, dentry); |
| 706 | dput(dentry); | 715 | dput(dentry); |
| 707 | mutex_unlock(&dir->i_mutex); | 716 | mutex_unlock(&dir->i_mutex); |
| @@ -716,10 +725,21 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi | |||
| 716 | struct inode *dir, *inode; | 725 | struct inode *dir, *inode; |
| 717 | struct rpc_inode *rpci; | 726 | struct rpc_inode *rpci; |
| 718 | 727 | ||
| 719 | dentry = rpc_lookup_create(parent, name, strlen(name)); | 728 | dentry = rpc_lookup_create(parent, name, strlen(name), 0); |
| 720 | if (IS_ERR(dentry)) | 729 | if (IS_ERR(dentry)) |
| 721 | return dentry; | 730 | return dentry; |
| 722 | dir = parent->d_inode; | 731 | dir = parent->d_inode; |
| 732 | if (dentry->d_inode) { | ||
| 733 | rpci = RPC_I(dentry->d_inode); | ||
| 734 | if (rpci->private != private || | ||
| 735 | rpci->ops != ops || | ||
| 736 | rpci->flags != flags) { | ||
| 737 | dput (dentry); | ||
| 738 | dentry = ERR_PTR(-EBUSY); | ||
| 739 | } | ||
| 740 | rpci->nkern_readwriters++; | ||
| 741 | goto out; | ||
| 742 | } | ||
| 723 | inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); | 743 | inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); |
| 724 | if (!inode) | 744 | if (!inode) |
| 725 | goto err_dput; | 745 | goto err_dput; |
| @@ -730,6 +750,7 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi | |||
| 730 | rpci->private = private; | 750 | rpci->private = private; |
| 731 | rpci->flags = flags; | 751 | rpci->flags = flags; |
| 732 | rpci->ops = ops; | 752 | rpci->ops = ops; |
| 753 | rpci->nkern_readwriters = 1; | ||
| 733 | inode_dir_notify(dir, DN_CREATE); | 754 | inode_dir_notify(dir, DN_CREATE); |
| 734 | dget(dentry); | 755 | dget(dentry); |
| 735 | out: | 756 | out: |
| @@ -754,13 +775,11 @@ rpc_unlink(struct dentry *dentry) | |||
| 754 | parent = dget_parent(dentry); | 775 | parent = dget_parent(dentry); |
| 755 | dir = parent->d_inode; | 776 | dir = parent->d_inode; |
| 756 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); | 777 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); |
| 757 | if (!d_unhashed(dentry)) { | 778 | if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) { |
| 758 | d_drop(dentry); | 779 | rpc_close_pipes(dentry->d_inode); |
| 759 | if (dentry->d_inode) { | 780 | error = simple_unlink(dir, dentry); |
| 760 | rpc_close_pipes(dentry->d_inode); | 781 | if (!error) |
| 761 | error = simple_unlink(dir, dentry); | 782 | d_delete(dentry); |
| 762 | } | ||
| 763 | inode_dir_notify(dir, DN_DELETE); | ||
| 764 | } | 783 | } |
| 765 | dput(dentry); | 784 | dput(dentry); |
| 766 | mutex_unlock(&dir->i_mutex); | 785 | mutex_unlock(&dir->i_mutex); |
| @@ -833,6 +852,7 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
| 833 | rpci->nreaders = 0; | 852 | rpci->nreaders = 0; |
| 834 | rpci->nwriters = 0; | 853 | rpci->nwriters = 0; |
| 835 | INIT_LIST_HEAD(&rpci->in_upcall); | 854 | INIT_LIST_HEAD(&rpci->in_upcall); |
| 855 | INIT_LIST_HEAD(&rpci->in_downcall); | ||
| 836 | INIT_LIST_HEAD(&rpci->pipe); | 856 | INIT_LIST_HEAD(&rpci->pipe); |
| 837 | rpci->pipelen = 0; | 857 | rpci->pipelen = 0; |
| 838 | init_waitqueue_head(&rpci->waitq); | 858 | init_waitqueue_head(&rpci->waitq); |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 6c7aa8a1f0c6..d1740dbab991 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
| @@ -12,6 +12,8 @@ | |||
| 12 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | 12 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/module.h> | ||
| 16 | |||
| 15 | #include <linux/types.h> | 17 | #include <linux/types.h> |
| 16 | #include <linux/socket.h> | 18 | #include <linux/socket.h> |
| 17 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
| @@ -184,8 +186,8 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, | |||
| 184 | .program = &rpcb_program, | 186 | .program = &rpcb_program, |
| 185 | .version = version, | 187 | .version = version, |
| 186 | .authflavor = RPC_AUTH_UNIX, | 188 | .authflavor = RPC_AUTH_UNIX, |
| 187 | .flags = (RPC_CLNT_CREATE_ONESHOT | | 189 | .flags = (RPC_CLNT_CREATE_NOPING | |
| 188 | RPC_CLNT_CREATE_NOPING), | 190 | RPC_CLNT_CREATE_INTR), |
| 189 | }; | 191 | }; |
| 190 | 192 | ||
| 191 | ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); | 193 | ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); |
| @@ -238,6 +240,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
| 238 | 240 | ||
| 239 | error = rpc_call_sync(rpcb_clnt, &msg, 0); | 241 | error = rpc_call_sync(rpcb_clnt, &msg, 0); |
| 240 | 242 | ||
| 243 | rpc_shutdown_client(rpcb_clnt); | ||
| 241 | if (error < 0) | 244 | if (error < 0) |
| 242 | printk(KERN_WARNING "RPC: failed to contact local rpcbind " | 245 | printk(KERN_WARNING "RPC: failed to contact local rpcbind " |
| 243 | "server (errno %d).\n", -error); | 246 | "server (errno %d).\n", -error); |
| @@ -246,21 +249,20 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
| 246 | return error; | 249 | return error; |
| 247 | } | 250 | } |
| 248 | 251 | ||
| 249 | #ifdef CONFIG_ROOT_NFS | ||
| 250 | /** | 252 | /** |
| 251 | * rpcb_getport_external - obtain the port for an RPC service on a given host | 253 | * rpcb_getport_sync - obtain the port for an RPC service on a given host |
| 252 | * @sin: address of remote peer | 254 | * @sin: address of remote peer |
| 253 | * @prog: RPC program number to bind | 255 | * @prog: RPC program number to bind |
| 254 | * @vers: RPC version number to bind | 256 | * @vers: RPC version number to bind |
| 255 | * @prot: transport protocol to use to make this request | 257 | * @prot: transport protocol to use to make this request |
| 256 | * | 258 | * |
| 257 | * Called from outside the RPC client in a synchronous task context. | 259 | * Called from outside the RPC client in a synchronous task context. |
| 260 | * Uses default timeout parameters specified by underlying transport. | ||
| 258 | * | 261 | * |
| 259 | * For now, this supports only version 2 queries, but is used only by | 262 | * XXX: Needs to support IPv6, and rpcbind versions 3 and 4 |
| 260 | * mount_clnt for NFS_ROOT. | ||
| 261 | */ | 263 | */ |
| 262 | int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, | 264 | int rpcb_getport_sync(struct sockaddr_in *sin, __u32 prog, |
| 263 | __u32 vers, int prot) | 265 | __u32 vers, int prot) |
| 264 | { | 266 | { |
| 265 | struct rpcbind_args map = { | 267 | struct rpcbind_args map = { |
| 266 | .r_prog = prog, | 268 | .r_prog = prog, |
| @@ -277,15 +279,16 @@ int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, | |||
| 277 | char hostname[40]; | 279 | char hostname[40]; |
| 278 | int status; | 280 | int status; |
| 279 | 281 | ||
| 280 | dprintk("RPC: rpcb_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", | 282 | dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n", |
| 281 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); | 283 | __FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); |
| 282 | 284 | ||
| 283 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); | 285 | sprintf(hostname, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr)); |
| 284 | rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); | 286 | rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); |
| 285 | if (IS_ERR(rpcb_clnt)) | 287 | if (IS_ERR(rpcb_clnt)) |
| 286 | return PTR_ERR(rpcb_clnt); | 288 | return PTR_ERR(rpcb_clnt); |
| 287 | 289 | ||
| 288 | status = rpc_call_sync(rpcb_clnt, &msg, 0); | 290 | status = rpc_call_sync(rpcb_clnt, &msg, 0); |
| 291 | rpc_shutdown_client(rpcb_clnt); | ||
| 289 | 292 | ||
| 290 | if (status >= 0) { | 293 | if (status >= 0) { |
| 291 | if (map.r_port != 0) | 294 | if (map.r_port != 0) |
| @@ -294,16 +297,16 @@ int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, | |||
| 294 | } | 297 | } |
| 295 | return status; | 298 | return status; |
| 296 | } | 299 | } |
| 297 | #endif | 300 | EXPORT_SYMBOL_GPL(rpcb_getport_sync); |
| 298 | 301 | ||
| 299 | /** | 302 | /** |
| 300 | * rpcb_getport - obtain the port for a given RPC service on a given host | 303 | * rpcb_getport_async - obtain the port for a given RPC service on a given host |
| 301 | * @task: task that is waiting for portmapper request | 304 | * @task: task that is waiting for portmapper request |
| 302 | * | 305 | * |
| 303 | * This one can be called for an ongoing RPC request, and can be used in | 306 | * This one can be called for an ongoing RPC request, and can be used in |
| 304 | * an async (rpciod) context. | 307 | * an async (rpciod) context. |
| 305 | */ | 308 | */ |
| 306 | void rpcb_getport(struct rpc_task *task) | 309 | void rpcb_getport_async(struct rpc_task *task) |
| 307 | { | 310 | { |
| 308 | struct rpc_clnt *clnt = task->tk_client; | 311 | struct rpc_clnt *clnt = task->tk_client; |
| 309 | int bind_version; | 312 | int bind_version; |
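rpcb_getport_sync, renamed and exported in the hunks above, returns the registered port as a positive value or a negative errno. A hedged usage sketch; the NFS program and version numbers are conventional values chosen for the example, and the header placement of the declaration is assumed rather than shown by this patch.

#include <linux/in.h>
#include <linux/sunrpc/clnt.h>

static int demo_probe_nfs_port(struct sockaddr_in *server)
{
	int port;

	/* Ask the peer's rpcbind where NFS version 3 listens over TCP. */
	port = rpcb_getport_sync(server, 100003, 3, IPPROTO_TCP);

	/* < 0: errno (e.g. -EACCES when the service is not registered);
	 * > 0: the registered port number. */
	return port;
}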
| @@ -314,17 +317,17 @@ void rpcb_getport(struct rpc_task *task) | |||
| 314 | struct sockaddr addr; | 317 | struct sockaddr addr; |
| 315 | int status; | 318 | int status; |
| 316 | 319 | ||
| 317 | dprintk("RPC: %5u rpcb_getport(%s, %u, %u, %d)\n", | 320 | dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", |
| 318 | task->tk_pid, clnt->cl_server, | 321 | task->tk_pid, __FUNCTION__, |
| 319 | clnt->cl_prog, clnt->cl_vers, xprt->prot); | 322 | clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); |
| 320 | 323 | ||
| 321 | /* Autobind on cloned rpc clients is discouraged */ | 324 | /* Autobind on cloned rpc clients is discouraged */ |
| 322 | BUG_ON(clnt->cl_parent != clnt); | 325 | BUG_ON(clnt->cl_parent != clnt); |
| 323 | 326 | ||
| 324 | if (xprt_test_and_set_binding(xprt)) { | 327 | if (xprt_test_and_set_binding(xprt)) { |
| 325 | status = -EACCES; /* tell caller to check again */ | 328 | status = -EACCES; /* tell caller to check again */ |
| 326 | dprintk("RPC: %5u rpcb_getport waiting for another binder\n", | 329 | dprintk("RPC: %5u %s: waiting for another binder\n", |
| 327 | task->tk_pid); | 330 | task->tk_pid, __FUNCTION__); |
| 328 | goto bailout_nowake; | 331 | goto bailout_nowake; |
| 329 | } | 332 | } |
| 330 | 333 | ||
| @@ -335,27 +338,28 @@ void rpcb_getport(struct rpc_task *task) | |||
| 335 | /* Someone else may have bound if we slept */ | 338 | /* Someone else may have bound if we slept */ |
| 336 | if (xprt_bound(xprt)) { | 339 | if (xprt_bound(xprt)) { |
| 337 | status = 0; | 340 | status = 0; |
| 338 | dprintk("RPC: %5u rpcb_getport already bound\n", task->tk_pid); | 341 | dprintk("RPC: %5u %s: already bound\n", |
| 342 | task->tk_pid, __FUNCTION__); | ||
| 339 | goto bailout_nofree; | 343 | goto bailout_nofree; |
| 340 | } | 344 | } |
| 341 | 345 | ||
| 342 | if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { | 346 | if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { |
| 343 | xprt->bind_index = 0; | 347 | xprt->bind_index = 0; |
| 344 | status = -EACCES; /* tell caller to try again later */ | 348 | status = -EACCES; /* tell caller to try again later */ |
| 345 | dprintk("RPC: %5u rpcb_getport no more getport versions " | 349 | dprintk("RPC: %5u %s: no more getport versions available\n", |
| 346 | "available\n", task->tk_pid); | 350 | task->tk_pid, __FUNCTION__); |
| 347 | goto bailout_nofree; | 351 | goto bailout_nofree; |
| 348 | } | 352 | } |
| 349 | bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; | 353 | bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; |
| 350 | 354 | ||
| 351 | dprintk("RPC: %5u rpcb_getport trying rpcbind version %u\n", | 355 | dprintk("RPC: %5u %s: trying rpcbind version %u\n", |
| 352 | task->tk_pid, bind_version); | 356 | task->tk_pid, __FUNCTION__, bind_version); |
| 353 | 357 | ||
| 354 | map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); | 358 | map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); |
| 355 | if (!map) { | 359 | if (!map) { |
| 356 | status = -ENOMEM; | 360 | status = -ENOMEM; |
| 357 | dprintk("RPC: %5u rpcb_getport no memory available\n", | 361 | dprintk("RPC: %5u %s: no memory available\n", |
| 358 | task->tk_pid); | 362 | task->tk_pid, __FUNCTION__); |
| 359 | goto bailout_nofree; | 363 | goto bailout_nofree; |
| 360 | } | 364 | } |
| 361 | map->r_prog = clnt->cl_prog; | 365 | map->r_prog = clnt->cl_prog; |
| @@ -373,16 +377,17 @@ void rpcb_getport(struct rpc_task *task) | |||
| 373 | rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); | 377 | rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); |
| 374 | if (IS_ERR(rpcb_clnt)) { | 378 | if (IS_ERR(rpcb_clnt)) { |
| 375 | status = PTR_ERR(rpcb_clnt); | 379 | status = PTR_ERR(rpcb_clnt); |
| 376 | dprintk("RPC: %5u rpcb_getport rpcb_create failed, error %ld\n", | 380 | dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", |
| 377 | task->tk_pid, PTR_ERR(rpcb_clnt)); | 381 | task->tk_pid, __FUNCTION__, PTR_ERR(rpcb_clnt)); |
| 378 | goto bailout; | 382 | goto bailout; |
| 379 | } | 383 | } |
| 380 | 384 | ||
| 381 | child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); | 385 | child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); |
| 386 | rpc_release_client(rpcb_clnt); | ||
| 382 | if (IS_ERR(child)) { | 387 | if (IS_ERR(child)) { |
| 383 | status = -EIO; | 388 | status = -EIO; |
| 384 | dprintk("RPC: %5u rpcb_getport rpc_run_task failed\n", | 389 | dprintk("RPC: %5u %s: rpc_run_task failed\n", |
| 385 | task->tk_pid); | 390 | task->tk_pid, __FUNCTION__); |
| 386 | goto bailout_nofree; | 391 | goto bailout_nofree; |
| 387 | } | 392 | } |
| 388 | rpc_put_task(child); | 393 | rpc_put_task(child); |
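For illustration (not part of the patch): the rpcb.c hunks above turn the synchronous rpcbind query into an exported helper, rpcb_getport_sync(), and rename the async path to rpcb_getport_async(). A minimal sketch of how an in-kernel caller might use the exported helper follows; the full parameter list, the header location, and the "port or negative errno" return convention are assumptions read from the surrounding hunks, and the names prefixed with example_/EXAMPLE_ are hypothetical.

    #include <linux/in.h>
    #include <linux/sunrpc/clnt.h>

    #define EXAMPLE_NLM_PROGRAM	100021	/* NLM program number, for illustration only */

    /* Ask the peer's rpcbind where NLM v4 over TCP listens, then rewrite the
     * caller's address with the discovered port.  Assumes
     * rpcb_getport_sync(sin, prog, vers, prot) returns the port on success
     * and a negative errno on failure.
     */
    static int example_probe_nlm_port(struct sockaddr_in *sin)
    {
    	int port;

    	port = rpcb_getport_sync(sin, EXAMPLE_NLM_PROGRAM, 4, IPPROTO_TCP);
    	if (port < 0)
    		return port;

    	sin->sin_port = htons(port);
    	return 0;
    }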
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 944d75396fb3..2ac43c41c3a9 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #ifdef RPC_DEBUG | 25 | #ifdef RPC_DEBUG |
| 26 | #define RPCDBG_FACILITY RPCDBG_SCHED | 26 | #define RPCDBG_FACILITY RPCDBG_SCHED |
| 27 | #define RPC_TASK_MAGIC_ID 0xf00baa | 27 | #define RPC_TASK_MAGIC_ID 0xf00baa |
| 28 | static int rpc_task_id; | ||
| 29 | #endif | 28 | #endif |
| 30 | 29 | ||
| 31 | /* | 30 | /* |
| @@ -40,7 +39,6 @@ static mempool_t *rpc_task_mempool __read_mostly; | |||
| 40 | static mempool_t *rpc_buffer_mempool __read_mostly; | 39 | static mempool_t *rpc_buffer_mempool __read_mostly; |
| 41 | 40 | ||
| 42 | static void __rpc_default_timer(struct rpc_task *task); | 41 | static void __rpc_default_timer(struct rpc_task *task); |
| 43 | static void rpciod_killall(void); | ||
| 44 | static void rpc_async_schedule(struct work_struct *); | 42 | static void rpc_async_schedule(struct work_struct *); |
| 45 | static void rpc_release_task(struct rpc_task *task); | 43 | static void rpc_release_task(struct rpc_task *task); |
| 46 | 44 | ||
| @@ -50,23 +48,13 @@ static void rpc_release_task(struct rpc_task *task); | |||
| 50 | static RPC_WAITQ(delay_queue, "delayq"); | 48 | static RPC_WAITQ(delay_queue, "delayq"); |
| 51 | 49 | ||
| 52 | /* | 50 | /* |
| 53 | * All RPC tasks are linked into this list | ||
| 54 | */ | ||
| 55 | static LIST_HEAD(all_tasks); | ||
| 56 | |||
| 57 | /* | ||
| 58 | * rpciod-related stuff | 51 | * rpciod-related stuff |
| 59 | */ | 52 | */ |
| 60 | static DEFINE_MUTEX(rpciod_mutex); | 53 | static DEFINE_MUTEX(rpciod_mutex); |
| 61 | static unsigned int rpciod_users; | 54 | static atomic_t rpciod_users = ATOMIC_INIT(0); |
| 62 | struct workqueue_struct *rpciod_workqueue; | 55 | struct workqueue_struct *rpciod_workqueue; |
| 63 | 56 | ||
| 64 | /* | 57 | /* |
| 65 | * Spinlock for other critical sections of code. | ||
| 66 | */ | ||
| 67 | static DEFINE_SPINLOCK(rpc_sched_lock); | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Disable the timer for a given RPC task. Should be called with | 58 | * Disable the timer for a given RPC task. Should be called with |
| 71 | * queue->lock and bh_disabled in order to avoid races within | 59 | * queue->lock and bh_disabled in order to avoid races within |
| 72 | * rpc_run_timer(). | 60 | * rpc_run_timer(). |
| @@ -267,18 +255,33 @@ static int rpc_wait_bit_interruptible(void *word) | |||
| 267 | return 0; | 255 | return 0; |
| 268 | } | 256 | } |
| 269 | 257 | ||
| 258 | #ifdef RPC_DEBUG | ||
| 259 | static void rpc_task_set_debuginfo(struct rpc_task *task) | ||
| 260 | { | ||
| 261 | static atomic_t rpc_pid; | ||
| 262 | |||
| 263 | task->tk_magic = RPC_TASK_MAGIC_ID; | ||
| 264 | task->tk_pid = atomic_inc_return(&rpc_pid); | ||
| 265 | } | ||
| 266 | #else | ||
| 267 | static inline void rpc_task_set_debuginfo(struct rpc_task *task) | ||
| 268 | { | ||
| 269 | } | ||
| 270 | #endif | ||
| 271 | |||
| 270 | static void rpc_set_active(struct rpc_task *task) | 272 | static void rpc_set_active(struct rpc_task *task) |
| 271 | { | 273 | { |
| 274 | struct rpc_clnt *clnt; | ||
| 272 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) | 275 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) |
| 273 | return; | 276 | return; |
| 274 | spin_lock(&rpc_sched_lock); | 277 | rpc_task_set_debuginfo(task); |
| 275 | #ifdef RPC_DEBUG | ||
| 276 | task->tk_magic = RPC_TASK_MAGIC_ID; | ||
| 277 | task->tk_pid = rpc_task_id++; | ||
| 278 | #endif | ||
| 279 | /* Add to global list of all tasks */ | 278 | /* Add to global list of all tasks */ |
| 280 | list_add_tail(&task->tk_task, &all_tasks); | 279 | clnt = task->tk_client; |
| 281 | spin_unlock(&rpc_sched_lock); | 280 | if (clnt != NULL) { |
| 281 | spin_lock(&clnt->cl_lock); | ||
| 282 | list_add_tail(&task->tk_task, &clnt->cl_tasks); | ||
| 283 | spin_unlock(&clnt->cl_lock); | ||
| 284 | } | ||
| 282 | } | 285 | } |
| 283 | 286 | ||
| 284 | /* | 287 | /* |
| @@ -818,6 +821,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons | |||
| 818 | if (tk_ops->rpc_call_prepare != NULL) | 821 | if (tk_ops->rpc_call_prepare != NULL) |
| 819 | task->tk_action = rpc_prepare_task; | 822 | task->tk_action = rpc_prepare_task; |
| 820 | task->tk_calldata = calldata; | 823 | task->tk_calldata = calldata; |
| 824 | INIT_LIST_HEAD(&task->tk_task); | ||
| 821 | 825 | ||
| 822 | /* Initialize retry counters */ | 826 | /* Initialize retry counters */ |
| 823 | task->tk_garb_retry = 2; | 827 | task->tk_garb_retry = 2; |
| @@ -830,7 +834,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons | |||
| 830 | task->tk_workqueue = rpciod_workqueue; | 834 | task->tk_workqueue = rpciod_workqueue; |
| 831 | 835 | ||
| 832 | if (clnt) { | 836 | if (clnt) { |
| 833 | atomic_inc(&clnt->cl_users); | 837 | kref_get(&clnt->cl_kref); |
| 834 | if (clnt->cl_softrtry) | 838 | if (clnt->cl_softrtry) |
| 835 | task->tk_flags |= RPC_TASK_SOFT; | 839 | task->tk_flags |= RPC_TASK_SOFT; |
| 836 | if (!clnt->cl_intr) | 840 | if (!clnt->cl_intr) |
| @@ -860,9 +864,7 @@ static void rpc_free_task(struct rcu_head *rcu) | |||
| 860 | } | 864 | } |
| 861 | 865 | ||
| 862 | /* | 866 | /* |
| 863 | * Create a new task for the specified client. We have to | 867 | * Create a new task for the specified client. |
| 864 | * clean up after an allocation failure, as the client may | ||
| 865 | * have specified "oneshot". | ||
| 866 | */ | 868 | */ |
| 867 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) | 869 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
| 868 | { | 870 | { |
| @@ -870,7 +872,7 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc | |||
| 870 | 872 | ||
| 871 | task = rpc_alloc_task(); | 873 | task = rpc_alloc_task(); |
| 872 | if (!task) | 874 | if (!task) |
| 873 | goto cleanup; | 875 | goto out; |
| 874 | 876 | ||
| 875 | rpc_init_task(task, clnt, flags, tk_ops, calldata); | 877 | rpc_init_task(task, clnt, flags, tk_ops, calldata); |
| 876 | 878 | ||
| @@ -878,16 +880,6 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc | |||
| 878 | task->tk_flags |= RPC_TASK_DYNAMIC; | 880 | task->tk_flags |= RPC_TASK_DYNAMIC; |
| 879 | out: | 881 | out: |
| 880 | return task; | 882 | return task; |
| 881 | |||
| 882 | cleanup: | ||
| 883 | /* Check whether to release the client */ | ||
| 884 | if (clnt) { | ||
| 885 | printk("rpc_new_task: failed, users=%d, oneshot=%d\n", | ||
| 886 | atomic_read(&clnt->cl_users), clnt->cl_oneshot); | ||
| 887 | atomic_inc(&clnt->cl_users); /* pretend we were used ... */ | ||
| 888 | rpc_release_client(clnt); | ||
| 889 | } | ||
| 890 | goto out; | ||
| 891 | } | 883 | } |
| 892 | 884 | ||
| 893 | 885 | ||
| @@ -920,11 +912,13 @@ static void rpc_release_task(struct rpc_task *task) | |||
| 920 | #endif | 912 | #endif |
| 921 | dprintk("RPC: %5u release task\n", task->tk_pid); | 913 | dprintk("RPC: %5u release task\n", task->tk_pid); |
| 922 | 914 | ||
| 923 | /* Remove from global task list */ | 915 | if (!list_empty(&task->tk_task)) { |
| 924 | spin_lock(&rpc_sched_lock); | 916 | struct rpc_clnt *clnt = task->tk_client; |
| 925 | list_del(&task->tk_task); | 917 | /* Remove from client task list */ |
| 926 | spin_unlock(&rpc_sched_lock); | 918 | spin_lock(&clnt->cl_lock); |
| 927 | 919 | list_del(&task->tk_task); | |
| 920 | spin_unlock(&clnt->cl_lock); | ||
| 921 | } | ||
| 928 | BUG_ON (RPC_IS_QUEUED(task)); | 922 | BUG_ON (RPC_IS_QUEUED(task)); |
| 929 | 923 | ||
| 930 | /* Synchronously delete any running timer */ | 924 | /* Synchronously delete any running timer */ |
| @@ -939,29 +933,6 @@ static void rpc_release_task(struct rpc_task *task) | |||
| 939 | rpc_put_task(task); | 933 | rpc_put_task(task); |
| 940 | } | 934 | } |
| 941 | 935 | ||
| 942 | /** | ||
| 943 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | ||
| 944 | * @clnt: pointer to RPC client | ||
| 945 | * @flags: RPC flags | ||
| 946 | * @ops: RPC call ops | ||
| 947 | * @data: user call data | ||
| 948 | */ | ||
| 949 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | ||
| 950 | const struct rpc_call_ops *ops, | ||
| 951 | void *data) | ||
| 952 | { | ||
| 953 | struct rpc_task *task; | ||
| 954 | task = rpc_new_task(clnt, flags, ops, data); | ||
| 955 | if (task == NULL) { | ||
| 956 | rpc_release_calldata(ops, data); | ||
| 957 | return ERR_PTR(-ENOMEM); | ||
| 958 | } | ||
| 959 | atomic_inc(&task->tk_count); | ||
| 960 | rpc_execute(task); | ||
| 961 | return task; | ||
| 962 | } | ||
| 963 | EXPORT_SYMBOL(rpc_run_task); | ||
| 964 | |||
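For illustration (not part of the patch): rpc_run_task() and its EXPORT_SYMBOL() disappear from sched.c here, yet rpcb_getport_async() above still calls it, so the helper presumably now lives in another file outside this section. The caller-side contract visible in the removed body is unchanged and is sketched below; everything except rpc_run_task(), rpc_put_task() and RPC_TASK_ASYNC is a hypothetical name.

    #include <linux/err.h>
    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/sched.h>

    /* On allocation failure the helper releases the calldata itself and
     * returns ERR_PTR(-ENOMEM); on success it returns a task carrying an
     * extra reference that the caller must drop with rpc_put_task().
     */
    static int example_fire_and_forget(struct rpc_clnt *clnt,
    				   const struct rpc_call_ops *ops,
    				   void *data)
    {
    	struct rpc_task *task;

    	task = rpc_run_task(clnt, RPC_TASK_ASYNC, ops, data);
    	if (IS_ERR(task))
    		return PTR_ERR(task);	/* calldata already released for us */
    	rpc_put_task(task);		/* drop the extra reference */
    	return 0;
    }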
| 965 | /* | 936 | /* |
| 966 | * Kill all tasks for the given client. | 937 | * Kill all tasks for the given client. |
| 967 | * XXX: kill their descendants as well? | 938 | * XXX: kill their descendants as well? |
| @@ -969,44 +940,25 @@ EXPORT_SYMBOL(rpc_run_task); | |||
| 969 | void rpc_killall_tasks(struct rpc_clnt *clnt) | 940 | void rpc_killall_tasks(struct rpc_clnt *clnt) |
| 970 | { | 941 | { |
| 971 | struct rpc_task *rovr; | 942 | struct rpc_task *rovr; |
| 972 | struct list_head *le; | ||
| 973 | 943 | ||
| 974 | dprintk("RPC: killing all tasks for client %p\n", clnt); | ||
| 975 | 944 | ||
| 945 | if (list_empty(&clnt->cl_tasks)) | ||
| 946 | return; | ||
| 947 | dprintk("RPC: killing all tasks for client %p\n", clnt); | ||
| 976 | /* | 948 | /* |
| 977 | * Spin lock all_tasks to prevent changes... | 949 | * Spin lock all_tasks to prevent changes... |
| 978 | */ | 950 | */ |
| 979 | spin_lock(&rpc_sched_lock); | 951 | spin_lock(&clnt->cl_lock); |
| 980 | alltask_for_each(rovr, le, &all_tasks) { | 952 | list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { |
| 981 | if (! RPC_IS_ACTIVATED(rovr)) | 953 | if (! RPC_IS_ACTIVATED(rovr)) |
| 982 | continue; | 954 | continue; |
| 983 | if (!clnt || rovr->tk_client == clnt) { | 955 | if (!(rovr->tk_flags & RPC_TASK_KILLED)) { |
| 984 | rovr->tk_flags |= RPC_TASK_KILLED; | 956 | rovr->tk_flags |= RPC_TASK_KILLED; |
| 985 | rpc_exit(rovr, -EIO); | 957 | rpc_exit(rovr, -EIO); |
| 986 | rpc_wake_up_task(rovr); | 958 | rpc_wake_up_task(rovr); |
| 987 | } | 959 | } |
| 988 | } | 960 | } |
| 989 | spin_unlock(&rpc_sched_lock); | 961 | spin_unlock(&clnt->cl_lock); |
| 990 | } | ||
| 991 | |||
| 992 | static void rpciod_killall(void) | ||
| 993 | { | ||
| 994 | unsigned long flags; | ||
| 995 | |||
| 996 | while (!list_empty(&all_tasks)) { | ||
| 997 | clear_thread_flag(TIF_SIGPENDING); | ||
| 998 | rpc_killall_tasks(NULL); | ||
| 999 | flush_workqueue(rpciod_workqueue); | ||
| 1000 | if (!list_empty(&all_tasks)) { | ||
| 1001 | dprintk("RPC: rpciod_killall: waiting for tasks " | ||
| 1002 | "to exit\n"); | ||
| 1003 | yield(); | ||
| 1004 | } | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
| 1008 | recalc_sigpending(); | ||
| 1009 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
| 1010 | } | 962 | } |
| 1011 | 963 | ||
| 1012 | /* | 964 | /* |
| @@ -1018,28 +970,27 @@ rpciod_up(void) | |||
| 1018 | struct workqueue_struct *wq; | 970 | struct workqueue_struct *wq; |
| 1019 | int error = 0; | 971 | int error = 0; |
| 1020 | 972 | ||
| 973 | if (atomic_inc_not_zero(&rpciod_users)) | ||
| 974 | return 0; | ||
| 975 | |||
| 1021 | mutex_lock(&rpciod_mutex); | 976 | mutex_lock(&rpciod_mutex); |
| 1022 | dprintk("RPC: rpciod_up: users %u\n", rpciod_users); | 977 | |
| 1023 | rpciod_users++; | 978 | /* Guard against races with rpciod_down() */ |
| 1024 | if (rpciod_workqueue) | 979 | if (rpciod_workqueue != NULL) |
| 1025 | goto out; | 980 | goto out_ok; |
| 1026 | /* | ||
| 1027 | * If there's no pid, we should be the first user. | ||
| 1028 | */ | ||
| 1029 | if (rpciod_users > 1) | ||
| 1030 | printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); | ||
| 1031 | /* | 981 | /* |
| 1032 | * Create the rpciod thread and wait for it to start. | 982 | * Create the rpciod thread and wait for it to start. |
| 1033 | */ | 983 | */ |
| 984 | dprintk("RPC: creating workqueue rpciod\n"); | ||
| 1034 | error = -ENOMEM; | 985 | error = -ENOMEM; |
| 1035 | wq = create_workqueue("rpciod"); | 986 | wq = create_workqueue("rpciod"); |
| 1036 | if (wq == NULL) { | 987 | if (wq == NULL) |
| 1037 | printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); | ||
| 1038 | rpciod_users--; | ||
| 1039 | goto out; | 988 | goto out; |
| 1040 | } | 989 | |
| 1041 | rpciod_workqueue = wq; | 990 | rpciod_workqueue = wq; |
| 1042 | error = 0; | 991 | error = 0; |
| 992 | out_ok: | ||
| 993 | atomic_inc(&rpciod_users); | ||
| 1043 | out: | 994 | out: |
| 1044 | mutex_unlock(&rpciod_mutex); | 995 | mutex_unlock(&rpciod_mutex); |
| 1045 | return error; | 996 | return error; |
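For illustration (not part of the patch): rpciod start-up now uses an atomic user count, taking the mutex only when the workqueue actually has to be created or torn down, and the sunrpc_syms.c hunk further down stops exporting the pair to other modules. The expected pairing for an in-tree caller, sketched with hypothetical function names:

    #include <linux/sunrpc/sched.h>

    /* Every rpciod user brackets its lifetime with rpciod_up()/rpciod_down();
     * the workqueue is created on the first up() and destroyed when the last
     * down() drops the count back to zero.
     */
    static int example_rpc_user_init(void)
    {
    	int error;

    	error = rpciod_up();	/* 0, or -ENOMEM if the workqueue cannot be created */
    	if (error)
    		return error;
    	/* ... it is now safe to queue work on rpciod_workqueue ... */
    	return 0;
    }

    static void example_rpc_user_exit(void)
    {
    	rpciod_down();		/* pairs with the rpciod_up() above */
    }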
| @@ -1048,59 +999,19 @@ out: | |||
| 1048 | void | 999 | void |
| 1049 | rpciod_down(void) | 1000 | rpciod_down(void) |
| 1050 | { | 1001 | { |
| 1002 | if (!atomic_dec_and_test(&rpciod_users)) | ||
| 1003 | return; | ||
| 1004 | |||
| 1051 | mutex_lock(&rpciod_mutex); | 1005 | mutex_lock(&rpciod_mutex); |
| 1052 | dprintk("RPC: rpciod_down sema %u\n", rpciod_users); | 1006 | dprintk("RPC: destroying workqueue rpciod\n"); |
| 1053 | if (rpciod_users) { | ||
| 1054 | if (--rpciod_users) | ||
| 1055 | goto out; | ||
| 1056 | } else | ||
| 1057 | printk(KERN_WARNING "rpciod_down: no users??\n"); | ||
| 1058 | 1007 | ||
| 1059 | if (!rpciod_workqueue) { | 1008 | if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) { |
| 1060 | dprintk("RPC: rpciod_down: Nothing to do!\n"); | 1009 | destroy_workqueue(rpciod_workqueue); |
| 1061 | goto out; | 1010 | rpciod_workqueue = NULL; |
| 1062 | } | 1011 | } |
| 1063 | rpciod_killall(); | ||
| 1064 | |||
| 1065 | destroy_workqueue(rpciod_workqueue); | ||
| 1066 | rpciod_workqueue = NULL; | ||
| 1067 | out: | ||
| 1068 | mutex_unlock(&rpciod_mutex); | 1012 | mutex_unlock(&rpciod_mutex); |
| 1069 | } | 1013 | } |
| 1070 | 1014 | ||
| 1071 | #ifdef RPC_DEBUG | ||
| 1072 | void rpc_show_tasks(void) | ||
| 1073 | { | ||
| 1074 | struct list_head *le; | ||
| 1075 | struct rpc_task *t; | ||
| 1076 | |||
| 1077 | spin_lock(&rpc_sched_lock); | ||
| 1078 | if (list_empty(&all_tasks)) { | ||
| 1079 | spin_unlock(&rpc_sched_lock); | ||
| 1080 | return; | ||
| 1081 | } | ||
| 1082 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " | ||
| 1083 | "-rpcwait -action- ---ops--\n"); | ||
| 1084 | alltask_for_each(t, le, &all_tasks) { | ||
| 1085 | const char *rpc_waitq = "none"; | ||
| 1086 | |||
| 1087 | if (RPC_IS_QUEUED(t)) | ||
| 1088 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | ||
| 1089 | |||
| 1090 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", | ||
| 1091 | t->tk_pid, | ||
| 1092 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | ||
| 1093 | t->tk_flags, t->tk_status, | ||
| 1094 | t->tk_client, | ||
| 1095 | (t->tk_client ? t->tk_client->cl_prog : 0), | ||
| 1096 | t->tk_rqstp, t->tk_timeout, | ||
| 1097 | rpc_waitq, | ||
| 1098 | t->tk_action, t->tk_ops); | ||
| 1099 | } | ||
| 1100 | spin_unlock(&rpc_sched_lock); | ||
| 1101 | } | ||
| 1102 | #endif | ||
| 1103 | |||
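For illustration (not part of the patch): the global all_tasks list, rpc_sched_lock and rpc_show_tasks() go away because tasks are now tracked per client on clnt->cl_tasks (see the rpc_set_active() and rpc_release_task() hunks above). A diagnostic walk therefore becomes per client; a hedged sketch using only field names that appear in this patch, with a hypothetical function name:

    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/sched.h>

    /* Dump the tasks of a single rpc_clnt under its own lock, roughly what
     * the removed rpc_show_tasks() printed per entry.
     */
    static void example_show_client_tasks(struct rpc_clnt *clnt)
    {
    	struct rpc_task *t;

    	spin_lock(&clnt->cl_lock);
    	list_for_each_entry(t, &clnt->cl_tasks, tk_task)
    		printk(KERN_DEBUG "RPC: %5u flags %04x status %6d prog %u\n",
    		       t->tk_pid, t->tk_flags, t->tk_status, clnt->cl_prog);
    	spin_unlock(&clnt->cl_lock);
    }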
| 1104 | void | 1015 | void |
| 1105 | rpc_destroy_mempool(void) | 1016 | rpc_destroy_mempool(void) |
| 1106 | { | 1017 | { |
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 73075dec83c0..384c4ad5ab86 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
| @@ -28,15 +28,11 @@ EXPORT_SYMBOL(rpc_init_task); | |||
| 28 | EXPORT_SYMBOL(rpc_sleep_on); | 28 | EXPORT_SYMBOL(rpc_sleep_on); |
| 29 | EXPORT_SYMBOL(rpc_wake_up_next); | 29 | EXPORT_SYMBOL(rpc_wake_up_next); |
| 30 | EXPORT_SYMBOL(rpc_wake_up_task); | 30 | EXPORT_SYMBOL(rpc_wake_up_task); |
| 31 | EXPORT_SYMBOL(rpciod_down); | ||
| 32 | EXPORT_SYMBOL(rpciod_up); | ||
| 33 | EXPORT_SYMBOL(rpc_new_task); | ||
| 34 | EXPORT_SYMBOL(rpc_wake_up_status); | 31 | EXPORT_SYMBOL(rpc_wake_up_status); |
| 35 | 32 | ||
| 36 | /* RPC client functions */ | 33 | /* RPC client functions */ |
| 37 | EXPORT_SYMBOL(rpc_clone_client); | 34 | EXPORT_SYMBOL(rpc_clone_client); |
| 38 | EXPORT_SYMBOL(rpc_bind_new_program); | 35 | EXPORT_SYMBOL(rpc_bind_new_program); |
| 39 | EXPORT_SYMBOL(rpc_destroy_client); | ||
| 40 | EXPORT_SYMBOL(rpc_shutdown_client); | 36 | EXPORT_SYMBOL(rpc_shutdown_client); |
| 41 | EXPORT_SYMBOL(rpc_killall_tasks); | 37 | EXPORT_SYMBOL(rpc_killall_tasks); |
| 42 | EXPORT_SYMBOL(rpc_call_sync); | 38 | EXPORT_SYMBOL(rpc_call_sync); |
| @@ -61,7 +57,7 @@ EXPORT_SYMBOL(rpcauth_unregister); | |||
| 61 | EXPORT_SYMBOL(rpcauth_create); | 57 | EXPORT_SYMBOL(rpcauth_create); |
| 62 | EXPORT_SYMBOL(rpcauth_lookupcred); | 58 | EXPORT_SYMBOL(rpcauth_lookupcred); |
| 63 | EXPORT_SYMBOL(rpcauth_lookup_credcache); | 59 | EXPORT_SYMBOL(rpcauth_lookup_credcache); |
| 64 | EXPORT_SYMBOL(rpcauth_free_credcache); | 60 | EXPORT_SYMBOL(rpcauth_destroy_credcache); |
| 65 | EXPORT_SYMBOL(rpcauth_init_credcache); | 61 | EXPORT_SYMBOL(rpcauth_init_credcache); |
| 66 | EXPORT_SYMBOL(put_rpccred); | 62 | EXPORT_SYMBOL(put_rpccred); |
| 67 | 63 | ||
| @@ -156,6 +152,7 @@ init_sunrpc(void) | |||
| 156 | cache_register(&ip_map_cache); | 152 | cache_register(&ip_map_cache); |
| 157 | cache_register(&unix_gid_cache); | 153 | cache_register(&unix_gid_cache); |
| 158 | init_socket_xprt(); | 154 | init_socket_xprt(); |
| 155 | rpcauth_init_module(); | ||
| 159 | out: | 156 | out: |
| 160 | return err; | 157 | return err; |
| 161 | } | 158 | } |
| @@ -163,6 +160,7 @@ out: | |||
| 163 | static void __exit | 160 | static void __exit |
| 164 | cleanup_sunrpc(void) | 161 | cleanup_sunrpc(void) |
| 165 | { | 162 | { |
| 163 | rpcauth_remove_module(); | ||
| 166 | cleanup_socket_xprt(); | 164 | cleanup_socket_xprt(); |
| 167 | unregister_rpc_pipefs(); | 165 | unregister_rpc_pipefs(); |
| 168 | rpc_destroy_mempool(); | 166 | rpc_destroy_mempool(); |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 5baf48de2558..64b9b8c743c4 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
| @@ -644,6 +644,7 @@ svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) | |||
| 644 | struct msghdr msg = { | 644 | struct msghdr msg = { |
| 645 | .msg_flags = MSG_DONTWAIT, | 645 | .msg_flags = MSG_DONTWAIT, |
| 646 | }; | 646 | }; |
| 647 | struct sockaddr *sin; | ||
| 647 | int len; | 648 | int len; |
| 648 | 649 | ||
| 649 | len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, | 650 | len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, |
| @@ -654,6 +655,19 @@ svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) | |||
| 654 | memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); | 655 | memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); |
| 655 | rqstp->rq_addrlen = svsk->sk_remotelen; | 656 | rqstp->rq_addrlen = svsk->sk_remotelen; |
| 656 | 657 | ||
| 658 | /* Destination address in request is needed for binding the | ||
| 659 | * source address in RPC callbacks later. | ||
| 660 | */ | ||
| 661 | sin = (struct sockaddr *)&svsk->sk_local; | ||
| 662 | switch (sin->sa_family) { | ||
| 663 | case AF_INET: | ||
| 664 | rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr; | ||
| 665 | break; | ||
| 666 | case AF_INET6: | ||
| 667 | rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr; | ||
| 668 | break; | ||
| 669 | } | ||
| 670 | |||
| 657 | dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", | 671 | dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", |
| 658 | svsk, iov[0].iov_base, iov[0].iov_len, len); | 672 | svsk, iov[0].iov_base, iov[0].iov_len, len); |
| 659 | 673 | ||
| @@ -1064,6 +1078,12 @@ svc_tcp_accept(struct svc_sock *svsk) | |||
| 1064 | goto failed; | 1078 | goto failed; |
| 1065 | memcpy(&newsvsk->sk_remote, sin, slen); | 1079 | memcpy(&newsvsk->sk_remote, sin, slen); |
| 1066 | newsvsk->sk_remotelen = slen; | 1080 | newsvsk->sk_remotelen = slen; |
| 1081 | err = kernel_getsockname(newsock, sin, &slen); | ||
| 1082 | if (unlikely(err < 0)) { | ||
| 1083 | dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err); | ||
| 1084 | slen = offsetof(struct sockaddr, sa_data); | ||
| 1085 | } | ||
| 1086 | memcpy(&newsvsk->sk_local, sin, slen); | ||
| 1067 | 1087 | ||
| 1068 | svc_sock_received(newsvsk); | 1088 | svc_sock_received(newsvsk); |
| 1069 | 1089 | ||
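For illustration (not part of the patch): the svcsock.c hunks cache the accepted socket's local address in svsk->sk_local and copy it into rqstp->rq_daddr on receive so that, as the new comment says, an RPC callback issued later can bind its source address to the interface the request arrived on. A hedged, IPv4-only sketch of a consumer; the helper name is hypothetical.

    #include <linux/in.h>
    #include <linux/string.h>
    #include <linux/sunrpc/svc.h>

    /* Copy the cached destination address of an incoming request into a
     * sockaddr_in that a callback transport could bind as its source.
     */
    static void example_callback_source(const struct svc_rqst *rqstp,
    				    struct sockaddr_in *src)
    {
    	memset(src, 0, sizeof(*src));
    	src->sin_family = AF_INET;
    	src->sin_addr = rqstp->rq_daddr.addr;	/* address the request was sent to */
    }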
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 5b05b73e4c1d..c8c2edccad7e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -127,7 +127,7 @@ static void xprt_clear_locked(struct rpc_xprt *xprt) | |||
| 127 | clear_bit(XPRT_LOCKED, &xprt->state); | 127 | clear_bit(XPRT_LOCKED, &xprt->state); |
| 128 | smp_mb__after_clear_bit(); | 128 | smp_mb__after_clear_bit(); |
| 129 | } else | 129 | } else |
| 130 | schedule_work(&xprt->task_cleanup); | 130 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | /* | 133 | /* |
| @@ -515,7 +515,7 @@ xprt_init_autodisconnect(unsigned long data) | |||
| 515 | if (xprt_connecting(xprt)) | 515 | if (xprt_connecting(xprt)) |
| 516 | xprt_release_write(xprt, NULL); | 516 | xprt_release_write(xprt, NULL); |
| 517 | else | 517 | else |
| 518 | schedule_work(&xprt->task_cleanup); | 518 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
| 519 | return; | 519 | return; |
| 520 | out_abort: | 520 | out_abort: |
| 521 | spin_unlock(&xprt->transport_lock); | 521 | spin_unlock(&xprt->transport_lock); |
| @@ -886,27 +886,24 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i | |||
| 886 | 886 | ||
| 887 | /** | 887 | /** |
| 888 | * xprt_create_transport - create an RPC transport | 888 | * xprt_create_transport - create an RPC transport |
| 889 | * @proto: requested transport protocol | 889 | * @args: rpc transport creation arguments |
| 890 | * @ap: remote peer address | ||
| 891 | * @size: length of address | ||
| 892 | * @to: timeout parameters | ||
| 893 | * | 890 | * |
| 894 | */ | 891 | */ |
| 895 | struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to) | 892 | struct rpc_xprt *xprt_create_transport(struct rpc_xprtsock_create *args) |
| 896 | { | 893 | { |
| 897 | struct rpc_xprt *xprt; | 894 | struct rpc_xprt *xprt; |
| 898 | struct rpc_rqst *req; | 895 | struct rpc_rqst *req; |
| 899 | 896 | ||
| 900 | switch (proto) { | 897 | switch (args->proto) { |
| 901 | case IPPROTO_UDP: | 898 | case IPPROTO_UDP: |
| 902 | xprt = xs_setup_udp(ap, size, to); | 899 | xprt = xs_setup_udp(args); |
| 903 | break; | 900 | break; |
| 904 | case IPPROTO_TCP: | 901 | case IPPROTO_TCP: |
| 905 | xprt = xs_setup_tcp(ap, size, to); | 902 | xprt = xs_setup_tcp(args); |
| 906 | break; | 903 | break; |
| 907 | default: | 904 | default: |
| 908 | printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", | 905 | printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", |
| 909 | proto); | 906 | args->proto); |
| 910 | return ERR_PTR(-EIO); | 907 | return ERR_PTR(-EIO); |
| 911 | } | 908 | } |
| 912 | if (IS_ERR(xprt)) { | 909 | if (IS_ERR(xprt)) { |
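For illustration (not part of the patch): xprt_create_transport() now takes a single struct rpc_xprtsock_create instead of four separate parameters. A hedged sketch of filling it, using only the field names that these hunks dereference (proto, srcaddr, dstaddr, addrlen, timeout); the wrapper name is hypothetical and the header location is assumed.

    #include <linux/in.h>
    #include <linux/sunrpc/xprt.h>

    /* Build the argument block for a TCP transport.  A NULL srcaddr leaves
     * source-address selection to xs_bind(); a NULL timeout falls back to
     * the transport default (see xs_setup_tcp() below).
     */
    static struct rpc_xprt *example_create_tcp_xprt(struct sockaddr *dst,
    						size_t dstlen,
    						struct rpc_timeout *to)
    {
    	struct rpc_xprtsock_create args = {
    		.proto   = IPPROTO_TCP,
    		.srcaddr = NULL,
    		.dstaddr = dst,
    		.addrlen = dstlen,
    		.timeout = to,
    	};

    	return xprt_create_transport(&args);
    }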
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index cc33c5880abb..4ae7eed7f617 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -235,6 +235,7 @@ struct sock_xprt { | |||
| 235 | * Connection of transports | 235 | * Connection of transports |
| 236 | */ | 236 | */ |
| 237 | struct delayed_work connect_worker; | 237 | struct delayed_work connect_worker; |
| 238 | struct sockaddr_storage addr; | ||
| 238 | unsigned short port; | 239 | unsigned short port; |
| 239 | 240 | ||
| 240 | /* | 241 | /* |
| @@ -653,8 +654,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
| 653 | 654 | ||
| 654 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
| 655 | 656 | ||
| 656 | cancel_delayed_work(&transport->connect_worker); | 657 | cancel_rearming_delayed_work(&transport->connect_worker); |
| 657 | flush_scheduled_work(); | ||
| 658 | 658 | ||
| 659 | xprt_disconnect(xprt); | 659 | xprt_disconnect(xprt); |
| 660 | xs_close(xprt); | 660 | xs_close(xprt); |
| @@ -1001,7 +1001,7 @@ static void xs_tcp_state_change(struct sock *sk) | |||
| 1001 | /* Try to schedule an autoclose RPC calls */ | 1001 | /* Try to schedule an autoclose RPC calls */ |
| 1002 | set_bit(XPRT_CLOSE_WAIT, &xprt->state); | 1002 | set_bit(XPRT_CLOSE_WAIT, &xprt->state); |
| 1003 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) | 1003 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) |
| 1004 | schedule_work(&xprt->task_cleanup); | 1004 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
| 1005 | default: | 1005 | default: |
| 1006 | xprt_disconnect(xprt); | 1006 | xprt_disconnect(xprt); |
| 1007 | } | 1007 | } |
| @@ -1146,31 +1146,36 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
| 1146 | sap->sin_port = htons(port); | 1146 | sap->sin_port = htons(port); |
| 1147 | } | 1147 | } |
| 1148 | 1148 | ||
| 1149 | static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | 1149 | static int xs_bind(struct sock_xprt *transport, struct socket *sock) |
| 1150 | { | 1150 | { |
| 1151 | struct sockaddr_in myaddr = { | 1151 | struct sockaddr_in myaddr = { |
| 1152 | .sin_family = AF_INET, | 1152 | .sin_family = AF_INET, |
| 1153 | }; | 1153 | }; |
| 1154 | struct sockaddr_in *sa; | ||
| 1154 | int err; | 1155 | int err; |
| 1155 | unsigned short port = transport->port; | 1156 | unsigned short port = transport->port; |
| 1156 | 1157 | ||
| 1158 | if (!transport->xprt.resvport) | ||
| 1159 | port = 0; | ||
| 1160 | sa = (struct sockaddr_in *)&transport->addr; | ||
| 1161 | myaddr.sin_addr = sa->sin_addr; | ||
| 1157 | do { | 1162 | do { |
| 1158 | myaddr.sin_port = htons(port); | 1163 | myaddr.sin_port = htons(port); |
| 1159 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, | 1164 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, |
| 1160 | sizeof(myaddr)); | 1165 | sizeof(myaddr)); |
| 1166 | if (!transport->xprt.resvport) | ||
| 1167 | break; | ||
| 1161 | if (err == 0) { | 1168 | if (err == 0) { |
| 1162 | transport->port = port; | 1169 | transport->port = port; |
| 1163 | dprintk("RPC: xs_bindresvport bound to port %u\n", | 1170 | break; |
| 1164 | port); | ||
| 1165 | return 0; | ||
| 1166 | } | 1171 | } |
| 1167 | if (port <= xprt_min_resvport) | 1172 | if (port <= xprt_min_resvport) |
| 1168 | port = xprt_max_resvport; | 1173 | port = xprt_max_resvport; |
| 1169 | else | 1174 | else |
| 1170 | port--; | 1175 | port--; |
| 1171 | } while (err == -EADDRINUSE && port != transport->port); | 1176 | } while (err == -EADDRINUSE && port != transport->port); |
| 1172 | 1177 | dprintk("RPC: xs_bind "NIPQUAD_FMT":%u: %s (%d)\n", | |
| 1173 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); | 1178 | NIPQUAD(myaddr.sin_addr), port, err ? "failed" : "ok", err); |
| 1174 | return err; | 1179 | return err; |
| 1175 | } | 1180 | } |
| 1176 | 1181 | ||
| @@ -1229,7 +1234,7 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
| 1229 | } | 1234 | } |
| 1230 | xs_reclassify_socket(sock); | 1235 | xs_reclassify_socket(sock); |
| 1231 | 1236 | ||
| 1232 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { | 1237 | if (xs_bind(transport, sock)) { |
| 1233 | sock_release(sock); | 1238 | sock_release(sock); |
| 1234 | goto out; | 1239 | goto out; |
| 1235 | } | 1240 | } |
| @@ -1316,7 +1321,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
| 1316 | } | 1321 | } |
| 1317 | xs_reclassify_socket(sock); | 1322 | xs_reclassify_socket(sock); |
| 1318 | 1323 | ||
| 1319 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { | 1324 | if (xs_bind(transport, sock)) { |
| 1320 | sock_release(sock); | 1325 | sock_release(sock); |
| 1321 | goto out; | 1326 | goto out; |
| 1322 | } | 1327 | } |
| @@ -1410,18 +1415,16 @@ static void xs_connect(struct rpc_task *task) | |||
| 1410 | dprintk("RPC: xs_connect delayed xprt %p for %lu " | 1415 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
| 1411 | "seconds\n", | 1416 | "seconds\n", |
| 1412 | xprt, xprt->reestablish_timeout / HZ); | 1417 | xprt, xprt->reestablish_timeout / HZ); |
| 1413 | schedule_delayed_work(&transport->connect_worker, | 1418 | queue_delayed_work(rpciod_workqueue, |
| 1414 | xprt->reestablish_timeout); | 1419 | &transport->connect_worker, |
| 1420 | xprt->reestablish_timeout); | ||
| 1415 | xprt->reestablish_timeout <<= 1; | 1421 | xprt->reestablish_timeout <<= 1; |
| 1416 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | 1422 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) |
| 1417 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1423 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
| 1418 | } else { | 1424 | } else { |
| 1419 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1425 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
| 1420 | schedule_delayed_work(&transport->connect_worker, 0); | 1426 | queue_delayed_work(rpciod_workqueue, |
| 1421 | 1427 | &transport->connect_worker, 0); | |
| 1422 | /* flush_scheduled_work can sleep... */ | ||
| 1423 | if (!RPC_IS_ASYNC(task)) | ||
| 1424 | flush_scheduled_work(); | ||
| 1425 | } | 1428 | } |
| 1426 | } | 1429 | } |
| 1427 | 1430 | ||
| @@ -1476,7 +1479,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
| 1476 | .set_buffer_size = xs_udp_set_buffer_size, | 1479 | .set_buffer_size = xs_udp_set_buffer_size, |
| 1477 | .reserve_xprt = xprt_reserve_xprt_cong, | 1480 | .reserve_xprt = xprt_reserve_xprt_cong, |
| 1478 | .release_xprt = xprt_release_xprt_cong, | 1481 | .release_xprt = xprt_release_xprt_cong, |
| 1479 | .rpcbind = rpcb_getport, | 1482 | .rpcbind = rpcb_getport_async, |
| 1480 | .set_port = xs_set_port, | 1483 | .set_port = xs_set_port, |
| 1481 | .connect = xs_connect, | 1484 | .connect = xs_connect, |
| 1482 | .buf_alloc = rpc_malloc, | 1485 | .buf_alloc = rpc_malloc, |
| @@ -1493,7 +1496,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
| 1493 | static struct rpc_xprt_ops xs_tcp_ops = { | 1496 | static struct rpc_xprt_ops xs_tcp_ops = { |
| 1494 | .reserve_xprt = xprt_reserve_xprt, | 1497 | .reserve_xprt = xprt_reserve_xprt, |
| 1495 | .release_xprt = xs_tcp_release_xprt, | 1498 | .release_xprt = xs_tcp_release_xprt, |
| 1496 | .rpcbind = rpcb_getport, | 1499 | .rpcbind = rpcb_getport_async, |
| 1497 | .set_port = xs_set_port, | 1500 | .set_port = xs_set_port, |
| 1498 | .connect = xs_connect, | 1501 | .connect = xs_connect, |
| 1499 | .buf_alloc = rpc_malloc, | 1502 | .buf_alloc = rpc_malloc, |
| @@ -1505,12 +1508,12 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
| 1505 | .print_stats = xs_tcp_print_stats, | 1508 | .print_stats = xs_tcp_print_stats, |
| 1506 | }; | 1509 | }; |
| 1507 | 1510 | ||
| 1508 | static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size) | 1511 | static struct rpc_xprt *xs_setup_xprt(struct rpc_xprtsock_create *args, unsigned int slot_table_size) |
| 1509 | { | 1512 | { |
| 1510 | struct rpc_xprt *xprt; | 1513 | struct rpc_xprt *xprt; |
| 1511 | struct sock_xprt *new; | 1514 | struct sock_xprt *new; |
| 1512 | 1515 | ||
| 1513 | if (addrlen > sizeof(xprt->addr)) { | 1516 | if (args->addrlen > sizeof(xprt->addr)) { |
| 1514 | dprintk("RPC: xs_setup_xprt: address too large\n"); | 1517 | dprintk("RPC: xs_setup_xprt: address too large\n"); |
| 1515 | return ERR_PTR(-EBADF); | 1518 | return ERR_PTR(-EBADF); |
| 1516 | } | 1519 | } |
| @@ -1532,8 +1535,10 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
| 1532 | return ERR_PTR(-ENOMEM); | 1535 | return ERR_PTR(-ENOMEM); |
| 1533 | } | 1536 | } |
| 1534 | 1537 | ||
| 1535 | memcpy(&xprt->addr, addr, addrlen); | 1538 | memcpy(&xprt->addr, args->dstaddr, args->addrlen); |
| 1536 | xprt->addrlen = addrlen; | 1539 | xprt->addrlen = args->addrlen; |
| 1540 | if (args->srcaddr) | ||
| 1541 | memcpy(&new->addr, args->srcaddr, args->addrlen); | ||
| 1537 | new->port = xs_get_random_port(); | 1542 | new->port = xs_get_random_port(); |
| 1538 | 1543 | ||
| 1539 | return xprt; | 1544 | return xprt; |
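For illustration (not part of the patch): xs_setup_xprt() now stores an optional source address from args->srcaddr in the sock_xprt, and xs_bind() above binds the socket to it (while the port still comes from the reserved-port search). A hedged, IPv4-only sketch of supplying one; names other than the fields shown in these hunks are hypothetical.

    /* Request a UDP transport whose socket is bound to a given local IPv4
     * address.  The stack copy of 'src' is safe because xs_setup_xprt()
     * memcpy()s it into transport->addr before returning.
     */
    static struct rpc_xprt *example_udp_xprt_from(struct in_addr local,
    					      struct sockaddr_in *dst)
    {
    	struct sockaddr_in src = {
    		.sin_family = AF_INET,
    		.sin_addr   = local,
    	};
    	struct rpc_xprtsock_create args = {
    		.proto   = IPPROTO_UDP,
    		.srcaddr = (struct sockaddr *)&src,
    		.dstaddr = (struct sockaddr *)dst,
    		.addrlen = sizeof(*dst),
    		.timeout = NULL,
    	};

    	return xs_setup_udp(&args);
    }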
| @@ -1541,22 +1546,20 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
| 1541 | 1546 | ||
| 1542 | /** | 1547 | /** |
| 1543 | * xs_setup_udp - Set up transport to use a UDP socket | 1548 | * xs_setup_udp - Set up transport to use a UDP socket |
| 1544 | * @addr: address of remote server | 1549 | * @args: rpc transport creation arguments |
| 1545 | * @addrlen: length of address in bytes | ||
| 1546 | * @to: timeout parameters | ||
| 1547 | * | 1550 | * |
| 1548 | */ | 1551 | */ |
| 1549 | struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) | 1552 | struct rpc_xprt *xs_setup_udp(struct rpc_xprtsock_create *args) |
| 1550 | { | 1553 | { |
| 1551 | struct rpc_xprt *xprt; | 1554 | struct rpc_xprt *xprt; |
| 1552 | struct sock_xprt *transport; | 1555 | struct sock_xprt *transport; |
| 1553 | 1556 | ||
| 1554 | xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries); | 1557 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); |
| 1555 | if (IS_ERR(xprt)) | 1558 | if (IS_ERR(xprt)) |
| 1556 | return xprt; | 1559 | return xprt; |
| 1557 | transport = container_of(xprt, struct sock_xprt, xprt); | 1560 | transport = container_of(xprt, struct sock_xprt, xprt); |
| 1558 | 1561 | ||
| 1559 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) | 1562 | if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) |
| 1560 | xprt_set_bound(xprt); | 1563 | xprt_set_bound(xprt); |
| 1561 | 1564 | ||
| 1562 | xprt->prot = IPPROTO_UDP; | 1565 | xprt->prot = IPPROTO_UDP; |
| @@ -1572,8 +1575,8 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
| 1572 | 1575 | ||
| 1573 | xprt->ops = &xs_udp_ops; | 1576 | xprt->ops = &xs_udp_ops; |
| 1574 | 1577 | ||
| 1575 | if (to) | 1578 | if (args->timeout) |
| 1576 | xprt->timeout = *to; | 1579 | xprt->timeout = *args->timeout; |
| 1577 | else | 1580 | else |
| 1578 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); | 1581 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); |
| 1579 | 1582 | ||
| @@ -1586,22 +1589,20 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
| 1586 | 1589 | ||
| 1587 | /** | 1590 | /** |
| 1588 | * xs_setup_tcp - Set up transport to use a TCP socket | 1591 | * xs_setup_tcp - Set up transport to use a TCP socket |
| 1589 | * @addr: address of remote server | 1592 | * @args: rpc transport creation arguments |
| 1590 | * @addrlen: length of address in bytes | ||
| 1591 | * @to: timeout parameters | ||
| 1592 | * | 1593 | * |
| 1593 | */ | 1594 | */ |
| 1594 | struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) | 1595 | struct rpc_xprt *xs_setup_tcp(struct rpc_xprtsock_create *args) |
| 1595 | { | 1596 | { |
| 1596 | struct rpc_xprt *xprt; | 1597 | struct rpc_xprt *xprt; |
| 1597 | struct sock_xprt *transport; | 1598 | struct sock_xprt *transport; |
| 1598 | 1599 | ||
| 1599 | xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries); | 1600 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
| 1600 | if (IS_ERR(xprt)) | 1601 | if (IS_ERR(xprt)) |
| 1601 | return xprt; | 1602 | return xprt; |
| 1602 | transport = container_of(xprt, struct sock_xprt, xprt); | 1603 | transport = container_of(xprt, struct sock_xprt, xprt); |
| 1603 | 1604 | ||
| 1604 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) | 1605 | if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) |
| 1605 | xprt_set_bound(xprt); | 1606 | xprt_set_bound(xprt); |
| 1606 | 1607 | ||
| 1607 | xprt->prot = IPPROTO_TCP; | 1608 | xprt->prot = IPPROTO_TCP; |
| @@ -1616,8 +1617,8 @@ struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
| 1616 | 1617 | ||
| 1617 | xprt->ops = &xs_tcp_ops; | 1618 | xprt->ops = &xs_tcp_ops; |
| 1618 | 1619 | ||
| 1619 | if (to) | 1620 | if (args->timeout) |
| 1620 | xprt->timeout = *to; | 1621 | xprt->timeout = *args->timeout; |
| 1621 | else | 1622 | else |
| 1622 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); | 1623 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); |
| 1623 | 1624 | ||
