-rw-r--r--  fs/nfs/callback_proc.c           |  19
-rw-r--r--  fs/nfs/dir.c                     |  62
-rw-r--r--  fs/nfs/inode.c                   |  34
-rw-r--r--  fs/nfs/internal.h                |   8
-rw-r--r--  fs/nfs/nfs3proc.c                |  36
-rw-r--r--  fs/nfs/nfs4_fs.h                 |  11
-rw-r--r--  fs/nfs/nfs4client.c              |   7
-rw-r--r--  fs/nfs/nfs4proc.c                | 197
-rw-r--r--  fs/nfs/nfs4state.c               |   6
-rw-r--r--  fs/nfs/nfs4xdr.c                 |   3
-rw-r--r--  fs/nfs/pnfs.c                    |  17
-rw-r--r--  fs/nfs/proc.c                    |  25
-rw-r--r--  fs/nfs/unlink.c                  |  35
-rw-r--r--  include/linux/nfs_fs.h           |   2
-rw-r--r--  include/linux/nfs_xdr.h          |   3
-rw-r--r--  include/linux/sunrpc/bc_xprt.h   |   3
-rw-r--r--  net/sunrpc/backchannel_rqst.c    |  93
-rw-r--r--  net/sunrpc/clnt.c                |  23
-rw-r--r--  net/sunrpc/sched.c               |   3
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c   |   4
-rw-r--r--  net/sunrpc/xprtrdma/transport.c  |  10
-rw-r--r--  net/sunrpc/xprtsock.c            |  28
22 files changed, 368 insertions, 261 deletions
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index ae2e87b95453..41db5258e7a7 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -112,7 +112,8 @@ out:
  * TODO: keep track of all layouts (and delegations) in a hash table
  * hashed by filehandle.
  */
-static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp, struct nfs_fh *fh)
+static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
+        struct nfs_fh *fh, nfs4_stateid *stateid)
 {
     struct nfs_server *server;
     struct inode *ino;
@@ -120,17 +121,19 @@ static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
 
     list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
         list_for_each_entry(lo, &server->layouts, plh_layouts) {
+            if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
+                continue;
             if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
                 continue;
             ino = igrab(lo->plh_inode);
             if (!ino)
-                continue;
+                break;
             spin_lock(&ino->i_lock);
             /* Is this layout in the process of being freed? */
             if (NFS_I(ino)->layout != lo) {
                 spin_unlock(&ino->i_lock);
                 iput(ino);
-                continue;
+                break;
             }
             pnfs_get_layout_hdr(lo);
             spin_unlock(&ino->i_lock);
@@ -141,13 +144,14 @@ static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
     return NULL;
 }
 
-static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, struct nfs_fh *fh)
+static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
+        struct nfs_fh *fh, nfs4_stateid *stateid)
 {
     struct pnfs_layout_hdr *lo;
 
     spin_lock(&clp->cl_lock);
     rcu_read_lock();
-    lo = get_layout_by_fh_locked(clp, fh);
+    lo = get_layout_by_fh_locked(clp, fh, stateid);
     rcu_read_unlock();
     spin_unlock(&clp->cl_lock);
 
@@ -162,9 +166,9 @@ static u32 initiate_file_draining(struct nfs_client *clp,
     u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
     LIST_HEAD(free_me_list);
 
-    lo = get_layout_by_fh(clp, &args->cbl_fh);
+    lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
     if (!lo)
-        return NFS4ERR_NOMATCHING_LAYOUT;
+        goto out;
 
     ino = lo->plh_inode;
     spin_lock(&ino->i_lock);
@@ -179,6 +183,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
     pnfs_free_lseg_list(&free_me_list);
     pnfs_put_layout_hdr(lo);
     iput(ino);
+out:
     return rv;
 }
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4a48fe4b84b6..d9f3d067cd15 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -69,21 +69,28 @@ const struct address_space_operations nfs_dir_aops = {
 
 static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, struct rpc_cred *cred)
 {
+    struct nfs_inode *nfsi = NFS_I(dir);
     struct nfs_open_dir_context *ctx;
     ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
     if (ctx != NULL) {
         ctx->duped = 0;
-        ctx->attr_gencount = NFS_I(dir)->attr_gencount;
+        ctx->attr_gencount = nfsi->attr_gencount;
         ctx->dir_cookie = 0;
         ctx->dup_cookie = 0;
         ctx->cred = get_rpccred(cred);
+        spin_lock(&dir->i_lock);
+        list_add(&ctx->list, &nfsi->open_files);
+        spin_unlock(&dir->i_lock);
         return ctx;
     }
     return ERR_PTR(-ENOMEM);
 }
 
-static void put_nfs_open_dir_context(struct nfs_open_dir_context *ctx)
+static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx)
 {
+    spin_lock(&dir->i_lock);
+    list_del(&ctx->list);
+    spin_unlock(&dir->i_lock);
     put_rpccred(ctx->cred);
     kfree(ctx);
 }
@@ -126,7 +133,7 @@ out:
 static int
 nfs_closedir(struct inode *inode, struct file *filp)
 {
-    put_nfs_open_dir_context(filp->private_data);
+    put_nfs_open_dir_context(filp->f_path.dentry->d_inode, filp->private_data);
     return 0;
 }
 
@@ -306,10 +313,9 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
         if (printk_ratelimit()) {
             pr_notice("NFS: directory %pD2 contains a readdir loop."
                 "Please contact your server vendor. "
-                "The file: %s has duplicate cookie %llu\n",
-                desc->file,
-                array->array[i].string.name,
-                *desc->dir_cookie);
+                "The file: %.*s has duplicate cookie %llu\n",
+                desc->file, array->array[i].string.len,
+                array->array[i].string.name, *desc->dir_cookie);
         }
         status = -ELOOP;
         goto out;
@@ -437,6 +443,22 @@ void nfs_advise_use_readdirplus(struct inode *dir)
     set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
 }
 
+/*
+ * This function is mainly for use by nfs_getattr().
+ *
+ * If this is an 'ls -l', we want to force use of readdirplus.
+ * Do this by checking if there is an active file descriptor
+ * and calling nfs_advise_use_readdirplus, then forcing a
+ * cache flush.
+ */
+void nfs_force_use_readdirplus(struct inode *dir)
+{
+    if (!list_empty(&NFS_I(dir)->open_files)) {
+        nfs_advise_use_readdirplus(dir);
+        nfs_zap_mapping(dir, dir->i_mapping);
+    }
+}
+
 static
 void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
 {
@@ -815,6 +837,17 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
     goto out;
 }
 
+static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
+{
+    struct nfs_inode *nfsi = NFS_I(dir);
+
+    if (nfs_attribute_cache_expired(dir))
+        return true;
+    if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+        return true;
+    return false;
+}
+
 /* The file offset position represents the dirent entry number. A
    last cookie cache takes care of the common case of reading the
    whole directory.
@@ -847,7 +880,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
     desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
 
     nfs_block_sillyrename(dentry);
-    if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
+    if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
         res = nfs_revalidate_mapping(inode, file->f_mapping);
     if (res < 0)
         goto out;
@@ -1911,6 +1944,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
     struct inode *old_inode = old_dentry->d_inode;
     struct inode *new_inode = new_dentry->d_inode;
     struct dentry *dentry = NULL, *rehash = NULL;
+    struct rpc_task *task;
     int error = -EBUSY;
 
     dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
@@ -1958,8 +1992,16 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
     if (new_inode != NULL)
         NFS_PROTO(new_inode)->return_delegation(new_inode);
 
-    error = NFS_PROTO(old_dir)->rename(old_dir, &old_dentry->d_name,
-                new_dir, &new_dentry->d_name);
+    task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
+    if (IS_ERR(task)) {
+        error = PTR_ERR(task);
+        goto out;
+    }
+
+    error = rpc_wait_for_completion_task(task);
+    if (error == 0)
+        error = task->tk_status;
+    rpc_put_task(task);
     nfs_mark_for_revalidate(old_inode);
 out:
     if (rehash)
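Note on the readdir-loop warning above: the format switches from "%s" to "%.*s" because the cached entry name is stored as a length-counted string rather than a NUL-terminated one. A small userspace sketch (plain C, not kernel code) of the printf precision idiom the new message relies on:

#include <stdio.h>

int main(void)
{
    /* A length-counted name with no terminating '\0', as in the cache entry. */
    const char name[6] = { 'f', 'o', 'o', 'b', 'a', 'r' };
    int len = 3;
    unsigned long long cookie = 42;

    /* "%.*s" prints at most 'len' bytes, so the missing NUL is harmless. */
    printf("The file: %.*s has duplicate cookie %llu\n", len, name, cookie);
    return 0;
}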
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c4702baa22b8..0c438973f3c8 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -588,6 +588,25 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
 }
 EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
 
+static void nfs_request_parent_use_readdirplus(struct dentry *dentry)
+{
+    struct dentry *parent;
+
+    parent = dget_parent(dentry);
+    nfs_force_use_readdirplus(parent->d_inode);
+    dput(parent);
+}
+
+static bool nfs_need_revalidate_inode(struct inode *inode)
+{
+    if (NFS_I(inode)->cache_validity &
+            (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
+        return true;
+    if (nfs_attribute_cache_expired(inode))
+        return true;
+    return false;
+}
+
 int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
     struct inode *inode = dentry->d_inode;
@@ -616,10 +635,13 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
         ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
         need_atime = 0;
 
-    if (need_atime)
-        err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
-    else
-        err = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+    if (need_atime || nfs_need_revalidate_inode(inode)) {
+        struct nfs_server *server = NFS_SERVER(inode);
+
+        if (server->caps & NFS_CAP_READDIRPLUS)
+            nfs_request_parent_use_readdirplus(dentry);
+        err = __nfs_revalidate_inode(server, inode);
+    }
     if (!err) {
         generic_fillattr(inode, stat);
         stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
@@ -961,9 +983,7 @@ int nfs_attribute_cache_expired(struct inode *inode)
  */
 int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
 {
-    if (!(NFS_I(inode)->cache_validity &
-            (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
-            && !nfs_attribute_cache_expired(inode))
+    if (!nfs_need_revalidate_inode(inode))
         return NFS_STALE(inode) ? -ESTALE : 0;
     return __nfs_revalidate_inode(server, inode);
 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b46cf5a67329..dd8bfc2e2464 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -301,6 +301,7 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
                 const char *ip_addr);
 
 /* dir.c */
+extern void nfs_force_use_readdirplus(struct inode *dir);
 extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
                 struct shrink_control *sc);
 extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,
@@ -474,6 +475,13 @@ extern int nfs_migrate_page(struct address_space *,
 #define nfs_migrate_page NULL
 #endif
 
+/* unlink.c */
+extern struct rpc_task *
+nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
+        struct dentry *old_dentry, struct dentry *new_dentry,
+        void (*complete)(struct rpc_task *, struct nfs_renamedata *));
+extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
+
 /* direct.c */
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
         struct nfs_direct_req *dreq);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index a462ef0fb5d6..db60149c4579 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -479,41 +479,6 @@ nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
 }
 
 static int
-nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
-        struct inode *new_dir, struct qstr *new_name)
-{
-    struct nfs_renameargs arg = {
-        .old_dir = NFS_FH(old_dir),
-        .old_name = old_name,
-        .new_dir = NFS_FH(new_dir),
-        .new_name = new_name,
-    };
-    struct nfs_renameres res;
-    struct rpc_message msg = {
-        .rpc_proc = &nfs3_procedures[NFS3PROC_RENAME],
-        .rpc_argp = &arg,
-        .rpc_resp = &res,
-    };
-    int status = -ENOMEM;
-
-    dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
-
-    res.old_fattr = nfs_alloc_fattr();
-    res.new_fattr = nfs_alloc_fattr();
-    if (res.old_fattr == NULL || res.new_fattr == NULL)
-        goto out;
-
-    status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
-    nfs_post_op_update_inode(old_dir, res.old_fattr);
-    nfs_post_op_update_inode(new_dir, res.new_fattr);
-out:
-    nfs_free_fattr(res.old_fattr);
-    nfs_free_fattr(res.new_fattr);
-    dprintk("NFS reply rename: %d\n", status);
-    return status;
-}
-
-static int
 nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
 {
     struct nfs3_linkargs arg = {
@@ -968,7 +933,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
     .unlink_setup = nfs3_proc_unlink_setup,
     .unlink_rpc_prepare = nfs3_proc_unlink_rpc_prepare,
     .unlink_done = nfs3_proc_unlink_done,
-    .rename = nfs3_proc_rename,
     .rename_setup = nfs3_proc_rename_setup,
     .rename_rpc_prepare = nfs3_proc_rename_rpc_prepare,
     .rename_done = nfs3_proc_rename_done,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a5b27c2d9689..e1d1badbe53c 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -427,6 +427,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
 extern void nfs_inode_find_state_and_recover(struct inode *inode,
         const nfs4_stateid *stateid);
+extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *, struct nfs4_state *);
 extern void nfs4_schedule_lease_recovery(struct nfs_client *);
 extern int nfs4_wait_clnt_recover(struct nfs_client *clp);
 extern int nfs4_client_recover_expired_lease(struct nfs_client *clp);
@@ -500,6 +501,16 @@ static inline bool nfs4_stateid_match(const nfs4_stateid *dst, const nfs4_statei
     return memcmp(dst, src, sizeof(*dst)) == 0;
 }
 
+static inline bool nfs4_stateid_match_other(const nfs4_stateid *dst, const nfs4_stateid *src)
+{
+    return memcmp(dst->other, src->other, NFS4_STATEID_OTHER_SIZE) == 0;
+}
+
+static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stateid *s2)
+{
+    return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
+}
+
 static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
 {
     return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
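The two helpers added above split stateid comparison into identity and revision: nfs4_stateid_match_other() compares only the twelve "other" bytes that name the state and ignores the sequence counter, while nfs4_stateid_is_newer() compares the counters using serial-number arithmetic (illustrated after the pnfs.c diff below). A userspace model of the identity check, using ntohl()/htonl() in place of be32_to_cpu(); the struct here is an illustration, not the kernel definition:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>      /* htonl()/ntohl(), standing in for be32 helpers */

/* Illustrative model: a 4-byte big-endian seqid plus 12 identifying bytes. */
struct stateid {
    uint32_t seqid;
    unsigned char other[12];
};

/* Same piece of state?  Ignore the revision counter entirely. */
static int match_other(const struct stateid *a, const struct stateid *b)
{
    return memcmp(a->other, b->other, sizeof(a->other)) == 0;
}

int main(void)
{
    struct stateid s1 = { .seqid = htonl(5) };
    struct stateid s2 = { .seqid = htonl(7) };

    memcpy(s2.other, s1.other, sizeof(s1.other));   /* same identity */
    printf("same state: %d (seqids differ: %u vs %u)\n",
           match_other(&s1, &s2), ntohl(s1.seqid), ntohl(s2.seqid));
    return 0;
}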
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 0e46d3d1b6cc..aa9ef4876046 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -531,6 +531,13 @@ int nfs40_walk_client_list(struct nfs_client *new,
             *result = pos;
             dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
                 __func__, pos, atomic_read(&pos->cl_count));
+            goto out;
+        case -ERESTARTSYS:
+        case -ETIMEDOUT:
+            /* The callback path may have been inadvertently
+             * changed. Schedule recovery!
+             */
+            nfs4_schedule_path_down_recovery(pos);
         default:
             goto out;
         }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 450bfedbe2f4..397be39c6dc8 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1068,6 +1068,7 @@ static void nfs4_opendata_free(struct kref *kref)
     dput(p->dentry);
     nfs_sb_deactive(sb);
     nfs_fattr_free_names(&p->f_attr);
+    kfree(p->f_attr.mdsthreshold);
     kfree(p);
 }
 
@@ -1137,12 +1138,71 @@ static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
     nfs4_state_set_mode_locked(state, state->state | fmode);
 }
 
-static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
+static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
+{
+    struct nfs_client *clp = state->owner->so_server->nfs_client;
+    bool need_recover = false;
+
+    if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
+        need_recover = true;
+    if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
+        need_recover = true;
+    if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
+        need_recover = true;
+    if (need_recover)
+        nfs4_state_mark_reclaim_nograce(clp, state);
+}
+
+static bool nfs_need_update_open_stateid(struct nfs4_state *state,
+        nfs4_stateid *stateid)
+{
+    if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
+        return true;
+    if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
+        nfs_test_and_clear_all_open_stateid(state);
+        return true;
+    }
+    if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
+        return true;
+    return false;
+}
+
+static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
+        nfs4_stateid *stateid, fmode_t fmode)
 {
+    clear_bit(NFS_O_RDWR_STATE, &state->flags);
+    switch (fmode & (FMODE_READ|FMODE_WRITE)) {
+    case FMODE_WRITE:
+        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+        break;
+    case FMODE_READ:
+        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+        break;
+    case 0:
+        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+        clear_bit(NFS_OPEN_STATE, &state->flags);
+    }
+    if (stateid == NULL)
+        return;
+    if (!nfs_need_update_open_stateid(state, stateid))
+        return;
     if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
         nfs4_stateid_copy(&state->stateid, stateid);
     nfs4_stateid_copy(&state->open_stateid, stateid);
-    set_bit(NFS_OPEN_STATE, &state->flags);
+}
+
+static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
+{
+    write_seqlock(&state->seqlock);
+    nfs_clear_open_stateid_locked(state, stateid, fmode);
+    write_sequnlock(&state->seqlock);
+    if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
+        nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
+}
+
+static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
+{
     switch (fmode) {
     case FMODE_READ:
         set_bit(NFS_O_RDONLY_STATE, &state->flags);
@@ -1153,13 +1213,11 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *
     case FMODE_READ|FMODE_WRITE:
         set_bit(NFS_O_RDWR_STATE, &state->flags);
     }
-}
-
-static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
-{
-    write_seqlock(&state->seqlock);
-    nfs_set_open_stateid_locked(state, stateid, fmode);
-    write_sequnlock(&state->seqlock);
+    if (!nfs_need_update_open_stateid(state, stateid))
+        return;
+    if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
+        nfs4_stateid_copy(&state->stateid, stateid);
+    nfs4_stateid_copy(&state->open_stateid, stateid);
 }
 
 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
@@ -1217,6 +1275,8 @@ no_delegation:
         __update_open_stateid(state, open_stateid, NULL, fmode);
         ret = 1;
     }
+    if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
+        nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
 
     return ret;
 }
@@ -1450,12 +1510,15 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
     struct nfs4_state *newstate;
     int ret;
 
+    /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
+    clear_bit(NFS_O_RDWR_STATE, &state->flags);
+    clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+    clear_bit(NFS_O_RDONLY_STATE, &state->flags);
     /* memory barrier prior to reading state->n_* */
     clear_bit(NFS_DELEGATED_STATE, &state->flags);
     clear_bit(NFS_OPEN_STATE, &state->flags);
     smp_rmb();
     if (state->n_rdwr != 0) {
-        clear_bit(NFS_O_RDWR_STATE, &state->flags);
         ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
         if (ret != 0)
             return ret;
@@ -1463,7 +1526,6 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
         return -ESTALE;
     }
     if (state->n_wronly != 0) {
-        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
         ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
         if (ret != 0)
             return ret;
@@ -1471,7 +1533,6 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
         return -ESTALE;
     }
     if (state->n_rdonly != 0) {
-        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
         ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
         if (ret != 0)
             return ret;
@@ -2244,10 +2305,12 @@ static int _nfs4_do_open(struct inode *dir,
         }
     }
 
-    if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
-        opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
-        if (!opendata->f_attr.mdsthreshold)
-            goto err_free_label;
+    if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+        if (!opendata->f_attr.mdsthreshold) {
+            opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
+            if (!opendata->f_attr.mdsthreshold)
+                goto err_free_label;
+        }
         opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
     }
     if (dentry->d_inode != NULL)
@@ -2275,11 +2338,10 @@ static int _nfs4_do_open(struct inode *dir,
     if (opendata->file_created)
         *opened |= FILE_CREATED;
 
-    if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
+    if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
         *ctx_th = opendata->f_attr.mdsthreshold;
-    else
-        kfree(opendata->f_attr.mdsthreshold);
-    opendata->f_attr.mdsthreshold = NULL;
+        opendata->f_attr.mdsthreshold = NULL;
+    }
 
     nfs4_label_free(olabel);
 
@@ -2289,7 +2351,6 @@ static int _nfs4_do_open(struct inode *dir,
 err_free_label:
     nfs4_label_free(olabel);
 err_opendata_put:
-    kfree(opendata->f_attr.mdsthreshold);
     nfs4_opendata_put(opendata);
 err_put_state_owner:
     nfs4_put_state_owner(sp);
@@ -2479,26 +2540,6 @@ static void nfs4_free_closedata(void *data)
     kfree(calldata);
 }
 
-static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
-        fmode_t fmode)
-{
-    spin_lock(&state->owner->so_lock);
-    clear_bit(NFS_O_RDWR_STATE, &state->flags);
-    switch (fmode & (FMODE_READ|FMODE_WRITE)) {
-    case FMODE_WRITE:
-        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
-        break;
-    case FMODE_READ:
-        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
-        break;
-    case 0:
-        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
-        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
-        clear_bit(NFS_OPEN_STATE, &state->flags);
-    }
-    spin_unlock(&state->owner->so_lock);
-}
-
 static void nfs4_close_done(struct rpc_task *task, void *data)
 {
     struct nfs4_closedata *calldata = data;
@@ -2517,9 +2558,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
         if (calldata->roc)
             pnfs_roc_set_barrier(state->inode,
                 calldata->roc_barrier);
-        nfs_set_open_stateid(state, &calldata->res.stateid, 0);
+        nfs_clear_open_stateid(state, &calldata->res.stateid, 0);
         renew_lease(server, calldata->timestamp);
-        break;
+        goto out_release;
     case -NFS4ERR_ADMIN_REVOKED:
     case -NFS4ERR_STALE_STATEID:
     case -NFS4ERR_OLD_STATEID:
@@ -2533,7 +2574,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
             goto out_release;
         }
     }
-    nfs4_close_clear_stateid_flags(state, calldata->arg.fmode);
+    nfs_clear_open_stateid(state, NULL, calldata->arg.fmode);
 out_release:
     nfs_release_seqid(calldata->arg.seqid);
     nfs_refresh_inode(calldata->inode, calldata->res.fattr);
@@ -3507,49 +3548,6 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
     return 1;
 }
 
-static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
-        struct inode *new_dir, struct qstr *new_name)
-{
-    struct nfs_server *server = NFS_SERVER(old_dir);
-    struct nfs_renameargs arg = {
-        .old_dir = NFS_FH(old_dir),
-        .new_dir = NFS_FH(new_dir),
-        .old_name = old_name,
-        .new_name = new_name,
-    };
-    struct nfs_renameres res = {
-        .server = server,
-    };
-    struct rpc_message msg = {
-        .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
-        .rpc_argp = &arg,
-        .rpc_resp = &res,
-    };
-    int status = -ENOMEM;
-
-    status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
-    if (!status) {
-        update_changeattr(old_dir, &res.old_cinfo);
-        update_changeattr(new_dir, &res.new_cinfo);
-    }
-    return status;
-}
-
-static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
-        struct inode *new_dir, struct qstr *new_name)
-{
-    struct nfs4_exception exception = { };
-    int err;
-    do {
-        err = _nfs4_proc_rename(old_dir, old_name,
-                new_dir, new_name);
-        trace_nfs4_rename(old_dir, old_name, new_dir, new_name, err);
-        err = nfs4_handle_exception(NFS_SERVER(old_dir), err,
-                &exception);
-    } while (exception.retry);
-    return err;
-}
-
 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
 {
     struct nfs_server *server = NFS_SERVER(inode);
@@ -4884,6 +4882,20 @@ nfs4_init_uniform_client_string(const struct nfs_client *clp,
                 nodename);
 }
 
+/*
+ * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
+ * services. Advertise one based on the address family of the
+ * clientaddr.
+ */
+static unsigned int
+nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
+{
+    if (strchr(clp->cl_ipaddr, ':') != NULL)
+        return scnprintf(buf, len, "tcp6");
+    else
+        return scnprintf(buf, len, "tcp");
+}
+
 /**
  * nfs4_proc_setclientid - Negotiate client ID
  * @clp: state data structure
@@ -4925,12 +4937,10 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
                 setclientid.sc_name,
                 sizeof(setclientid.sc_name));
     /* cb_client4 */
-    rcu_read_lock();
-    setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
-                sizeof(setclientid.sc_netid), "%s",
-                rpc_peeraddr2str(clp->cl_rpcclient,
-                        RPC_DISPLAY_NETID));
-    rcu_read_unlock();
+    setclientid.sc_netid_len =
+                nfs4_init_callback_netid(clp,
+                        setclientid.sc_netid,
+                        sizeof(setclientid.sc_netid));
     setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
                 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
                 clp->cl_ipaddr, port >> 8, port & 255);
@@ -8408,7 +8418,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
     .unlink_setup = nfs4_proc_unlink_setup,
     .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
     .unlink_done = nfs4_proc_unlink_done,
-    .rename = nfs4_proc_rename,
     .rename_setup = nfs4_proc_rename_setup,
     .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
     .rename_done = nfs4_proc_rename_done,
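Several of the nfs4proc.c hunks above move the mdsthreshold buffer to a single-owner model: it is allocated at most once per nfs4_opendata (even when the open is retried), freed only in nfs4_opendata_free(), and handed to the open context by NULLing the opendata's pointer. A minimal standalone sketch of that allocate-once/transfer-ownership pattern; the names (open_attempt, threshold) are invented purely for illustration and are not the kernel's:

#include <stdlib.h>
#include <stdbool.h>

struct threshold { int unused; };               /* stand-in for the mdsthreshold blob */

static bool open_attempt(void) { return true; } /* pretend the open succeeds */

static int open_with_retries(struct threshold **result, int max_tries)
{
    struct threshold *t = NULL;
    int i;

    for (i = 0; i < max_tries; i++) {
        if (t == NULL) {                /* allocate once, reuse on retries */
            t = malloc(sizeof(*t));
            if (t == NULL)
                return -1;
        }
        if (open_attempt()) {
            *result = t;                /* transfer ownership to the caller... */
            t = NULL;                   /* ...so the cleanup below cannot free it */
            break;
        }
    }
    free(t);        /* single owner frees on failure; free(NULL) is a no-op */
    return *result ? 0 : -1;
}

int main(void)
{
    struct threshold *th = NULL;
    int ok = open_with_retries(&th, 3) == 0;

    free(th);
    return ok ? 0 : 1;
}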
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0deb32105ccf..2349518eef2c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1316,7 +1316,7 @@ static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_st
     return 1;
 }
 
-static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
+int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
 {
     set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
     clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -2075,8 +2075,10 @@ again:
     switch (status) {
     case 0:
         break;
-    case -NFS4ERR_DELAY:
     case -ETIMEDOUT:
+        if (clnt->cl_softrtry)
+            break;
+    case -NFS4ERR_DELAY:
     case -EAGAIN:
         ssleep(1);
     case -NFS4ERR_STALE_CLIENTID:
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 72f3bf1754ef..73ce8d4fe2c8 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -203,8 +203,7 @@ static int nfs4_stat_to_errno(int);
                 2 + encode_verifier_maxsz + 5 + \
                 nfs4_label_maxsz)
 #define decode_readdir_maxsz (op_decode_hdr_maxsz + \
-                decode_verifier_maxsz + \
-                nfs4_label_maxsz + nfs4_fattr_maxsz)
+                decode_verifier_maxsz)
 #define encode_readlink_maxsz (op_encode_hdr_maxsz)
 #define decode_readlink_maxsz (op_decode_hdr_maxsz + 1)
 #define encode_write_maxsz (op_encode_hdr_maxsz + \
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 4755858e37a0..cb53d450ae32 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -662,7 +662,18 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
  */
 static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
 {
-    return (s32)s1 - (s32)s2 > 0;
+    return (s32)(s1 - s2) > 0;
+}
+
+static void
+pnfs_verify_layout_stateid(struct pnfs_layout_hdr *lo,
+        const nfs4_stateid *new,
+        struct list_head *free_me_list)
+{
+    if (nfs4_stateid_match_other(&lo->plh_stateid, new))
+        return;
+    /* Layout is new! Kill existing layout segments */
+    pnfs_mark_matching_lsegs_invalid(lo, free_me_list, NULL);
 }
 
 /* update lo->plh_stateid with new if is more recent */
@@ -1315,6 +1326,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
     struct nfs4_layoutget_res *res = &lgp->res;
     struct pnfs_layout_segment *lseg;
     struct inode *ino = lo->plh_inode;
+    LIST_HEAD(free_me);
     int status = 0;
 
     /* Inject layout blob into I/O device driver */
@@ -1341,6 +1353,8 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
         goto out_forget_reply;
     }
 
+    /* Check that the new stateid matches the old stateid */
+    pnfs_verify_layout_stateid(lo, &res->stateid, &free_me);
     /* Done processing layoutget. Set the layout stateid */
     pnfs_set_layout_stateid(lo, &res->stateid, false);
 
@@ -1355,6 +1369,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
     }
 
     spin_unlock(&ino->i_lock);
+    pnfs_free_lseg_list(&free_me);
     return lseg;
 out:
     return ERR_PTR(status);
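The one-character change to pnfs_seqid_is_newer() above keeps the subtraction in unsigned arithmetic and only then reinterprets the result as signed; subtracting two values already cast to s32, as the old form did, can overflow the signed type. The same wraparound-safe serial-number comparison backs nfs4_stateid_is_newer() in the nfs4_fs.h hunk earlier. A small plain-C demonstration of the form the hunk switches to (illustration only; the final cast relies on the usual two's-complement behaviour):

#include <stdio.h>
#include <stdint.h>

static int seqid_is_newer(uint32_t s1, uint32_t s2)
{
    /* Subtract modulo 2^32, then check the sign of the difference. */
    return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
    /* The counter has wrapped: 0x00000002 was issued after 0xfffffffd. */
    printf("%d\n", seqid_is_newer(0x00000002u, 0xfffffffdu));   /* 1: newer */
    printf("%d\n", seqid_is_newer(0xfffffffdu, 0x00000002u));   /* 0: older */
    return 0;
}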
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index fddbba2d9eff..e55ce9e8b034 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -357,30 +357,6 @@ nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
 }
 
 static int
-nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
-        struct inode *new_dir, struct qstr *new_name)
-{
-    struct nfs_renameargs arg = {
-        .old_dir = NFS_FH(old_dir),
-        .old_name = old_name,
-        .new_dir = NFS_FH(new_dir),
-        .new_name = new_name,
-    };
-    struct rpc_message msg = {
-        .rpc_proc = &nfs_procedures[NFSPROC_RENAME],
-        .rpc_argp = &arg,
-    };
-    int status;
-
-    dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
-    status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
-    nfs_mark_for_revalidate(old_dir);
-    nfs_mark_for_revalidate(new_dir);
-    dprintk("NFS reply rename: %d\n", status);
-    return status;
-}
-
-static int
 nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
 {
     struct nfs_linkargs arg = {
@@ -745,7 +721,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
     .unlink_setup = nfs_proc_unlink_setup,
     .unlink_rpc_prepare = nfs_proc_unlink_rpc_prepare,
     .unlink_done = nfs_proc_unlink_done,
-    .rename = nfs_proc_rename,
     .rename_setup = nfs_proc_rename_setup,
     .rename_rpc_prepare = nfs_proc_rename_rpc_prepare,
     .rename_done = nfs_proc_rename_done,
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 11d78944de79..de54129336c6 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/namei.h>
+#include <linux/fsnotify.h>
 
 #include "internal.h"
 #include "nfs4_fs.h"
@@ -353,8 +354,8 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
         return;
     }
 
-    if (task->tk_status != 0)
-        nfs_cancel_async_unlink(old_dentry);
+    if (data->complete)
+        data->complete(task, data);
 }
 
 /**
@@ -399,9 +400,10 @@ static const struct rpc_call_ops nfs_rename_ops = {
  *
  * It's expected that valid references to the dentries and inodes are held
  */
-static struct rpc_task *
+struct rpc_task *
 nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
-        struct dentry *old_dentry, struct dentry *new_dentry)
+        struct dentry *old_dentry, struct dentry *new_dentry,
+        void (*complete)(struct rpc_task *, struct nfs_renamedata *))
 {
     struct nfs_renamedata *data;
     struct rpc_message msg = { };
@@ -438,6 +440,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
     data->new_dentry = dget(new_dentry);
     nfs_fattr_init(&data->old_fattr);
     nfs_fattr_init(&data->new_fattr);
+    data->complete = complete;
 
     /* set up nfs_renameargs */
     data->args.old_dir = NFS_FH(old_dir);
@@ -456,6 +459,27 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
     return rpc_run_task(&task_setup_data);
 }
 
+/*
+ * Perform tasks needed when a sillyrename is done such as cancelling the
+ * queued async unlink if it failed.
+ */
+static void
+nfs_complete_sillyrename(struct rpc_task *task, struct nfs_renamedata *data)
+{
+    struct dentry *dentry = data->old_dentry;
+
+    if (task->tk_status != 0) {
+        nfs_cancel_async_unlink(dentry);
+        return;
+    }
+
+    /*
+     * vfs_unlink and the like do not issue this when a file is
+     * sillyrenamed, so do it here.
+     */
+    fsnotify_nameremove(dentry, 0);
+}
+
 #define SILLYNAME_PREFIX ".nfs"
 #define SILLYNAME_PREFIX_LEN ((unsigned)sizeof(SILLYNAME_PREFIX) - 1)
 #define SILLYNAME_FILEID_LEN ((unsigned)sizeof(u64) << 1)
@@ -548,7 +572,8 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
     }
 
     /* run the rename task, undo unlink if it fails */
-    task = nfs_async_rename(dir, dir, dentry, sdentry);
+    task = nfs_async_rename(dir, dir, dentry, sdentry,
+                nfs_complete_sillyrename);
     if (IS_ERR(task)) {
         error = -EBUSY;
         nfs_cancel_async_unlink(dentry);
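The rename path above gains a per-request completion hook: nfs_async_rename() stores a callback in nfs_renamedata and nfs_async_rename_done() invokes it, so sillyrename can cancel its queued unlink or emit the fsnotify event, while a plain rename passes NULL and needs no extra work. A toy standalone model of that callback shape (names invented for illustration; these are not the kernel structures):

#include <stdio.h>
#include <stddef.h>

struct renamedata {
    int status;                                 /* stand-in for task->tk_status */
    void (*complete)(struct renamedata *data);  /* optional post-processing hook */
};

static void rename_done(struct renamedata *data)
{
    if (data->complete)     /* NULL means "nothing extra", as a plain rename passes */
        data->complete(data);
}

static void complete_sillyrename(struct renamedata *data)
{
    if (data->status != 0)
        printf("rename failed: cancel the queued async unlink\n");
    else
        printf("rename succeeded: emit the delete notification\n");
}

int main(void)
{
    struct renamedata silly = { .status = 0, .complete = complete_sillyrename };
    struct renamedata plain = { .status = 0, .complete = NULL };

    rename_done(&silly);
    rename_done(&plain);    /* no-op */
    return 0;
}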
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 0ae5807480f4..fa6918b0f829 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -92,6 +92,7 @@ struct nfs_open_context {
 };
 
 struct nfs_open_dir_context {
+    struct list_head list;
     struct rpc_cred *cred;
     unsigned long attr_gencount;
     __u64 dir_cookie;
@@ -510,7 +511,6 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
 extern void nfs_wait_on_sillyrename(struct dentry *dentry);
 extern void nfs_block_sillyrename(struct dentry *dentry);
 extern void nfs_unblock_sillyrename(struct dentry *dentry);
-extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
 
 /*
  * linux/fs/nfs/write.c
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 5624e4e2763c..6fb5b2335b59 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1402,6 +1402,7 @@ struct nfs_renamedata {
     struct inode *new_dir;
     struct dentry *new_dentry;
     struct nfs_fattr new_fattr;
+    void (*complete)(struct rpc_task *, struct nfs_renamedata *);
 };
 
 struct nfs_access_entry;
@@ -1444,8 +1445,6 @@ struct nfs_rpc_ops {
     void (*unlink_setup) (struct rpc_message *, struct inode *dir);
     void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
     int (*unlink_done) (struct rpc_task *, struct inode *);
-    int (*rename) (struct inode *, struct qstr *,
-            struct inode *, struct qstr *);
     void (*rename_setup) (struct rpc_message *msg, struct inode *dir);
     void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
     int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 969c0a671dbf..2ca67b55e0fe 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -32,7 +32,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <linux/sunrpc/sched.h>
 
 #ifdef CONFIG_SUNRPC_BACKCHANNEL
-struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
+struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
+void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
 void xprt_free_bc_request(struct rpc_rqst *req);
 int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
 void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
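The backchannel interface changes here from "allocate any free request" to "look one up by XID and complete it". A sketch of how a transport receive path might drive the two declarations above once it has parsed a callback's XID and copied its payload; the function below and its surroundings are illustrative assumptions, not code from this patch:

/* Sketch only: consuming the interface declared above from a transport's
 * receive path.  How 'xid' and 'copied' are obtained is transport specific. */
static void handle_backchannel_call(struct rpc_xprt *xprt, __be32 xid,
                                    uint32_t copied)
{
    struct rpc_rqst *req;

    req = xprt_lookup_bc_request(xprt, xid);    /* preallocated slot, keyed by XID */
    if (req == NULL)
        return;                                 /* no slot free: drop the callback */

    /* ...the caller has already copied the call into req->rq_rcv_buf... */

    xprt_complete_bc_request(req, copied);      /* hand it to the callback service */
}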
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index e860d4f7ed2a..3513d559bc45 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
| @@ -212,39 +212,23 @@ out: | |||
| 212 | } | 212 | } |
| 213 | EXPORT_SYMBOL_GPL(xprt_destroy_backchannel); | 213 | EXPORT_SYMBOL_GPL(xprt_destroy_backchannel); |
| 214 | 214 | ||
| 215 | /* | 215 | static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid) |
| 216 | * One or more rpc_rqst structure have been preallocated during the | ||
| 217 | * backchannel setup. Buffer space for the send and private XDR buffers | ||
| 218 | * has been preallocated as well. Use xprt_alloc_bc_request to allocate | ||
| 219 | * to this request. Use xprt_free_bc_request to return it. | ||
| 220 | * | ||
| 221 | * We know that we're called in soft interrupt context, grab the spin_lock | ||
| 222 | * since there is no need to grab the bottom half spin_lock. | ||
| 223 | * | ||
| 224 | * Return an available rpc_rqst, otherwise NULL if non are available. | ||
| 225 | */ | ||
| 226 | struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt) | ||
| 227 | { | 216 | { |
| 228 | struct rpc_rqst *req; | 217 | struct rpc_rqst *req = NULL; |
| 229 | 218 | ||
| 230 | dprintk("RPC: allocate a backchannel request\n"); | 219 | dprintk("RPC: allocate a backchannel request\n"); |
| 231 | spin_lock(&xprt->bc_pa_lock); | 220 | if (list_empty(&xprt->bc_pa_list)) |
| 232 | if (!list_empty(&xprt->bc_pa_list)) { | 221 | goto not_found; |
| 233 | req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, | ||
| 234 | rq_bc_pa_list); | ||
| 235 | list_del(&req->rq_bc_pa_list); | ||
| 236 | } else { | ||
| 237 | req = NULL; | ||
| 238 | } | ||
| 239 | spin_unlock(&xprt->bc_pa_lock); | ||
| 240 | 222 | ||
| 241 | if (req != NULL) { | 223 | req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, |
| 242 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); | 224 | rq_bc_pa_list); |
| 243 | req->rq_reply_bytes_recvd = 0; | 225 | req->rq_reply_bytes_recvd = 0; |
| 244 | req->rq_bytes_sent = 0; | 226 | req->rq_bytes_sent = 0; |
| 245 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, | 227 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, |
| 246 | sizeof(req->rq_private_buf)); | 228 | sizeof(req->rq_private_buf)); |
| 247 | } | 229 | req->rq_xid = xid; |
| 230 | req->rq_connect_cookie = xprt->connect_cookie; | ||
| 231 | not_found: | ||
| 248 | dprintk("RPC: backchannel req=%p\n", req); | 232 | dprintk("RPC: backchannel req=%p\n", req); |
| 249 | return req; | 233 | return req; |
| 250 | } | 234 | } |
| @@ -259,6 +243,7 @@ void xprt_free_bc_request(struct rpc_rqst *req) | |||
| 259 | 243 | ||
| 260 | dprintk("RPC: free backchannel req=%p\n", req); | 244 | dprintk("RPC: free backchannel req=%p\n", req); |
| 261 | 245 | ||
| 246 | req->rq_connect_cookie = xprt->connect_cookie - 1; | ||
| 262 | smp_mb__before_clear_bit(); | 247 | smp_mb__before_clear_bit(); |
| 263 | WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); | 248 | WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); |
| 264 | clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); | 249 | clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); |
| @@ -281,7 +266,57 @@ void xprt_free_bc_request(struct rpc_rqst *req) | |||
| 281 | * may be reused by a new callback request. | 266 | * may be reused by a new callback request. |
| 282 | */ | 267 | */ |
| 283 | spin_lock_bh(&xprt->bc_pa_lock); | 268 | spin_lock_bh(&xprt->bc_pa_lock); |
| 284 | list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list); | 269 | list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); |
| 285 | spin_unlock_bh(&xprt->bc_pa_lock); | 270 | spin_unlock_bh(&xprt->bc_pa_lock); |
| 286 | } | 271 | } |
| 287 | 272 | ||
| 273 | /* | ||
| 274 | * One or more rpc_rqst structures have been preallocated during the | ||
| 275 | * backchannel setup. Buffer space for the send and private XDR buffers | ||
| 276 | * has been preallocated as well. Use xprt_alloc_bc_request to allocate | ||
| 277 | * to this request. Use xprt_free_bc_request to return it. | ||
| 278 | * | ||
| 279 | * We know that we're called in soft interrupt context, grab the spin_lock | ||
| 280 | * since there is no need to grab the bottom half spin_lock. | ||
| 281 | * | ||
| 282 | * Return an available rpc_rqst, otherwise NULL if none are available. | ||
| 283 | */ | ||
| 284 | struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid) | ||
| 285 | { | ||
| 286 | struct rpc_rqst *req; | ||
| 287 | |||
| 288 | spin_lock(&xprt->bc_pa_lock); | ||
| 289 | list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) { | ||
| 290 | if (req->rq_connect_cookie != xprt->connect_cookie) | ||
| 291 | continue; | ||
| 292 | if (req->rq_xid == xid) | ||
| 293 | goto found; | ||
| 294 | } | ||
| 295 | req = xprt_alloc_bc_request(xprt, xid); | ||
| 296 | found: | ||
| 297 | spin_unlock(&xprt->bc_pa_lock); | ||
| 298 | return req; | ||
| 299 | } | ||
| 300 | |||
| 301 | /* | ||
| 302 | * Add callback request to callback list. The callback | ||
| 303 | * service sleeps on the sv_cb_waitq waiting for new | ||
| 304 | * requests. Wake it up after enqueuing the | ||
| 305 | * request. | ||
| 306 | */ | ||
| 307 | void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) | ||
| 308 | { | ||
| 309 | struct rpc_xprt *xprt = req->rq_xprt; | ||
| 310 | struct svc_serv *bc_serv = xprt->bc_serv; | ||
| 311 | |||
| 312 | req->rq_private_buf.len = copied; | ||
| 313 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); | ||
| 314 | |||
| 315 | dprintk("RPC: add callback request to list\n"); | ||
| 316 | spin_lock(&bc_serv->sv_cb_lock); | ||
| 317 | list_del(&req->rq_bc_pa_list); | ||
| 318 | list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); | ||
| 319 | wake_up(&bc_serv->sv_cb_waitq); | ||
| 320 | spin_unlock(&bc_serv->sv_cb_lock); | ||
| 321 | } | ||
| 322 | |||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 0edada973434..f400445d1a44 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -1363,6 +1363,7 @@ rpc_restart_call_prepare(struct rpc_task *task) | |||
| 1363 | if (RPC_ASSASSINATED(task)) | 1363 | if (RPC_ASSASSINATED(task)) |
| 1364 | return 0; | 1364 | return 0; |
| 1365 | task->tk_action = call_start; | 1365 | task->tk_action = call_start; |
| 1366 | task->tk_status = 0; | ||
| 1366 | if (task->tk_ops->rpc_call_prepare != NULL) | 1367 | if (task->tk_ops->rpc_call_prepare != NULL) |
| 1367 | task->tk_action = rpc_prepare_task; | 1368 | task->tk_action = rpc_prepare_task; |
| 1368 | return 1; | 1369 | return 1; |
| @@ -1379,6 +1380,7 @@ rpc_restart_call(struct rpc_task *task) | |||
| 1379 | if (RPC_ASSASSINATED(task)) | 1380 | if (RPC_ASSASSINATED(task)) |
| 1380 | return 0; | 1381 | return 0; |
| 1381 | task->tk_action = call_start; | 1382 | task->tk_action = call_start; |
| 1383 | task->tk_status = 0; | ||
| 1382 | return 1; | 1384 | return 1; |
| 1383 | } | 1385 | } |
| 1384 | EXPORT_SYMBOL_GPL(rpc_restart_call); | 1386 | EXPORT_SYMBOL_GPL(rpc_restart_call); |
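Both restart helpers now zero tk_status, so a completion callback that restarts the call after a transient error no longer carries the stale error code into the restarted state machine. An illustrative rpc_call_done handler of the kind that relies on this (an assumption for illustration, not taken from this series):

    #include <linux/errno.h>
    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/sched.h>

    static void example_call_done(struct rpc_task *task, void *calldata)
    {
            /* Retry a "try again later" style server error after a delay;
             * with this change the restarted call starts with tk_status == 0. */
            if (task->tk_status == -EJUKEBOX) {
                    rpc_delay(task, HZ);
                    rpc_restart_call_prepare(task);
            }
    }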
| @@ -1728,9 +1730,7 @@ call_bind_status(struct rpc_task *task) | |||
| 1728 | case -EPROTONOSUPPORT: | 1730 | case -EPROTONOSUPPORT: |
| 1729 | dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n", | 1731 | dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n", |
| 1730 | task->tk_pid); | 1732 | task->tk_pid); |
| 1731 | task->tk_status = 0; | 1733 | goto retry_timeout; |
| 1732 | task->tk_action = call_bind; | ||
| 1733 | return; | ||
| 1734 | case -ECONNREFUSED: /* connection problems */ | 1734 | case -ECONNREFUSED: /* connection problems */ |
| 1735 | case -ECONNRESET: | 1735 | case -ECONNRESET: |
| 1736 | case -ECONNABORTED: | 1736 | case -ECONNABORTED: |
| @@ -1756,6 +1756,7 @@ call_bind_status(struct rpc_task *task) | |||
| 1756 | return; | 1756 | return; |
| 1757 | 1757 | ||
| 1758 | retry_timeout: | 1758 | retry_timeout: |
| 1759 | task->tk_status = 0; | ||
| 1759 | task->tk_action = call_timeout; | 1760 | task->tk_action = call_timeout; |
| 1760 | } | 1761 | } |
| 1761 | 1762 | ||
| @@ -1798,21 +1799,19 @@ call_connect_status(struct rpc_task *task) | |||
| 1798 | trace_rpc_connect_status(task, status); | 1799 | trace_rpc_connect_status(task, status); |
| 1799 | task->tk_status = 0; | 1800 | task->tk_status = 0; |
| 1800 | switch (status) { | 1801 | switch (status) { |
| 1801 | /* if soft mounted, test if we've timed out */ | ||
| 1802 | case -ETIMEDOUT: | ||
| 1803 | task->tk_action = call_timeout; | ||
| 1804 | return; | ||
| 1805 | case -ECONNREFUSED: | 1802 | case -ECONNREFUSED: |
| 1806 | case -ECONNRESET: | 1803 | case -ECONNRESET: |
| 1807 | case -ECONNABORTED: | 1804 | case -ECONNABORTED: |
| 1808 | case -ENETUNREACH: | 1805 | case -ENETUNREACH: |
| 1809 | case -EHOSTUNREACH: | 1806 | case -EHOSTUNREACH: |
| 1810 | /* retry with existing socket, after a delay */ | ||
| 1811 | rpc_delay(task, 3*HZ); | ||
| 1812 | if (RPC_IS_SOFTCONN(task)) | 1807 | if (RPC_IS_SOFTCONN(task)) |
| 1813 | break; | 1808 | break; |
| 1809 | /* retry with existing socket, after a delay */ | ||
| 1810 | rpc_delay(task, 3*HZ); | ||
| 1814 | case -EAGAIN: | 1811 | case -EAGAIN: |
| 1815 | task->tk_action = call_bind; | 1812 | /* Check for timeouts before looping back to call_bind */ |
| 1813 | case -ETIMEDOUT: | ||
| 1814 | task->tk_action = call_timeout; | ||
| 1816 | return; | 1815 | return; |
| 1817 | case 0: | 1816 | case 0: |
| 1818 | clnt->cl_stats->netreconn++; | 1817 | clnt->cl_stats->netreconn++; |
| @@ -2007,6 +2006,10 @@ call_status(struct rpc_task *task) | |||
| 2007 | case -EHOSTDOWN: | 2006 | case -EHOSTDOWN: |
| 2008 | case -EHOSTUNREACH: | 2007 | case -EHOSTUNREACH: |
| 2009 | case -ENETUNREACH: | 2008 | case -ENETUNREACH: |
| 2009 | if (RPC_IS_SOFTCONN(task)) { | ||
| 2010 | rpc_exit(task, status); | ||
| 2011 | break; | ||
| 2012 | } | ||
| 2010 | /* | 2013 | /* |
| 2011 | * Delay any retries for 3 seconds, then handle as if it | 2014 | * Delay any retries for 3 seconds, then handle as if it |
| 2012 | * were a timeout. | 2015 | * were a timeout. |
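The call_status() change makes RPC_TASK_SOFTCONN tasks fail immediately when the host or network is unreachable instead of sleeping three seconds and retrying. A sketch of a caller that opts into that behaviour; the function name, message and callback ops are assumed placeholders, the RPC_TASK_SOFTCONN flag is the point:

    #include <linux/err.h>
    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/sched.h>

    static int example_softconn_call(struct rpc_clnt *clnt,
                                     const struct rpc_message *msg,
                                     const struct rpc_call_ops *ops, void *data)
    {
            struct rpc_task_setup task_setup_data = {
                    .rpc_client     = clnt,
                    .rpc_message    = msg,
                    .callback_ops   = ops,
                    .callback_data  = data,
                    .flags          = RPC_TASK_SOFTCONN,
            };
            struct rpc_task *task;

            task = rpc_run_task(&task_setup_data);
            if (IS_ERR(task))
                    return PTR_ERR(task);
            rpc_put_task(task);
            return 0;
    }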
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index ff3cc4bf4b24..25578afe1548 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
| @@ -637,7 +637,8 @@ static void __rpc_queue_timer_fn(unsigned long ptr) | |||
| 637 | 637 | ||
| 638 | static void __rpc_atrun(struct rpc_task *task) | 638 | static void __rpc_atrun(struct rpc_task *task) |
| 639 | { | 639 | { |
| 640 | task->tk_status = 0; | 640 | if (task->tk_status == -ETIMEDOUT) |
| 641 | task->tk_status = 0; | ||
| 641 | } | 642 | } |
| 642 | 643 | ||
| 643 | /* | 644 | /* |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index e03725bfe2b8..96ead526b125 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
| @@ -649,9 +649,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad) | |||
| 649 | break; | 649 | break; |
| 650 | page_base = 0; | 650 | page_base = 0; |
| 651 | } | 651 | } |
| 652 | rqst->rq_rcv_buf.page_len = olen - copy_len; | 652 | } |
| 653 | } else | ||
| 654 | rqst->rq_rcv_buf.page_len = 0; | ||
| 655 | 653 | ||
| 656 | if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) { | 654 | if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) { |
| 657 | curlen = copy_len; | 655 | curlen = copy_len; |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 285dc0884115..1eb9c468d0c9 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
| @@ -733,7 +733,7 @@ static void __exit xprt_rdma_cleanup(void) | |||
| 733 | { | 733 | { |
| 734 | int rc; | 734 | int rc; |
| 735 | 735 | ||
| 736 | dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n"); | 736 | dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n"); |
| 737 | #ifdef RPC_DEBUG | 737 | #ifdef RPC_DEBUG |
| 738 | if (sunrpc_table_header) { | 738 | if (sunrpc_table_header) { |
| 739 | unregister_sysctl_table(sunrpc_table_header); | 739 | unregister_sysctl_table(sunrpc_table_header); |
| @@ -755,14 +755,14 @@ static int __init xprt_rdma_init(void) | |||
| 755 | if (rc) | 755 | if (rc) |
| 756 | return rc; | 756 | return rc; |
| 757 | 757 | ||
| 758 | dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n"); | 758 | dprintk("RPCRDMA Module Init, register RPC RDMA transport\n"); |
| 759 | 759 | ||
| 760 | dprintk(KERN_INFO "Defaults:\n"); | 760 | dprintk("Defaults:\n"); |
| 761 | dprintk(KERN_INFO "\tSlots %d\n" | 761 | dprintk("\tSlots %d\n" |
| 762 | "\tMaxInlineRead %d\n\tMaxInlineWrite %d\n", | 762 | "\tMaxInlineRead %d\n\tMaxInlineWrite %d\n", |
| 763 | xprt_rdma_slot_table_entries, | 763 | xprt_rdma_slot_table_entries, |
| 764 | xprt_rdma_max_inline_read, xprt_rdma_max_inline_write); | 764 | xprt_rdma_max_inline_read, xprt_rdma_max_inline_write); |
| 765 | dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n", | 765 | dprintk("\tPadding %d\n\tMemreg %d\n", |
| 766 | xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy); | 766 | xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy); |
| 767 | 767 | ||
| 768 | #ifdef RPC_DEBUG | 768 | #ifdef RPC_DEBUG |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 0addefca8e77..966763d735e9 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -1306,41 +1306,29 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt, | |||
| 1306 | * If we're unable to obtain the rpc_rqst we schedule the closing of the | 1306 | * If we're unable to obtain the rpc_rqst we schedule the closing of the |
| 1307 | * connection and return -1. | 1307 | * connection and return -1. |
| 1308 | */ | 1308 | */ |
| 1309 | static inline int xs_tcp_read_callback(struct rpc_xprt *xprt, | 1309 | static int xs_tcp_read_callback(struct rpc_xprt *xprt, |
| 1310 | struct xdr_skb_reader *desc) | 1310 | struct xdr_skb_reader *desc) |
| 1311 | { | 1311 | { |
| 1312 | struct sock_xprt *transport = | 1312 | struct sock_xprt *transport = |
| 1313 | container_of(xprt, struct sock_xprt, xprt); | 1313 | container_of(xprt, struct sock_xprt, xprt); |
| 1314 | struct rpc_rqst *req; | 1314 | struct rpc_rqst *req; |
| 1315 | 1315 | ||
| 1316 | req = xprt_alloc_bc_request(xprt); | 1316 | /* Look up and lock the request corresponding to the given XID */ |
| 1317 | spin_lock(&xprt->transport_lock); | ||
| 1318 | req = xprt_lookup_bc_request(xprt, transport->tcp_xid); | ||
| 1317 | if (req == NULL) { | 1319 | if (req == NULL) { |
| 1320 | spin_unlock(&xprt->transport_lock); | ||
| 1318 | printk(KERN_WARNING "Callback slot table overflowed\n"); | 1321 | printk(KERN_WARNING "Callback slot table overflowed\n"); |
| 1319 | xprt_force_disconnect(xprt); | 1322 | xprt_force_disconnect(xprt); |
| 1320 | return -1; | 1323 | return -1; |
| 1321 | } | 1324 | } |
| 1322 | 1325 | ||
| 1323 | req->rq_xid = transport->tcp_xid; | ||
| 1324 | dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid)); | 1326 | dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid)); |
| 1325 | xs_tcp_read_common(xprt, desc, req); | 1327 | xs_tcp_read_common(xprt, desc, req); |
| 1326 | 1328 | ||
| 1327 | if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) { | 1329 | if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) |
| 1328 | struct svc_serv *bc_serv = xprt->bc_serv; | 1330 | xprt_complete_bc_request(req, transport->tcp_copied); |
| 1329 | 1331 | spin_unlock(&xprt->transport_lock); | |
| 1330 | /* | ||
| 1331 | * Add callback request to callback list. The callback | ||
| 1332 | * service sleeps on the sv_cb_waitq waiting for new | ||
| 1333 | * requests. Wake it up after adding enqueing the | ||
| 1334 | * request. | ||
| 1335 | */ | ||
| 1336 | dprintk("RPC: add callback request to list\n"); | ||
| 1337 | spin_lock(&bc_serv->sv_cb_lock); | ||
| 1338 | list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); | ||
| 1339 | spin_unlock(&bc_serv->sv_cb_lock); | ||
| 1340 | wake_up(&bc_serv->sv_cb_waitq); | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | req->rq_private_buf.len = transport->tcp_copied; | ||
| 1344 | 1332 | ||
| 1345 | return 0; | 1333 | return 0; |
| 1346 | } | 1334 | } |
