45 files changed, 476 insertions(+), 635 deletions(-)
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 0b4acc1c5e7d..a5c019e1a447 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -361,7 +361,6 @@ static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *
361 | { | 361 | { |
362 | struct nlm_host *host = req->a_host; | 362 | struct nlm_host *host = req->a_host; |
363 | struct rpc_clnt *clnt; | 363 | struct rpc_clnt *clnt; |
364 | int status = -ENOLCK; | ||
365 | 364 | ||
366 | dprintk("lockd: call procedure %d on %s (async)\n", | 365 | dprintk("lockd: call procedure %d on %s (async)\n", |
367 | (int)proc, host->h_name); | 366 | (int)proc, host->h_name); |
@@ -373,12 +372,10 @@ static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *
373 | msg->rpc_proc = &clnt->cl_procinfo[proc]; | 372 | msg->rpc_proc = &clnt->cl_procinfo[proc]; |
374 | 373 | ||
375 | /* bootstrap and kick off the async RPC call */ | 374 | /* bootstrap and kick off the async RPC call */ |
376 | status = rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req); | 375 | return rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req); |
377 | if (status == 0) | ||
378 | return 0; | ||
379 | out_err: | 376 | out_err: |
380 | nlm_release_call(req); | 377 | tk_ops->rpc_release(req); |
381 | return status; | 378 | return -ENOLCK; |
382 | } | 379 | } |
383 | 380 | ||
384 | int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) | 381 | int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) |
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index c7db0a5bccdc..cf51f849e76c 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -593,9 +593,7 @@ callback:
593 | 593 | ||
594 | /* Call the client */ | 594 | /* Call the client */ |
595 | kref_get(&block->b_count); | 595 | kref_get(&block->b_count); |
596 | if (nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, | 596 | nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops); |
597 | &nlmsvc_grant_ops) < 0) | ||
598 | nlmsvc_release_block(block); | ||
599 | } | 597 | } |
600 | 598 | ||
601 | /* | 599 | /* |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ae9f36e393cf..2190e6c2792e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -394,7 +394,8 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
394 | static int nfs_create_rpc_client(struct nfs_client *clp, int proto, | 394 | static int nfs_create_rpc_client(struct nfs_client *clp, int proto, |
395 | unsigned int timeo, | 395 | unsigned int timeo, |
396 | unsigned int retrans, | 396 | unsigned int retrans, |
397 | rpc_authflavor_t flavor) | 397 | rpc_authflavor_t flavor, |
398 | int flags) | ||
398 | { | 399 | { |
399 | struct rpc_timeout timeparms; | 400 | struct rpc_timeout timeparms; |
400 | struct rpc_clnt *clnt = NULL; | 401 | struct rpc_clnt *clnt = NULL; |
@@ -407,6 +408,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp, int proto,
407 | .program = &nfs_program, | 408 | .program = &nfs_program, |
408 | .version = clp->rpc_ops->version, | 409 | .version = clp->rpc_ops->version, |
409 | .authflavor = flavor, | 410 | .authflavor = flavor, |
411 | .flags = flags, | ||
410 | }; | 412 | }; |
411 | 413 | ||
412 | if (!IS_ERR(clp->cl_rpcclient)) | 414 | if (!IS_ERR(clp->cl_rpcclient)) |
@@ -548,7 +550,7 @@ static int nfs_init_client(struct nfs_client *clp, const struct nfs_mount_data *
548 | * - RFC 2623, sec 2.3.2 | 550 | * - RFC 2623, sec 2.3.2 |
549 | */ | 551 | */ |
550 | error = nfs_create_rpc_client(clp, proto, data->timeo, data->retrans, | 552 | error = nfs_create_rpc_client(clp, proto, data->timeo, data->retrans, |
551 | RPC_AUTH_UNIX); | 553 | RPC_AUTH_UNIX, 0); |
552 | if (error < 0) | 554 | if (error < 0) |
553 | goto error; | 555 | goto error; |
554 | nfs_mark_client_ready(clp, NFS_CS_READY); | 556 | nfs_mark_client_ready(clp, NFS_CS_READY); |
@@ -868,7 +870,8 @@ static int nfs4_init_client(struct nfs_client *clp,
868 | /* Check NFS protocol revision and initialize RPC op vector */ | 870 | /* Check NFS protocol revision and initialize RPC op vector */ |
869 | clp->rpc_ops = &nfs_v4_clientops; | 871 | clp->rpc_ops = &nfs_v4_clientops; |
870 | 872 | ||
871 | error = nfs_create_rpc_client(clp, proto, timeo, retrans, authflavour); | 873 | error = nfs_create_rpc_client(clp, proto, timeo, retrans, authflavour, |
874 | RPC_CLNT_CREATE_DISCRTRY); | ||
872 | if (error < 0) | 875 | if (error < 0) |
873 | goto error; | 876 | goto error; |
874 | memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); | 877 | memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); |
@@ -1030,7 +1033,7 @@ error:
1030 | * Create an NFS4 referral server record | 1033 | * Create an NFS4 referral server record |
1031 | */ | 1034 | */ |
1032 | struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, | 1035 | struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, |
1033 | struct nfs_fh *fh) | 1036 | struct nfs_fh *mntfh) |
1034 | { | 1037 | { |
1035 | struct nfs_client *parent_client; | 1038 | struct nfs_client *parent_client; |
1036 | struct nfs_server *server, *parent_server; | 1039 | struct nfs_server *server, *parent_server; |
@@ -1069,8 +1072,13 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
1069 | BUG_ON(!server->nfs_client->rpc_ops); | 1072 | BUG_ON(!server->nfs_client->rpc_ops); |
1070 | BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); | 1073 | BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); |
1071 | 1074 | ||
1075 | /* Probe the root fh to retrieve its FSID and filehandle */ | ||
1076 | error = nfs4_path_walk(server, mntfh, data->mnt_path); | ||
1077 | if (error < 0) | ||
1078 | goto error; | ||
1079 | |||
1072 | /* probe the filesystem info for this server filesystem */ | 1080 | /* probe the filesystem info for this server filesystem */ |
1073 | error = nfs_probe_fsinfo(server, fh, &fattr); | 1081 | error = nfs_probe_fsinfo(server, mntfh, &fattr); |
1074 | if (error < 0) | 1082 | if (error < 0) |
1075 | goto error; | 1083 | goto error; |
1076 | 1084 | ||
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index f03a770bacb0..92d8ec859e22 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -637,7 +637,7 @@ int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
637 | * In the case it has, we assume that the dentries are untrustworthy | 637 | * In the case it has, we assume that the dentries are untrustworthy |
638 | * and may need to be looked up again. | 638 | * and may need to be looked up again. |
639 | */ | 639 | */ |
640 | static inline int nfs_check_verifier(struct inode *dir, struct dentry *dentry) | 640 | static int nfs_check_verifier(struct inode *dir, struct dentry *dentry) |
641 | { | 641 | { |
642 | if (IS_ROOT(dentry)) | 642 | if (IS_ROOT(dentry)) |
643 | return 1; | 643 | return 1; |
@@ -652,6 +652,12 @@ static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
652 | dentry->d_fsdata = (void *)verf; | 652 | dentry->d_fsdata = (void *)verf; |
653 | } | 653 | } |
654 | 654 | ||
655 | static void nfs_refresh_verifier(struct dentry * dentry, unsigned long verf) | ||
656 | { | ||
657 | if (time_after(verf, (unsigned long)dentry->d_fsdata)) | ||
658 | nfs_set_verifier(dentry, verf); | ||
659 | } | ||
660 | |||
655 | /* | 661 | /* |
656 | * Whenever an NFS operation succeeds, we know that the dentry | 662 | * Whenever an NFS operation succeeds, we know that the dentry |
657 | * is valid, so we update the revalidation timestamp. | 663 | * is valid, so we update the revalidation timestamp. |
@@ -785,7 +791,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
785 | goto out_bad; | 791 | goto out_bad; |
786 | 792 | ||
787 | nfs_renew_times(dentry); | 793 | nfs_renew_times(dentry); |
788 | nfs_set_verifier(dentry, verifier); | 794 | nfs_refresh_verifier(dentry, verifier); |
789 | out_valid: | 795 | out_valid: |
790 | unlock_kernel(); | 796 | unlock_kernel(); |
791 | dput(parent); | 797 | dput(parent); |
@@ -1085,7 +1091,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
1085 | verifier = nfs_save_change_attribute(dir); | 1091 | verifier = nfs_save_change_attribute(dir); |
1086 | ret = nfs4_open_revalidate(dir, dentry, openflags, nd); | 1092 | ret = nfs4_open_revalidate(dir, dentry, openflags, nd); |
1087 | if (!ret) | 1093 | if (!ret) |
1088 | nfs_set_verifier(dentry, verifier); | 1094 | nfs_refresh_verifier(dentry, verifier); |
1089 | unlock_kernel(); | 1095 | unlock_kernel(); |
1090 | out: | 1096 | out: |
1091 | dput(parent); | 1097 | dput(parent); |
@@ -1123,8 +1129,21 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
1123 | } | 1129 | } |
1124 | name.hash = full_name_hash(name.name, name.len); | 1130 | name.hash = full_name_hash(name.name, name.len); |
1125 | dentry = d_lookup(parent, &name); | 1131 | dentry = d_lookup(parent, &name); |
1126 | if (dentry != NULL) | 1132 | if (dentry != NULL) { |
1127 | return dentry; | 1133 | /* Is this a positive dentry that matches the readdir info? */ |
1134 | if (dentry->d_inode != NULL && | ||
1135 | (NFS_FILEID(dentry->d_inode) == entry->ino || | ||
1136 | d_mountpoint(dentry))) { | ||
1137 | if (!desc->plus || entry->fh->size == 0) | ||
1138 | return dentry; | ||
1139 | if (nfs_compare_fh(NFS_FH(dentry->d_inode), | ||
1140 | entry->fh) == 0) | ||
1141 | goto out_renew; | ||
1142 | } | ||
1143 | /* No, so d_drop to allow one to be created */ | ||
1144 | d_drop(dentry); | ||
1145 | dput(dentry); | ||
1146 | } | ||
1128 | if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) | 1147 | if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) |
1129 | return NULL; | 1148 | return NULL; |
1130 | /* Note: caller is already holding the dir->i_mutex! */ | 1149 | /* Note: caller is already holding the dir->i_mutex! */ |
@@ -1149,6 +1168,10 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
1149 | nfs_renew_times(dentry); | 1168 | nfs_renew_times(dentry); |
1150 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); | 1169 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); |
1151 | return dentry; | 1170 | return dentry; |
1171 | out_renew: | ||
1172 | nfs_renew_times(dentry); | ||
1173 | nfs_refresh_verifier(dentry, nfs_save_change_attribute(dir)); | ||
1174 | return dentry; | ||
1152 | } | 1175 | } |
1153 | 1176 | ||
1154 | /* | 1177 | /* |
@@ -1443,6 +1466,8 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)
1443 | if (atomic_read(&dentry->d_count) > 1) { | 1466 | if (atomic_read(&dentry->d_count) > 1) { |
1444 | spin_unlock(&dentry->d_lock); | 1467 | spin_unlock(&dentry->d_lock); |
1445 | spin_unlock(&dcache_lock); | 1468 | spin_unlock(&dcache_lock); |
1469 | /* Start asynchronous writeout of the inode */ | ||
1470 | write_inode_now(dentry->d_inode, 0); | ||
1446 | error = nfs_sillyrename(dir, dentry); | 1471 | error = nfs_sillyrename(dir, dentry); |
1447 | unlock_kernel(); | 1472 | unlock_kernel(); |
1448 | return error; | 1473 | return error; |
@@ -1684,7 +1709,7 @@ out:
1684 | if (!error) { | 1709 | if (!error) { |
1685 | d_move(old_dentry, new_dentry); | 1710 | d_move(old_dentry, new_dentry); |
1686 | nfs_renew_times(new_dentry); | 1711 | nfs_renew_times(new_dentry); |
1687 | nfs_set_verifier(new_dentry, nfs_save_change_attribute(new_dir)); | 1712 | nfs_refresh_verifier(new_dentry, nfs_save_change_attribute(new_dir)); |
1688 | } | 1713 | } |
1689 | 1714 | ||
1690 | /* new dentry created? */ | 1715 | /* new dentry created? */ |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index bd21d7fde650..b1c98ea39b72 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -309,7 +309,8 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
309 | 309 | ||
310 | rpc_execute(&data->task); | 310 | rpc_execute(&data->task); |
311 | 311 | ||
312 | dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n", | 312 | dprintk("NFS: %5u initiated direct read call " |
313 | "(req %s/%Ld, %zu bytes @ offset %Lu)\n", | ||
313 | data->task.tk_pid, | 314 | data->task.tk_pid, |
314 | inode->i_sb->s_id, | 315 | inode->i_sb->s_id, |
315 | (long long)NFS_FILEID(inode), | 316 | (long long)NFS_FILEID(inode), |
@@ -639,7 +640,8 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
639 | 640 | ||
640 | rpc_execute(&data->task); | 641 | rpc_execute(&data->task); |
641 | 642 | ||
642 | dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n", | 643 | dprintk("NFS: %5u initiated direct write call " |
644 | "(req %s/%Ld, %zu bytes @ offset %Lu)\n", | ||
643 | data->task.tk_pid, | 645 | data->task.tk_pid, |
644 | inode->i_sb->s_id, | 646 | inode->i_sb->s_id, |
645 | (long long)NFS_FILEID(inode), | 647 | (long long)NFS_FILEID(inode), |
@@ -797,7 +799,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
797 | const char __user *buf = iov[0].iov_base; | 799 | const char __user *buf = iov[0].iov_base; |
798 | size_t count = iov[0].iov_len; | 800 | size_t count = iov[0].iov_len; |
799 | 801 | ||
800 | dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n", | 802 | dprintk("nfs: direct write(%s/%s, %lu@%Ld)\n", |
801 | file->f_path.dentry->d_parent->d_name.name, | 803 | file->f_path.dentry->d_parent->d_name.name, |
802 | file->f_path.dentry->d_name.name, | 804 | file->f_path.dentry->d_name.name, |
803 | (unsigned long) count, (long long) pos); | 805 | (unsigned long) count, (long long) pos); |
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 8391bd7a83ce..6ef268f7c300 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -135,17 +135,15 @@ int nfs4_path_walk(struct nfs_server *server,
135 | struct nfs_fh lastfh; | 135 | struct nfs_fh lastfh; |
136 | struct qstr name; | 136 | struct qstr name; |
137 | int ret; | 137 | int ret; |
138 | //int referral_count = 0; | ||
139 | 138 | ||
140 | dprintk("--> nfs4_path_walk(,,%s)\n", path); | 139 | dprintk("--> nfs4_path_walk(,,%s)\n", path); |
141 | 140 | ||
142 | fsinfo.fattr = &fattr; | 141 | fsinfo.fattr = &fattr; |
143 | nfs_fattr_init(&fattr); | 142 | nfs_fattr_init(&fattr); |
144 | 143 | ||
145 | if (*path++ != '/') { | 144 | /* Eat leading slashes */ |
146 | dprintk("nfs4_get_root: Path does not begin with a slash\n"); | 145 | while (*path == '/') |
147 | return -EINVAL; | 146 | path++; |
148 | } | ||
149 | 147 | ||
150 | /* Start by getting the root filehandle from the server */ | 148 | /* Start by getting the root filehandle from the server */ |
151 | ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo); | 149 | ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo); |
@@ -160,6 +158,7 @@ int nfs4_path_walk(struct nfs_server *server,
160 | return -ENOTDIR; | 158 | return -ENOTDIR; |
161 | } | 159 | } |
162 | 160 | ||
161 | /* FIXME: It is quite valid for the server to return a referral here */ | ||
163 | if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) { | 162 | if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) { |
164 | printk(KERN_ERR "nfs4_get_root:" | 163 | printk(KERN_ERR "nfs4_get_root:" |
165 | " getroot obtained referral\n"); | 164 | " getroot obtained referral\n"); |
@@ -187,6 +186,7 @@ eat_dot_dir:
187 | goto eat_dot_dir; | 186 | goto eat_dot_dir; |
188 | } | 187 | } |
189 | 188 | ||
189 | /* FIXME: Why shouldn't the user be able to use ".." in the path? */ | ||
190 | if (path[0] == '.' && path[1] == '.' && (path[2] == '/' || !path[2]) | 190 | if (path[0] == '.' && path[1] == '.' && (path[2] == '/' || !path[2]) |
191 | ) { | 191 | ) { |
192 | printk(KERN_ERR "nfs4_get_root:" | 192 | printk(KERN_ERR "nfs4_get_root:" |
@@ -212,6 +212,7 @@ eat_dot_dir:
212 | return -ENOTDIR; | 212 | return -ENOTDIR; |
213 | } | 213 | } |
214 | 214 | ||
215 | /* FIXME: Referrals are quite valid here too */ | ||
215 | if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) { | 216 | if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) { |
216 | printk(KERN_ERR "nfs4_get_root:" | 217 | printk(KERN_ERR "nfs4_get_root:" |
217 | " lookupfh obtained referral\n"); | 218 | " lookupfh obtained referral\n"); |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d83498282837..af53c02f473b 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -65,13 +65,18 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
65 | 65 | ||
66 | int nfs_write_inode(struct inode *inode, int sync) | 66 | int nfs_write_inode(struct inode *inode, int sync) |
67 | { | 67 | { |
68 | int flags = sync ? FLUSH_SYNC : 0; | ||
69 | int ret; | 68 | int ret; |
70 | 69 | ||
71 | ret = nfs_commit_inode(inode, flags); | 70 | if (sync) { |
72 | if (ret < 0) | 71 | ret = filemap_fdatawait(inode->i_mapping); |
73 | return ret; | 72 | if (ret == 0) |
74 | return 0; | 73 | ret = nfs_commit_inode(inode, FLUSH_SYNC); |
74 | } else | ||
75 | ret = nfs_commit_inode(inode, 0); | ||
76 | if (ret >= 0) | ||
77 | return 0; | ||
78 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
79 | return ret; | ||
75 | } | 80 | } |
76 | 81 | ||
77 | void nfs_clear_inode(struct inode *inode) | 82 | void nfs_clear_inode(struct inode *inode) |
@@ -235,6 +240,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
235 | 240 | ||
236 | if (inode->i_state & I_NEW) { | 241 | if (inode->i_state & I_NEW) { |
237 | struct nfs_inode *nfsi = NFS_I(inode); | 242 | struct nfs_inode *nfsi = NFS_I(inode); |
243 | unsigned long now = jiffies; | ||
238 | 244 | ||
239 | /* We set i_ino for the few things that still rely on it, | 245 | /* We set i_ino for the few things that still rely on it, |
240 | * such as stat(2) */ | 246 | * such as stat(2) */ |
@@ -271,7 +277,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
271 | init_special_inode(inode, inode->i_mode, fattr->rdev); | 277 | init_special_inode(inode, inode->i_mode, fattr->rdev); |
272 | 278 | ||
273 | nfsi->read_cache_jiffies = fattr->time_start; | 279 | nfsi->read_cache_jiffies = fattr->time_start; |
274 | nfsi->last_updated = jiffies; | 280 | nfsi->last_updated = now; |
281 | nfsi->cache_change_attribute = now; | ||
275 | inode->i_atime = fattr->atime; | 282 | inode->i_atime = fattr->atime; |
276 | inode->i_mtime = fattr->mtime; | 283 | inode->i_mtime = fattr->mtime; |
277 | inode->i_ctime = fattr->ctime; | 284 | inode->i_ctime = fattr->ctime; |
@@ -290,7 +297,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
290 | inode->i_blocks = fattr->du.nfs2.blocks; | 297 | inode->i_blocks = fattr->du.nfs2.blocks; |
291 | } | 298 | } |
292 | nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); | 299 | nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); |
293 | nfsi->attrtimeo_timestamp = jiffies; | 300 | nfsi->attrtimeo_timestamp = now; |
294 | memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); | 301 | memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); |
295 | nfsi->access_cache = RB_ROOT; | 302 | nfsi->access_cache = RB_ROOT; |
296 | 303 | ||
@@ -783,20 +790,21 @@ void nfs_end_data_update(struct inode *inode)
783 | static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) | 790 | static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) |
784 | { | 791 | { |
785 | struct nfs_inode *nfsi = NFS_I(inode); | 792 | struct nfs_inode *nfsi = NFS_I(inode); |
793 | unsigned long now = jiffies; | ||
786 | 794 | ||
787 | /* If we have atomic WCC data, we may update some attributes */ | 795 | /* If we have atomic WCC data, we may update some attributes */ |
788 | if ((fattr->valid & NFS_ATTR_WCC) != 0) { | 796 | if ((fattr->valid & NFS_ATTR_WCC) != 0) { |
789 | if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) { | 797 | if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) { |
790 | memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); | 798 | memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); |
791 | nfsi->cache_change_attribute = jiffies; | 799 | nfsi->cache_change_attribute = now; |
792 | } | 800 | } |
793 | if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) { | 801 | if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) { |
794 | memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); | 802 | memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); |
795 | nfsi->cache_change_attribute = jiffies; | 803 | nfsi->cache_change_attribute = now; |
796 | } | 804 | } |
797 | if (inode->i_size == fattr->pre_size && nfsi->npages == 0) { | 805 | if (inode->i_size == fattr->pre_size && nfsi->npages == 0) { |
798 | inode->i_size = fattr->size; | 806 | inode->i_size = fattr->size; |
799 | nfsi->cache_change_attribute = jiffies; | 807 | nfsi->cache_change_attribute = now; |
800 | } | 808 | } |
801 | } | 809 | } |
802 | } | 810 | } |
@@ -934,6 +942,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
934 | struct nfs_inode *nfsi = NFS_I(inode); | 942 | struct nfs_inode *nfsi = NFS_I(inode); |
935 | loff_t cur_isize, new_isize; | 943 | loff_t cur_isize, new_isize; |
936 | unsigned int invalid = 0; | 944 | unsigned int invalid = 0; |
945 | unsigned long now = jiffies; | ||
937 | int data_stable; | 946 | int data_stable; |
938 | 947 | ||
939 | dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n", | 948 | dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n", |
@@ -959,7 +968,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
959 | * Update the read time so we don't revalidate too often. | 968 | * Update the read time so we don't revalidate too often. |
960 | */ | 969 | */ |
961 | nfsi->read_cache_jiffies = fattr->time_start; | 970 | nfsi->read_cache_jiffies = fattr->time_start; |
962 | nfsi->last_updated = jiffies; | 971 | nfsi->last_updated = now; |
972 | |||
973 | /* Fix a wraparound issue with nfsi->cache_change_attribute */ | ||
974 | if (time_before(now, nfsi->cache_change_attribute)) | ||
975 | nfsi->cache_change_attribute = now - 600*HZ; | ||
963 | 976 | ||
964 | /* Are we racing with known updates of the metadata on the server? */ | 977 | /* Are we racing with known updates of the metadata on the server? */ |
965 | data_stable = nfs_verify_change_attribute(inode, fattr->time_start); | 978 | data_stable = nfs_verify_change_attribute(inode, fattr->time_start); |
@@ -985,7 +998,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
985 | inode->i_size = new_isize; | 998 | inode->i_size = new_isize; |
986 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; | 999 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; |
987 | } | 1000 | } |
988 | nfsi->cache_change_attribute = jiffies; | 1001 | nfsi->cache_change_attribute = now; |
989 | dprintk("NFS: isize change on server for file %s/%ld\n", | 1002 | dprintk("NFS: isize change on server for file %s/%ld\n", |
990 | inode->i_sb->s_id, inode->i_ino); | 1003 | inode->i_sb->s_id, inode->i_ino); |
991 | } | 1004 | } |
@@ -996,14 +1009,14 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
996 | dprintk("NFS: mtime change on server for file %s/%ld\n", | 1009 | dprintk("NFS: mtime change on server for file %s/%ld\n", |
997 | inode->i_sb->s_id, inode->i_ino); | 1010 | inode->i_sb->s_id, inode->i_ino); |
998 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; | 1011 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; |
999 | nfsi->cache_change_attribute = jiffies; | 1012 | nfsi->cache_change_attribute = now; |
1000 | } | 1013 | } |
1001 | 1014 | ||
1002 | /* If ctime has changed we should definitely clear access+acl caches */ | 1015 | /* If ctime has changed we should definitely clear access+acl caches */ |
1003 | if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) { | 1016 | if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) { |
1004 | invalid |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; | 1017 | invalid |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; |
1005 | memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); | 1018 | memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); |
1006 | nfsi->cache_change_attribute = jiffies; | 1019 | nfsi->cache_change_attribute = now; |
1007 | } | 1020 | } |
1008 | memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); | 1021 | memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); |
1009 | 1022 | ||
@@ -1032,18 +1045,18 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1032 | inode->i_sb->s_id, inode->i_ino); | 1045 | inode->i_sb->s_id, inode->i_ino); |
1033 | nfsi->change_attr = fattr->change_attr; | 1046 | nfsi->change_attr = fattr->change_attr; |
1034 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; | 1047 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; |
1035 | nfsi->cache_change_attribute = jiffies; | 1048 | nfsi->cache_change_attribute = now; |
1036 | } | 1049 | } |
1037 | 1050 | ||
1038 | /* Update attrtimeo value if we're out of the unstable period */ | 1051 | /* Update attrtimeo value if we're out of the unstable period */ |
1039 | if (invalid & NFS_INO_INVALID_ATTR) { | 1052 | if (invalid & NFS_INO_INVALID_ATTR) { |
1040 | nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); | 1053 | nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); |
1041 | nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); | 1054 | nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); |
1042 | nfsi->attrtimeo_timestamp = jiffies; | 1055 | nfsi->attrtimeo_timestamp = now; |
1043 | } else if (time_after(jiffies, nfsi->attrtimeo_timestamp+nfsi->attrtimeo)) { | 1056 | } else if (time_after(now, nfsi->attrtimeo_timestamp+nfsi->attrtimeo)) { |
1044 | if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode)) | 1057 | if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode)) |
1045 | nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode); | 1058 | nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode); |
1046 | nfsi->attrtimeo_timestamp = jiffies; | 1059 | nfsi->attrtimeo_timestamp = now; |
1047 | } | 1060 | } |
1048 | /* Don't invalidate the data if we were to blame */ | 1061 | /* Don't invalidate the data if we were to blame */ |
1049 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) | 1062 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) |
@@ -1122,7 +1135,6 @@ struct inode *nfs_alloc_inode(struct super_block *sb)
1122 | return NULL; | 1135 | return NULL; |
1123 | nfsi->flags = 0UL; | 1136 | nfsi->flags = 0UL; |
1124 | nfsi->cache_validity = 0UL; | 1137 | nfsi->cache_validity = 0UL; |
1125 | nfsi->cache_change_attribute = jiffies; | ||
1126 | #ifdef CONFIG_NFS_V3_ACL | 1138 | #ifdef CONFIG_NFS_V3_ACL |
1127 | nfsi->acl_access = ERR_PTR(-EAGAIN); | 1139 | nfsi->acl_access = ERR_PTR(-EAGAIN); |
1128 | nfsi->acl_default = ERR_PTR(-EAGAIN); | 1140 | nfsi->acl_default = ERR_PTR(-EAGAIN); |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a28f6ce2e131..6610f2b02077 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -107,10 +107,6 @@ extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
107 | /* nfs4proc.c */ | 107 | /* nfs4proc.c */ |
108 | #ifdef CONFIG_NFS_V4 | 108 | #ifdef CONFIG_NFS_V4 |
109 | extern struct rpc_procinfo nfs4_procedures[]; | 109 | extern struct rpc_procinfo nfs4_procedures[]; |
110 | |||
111 | extern int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry, | ||
112 | struct nfs4_fs_locations *fs_locations, | ||
113 | struct page *page); | ||
114 | #endif | 110 | #endif |
115 | 111 | ||
116 | /* dir.c */ | 112 | /* dir.c */ |
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index acd8fe9762d3..7d0371e2bad5 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -253,29 +253,6 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page,
253 | return status; | 253 | return status; |
254 | } | 254 | } |
255 | 255 | ||
256 | static int nfs3_proc_read(struct nfs_read_data *rdata) | ||
257 | { | ||
258 | int flags = rdata->flags; | ||
259 | struct inode * inode = rdata->inode; | ||
260 | struct nfs_fattr * fattr = rdata->res.fattr; | ||
261 | struct rpc_message msg = { | ||
262 | .rpc_proc = &nfs3_procedures[NFS3PROC_READ], | ||
263 | .rpc_argp = &rdata->args, | ||
264 | .rpc_resp = &rdata->res, | ||
265 | .rpc_cred = rdata->cred, | ||
266 | }; | ||
267 | int status; | ||
268 | |||
269 | dprintk("NFS call read %d @ %Ld\n", rdata->args.count, | ||
270 | (long long) rdata->args.offset); | ||
271 | nfs_fattr_init(fattr); | ||
272 | status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); | ||
273 | if (status >= 0) | ||
274 | nfs_refresh_inode(inode, fattr); | ||
275 | dprintk("NFS reply read: %d\n", status); | ||
276 | return status; | ||
277 | } | ||
278 | |||
279 | /* | 256 | /* |
280 | * Create a regular file. | 257 | * Create a regular file. |
281 | * For now, we don't implement O_EXCL. | 258 | * For now, we don't implement O_EXCL. |
@@ -855,7 +832,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
855 | .lookup = nfs3_proc_lookup, | 832 | .lookup = nfs3_proc_lookup, |
856 | .access = nfs3_proc_access, | 833 | .access = nfs3_proc_access, |
857 | .readlink = nfs3_proc_readlink, | 834 | .readlink = nfs3_proc_readlink, |
858 | .read = nfs3_proc_read, | ||
859 | .create = nfs3_proc_create, | 835 | .create = nfs3_proc_create, |
860 | .remove = nfs3_proc_remove, | 836 | .remove = nfs3_proc_remove, |
861 | .unlink_setup = nfs3_proc_unlink_setup, | 837 | .unlink_setup = nfs3_proc_unlink_setup, |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index e2341766c4f0..cf3a17eb5c09 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -169,7 +169,7 @@ extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state);
169 | extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); | 169 | extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); |
170 | extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); | 170 | extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); |
171 | extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); | 171 | extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); |
172 | extern int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry, | 172 | extern int nfs4_proc_fs_locations(struct inode *dir, struct qstr *name, |
173 | struct nfs4_fs_locations *fs_locations, struct page *page); | 173 | struct nfs4_fs_locations *fs_locations, struct page *page); |
174 | 174 | ||
175 | extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; | 175 | extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; |
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index b872779d7cd5..dd5fef20c702 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -16,6 +16,7 @@
16 | #include <linux/vfs.h> | 16 | #include <linux/vfs.h> |
17 | #include <linux/inet.h> | 17 | #include <linux/inet.h> |
18 | #include "internal.h" | 18 | #include "internal.h" |
19 | #include "nfs4_fs.h" | ||
19 | 20 | ||
20 | #define NFSDBG_FACILITY NFSDBG_VFS | 21 | #define NFSDBG_FACILITY NFSDBG_VFS |
21 | 22 | ||
@@ -130,7 +131,6 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
130 | .authflavor = NFS_SB(mnt_parent->mnt_sb)->client->cl_auth->au_flavor, | 131 | .authflavor = NFS_SB(mnt_parent->mnt_sb)->client->cl_auth->au_flavor, |
131 | }; | 132 | }; |
132 | char *page = NULL, *page2 = NULL; | 133 | char *page = NULL, *page2 = NULL; |
133 | char *devname; | ||
134 | int loc, s, error; | 134 | int loc, s, error; |
135 | 135 | ||
136 | if (locations == NULL || locations->nlocations <= 0) | 136 | if (locations == NULL || locations->nlocations <= 0) |
@@ -154,12 +154,6 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
154 | goto out; | 154 | goto out; |
155 | } | 155 | } |
156 | 156 | ||
157 | devname = nfs_devname(mnt_parent, dentry, page, PAGE_SIZE); | ||
158 | if (IS_ERR(devname)) { | ||
159 | mnt = (struct vfsmount *)devname; | ||
160 | goto out; | ||
161 | } | ||
162 | |||
163 | loc = 0; | 157 | loc = 0; |
164 | while (loc < locations->nlocations && IS_ERR(mnt)) { | 158 | while (loc < locations->nlocations && IS_ERR(mnt)) { |
165 | const struct nfs4_fs_location *location = &locations->locations[loc]; | 159 | const struct nfs4_fs_location *location = &locations->locations[loc]; |
@@ -194,7 +188,11 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
194 | addr.sin_port = htons(NFS_PORT); | 188 | addr.sin_port = htons(NFS_PORT); |
195 | mountdata.addr = &addr; | 189 | mountdata.addr = &addr; |
196 | 190 | ||
197 | mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, devname, &mountdata); | 191 | snprintf(page, PAGE_SIZE, "%s:%s", |
192 | mountdata.hostname, | ||
193 | mountdata.mnt_path); | ||
194 | |||
195 | mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, page, &mountdata); | ||
198 | if (!IS_ERR(mnt)) { | 196 | if (!IS_ERR(mnt)) { |
199 | break; | 197 | break; |
200 | } | 198 | } |
@@ -242,7 +240,7 @@ struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentr
242 | dprintk("%s: getting locations for %s/%s\n", | 240 | dprintk("%s: getting locations for %s/%s\n", |
243 | __FUNCTION__, parent->d_name.name, dentry->d_name.name); | 241 | __FUNCTION__, parent->d_name.name, dentry->d_name.name); |
244 | 242 | ||
245 | err = nfs4_proc_fs_locations(parent->d_inode, dentry, fs_locations, page); | 243 | err = nfs4_proc_fs_locations(parent->d_inode, &dentry->d_name, fs_locations, page); |
246 | dput(parent); | 244 | dput(parent); |
247 | if (err != 0 || | 245 | if (err != 0 || |
248 | fs_locations->nlocations <= 0 || | 246 | fs_locations->nlocations <= 0 || |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1daee65b517e..f52cf5c33c6c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1140,7 +1140,6 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
1140 | break; | 1140 | break; |
1141 | case -NFS4ERR_STALE_STATEID: | 1141 | case -NFS4ERR_STALE_STATEID: |
1142 | case -NFS4ERR_EXPIRED: | 1142 | case -NFS4ERR_EXPIRED: |
1143 | nfs4_schedule_state_recovery(server->nfs_client); | ||
1144 | break; | 1143 | break; |
1145 | default: | 1144 | default: |
1146 | if (nfs4_async_handle_error(task, server) == -EAGAIN) { | 1145 | if (nfs4_async_handle_error(task, server) == -EAGAIN) { |
@@ -1424,7 +1423,6 @@ static int nfs4_get_referral(struct inode *dir, struct qstr *name, struct nfs_fa
1424 | int status = -ENOMEM; | 1423 | int status = -ENOMEM; |
1425 | struct page *page = NULL; | 1424 | struct page *page = NULL; |
1426 | struct nfs4_fs_locations *locations = NULL; | 1425 | struct nfs4_fs_locations *locations = NULL; |
1427 | struct dentry dentry = {}; | ||
1428 | 1426 | ||
1429 | page = alloc_page(GFP_KERNEL); | 1427 | page = alloc_page(GFP_KERNEL); |
1430 | if (page == NULL) | 1428 | if (page == NULL) |
@@ -1433,9 +1431,7 @@ static int nfs4_get_referral(struct inode *dir, struct qstr *name, struct nfs_fa
1433 | if (locations == NULL) | 1431 | if (locations == NULL) |
1434 | goto out; | 1432 | goto out; |
1435 | 1433 | ||
1436 | dentry.d_name.name = name->name; | 1434 | status = nfs4_proc_fs_locations(dir, name, locations, page); |
1437 | dentry.d_name.len = name->len; | ||
1438 | status = nfs4_proc_fs_locations(dir, &dentry, locations, page); | ||
1439 | if (status != 0) | 1435 | if (status != 0) |
1440 | goto out; | 1436 | goto out; |
1441 | /* Make sure server returned a different fsid for the referral */ | 1437 | /* Make sure server returned a different fsid for the referral */ |
@@ -1737,44 +1733,6 @@ static int nfs4_proc_readlink(struct inode *inode, struct page *page,
1737 | return err; | 1733 | return err; |
1738 | } | 1734 | } |
1739 | 1735 | ||
1740 | static int _nfs4_proc_read(struct nfs_read_data *rdata) | ||
1741 | { | ||
1742 | int flags = rdata->flags; | ||
1743 | struct inode *inode = rdata->inode; | ||
1744 | struct nfs_fattr *fattr = rdata->res.fattr; | ||
1745 | struct nfs_server *server = NFS_SERVER(inode); | ||
1746 | struct rpc_message msg = { | ||
1747 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ], | ||
1748 | .rpc_argp = &rdata->args, | ||
1749 | .rpc_resp = &rdata->res, | ||
1750 | .rpc_cred = rdata->cred, | ||
1751 | }; | ||
1752 | unsigned long timestamp = jiffies; | ||
1753 | int status; | ||
1754 | |||
1755 | dprintk("NFS call read %d @ %Ld\n", rdata->args.count, | ||
1756 | (long long) rdata->args.offset); | ||
1757 | |||
1758 | nfs_fattr_init(fattr); | ||
1759 | status = rpc_call_sync(server->client, &msg, flags); | ||
1760 | if (!status) | ||
1761 | renew_lease(server, timestamp); | ||
1762 | dprintk("NFS reply read: %d\n", status); | ||
1763 | return status; | ||
1764 | } | ||
1765 | |||
1766 | static int nfs4_proc_read(struct nfs_read_data *rdata) | ||
1767 | { | ||
1768 | struct nfs4_exception exception = { }; | ||
1769 | int err; | ||
1770 | do { | ||
1771 | err = nfs4_handle_exception(NFS_SERVER(rdata->inode), | ||
1772 | _nfs4_proc_read(rdata), | ||
1773 | &exception); | ||
1774 | } while (exception.retry); | ||
1775 | return err; | ||
1776 | } | ||
1777 | |||
1778 | /* | 1736 | /* |
1779 | * Got race? | 1737 | * Got race? |
1780 | * We will need to arrange for the VFS layer to provide an atomic open. | 1738 | * We will need to arrange for the VFS layer to provide an atomic open. |
@@ -2753,11 +2711,15 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
2753 | 2711 | ||
2754 | might_sleep(); | 2712 | might_sleep(); |
2755 | 2713 | ||
2714 | rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_); | ||
2715 | |||
2756 | rpc_clnt_sigmask(clnt, &oldset); | 2716 | rpc_clnt_sigmask(clnt, &oldset); |
2757 | res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER, | 2717 | res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER, |
2758 | nfs4_wait_bit_interruptible, | 2718 | nfs4_wait_bit_interruptible, |
2759 | TASK_INTERRUPTIBLE); | 2719 | TASK_INTERRUPTIBLE); |
2760 | rpc_clnt_sigunmask(clnt, &oldset); | 2720 | rpc_clnt_sigunmask(clnt, &oldset); |
2721 | |||
2722 | rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_); | ||
2761 | return res; | 2723 | return res; |
2762 | } | 2724 | } |
2763 | 2725 | ||
@@ -2996,7 +2958,6 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
2996 | switch (err) { | 2958 | switch (err) { |
2997 | case -NFS4ERR_STALE_STATEID: | 2959 | case -NFS4ERR_STALE_STATEID: |
2998 | case -NFS4ERR_EXPIRED: | 2960 | case -NFS4ERR_EXPIRED: |
2999 | nfs4_schedule_state_recovery(server->nfs_client); | ||
3000 | case 0: | 2961 | case 0: |
3001 | return 0; | 2962 | return 0; |
3002 | } | 2963 | } |
@@ -3150,12 +3111,10 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
3150 | break; | 3111 | break; |
3151 | case -NFS4ERR_STALE_STATEID: | 3112 | case -NFS4ERR_STALE_STATEID: |
3152 | case -NFS4ERR_EXPIRED: | 3113 | case -NFS4ERR_EXPIRED: |
3153 | nfs4_schedule_state_recovery(calldata->server->nfs_client); | ||
3154 | break; | 3114 | break; |
3155 | default: | 3115 | default: |
3156 | if (nfs4_async_handle_error(task, calldata->server) == -EAGAIN) { | 3116 | if (nfs4_async_handle_error(task, calldata->server) == -EAGAIN) |
3157 | rpc_restart_call(task); | 3117 | rpc_restart_call(task); |
3158 | } | ||
3159 | } | 3118 | } |
3160 | } | 3119 | } |
3161 | 3120 | ||
@@ -3585,7 +3544,7 @@ ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
3585 | return len; | 3544 | return len; |
3586 | } | 3545 | } |
3587 | 3546 | ||
3588 | int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry, | 3547 | int nfs4_proc_fs_locations(struct inode *dir, struct qstr *name, |
3589 | struct nfs4_fs_locations *fs_locations, struct page *page) | 3548 | struct nfs4_fs_locations *fs_locations, struct page *page) |
3590 | { | 3549 | { |
3591 | struct nfs_server *server = NFS_SERVER(dir); | 3550 | struct nfs_server *server = NFS_SERVER(dir); |
@@ -3595,7 +3554,7 @@ int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry,
3595 | }; | 3554 | }; |
3596 | struct nfs4_fs_locations_arg args = { | 3555 | struct nfs4_fs_locations_arg args = { |
3597 | .dir_fh = NFS_FH(dir), | 3556 | .dir_fh = NFS_FH(dir), |
3598 | .name = &dentry->d_name, | 3557 | .name = name, |
3599 | .page = page, | 3558 | .page = page, |
3600 | .bitmask = bitmask, | 3559 | .bitmask = bitmask, |
3601 | }; | 3560 | }; |
@@ -3607,7 +3566,7 @@ int nfs4_proc_fs_locations(struct inode *dir, struct dentry *dentry,
3607 | int status; | 3566 | int status; |
3608 | 3567 | ||
3609 | dprintk("%s: start\n", __FUNCTION__); | 3568 | dprintk("%s: start\n", __FUNCTION__); |
3610 | fs_locations->fattr.valid = 0; | 3569 | nfs_fattr_init(&fs_locations->fattr); |
3611 | fs_locations->server = server; | 3570 | fs_locations->server = server; |
3612 | fs_locations->nlocations = 0; | 3571 | fs_locations->nlocations = 0; |
3613 | status = rpc_call_sync(server->client, &msg, 0); | 3572 | status = rpc_call_sync(server->client, &msg, 0); |
@@ -3646,7 +3605,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
3646 | .lookup = nfs4_proc_lookup, | 3605 | .lookup = nfs4_proc_lookup, |
3647 | .access = nfs4_proc_access, | 3606 | .access = nfs4_proc_access, |
3648 | .readlink = nfs4_proc_readlink, | 3607 | .readlink = nfs4_proc_readlink, |
3649 | .read = nfs4_proc_read, | ||
3650 | .create = nfs4_proc_create, | 3608 | .create = nfs4_proc_create, |
3651 | .remove = nfs4_proc_remove, | 3609 | .remove = nfs4_proc_remove, |
3652 | .unlink_setup = nfs4_proc_unlink_setup, | 3610 | .unlink_setup = nfs4_proc_unlink_setup, |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 0cf3fa312a33..f02d522fd788 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -387,8 +387,10 @@ static int nfs4_stat_to_errno(int);
387 | decode_putfh_maxsz + \ | 387 | decode_putfh_maxsz + \ |
388 | op_decode_hdr_maxsz + 12) | 388 | op_decode_hdr_maxsz + 12) |
389 | #define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \ | 389 | #define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \ |
390 | encode_putfh_maxsz + \ | ||
390 | encode_getattr_maxsz) | 391 | encode_getattr_maxsz) |
391 | #define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \ | 392 | #define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \ |
393 | decode_putfh_maxsz + \ | ||
392 | decode_getattr_maxsz) | 394 | decode_getattr_maxsz) |
393 | #define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \ | 395 | #define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \ |
394 | encode_putfh_maxsz + \ | 396 | encode_putfh_maxsz + \ |
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 560536ad74a4..1dcf56de9482 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -186,35 +186,6 @@ static int nfs_proc_readlink(struct inode *inode, struct page *page,
186 | return status; | 186 | return status; |
187 | } | 187 | } |
188 | 188 | ||
189 | static int nfs_proc_read(struct nfs_read_data *rdata) | ||
190 | { | ||
191 | int flags = rdata->flags; | ||
192 | struct inode * inode = rdata->inode; | ||
193 | struct nfs_fattr * fattr = rdata->res.fattr; | ||
194 | struct rpc_message msg = { | ||
195 | .rpc_proc = &nfs_procedures[NFSPROC_READ], | ||
196 | .rpc_argp = &rdata->args, | ||
197 | .rpc_resp = &rdata->res, | ||
198 | .rpc_cred = rdata->cred, | ||
199 | }; | ||
200 | int status; | ||
201 | |||
202 | dprintk("NFS call read %d @ %Ld\n", rdata->args.count, | ||
203 | (long long) rdata->args.offset); | ||
204 | nfs_fattr_init(fattr); | ||
205 | status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); | ||
206 | if (status >= 0) { | ||
207 | nfs_refresh_inode(inode, fattr); | ||
208 | /* Emulate the eof flag, which isn't normally needed in NFSv2 | ||
209 | * as it is guaranteed to always return the file attributes | ||
210 | */ | ||
211 | if (rdata->args.offset + rdata->args.count >= fattr->size) | ||
212 | rdata->res.eof = 1; | ||
213 | } | ||
214 | dprintk("NFS reply read: %d\n", status); | ||
215 | return status; | ||
216 | } | ||
217 | |||
218 | static int | 189 | static int |
219 | nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, | 190 | nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, |
220 | int flags, struct nameidata *nd) | 191 | int flags, struct nameidata *nd) |
@@ -666,7 +637,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
666 | .lookup = nfs_proc_lookup, | 637 | .lookup = nfs_proc_lookup, |
667 | .access = NULL, /* access */ | 638 | .access = NULL, /* access */ |
668 | .readlink = nfs_proc_readlink, | 639 | .readlink = nfs_proc_readlink, |
669 | .read = nfs_proc_read, | ||
670 | .create = nfs_proc_create, | 640 | .create = nfs_proc_create, |
671 | .remove = nfs_proc_remove, | 641 | .remove = nfs_proc_remove, |
672 | .unlink_setup = nfs_proc_unlink_setup, | 642 | .unlink_setup = nfs_proc_unlink_setup, |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a9c26521a9e2..6ab4d5a9edf2 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -5,14 +5,6 @@
5 | * | 5 | * |
6 | * Partial copy of Linus' read cache modifications to fs/nfs/file.c | 6 | * Partial copy of Linus' read cache modifications to fs/nfs/file.c |
7 | * modified for async RPC by okir@monad.swb.de | 7 | * modified for async RPC by okir@monad.swb.de |
8 | * | ||
9 | * We do an ugly hack here in order to return proper error codes to the | ||
10 | * user program when a read request failed: since generic_file_read | ||
11 | * only checks the return value of inode->i_op->readpage() which is always 0 | ||
12 | * for async RPC, we set the error bit of the page to 1 when an error occurs, | ||
13 | * and make nfs_readpage transmit requests synchronously when encountering this. | ||
14 | * This is only a small problem, though, since we now retry all operations | ||
15 | * within the RPC code when root squashing is suspected. | ||
16 | */ | 8 | */ |
17 | 9 | ||
18 | #include <linux/time.h> | 10 | #include <linux/time.h> |
@@ -122,93 +114,6 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
122 | } | 114 | } |
123 | } | 115 | } |
124 | 116 | ||
125 | /* | ||
126 | * Read a page synchronously. | ||
127 | */ | ||
128 | static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode, | ||
129 | struct page *page) | ||
130 | { | ||
131 | unsigned int rsize = NFS_SERVER(inode)->rsize; | ||
132 | unsigned int count = PAGE_CACHE_SIZE; | ||
133 | int result = -ENOMEM; | ||
134 | struct nfs_read_data *rdata; | ||
135 | |||
136 | rdata = nfs_readdata_alloc(count); | ||
137 | if (!rdata) | ||
138 | goto out_unlock; | ||
139 | |||
140 | memset(rdata, 0, sizeof(*rdata)); | ||
141 | rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0); | ||
142 | rdata->cred = ctx->cred; | ||
143 | rdata->inode = inode; | ||
144 | INIT_LIST_HEAD(&rdata->pages); | ||
145 | rdata->args.fh = NFS_FH(inode); | ||
146 | rdata->args.context = ctx; | ||
147 | rdata->args.pages = &page; | ||
148 | rdata->args.pgbase = 0UL; | ||
149 | rdata->args.count = rsize; | ||
150 | rdata->res.fattr = &rdata->fattr; | ||
151 | |||
152 | dprintk("NFS: nfs_readpage_sync(%p)\n", page); | ||
153 | |||
154 | /* | ||
155 | * This works now because the socket layer never tries to DMA | ||
156 | * into this buffer directly. | ||
157 | */ | ||
158 | do { | ||
159 | if (count < rsize) | ||
160 | rdata->args.count = count; | ||
161 | rdata->res.count = rdata->args.count; | ||
162 | rdata->args.offset = page_offset(page) + rdata->args.pgbase; | ||
163 | |||
164 | dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n", | ||
165 | NFS_SERVER(inode)->nfs_client->cl_hostname, | ||
166 | inode->i_sb->s_id, | ||
167 | (long long)NFS_FILEID(inode), | ||
168 | (unsigned long long)rdata->args.pgbase, | ||
169 | rdata->args.count); | ||
170 | |||
171 | lock_kernel(); | ||
172 | result = NFS_PROTO(inode)->read(rdata); | ||
173 | unlock_kernel(); | ||
174 | |||
175 | /* | ||
176 | * Even if we had a partial success we can't mark the page | ||
177 | * cache valid. | ||
178 | */ | ||
179 | if (result < 0) { | ||
180 | if (result == -EISDIR) | ||
181 | result = -EINVAL; | ||
182 | goto io_error; | ||
183 | } | ||
184 | count -= result; | ||
185 | rdata->args.pgbase += result; | ||
186 | nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, result); | ||
187 | |||
188 | /* Note: result == 0 should only happen if we're caching | ||
189 | * a write that extends the file and punches a hole. | ||
190 | */ | ||
191 | if (rdata->res.eof != 0 || result == 0) | ||
192 | break; | ||
193 | } while (count); | ||
194 | spin_lock(&inode->i_lock); | ||
195 | NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; | ||
196 | spin_unlock(&inode->i_lock); | ||
197 | |||
198 | if (rdata->res.eof || rdata->res.count == rdata->args.count) { | ||
199 | SetPageUptodate(page); | ||
200 | if (rdata->res.eof && count != 0) | ||
201 | memclear_highpage_flush(page, rdata->args.pgbase, count); | ||
202 | } | ||
203 | result = 0; | ||
204 | |||
205 | io_error: | ||
206 | nfs_readdata_free(rdata); | ||
207 | out_unlock: | ||
208 | unlock_page(page); | ||
209 | return result; | ||
210 | } | ||
211 | |||
212 | static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, | 117 | static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, |
213 | struct page *page) | 118 | struct page *page) |
214 | { | 119 | { |
@@ -278,7 +183,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
278 | 183 | ||
279 | data->task.tk_cookie = (unsigned long)inode; | 184 | data->task.tk_cookie = (unsigned long)inode; |
280 | 185 | ||
281 | dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n", | 186 | dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n", |
282 | data->task.tk_pid, | 187 | data->task.tk_pid, |
283 | inode->i_sb->s_id, | 188 | inode->i_sb->s_id, |
284 | (long long)NFS_FILEID(inode), | 189 | (long long)NFS_FILEID(inode), |
@@ -452,7 +357,7 @@ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
452 | { | 357 | { |
453 | int status; | 358 | int status; |
454 | 359 | ||
455 | dprintk("%s: %4d, (status %d)\n", __FUNCTION__, task->tk_pid, | 360 | dprintk("NFS: %s: %5u, (status %d)\n", __FUNCTION__, task->tk_pid, |
456 | task->tk_status); | 361 | task->tk_status); |
457 | 362 | ||
458 | status = NFS_PROTO(data->inode)->read_done(task, data); | 363 | status = NFS_PROTO(data->inode)->read_done(task, data); |
@@ -621,15 +526,9 @@ int nfs_readpage(struct file *file, struct page *page)
621 | } else | 526 | } else |
622 | ctx = get_nfs_open_context((struct nfs_open_context *) | 527 | ctx = get_nfs_open_context((struct nfs_open_context *) |
623 | file->private_data); | 528 | file->private_data); |
624 | if (!IS_SYNC(inode)) { | ||
625 | error = nfs_readpage_async(ctx, inode, page); | ||
626 | goto out; | ||
627 | } | ||
628 | 529 | ||
629 | error = nfs_readpage_sync(ctx, inode, page); | 530 | error = nfs_readpage_async(ctx, inode, page); |
630 | if (error < 0 && IS_SWAPFILE(inode)) | 531 | |
631 | printk("Aiee.. nfs swap-in of page failed!\n"); | ||
632 | out: | ||
633 | put_nfs_open_context(ctx); | 532 | put_nfs_open_context(ctx); |
634 | return error; | 533 | return error; |
635 | 534 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index baa28860ad27..bb516a2cfbaf 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1045,7 +1045,7 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
1045 | nfs4_fill_super(s); | 1045 | nfs4_fill_super(s); |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | mntroot = nfs4_get_root(s, data->fh); | 1048 | mntroot = nfs4_get_root(s, &mntfh); |
1049 | if (IS_ERR(mntroot)) { | 1049 | if (IS_ERR(mntroot)) { |
1050 | error = PTR_ERR(mntroot); | 1050 | error = PTR_ERR(mntroot); |
1051 | goto error_splat_super; | 1051 | goto error_splat_super; |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 345492e78643..febdade91670 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1,47 +1,7 @@
1 | /* | 1 | /* |
2 | * linux/fs/nfs/write.c | 2 | * linux/fs/nfs/write.c |
3 | * | 3 | * |
4 | * Writing file data over NFS. | 4 | * Write file data over NFS. |
5 | * | ||
6 | * We do it like this: When a (user) process wishes to write data to an | ||
7 | * NFS file, a write request is allocated that contains the RPC task data | ||
8 | * plus some info on the page to be written, and added to the inode's | ||
9 | * write chain. If the process writes past the end of the page, an async | ||
10 | * RPC call to write the page is scheduled immediately; otherwise, the call | ||
11 | * is delayed for a few seconds. | ||
12 | * | ||
13 | * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE. | ||
14 | * | ||
15 | * Write requests are kept on the inode's writeback list. Each entry in | ||
16 | * that list references the page (portion) to be written. When the | ||
17 | * cache timeout has expired, the RPC task is woken up, and tries to | ||
18 | * lock the page. As soon as it manages to do so, the request is moved | ||
19 | * from the writeback list to the writelock list. | ||
20 | * | ||
21 | * Note: we must make sure never to confuse the inode passed in the | ||
22 | * write_page request with the one in page->inode. As far as I understand | ||
23 | * it, these are different when doing a swap-out. | ||
24 | * | ||
25 | * To understand everything that goes on here and in the NFS read code, | ||
26 | * one should be aware that a page is locked in exactly one of the following | ||
27 | * cases: | ||
28 | * | ||
29 | * - A write request is in progress. | ||
30 | * - A user process is in generic_file_write/nfs_update_page | ||
31 | * - A user process is in generic_file_read | ||
32 | * | ||
33 | * Also note that because of the way pages are invalidated in | ||
34 | * nfs_revalidate_inode, the following assertions hold: | ||
35 | * | ||
36 | * - If a page is dirty, there will be no read requests (a page will | ||
37 | * not be re-read unless invalidated by nfs_revalidate_inode). | ||
38 | * - If the page is not uptodate, there will be no pending write | ||
39 | * requests, and no process will be in nfs_update_page. | ||
40 | * | ||
41 | * FIXME: Interaction with the vmscan routines is not optimal yet. | ||
42 | * Either vmscan must be made nfs-savvy, or we need a different page | ||
43 | * reclaim concept that supports something like FS-independent | ||
44 | * buffer_heads with a b_ops-> field. | ||
45 | * | 5 | * |
46 | * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> | 6 | * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> |
47 | */ | 7 | */ |
@@ -79,7 +39,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*,
79 | unsigned int, unsigned int); | 39 | unsigned int, unsigned int); |
80 | static void nfs_mark_request_dirty(struct nfs_page *req); | 40 | static void nfs_mark_request_dirty(struct nfs_page *req); |
81 | static int nfs_wait_on_write_congestion(struct address_space *, int); | 41 | static int nfs_wait_on_write_congestion(struct address_space *, int); |
82 | static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int); | ||
83 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); | 42 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); |
84 | static const struct rpc_call_ops nfs_write_partial_ops; | 43 | static const struct rpc_call_ops nfs_write_partial_ops; |
85 | static const struct rpc_call_ops nfs_write_full_ops; | 44 | static const struct rpc_call_ops nfs_write_full_ops; |
@@ -194,6 +153,13 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
194 | i_size_write(inode, end); | 153 | i_size_write(inode, end); |
195 | } | 154 | } |
196 | 155 | ||
156 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ | ||
157 | static void nfs_set_pageerror(struct page *page) | ||
158 | { | ||
159 | SetPageError(page); | ||
160 | nfs_zap_mapping(page->mapping->host, page->mapping); | ||
161 | } | ||
162 | |||
197 | /* We can set the PG_uptodate flag if we see that a write request | 163 | /* We can set the PG_uptodate flag if we see that a write request |
198 | * covers the full page. | 164 | * covers the full page. |
199 | */ | 165 | */ |
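
Note on the helper added above: nfs_set_pageerror() consolidates the error handling that the write completion paths below currently open-code as a ClearPageUptodate()/SetPageError() pair, and additionally zaps the cached mapping. Roughly, the converted call sites (see the nfs_updatepage and nfs_writeback_done_* hunks further down) go from the first form to the second — a sketch assembled from this patch, not a complete function:

        /* old: each error path cleared the uptodate bit and set PG_error by hand */
        if (task->tk_status < 0) {
                ClearPageUptodate(page);
                SetPageError(page);
                req->wb_context->error = task->tk_status;
        }

        /* new: one helper marks the page bad and invalidates the cached mapping */
        if (task->tk_status < 0) {
                nfs_set_pageerror(page);        /* SetPageError() + nfs_zap_mapping() */
                req->wb_context->error = task->tk_status;
        }
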
@@ -323,7 +289,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc | |||
323 | err = 0; | 289 | err = 0; |
324 | out: | 290 | out: |
325 | if (!wbc->for_writepages) | 291 | if (!wbc->for_writepages) |
326 | nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc)); | 292 | nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc)); |
327 | return err; | 293 | return err; |
328 | } | 294 | } |
329 | 295 | ||
@@ -360,14 +326,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) | |||
360 | if (err < 0) | 326 | if (err < 0) |
361 | goto out; | 327 | goto out; |
362 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); | 328 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); |
363 | if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) { | 329 | err = 0; |
364 | err = nfs_wait_on_requests(inode, 0, 0); | ||
365 | if (err < 0) | ||
366 | goto out; | ||
367 | } | ||
368 | err = nfs_commit_inode(inode, wb_priority(wbc)); | ||
369 | if (err > 0) | ||
370 | err = 0; | ||
371 | out: | 330 | out: |
372 | clear_bit(BDI_write_congested, &bdi->state); | 331 | clear_bit(BDI_write_congested, &bdi->state); |
373 | wake_up_all(&nfs_write_congestion); | 332 | wake_up_all(&nfs_write_congestion); |
@@ -516,17 +475,6 @@ static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_st | |||
516 | return res; | 475 | return res; |
517 | } | 476 | } |
518 | 477 | ||
519 | static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages) | ||
520 | { | ||
521 | struct nfs_inode *nfsi = NFS_I(inode); | ||
522 | int ret; | ||
523 | |||
524 | spin_lock(&nfsi->req_lock); | ||
525 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); | ||
526 | spin_unlock(&nfsi->req_lock); | ||
527 | return ret; | ||
528 | } | ||
529 | |||
530 | static void nfs_cancel_dirty_list(struct list_head *head) | 478 | static void nfs_cancel_dirty_list(struct list_head *head) |
531 | { | 479 | { |
532 | struct nfs_page *req; | 480 | struct nfs_page *req; |
@@ -773,7 +721,7 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
773 | dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n", | 721 | dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n", |
774 | status, (long long)i_size_read(inode)); | 722 | status, (long long)i_size_read(inode)); |
775 | if (status < 0) | 723 | if (status < 0) |
776 | ClearPageUptodate(page); | 724 | nfs_set_pageerror(page); |
777 | return status; | 725 | return status; |
778 | } | 726 | } |
779 | 727 | ||
@@ -852,7 +800,8 @@ static void nfs_write_rpcsetup(struct nfs_page *req, | |||
852 | data->task.tk_priority = flush_task_priority(how); | 800 | data->task.tk_priority = flush_task_priority(how); |
853 | data->task.tk_cookie = (unsigned long)inode; | 801 | data->task.tk_cookie = (unsigned long)inode; |
854 | 802 | ||
855 | dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n", | 803 | dprintk("NFS: %5u initiated write call " |
804 | "(req %s/%Ld, %u bytes @ offset %Lu)\n", | ||
856 | data->task.tk_pid, | 805 | data->task.tk_pid, |
857 | inode->i_sb->s_id, | 806 | inode->i_sb->s_id, |
858 | (long long)NFS_FILEID(inode), | 807 | (long long)NFS_FILEID(inode), |
@@ -1034,8 +983,7 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) | |||
1034 | return; | 983 | return; |
1035 | 984 | ||
1036 | if (task->tk_status < 0) { | 985 | if (task->tk_status < 0) { |
1037 | ClearPageUptodate(page); | 986 | nfs_set_pageerror(page); |
1038 | SetPageError(page); | ||
1039 | req->wb_context->error = task->tk_status; | 987 | req->wb_context->error = task->tk_status; |
1040 | dprintk(", error = %d\n", task->tk_status); | 988 | dprintk(", error = %d\n", task->tk_status); |
1041 | } else { | 989 | } else { |
@@ -1092,8 +1040,7 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) | |||
1092 | (long long)req_offset(req)); | 1040 | (long long)req_offset(req)); |
1093 | 1041 | ||
1094 | if (task->tk_status < 0) { | 1042 | if (task->tk_status < 0) { |
1095 | ClearPageUptodate(page); | 1043 | nfs_set_pageerror(page); |
1096 | SetPageError(page); | ||
1097 | req->wb_context->error = task->tk_status; | 1044 | req->wb_context->error = task->tk_status; |
1098 | end_page_writeback(page); | 1045 | end_page_writeback(page); |
1099 | nfs_inode_remove_request(req); | 1046 | nfs_inode_remove_request(req); |
@@ -1134,7 +1081,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) | |||
1134 | struct nfs_writeres *resp = &data->res; | 1081 | struct nfs_writeres *resp = &data->res; |
1135 | int status; | 1082 | int status; |
1136 | 1083 | ||
1137 | dprintk("NFS: %4d nfs_writeback_done (status %d)\n", | 1084 | dprintk("NFS: %5u nfs_writeback_done (status %d)\n", |
1138 | task->tk_pid, task->tk_status); | 1085 | task->tk_pid, task->tk_status); |
1139 | 1086 | ||
1140 | /* | 1087 | /* |
@@ -1250,7 +1197,7 @@ static void nfs_commit_rpcsetup(struct list_head *head, | |||
1250 | data->task.tk_priority = flush_task_priority(how); | 1197 | data->task.tk_priority = flush_task_priority(how); |
1251 | data->task.tk_cookie = (unsigned long)inode; | 1198 | data->task.tk_cookie = (unsigned long)inode; |
1252 | 1199 | ||
1253 | dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid); | 1200 | dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); |
1254 | } | 1201 | } |
1255 | 1202 | ||
1256 | /* | 1203 | /* |
@@ -1291,7 +1238,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) | |||
1291 | struct nfs_write_data *data = calldata; | 1238 | struct nfs_write_data *data = calldata; |
1292 | struct nfs_page *req; | 1239 | struct nfs_page *req; |
1293 | 1240 | ||
1294 | dprintk("NFS: %4d nfs_commit_done (status %d)\n", | 1241 | dprintk("NFS: %5u nfs_commit_done (status %d)\n", |
1295 | task->tk_pid, task->tk_status); | 1242 | task->tk_pid, task->tk_status); |
1296 | 1243 | ||
1297 | /* Call the NFS version-specific code */ | 1244 | /* Call the NFS version-specific code */ |
@@ -1516,6 +1463,8 @@ int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) | |||
1516 | if (ret < 0) | 1463 | if (ret < 0) |
1517 | goto out; | 1464 | goto out; |
1518 | } | 1465 | } |
1466 | if (!PagePrivate(page)) | ||
1467 | return 0; | ||
1519 | ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); | 1468 | ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); |
1520 | if (ret >= 0) | 1469 | if (ret >= 0) |
1521 | return 0; | 1470 | return 0; |
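
The added PagePrivate() test gives nfs_wb_page_priority() an early exit when the page has no write request attached (NFS appears to use PG_private to mark pages that carry a pending nfs_page request), so the sync-and-wait step only runs when there is actually something to wait for. The resulting tail of the function, consolidated from the hunk above:

        /* nothing attached to the page -> no request to flush or wait for */
        if (!PagePrivate(page))
                return 0;
        ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
        if (ret >= 0)
                return 0;
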
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ed0f2eac8f50..47aaa2c66738 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -11,14 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/magic.h> | 12 | #include <linux/magic.h> |
13 | 13 | ||
14 | /* | ||
15 | * Enable debugging support for nfs client. | ||
16 | * Requires RPC_DEBUG. | ||
17 | */ | ||
18 | #ifdef RPC_DEBUG | ||
19 | # define NFS_DEBUG | ||
20 | #endif | ||
21 | |||
22 | /* Default timeout values */ | 14 | /* Default timeout values */ |
23 | #define NFS_MAX_UDP_TIMEOUT (60*HZ) | 15 | #define NFS_MAX_UDP_TIMEOUT (60*HZ) |
24 | #define NFS_MAX_TCP_TIMEOUT (600*HZ) | 16 | #define NFS_MAX_TCP_TIMEOUT (600*HZ) |
@@ -567,6 +559,15 @@ extern void * nfs_root_data(void); | |||
567 | #define NFSDBG_ALL 0xFFFF | 559 | #define NFSDBG_ALL 0xFFFF |
568 | 560 | ||
569 | #ifdef __KERNEL__ | 561 | #ifdef __KERNEL__ |
562 | |||
563 | /* | ||
564 | * Enable debugging support for nfs client. | ||
565 | * Requires RPC_DEBUG. | ||
566 | */ | ||
567 | #ifdef RPC_DEBUG | ||
568 | # define NFS_DEBUG | ||
569 | #endif | ||
570 | |||
570 | # undef ifdebug | 571 | # undef ifdebug |
571 | # ifdef NFS_DEBUG | 572 | # ifdef NFS_DEBUG |
572 | # define ifdebug(fac) if (unlikely(nfs_debug & NFSDBG_##fac)) | 573 | # define ifdebug(fac) if (unlikely(nfs_debug & NFSDBG_##fac)) |
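
Net effect of the two nfs_fs.h hunks above: the NFS_DEBUG definition moves from the top of the header into the existing #ifdef __KERNEL__ block, so it sits next to the ifdebug() machinery and is no longer visible to userspace includes of the header. The resulting layout, consolidated from the right-hand column:

#ifdef __KERNEL__

/*
 * Enable debugging support for nfs client.
 * Requires RPC_DEBUG.
 */
#ifdef RPC_DEBUG
# define NFS_DEBUG
#endif

# undef ifdebug
# ifdef NFS_DEBUG
#  define ifdebug(fac)          if (unlikely(nfs_debug & NFSDBG_##fac))
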
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 30d7116d601e..10c26ed0db71 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -784,7 +784,6 @@ struct nfs_rpc_ops { | |||
784 | int (*access) (struct inode *, struct nfs_access_entry *); | 784 | int (*access) (struct inode *, struct nfs_access_entry *); |
785 | int (*readlink)(struct inode *, struct page *, unsigned int, | 785 | int (*readlink)(struct inode *, struct page *, unsigned int, |
786 | unsigned int); | 786 | unsigned int); |
787 | int (*read) (struct nfs_read_data *); | ||
788 | int (*create) (struct inode *, struct dentry *, | 787 | int (*create) (struct inode *, struct dentry *, |
789 | struct iattr *, int, struct nameidata *); | 788 | struct iattr *, int, struct nameidata *); |
790 | int (*remove) (struct inode *, struct qstr *); | 789 | int (*remove) (struct inode *, struct qstr *); |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index a1be89deb3af..c7a78eef2b4f 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -40,6 +40,7 @@ struct rpc_clnt { | |||
40 | 40 | ||
41 | unsigned int cl_softrtry : 1,/* soft timeouts */ | 41 | unsigned int cl_softrtry : 1,/* soft timeouts */ |
42 | cl_intr : 1,/* interruptible */ | 42 | cl_intr : 1,/* interruptible */ |
43 | cl_discrtry : 1,/* disconnect before retry */ | ||
43 | cl_autobind : 1,/* use getport() */ | 44 | cl_autobind : 1,/* use getport() */ |
44 | cl_oneshot : 1,/* dispose after use */ | 45 | cl_oneshot : 1,/* dispose after use */ |
45 | cl_dead : 1;/* abandoned */ | 46 | cl_dead : 1;/* abandoned */ |
@@ -111,6 +112,7 @@ struct rpc_create_args { | |||
111 | #define RPC_CLNT_CREATE_ONESHOT (1UL << 3) | 112 | #define RPC_CLNT_CREATE_ONESHOT (1UL << 3) |
112 | #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 4) | 113 | #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 4) |
113 | #define RPC_CLNT_CREATE_NOPING (1UL << 5) | 114 | #define RPC_CLNT_CREATE_NOPING (1UL << 5) |
115 | #define RPC_CLNT_CREATE_DISCRTRY (1UL << 6) | ||
114 | 116 | ||
115 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); | 117 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); |
116 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, | 118 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, |
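
The new cl_discrtry bit and RPC_CLNT_CREATE_DISCRTRY create flag let an RPC client ask that the transport be disconnected before a retransmission; the flag is translated into cl_discrtry by rpc_create() in the net/sunrpc/clnt.c hunk below. An illustrative caller might look like the following sketch — everything other than .flags and the cl_discrtry comment is a placeholder for whatever the caller already passes, not something this patch adds:

        struct rpc_create_args args = {
                /* transport, server address, program/version etc. as before */
                .authflavor     = RPC_AUTH_UNIX,                /* placeholder */
                .flags          = RPC_CLNT_CREATE_DISCRTRY,
        };
        struct rpc_clnt *clnt = rpc_create(&args);

        if (IS_ERR(clnt))
                return PTR_ERR(clnt);
        /* clnt->cl_discrtry is now set: disconnect before each retransmit */
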
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 8b6ce60ea057..de9fc576fa1c 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -253,7 +253,7 @@ void rpc_put_task(struct rpc_task *); | |||
253 | void rpc_exit_task(struct rpc_task *); | 253 | void rpc_exit_task(struct rpc_task *); |
254 | void rpc_release_calldata(const struct rpc_call_ops *, void *); | 254 | void rpc_release_calldata(const struct rpc_call_ops *, void *); |
255 | void rpc_killall_tasks(struct rpc_clnt *); | 255 | void rpc_killall_tasks(struct rpc_clnt *); |
256 | int rpc_execute(struct rpc_task *); | 256 | void rpc_execute(struct rpc_task *); |
257 | void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); | 257 | void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); |
258 | void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); | 258 | void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); |
259 | void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, | 259 | void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 76f7eac4082d..9527f2bb1744 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -181,7 +181,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) | |||
181 | struct rpc_cred *cred; | 181 | struct rpc_cred *cred; |
182 | int i; | 182 | int i; |
183 | 183 | ||
184 | dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); | 184 | dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); |
185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { | 185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { |
186 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { | 186 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { |
187 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); | 187 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); |
@@ -267,7 +267,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags) | |||
267 | }; | 267 | }; |
268 | struct rpc_cred *ret; | 268 | struct rpc_cred *ret; |
269 | 269 | ||
270 | dprintk("RPC: looking up %s cred\n", | 270 | dprintk("RPC: looking up %s cred\n", |
271 | auth->au_ops->au_name); | 271 | auth->au_ops->au_name); |
272 | get_group_info(acred.group_info); | 272 | get_group_info(acred.group_info); |
273 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); | 273 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); |
@@ -287,7 +287,7 @@ rpcauth_bindcred(struct rpc_task *task) | |||
287 | struct rpc_cred *ret; | 287 | struct rpc_cred *ret; |
288 | int flags = 0; | 288 | int flags = 0; |
289 | 289 | ||
290 | dprintk("RPC: %4d looking up %s cred\n", | 290 | dprintk("RPC: %5u looking up %s cred\n", |
291 | task->tk_pid, task->tk_auth->au_ops->au_name); | 291 | task->tk_pid, task->tk_auth->au_ops->au_name); |
292 | get_group_info(acred.group_info); | 292 | get_group_info(acred.group_info); |
293 | if (task->tk_flags & RPC_TASK_ROOTCREDS) | 293 | if (task->tk_flags & RPC_TASK_ROOTCREDS) |
@@ -304,8 +304,9 @@ rpcauth_bindcred(struct rpc_task *task) | |||
304 | void | 304 | void |
305 | rpcauth_holdcred(struct rpc_task *task) | 305 | rpcauth_holdcred(struct rpc_task *task) |
306 | { | 306 | { |
307 | dprintk("RPC: %4d holding %s cred %p\n", | 307 | dprintk("RPC: %5u holding %s cred %p\n", |
308 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); | 308 | task->tk_pid, task->tk_auth->au_ops->au_name, |
309 | task->tk_msg.rpc_cred); | ||
309 | if (task->tk_msg.rpc_cred) | 310 | if (task->tk_msg.rpc_cred) |
310 | get_rpccred(task->tk_msg.rpc_cred); | 311 | get_rpccred(task->tk_msg.rpc_cred); |
311 | } | 312 | } |
@@ -324,7 +325,7 @@ rpcauth_unbindcred(struct rpc_task *task) | |||
324 | { | 325 | { |
325 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 326 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
326 | 327 | ||
327 | dprintk("RPC: %4d releasing %s cred %p\n", | 328 | dprintk("RPC: %5u releasing %s cred %p\n", |
328 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 329 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
329 | 330 | ||
330 | put_rpccred(cred); | 331 | put_rpccred(cred); |
@@ -336,7 +337,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p) | |||
336 | { | 337 | { |
337 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 338 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
338 | 339 | ||
339 | dprintk("RPC: %4d marshaling %s cred %p\n", | 340 | dprintk("RPC: %5u marshaling %s cred %p\n", |
340 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 341 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
341 | 342 | ||
342 | return cred->cr_ops->crmarshal(task, p); | 343 | return cred->cr_ops->crmarshal(task, p); |
@@ -347,7 +348,7 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p) | |||
347 | { | 348 | { |
348 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 349 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
349 | 350 | ||
350 | dprintk("RPC: %4d validating %s cred %p\n", | 351 | dprintk("RPC: %5u validating %s cred %p\n", |
351 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 352 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
352 | 353 | ||
353 | return cred->cr_ops->crvalidate(task, p); | 354 | return cred->cr_ops->crvalidate(task, p); |
@@ -359,7 +360,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, | |||
359 | { | 360 | { |
360 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 361 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
361 | 362 | ||
362 | dprintk("RPC: %4d using %s cred %p to wrap rpc data\n", | 363 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", |
363 | task->tk_pid, cred->cr_ops->cr_name, cred); | 364 | task->tk_pid, cred->cr_ops->cr_name, cred); |
364 | if (cred->cr_ops->crwrap_req) | 365 | if (cred->cr_ops->crwrap_req) |
365 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); | 366 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); |
@@ -373,7 +374,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
373 | { | 374 | { |
374 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 375 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
375 | 376 | ||
376 | dprintk("RPC: %4d using %s cred %p to unwrap rpc data\n", | 377 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", |
377 | task->tk_pid, cred->cr_ops->cr_name, cred); | 378 | task->tk_pid, cred->cr_ops->cr_name, cred); |
378 | if (cred->cr_ops->crunwrap_resp) | 379 | if (cred->cr_ops->crunwrap_resp) |
379 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, | 380 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, |
@@ -388,7 +389,7 @@ rpcauth_refreshcred(struct rpc_task *task) | |||
388 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 389 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
389 | int err; | 390 | int err; |
390 | 391 | ||
391 | dprintk("RPC: %4d refreshing %s cred %p\n", | 392 | dprintk("RPC: %5u refreshing %s cred %p\n", |
392 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 393 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
393 | 394 | ||
394 | err = cred->cr_ops->crrefresh(task); | 395 | err = cred->cr_ops->crrefresh(task); |
@@ -400,7 +401,7 @@ rpcauth_refreshcred(struct rpc_task *task) | |||
400 | void | 401 | void |
401 | rpcauth_invalcred(struct rpc_task *task) | 402 | rpcauth_invalcred(struct rpc_task *task) |
402 | { | 403 | { |
403 | dprintk("RPC: %4d invalidating %s cred %p\n", | 404 | dprintk("RPC: %5u invalidating %s cred %p\n", |
404 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); | 405 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); |
405 | spin_lock(&rpc_credcache_lock); | 406 | spin_lock(&rpc_credcache_lock); |
406 | if (task->tk_msg.rpc_cred) | 407 | if (task->tk_msg.rpc_cred) |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 718fb94ad0f7..4e4ccc5b6fea 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -241,7 +241,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct | |||
241 | } | 241 | } |
242 | return q; | 242 | return q; |
243 | err: | 243 | err: |
244 | dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p)); | 244 | dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p)); |
245 | return p; | 245 | return p; |
246 | } | 246 | } |
247 | 247 | ||
@@ -276,10 +276,10 @@ __gss_find_upcall(struct gss_auth *gss_auth, uid_t uid) | |||
276 | if (pos->uid != uid) | 276 | if (pos->uid != uid) |
277 | continue; | 277 | continue; |
278 | atomic_inc(&pos->count); | 278 | atomic_inc(&pos->count); |
279 | dprintk("RPC: gss_find_upcall found msg %p\n", pos); | 279 | dprintk("RPC: gss_find_upcall found msg %p\n", pos); |
280 | return pos; | 280 | return pos; |
281 | } | 281 | } |
282 | dprintk("RPC: gss_find_upcall found nothing\n"); | 282 | dprintk("RPC: gss_find_upcall found nothing\n"); |
283 | return NULL; | 283 | return NULL; |
284 | } | 284 | } |
285 | 285 | ||
@@ -393,7 +393,8 @@ gss_refresh_upcall(struct rpc_task *task) | |||
393 | struct gss_upcall_msg *gss_msg; | 393 | struct gss_upcall_msg *gss_msg; |
394 | int err = 0; | 394 | int err = 0; |
395 | 395 | ||
396 | dprintk("RPC: %4u gss_refresh_upcall for uid %u\n", task->tk_pid, cred->cr_uid); | 396 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, |
397 | cred->cr_uid); | ||
397 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); | 398 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); |
398 | if (IS_ERR(gss_msg)) { | 399 | if (IS_ERR(gss_msg)) { |
399 | err = PTR_ERR(gss_msg); | 400 | err = PTR_ERR(gss_msg); |
@@ -413,8 +414,8 @@ gss_refresh_upcall(struct rpc_task *task) | |||
413 | spin_unlock(&gss_auth->lock); | 414 | spin_unlock(&gss_auth->lock); |
414 | gss_release_msg(gss_msg); | 415 | gss_release_msg(gss_msg); |
415 | out: | 416 | out: |
416 | dprintk("RPC: %4u gss_refresh_upcall for uid %u result %d\n", task->tk_pid, | 417 | dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", |
417 | cred->cr_uid, err); | 418 | task->tk_pid, cred->cr_uid, err); |
418 | return err; | 419 | return err; |
419 | } | 420 | } |
420 | 421 | ||
@@ -426,7 +427,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | |||
426 | DEFINE_WAIT(wait); | 427 | DEFINE_WAIT(wait); |
427 | int err = 0; | 428 | int err = 0; |
428 | 429 | ||
429 | dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid); | 430 | dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid); |
430 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); | 431 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); |
431 | if (IS_ERR(gss_msg)) { | 432 | if (IS_ERR(gss_msg)) { |
432 | err = PTR_ERR(gss_msg); | 433 | err = PTR_ERR(gss_msg); |
@@ -454,7 +455,8 @@ out_intr: | |||
454 | finish_wait(&gss_msg->waitqueue, &wait); | 455 | finish_wait(&gss_msg->waitqueue, &wait); |
455 | gss_release_msg(gss_msg); | 456 | gss_release_msg(gss_msg); |
456 | out: | 457 | out: |
457 | dprintk("RPC: gss_create_upcall for uid %u result %d\n", cred->cr_uid, err); | 458 | dprintk("RPC: gss_create_upcall for uid %u result %d\n", |
459 | cred->cr_uid, err); | ||
458 | return err; | 460 | return err; |
459 | } | 461 | } |
460 | 462 | ||
@@ -546,14 +548,14 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
546 | } | 548 | } |
547 | gss_put_ctx(ctx); | 549 | gss_put_ctx(ctx); |
548 | kfree(buf); | 550 | kfree(buf); |
549 | dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen); | 551 | dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen); |
550 | return mlen; | 552 | return mlen; |
551 | err_put_ctx: | 553 | err_put_ctx: |
552 | gss_put_ctx(ctx); | 554 | gss_put_ctx(ctx); |
553 | err: | 555 | err: |
554 | kfree(buf); | 556 | kfree(buf); |
555 | out: | 557 | out: |
556 | dprintk("RPC: gss_pipe_downcall returning %d\n", err); | 558 | dprintk("RPC: gss_pipe_downcall returning %d\n", err); |
557 | return err; | 559 | return err; |
558 | } | 560 | } |
559 | 561 | ||
@@ -591,7 +593,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) | |||
591 | static unsigned long ratelimit; | 593 | static unsigned long ratelimit; |
592 | 594 | ||
593 | if (msg->errno < 0) { | 595 | if (msg->errno < 0) { |
594 | dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", | 596 | dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", |
595 | gss_msg); | 597 | gss_msg); |
596 | atomic_inc(&gss_msg->count); | 598 | atomic_inc(&gss_msg->count); |
597 | gss_unhash_msg(gss_msg); | 599 | gss_unhash_msg(gss_msg); |
@@ -618,7 +620,7 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
618 | struct rpc_auth * auth; | 620 | struct rpc_auth * auth; |
619 | int err = -ENOMEM; /* XXX? */ | 621 | int err = -ENOMEM; /* XXX? */ |
620 | 622 | ||
621 | dprintk("RPC: creating GSS authenticator for client %p\n",clnt); | 623 | dprintk("RPC: creating GSS authenticator for client %p\n", clnt); |
622 | 624 | ||
623 | if (!try_module_get(THIS_MODULE)) | 625 | if (!try_module_get(THIS_MODULE)) |
624 | return ERR_PTR(err); | 626 | return ERR_PTR(err); |
@@ -670,8 +672,8 @@ gss_destroy(struct rpc_auth *auth) | |||
670 | { | 672 | { |
671 | struct gss_auth *gss_auth; | 673 | struct gss_auth *gss_auth; |
672 | 674 | ||
673 | dprintk("RPC: destroying GSS authenticator %p flavor %d\n", | 675 | dprintk("RPC: destroying GSS authenticator %p flavor %d\n", |
674 | auth, auth->au_flavor); | 676 | auth, auth->au_flavor); |
675 | 677 | ||
676 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); | 678 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); |
677 | rpc_unlink(gss_auth->dentry); | 679 | rpc_unlink(gss_auth->dentry); |
@@ -689,7 +691,7 @@ gss_destroy(struct rpc_auth *auth) | |||
689 | static void | 691 | static void |
690 | gss_destroy_ctx(struct gss_cl_ctx *ctx) | 692 | gss_destroy_ctx(struct gss_cl_ctx *ctx) |
691 | { | 693 | { |
692 | dprintk("RPC: gss_destroy_ctx\n"); | 694 | dprintk("RPC: gss_destroy_ctx\n"); |
693 | 695 | ||
694 | if (ctx->gc_gss_ctx) | 696 | if (ctx->gc_gss_ctx) |
695 | gss_delete_sec_context(&ctx->gc_gss_ctx); | 697 | gss_delete_sec_context(&ctx->gc_gss_ctx); |
@@ -703,7 +705,7 @@ gss_destroy_cred(struct rpc_cred *rc) | |||
703 | { | 705 | { |
704 | struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base); | 706 | struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base); |
705 | 707 | ||
706 | dprintk("RPC: gss_destroy_cred \n"); | 708 | dprintk("RPC: gss_destroy_cred \n"); |
707 | 709 | ||
708 | if (cred->gc_ctx) | 710 | if (cred->gc_ctx) |
709 | gss_put_ctx(cred->gc_ctx); | 711 | gss_put_ctx(cred->gc_ctx); |
@@ -726,7 +728,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
726 | struct gss_cred *cred = NULL; | 728 | struct gss_cred *cred = NULL; |
727 | int err = -ENOMEM; | 729 | int err = -ENOMEM; |
728 | 730 | ||
729 | dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", | 731 | dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", |
730 | acred->uid, auth->au_flavor); | 732 | acred->uid, auth->au_flavor); |
731 | 733 | ||
732 | if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) | 734 | if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) |
@@ -745,7 +747,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
745 | return &cred->gc_base; | 747 | return &cred->gc_base; |
746 | 748 | ||
747 | out_err: | 749 | out_err: |
748 | dprintk("RPC: gss_create_cred failed with error %d\n", err); | 750 | dprintk("RPC: gss_create_cred failed with error %d\n", err); |
749 | return ERR_PTR(err); | 751 | return ERR_PTR(err); |
750 | } | 752 | } |
751 | 753 | ||
@@ -799,7 +801,7 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
799 | struct kvec iov; | 801 | struct kvec iov; |
800 | struct xdr_buf verf_buf; | 802 | struct xdr_buf verf_buf; |
801 | 803 | ||
802 | dprintk("RPC: %4u gss_marshal\n", task->tk_pid); | 804 | dprintk("RPC: %5u gss_marshal\n", task->tk_pid); |
803 | 805 | ||
804 | *p++ = htonl(RPC_AUTH_GSS); | 806 | *p++ = htonl(RPC_AUTH_GSS); |
805 | cred_len = p++; | 807 | cred_len = p++; |
@@ -865,7 +867,7 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
865 | u32 flav,len; | 867 | u32 flav,len; |
866 | u32 maj_stat; | 868 | u32 maj_stat; |
867 | 869 | ||
868 | dprintk("RPC: %4u gss_validate\n", task->tk_pid); | 870 | dprintk("RPC: %5u gss_validate\n", task->tk_pid); |
869 | 871 | ||
870 | flav = ntohl(*p++); | 872 | flav = ntohl(*p++); |
871 | if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) | 873 | if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) |
@@ -888,12 +890,12 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
888 | * calculate the length of the verifier: */ | 890 | * calculate the length of the verifier: */ |
889 | task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; | 891 | task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; |
890 | gss_put_ctx(ctx); | 892 | gss_put_ctx(ctx); |
891 | dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n", | 893 | dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", |
892 | task->tk_pid); | 894 | task->tk_pid); |
893 | return p + XDR_QUADLEN(len); | 895 | return p + XDR_QUADLEN(len); |
894 | out_bad: | 896 | out_bad: |
895 | gss_put_ctx(ctx); | 897 | gss_put_ctx(ctx); |
896 | dprintk("RPC: %4u gss_validate failed.\n", task->tk_pid); | 898 | dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid); |
897 | return NULL; | 899 | return NULL; |
898 | } | 900 | } |
899 | 901 | ||
@@ -1063,7 +1065,7 @@ gss_wrap_req(struct rpc_task *task, | |||
1063 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1065 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
1064 | int status = -EIO; | 1066 | int status = -EIO; |
1065 | 1067 | ||
1066 | dprintk("RPC: %4u gss_wrap_req\n", task->tk_pid); | 1068 | dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid); |
1067 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) { | 1069 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) { |
1068 | /* The spec seems a little ambiguous here, but I think that not | 1070 | /* The spec seems a little ambiguous here, but I think that not |
1069 | * wrapping context destruction requests makes the most sense. | 1071 | * wrapping context destruction requests makes the most sense. |
@@ -1086,7 +1088,7 @@ gss_wrap_req(struct rpc_task *task, | |||
1086 | } | 1088 | } |
1087 | out: | 1089 | out: |
1088 | gss_put_ctx(ctx); | 1090 | gss_put_ctx(ctx); |
1089 | dprintk("RPC: %4u gss_wrap_req returning %d\n", task->tk_pid, status); | 1091 | dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status); |
1090 | return status; | 1092 | return status; |
1091 | } | 1093 | } |
1092 | 1094 | ||
@@ -1192,7 +1194,7 @@ out_decode: | |||
1192 | status = decode(rqstp, p, obj); | 1194 | status = decode(rqstp, p, obj); |
1193 | out: | 1195 | out: |
1194 | gss_put_ctx(ctx); | 1196 | gss_put_ctx(ctx); |
1195 | dprintk("RPC: %4u gss_unwrap_resp returning %d\n", task->tk_pid, | 1197 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, |
1196 | status); | 1198 | status); |
1197 | return status; | 1199 | return status; |
1198 | } | 1200 | } |
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 0a9948de0992..f441aa0b26dc 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
@@ -66,8 +66,8 @@ krb5_encrypt( | |||
66 | goto out; | 66 | goto out; |
67 | 67 | ||
68 | if (crypto_blkcipher_ivsize(tfm) > 16) { | 68 | if (crypto_blkcipher_ivsize(tfm) > 16) { |
69 | dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n", | 69 | dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n", |
70 | crypto_blkcipher_ivsize(tfm)); | 70 | crypto_blkcipher_ivsize(tfm)); |
71 | goto out; | 71 | goto out; |
72 | } | 72 | } |
73 | 73 | ||
@@ -79,7 +79,7 @@ krb5_encrypt( | |||
79 | 79 | ||
80 | ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); | 80 | ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); |
81 | out: | 81 | out: |
82 | dprintk("RPC: krb5_encrypt returns %d\n",ret); | 82 | dprintk("RPC: krb5_encrypt returns %d\n", ret); |
83 | return ret; | 83 | return ret; |
84 | } | 84 | } |
85 | 85 | ||
@@ -102,7 +102,7 @@ krb5_decrypt( | |||
102 | goto out; | 102 | goto out; |
103 | 103 | ||
104 | if (crypto_blkcipher_ivsize(tfm) > 16) { | 104 | if (crypto_blkcipher_ivsize(tfm) > 16) { |
105 | dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n", | 105 | dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n", |
106 | crypto_blkcipher_ivsize(tfm)); | 106 | crypto_blkcipher_ivsize(tfm)); |
107 | goto out; | 107 | goto out; |
108 | } | 108 | } |
@@ -114,7 +114,7 @@ krb5_decrypt( | |||
114 | 114 | ||
115 | ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); | 115 | ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); |
116 | out: | 116 | out: |
117 | dprintk("RPC: gss_k5decrypt returns %d\n",ret); | 117 | dprintk("RPC: gss_k5decrypt returns %d\n",ret); |
118 | return ret; | 118 | return ret; |
119 | } | 119 | } |
120 | 120 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 05d4bee86fc0..7b1943217053 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -175,7 +175,8 @@ gss_import_sec_context_kerberos(const void *p, | |||
175 | } | 175 | } |
176 | 176 | ||
177 | ctx_id->internal_ctx_id = ctx; | 177 | ctx_id->internal_ctx_id = ctx; |
178 | dprintk("RPC: Successfully imported new context.\n"); | 178 | |
179 | dprintk("RPC: Successfully imported new context.\n"); | ||
179 | return 0; | 180 | return 0; |
180 | 181 | ||
181 | out_err_free_key2: | 182 | out_err_free_key2: |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index d0bb5064f8c5..a0d9faa59cb5 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c | |||
@@ -83,7 +83,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, | |||
83 | s32 now; | 83 | s32 now; |
84 | u32 seq_send; | 84 | u32 seq_send; |
85 | 85 | ||
86 | dprintk("RPC: gss_krb5_seal\n"); | 86 | dprintk("RPC: gss_krb5_seal\n"); |
87 | 87 | ||
88 | now = get_seconds(); | 88 | now = get_seconds(); |
89 | 89 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 3e315a68efaa..43f3421f1e6a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c | |||
@@ -70,7 +70,7 @@ krb5_get_seq_num(struct crypto_blkcipher *key, | |||
70 | s32 code; | 70 | s32 code; |
71 | unsigned char plain[8]; | 71 | unsigned char plain[8]; |
72 | 72 | ||
73 | dprintk("RPC: krb5_get_seq_num:\n"); | 73 | dprintk("RPC: krb5_get_seq_num:\n"); |
74 | 74 | ||
75 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) | 75 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) |
76 | return code; | 76 | return code; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 87f8977ccece..e30a993466bc 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c | |||
@@ -86,7 +86,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, | |||
86 | unsigned char *ptr = (unsigned char *)read_token->data; | 86 | unsigned char *ptr = (unsigned char *)read_token->data; |
87 | int bodysize; | 87 | int bodysize; |
88 | 88 | ||
89 | dprintk("RPC: krb5_read_token\n"); | 89 | dprintk("RPC: krb5_read_token\n"); |
90 | 90 | ||
91 | if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, | 91 | if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, |
92 | read_token->len)) | 92 | read_token->len)) |
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index fe25b3d898dc..42b3220bed39 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c | |||
@@ -129,7 +129,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, | |||
129 | struct page **tmp_pages; | 129 | struct page **tmp_pages; |
130 | u32 seq_send; | 130 | u32 seq_send; |
131 | 131 | ||
132 | dprintk("RPC: gss_wrap_kerberos\n"); | 132 | dprintk("RPC: gss_wrap_kerberos\n"); |
133 | 133 | ||
134 | now = get_seconds(); | 134 | now = get_seconds(); |
135 | 135 | ||
@@ -215,7 +215,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) | |||
215 | int data_len; | 215 | int data_len; |
216 | int blocksize; | 216 | int blocksize; |
217 | 217 | ||
218 | dprintk("RPC: gss_unwrap_kerberos\n"); | 218 | dprintk("RPC: gss_unwrap_kerberos\n"); |
219 | 219 | ||
220 | ptr = (u8 *)buf->head[0].iov_base + offset; | 220 | ptr = (u8 *)buf->head[0].iov_base + offset; |
221 | if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, | 221 | if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, |
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 3423890e4a30..26872517ccf3 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c | |||
@@ -113,7 +113,7 @@ gss_mech_register(struct gss_api_mech *gm) | |||
113 | spin_lock(®istered_mechs_lock); | 113 | spin_lock(®istered_mechs_lock); |
114 | list_add(&gm->gm_list, ®istered_mechs); | 114 | list_add(&gm->gm_list, ®istered_mechs); |
115 | spin_unlock(®istered_mechs_lock); | 115 | spin_unlock(®istered_mechs_lock); |
116 | dprintk("RPC: registered gss mechanism %s\n", gm->gm_name); | 116 | dprintk("RPC: registered gss mechanism %s\n", gm->gm_name); |
117 | return 0; | 117 | return 0; |
118 | } | 118 | } |
119 | 119 | ||
@@ -125,7 +125,7 @@ gss_mech_unregister(struct gss_api_mech *gm) | |||
125 | spin_lock(®istered_mechs_lock); | 125 | spin_lock(®istered_mechs_lock); |
126 | list_del(&gm->gm_list); | 126 | list_del(&gm->gm_list); |
127 | spin_unlock(®istered_mechs_lock); | 127 | spin_unlock(®istered_mechs_lock); |
128 | dprintk("RPC: unregistered gss mechanism %s\n", gm->gm_name); | 128 | dprintk("RPC: unregistered gss mechanism %s\n", gm->gm_name); |
129 | gss_mech_free(gm); | 129 | gss_mech_free(gm); |
130 | } | 130 | } |
131 | 131 | ||
@@ -298,7 +298,7 @@ gss_unwrap(struct gss_ctx *ctx_id, | |||
298 | u32 | 298 | u32 |
299 | gss_delete_sec_context(struct gss_ctx **context_handle) | 299 | gss_delete_sec_context(struct gss_ctx **context_handle) |
300 | { | 300 | { |
301 | dprintk("RPC: gss_delete_sec_context deleting %p\n", | 301 | dprintk("RPC: gss_delete_sec_context deleting %p\n", |
302 | *context_handle); | 302 | *context_handle); |
303 | 303 | ||
304 | if (!*context_handle) | 304 | if (!*context_handle) |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 8ef3f1c19435..7e15aa68ae64 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
@@ -97,7 +97,8 @@ gss_import_sec_context_spkm3(const void *p, size_t len, | |||
97 | if (IS_ERR(p)) | 97 | if (IS_ERR(p)) |
98 | goto out_err_free_ctx; | 98 | goto out_err_free_ctx; |
99 | if (version != 1) { | 99 | if (version != 1) { |
100 | dprintk("RPC: unknown spkm3 token format: obsolete nfs-utils?\n"); | 100 | dprintk("RPC: unknown spkm3 token format: " |
101 | "obsolete nfs-utils?\n"); | ||
101 | goto out_err_free_ctx; | 102 | goto out_err_free_ctx; |
102 | } | 103 | } |
103 | 104 | ||
@@ -138,7 +139,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len, | |||
138 | 139 | ||
139 | ctx_id->internal_ctx_id = ctx; | 140 | ctx_id->internal_ctx_id = ctx; |
140 | 141 | ||
141 | dprintk("Successfully imported new spkm context.\n"); | 142 | dprintk("RPC: Successfully imported new spkm context.\n"); |
142 | return 0; | 143 | return 0; |
143 | 144 | ||
144 | out_err_free_intg_key: | 145 | out_err_free_intg_key: |
@@ -183,7 +184,7 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx, | |||
183 | 184 | ||
184 | maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); | 185 | maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); |
185 | 186 | ||
186 | dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); | 187 | dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); |
187 | return maj_stat; | 188 | return maj_stat; |
188 | } | 189 | } |
189 | 190 | ||
@@ -197,7 +198,7 @@ gss_get_mic_spkm3(struct gss_ctx *ctx, | |||
197 | 198 | ||
198 | err = spkm3_make_token(sctx, message_buffer, | 199 | err = spkm3_make_token(sctx, message_buffer, |
199 | message_token, SPKM_MIC_TOK); | 200 | message_token, SPKM_MIC_TOK); |
200 | dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err); | 201 | dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err); |
201 | return err; | 202 | return err; |
202 | } | 203 | } |
203 | 204 | ||
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index b179d58c6249..104cbf4f769f 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c | |||
@@ -75,20 +75,21 @@ spkm3_make_token(struct spkm3_ctx *ctx, | |||
75 | now = jiffies; | 75 | now = jiffies; |
76 | 76 | ||
77 | if (ctx->ctx_id.len != 16) { | 77 | if (ctx->ctx_id.len != 16) { |
78 | dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", | 78 | dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", |
79 | ctx->ctx_id.len); | 79 | ctx->ctx_id.len); |
80 | goto out_err; | 80 | goto out_err; |
81 | } | 81 | } |
82 | 82 | ||
83 | if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) { | 83 | if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) { |
84 | dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm." | 84 | dprintk("RPC: gss_spkm3_seal: unsupported I-ALG " |
85 | "only support hmac-md5 I-ALG.\n"); | 85 | "algorithm. only support hmac-md5 I-ALG.\n"); |
86 | goto out_err; | 86 | goto out_err; |
87 | } else | 87 | } else |
88 | checksum_type = CKSUMTYPE_HMAC_MD5; | 88 | checksum_type = CKSUMTYPE_HMAC_MD5; |
89 | 89 | ||
90 | if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) { | 90 | if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) { |
91 | dprintk("RPC: gss_spkm3_seal: unsupported C-ALG algorithm\n"); | 91 | dprintk("RPC: gss_spkm3_seal: unsupported C-ALG " |
92 | "algorithm\n"); | ||
92 | goto out_err; | 93 | goto out_err; |
93 | } | 94 | } |
94 | 95 | ||
@@ -113,7 +114,8 @@ spkm3_make_token(struct spkm3_ctx *ctx, | |||
113 | 114 | ||
114 | spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit); | 115 | spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit); |
115 | } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */ | 116 | } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */ |
116 | dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK not supported\n"); | 117 | dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK " |
118 | "not supported\n"); | ||
117 | goto out_err; | 119 | goto out_err; |
118 | } | 120 | } |
119 | 121 | ||
@@ -153,7 +155,7 @@ make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header, | |||
153 | cksumname = "md5"; | 155 | cksumname = "md5"; |
154 | break; | 156 | break; |
155 | default: | 157 | default: |
156 | dprintk("RPC: spkm3_make_checksum:" | 158 | dprintk("RPC: spkm3_make_checksum:" |
157 | " unsupported checksum %d", cksumtype); | 159 | " unsupported checksum %d", cksumtype); |
158 | return GSS_S_FAILURE; | 160 | return GSS_S_FAILURE; |
159 | } | 161 | } |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c index 8400b621971e..6cdd241ad267 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_token.c +++ b/net/sunrpc/auth_gss/gss_spkm3_token.c | |||
@@ -209,7 +209,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck | |||
209 | 209 | ||
210 | /* spkm3 innercontext token preamble */ | 210 | /* spkm3 innercontext token preamble */ |
211 | if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) { | 211 | if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) { |
212 | dprintk("RPC: BAD SPKM ictoken preamble\n"); | 212 | dprintk("RPC: BAD SPKM ictoken preamble\n"); |
213 | goto out; | 213 | goto out; |
214 | } | 214 | } |
215 | 215 | ||
@@ -217,25 +217,25 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck | |||
217 | 217 | ||
218 | /* token type */ | 218 | /* token type */ |
219 | if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) { | 219 | if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) { |
220 | dprintk("RPC: BAD asn1 SPKM3 token type\n"); | 220 | dprintk("RPC: BAD asn1 SPKM3 token type\n"); |
221 | goto out; | 221 | goto out; |
222 | } | 222 | } |
223 | 223 | ||
224 | /* only support SPKM_MIC_TOK */ | 224 | /* only support SPKM_MIC_TOK */ |
225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { | 225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { |
226 | dprintk("RPC: ERROR unsupported SPKM3 token \n"); | 226 | dprintk("RPC: ERROR unsupported SPKM3 token \n"); |
227 | goto out; | 227 | goto out; |
228 | } | 228 | } |
229 | 229 | ||
230 | /* contextid */ | 230 | /* contextid */ |
231 | if (ptr[8] != 0x03) { | 231 | if (ptr[8] != 0x03) { |
232 | dprintk("RPC: BAD SPKM3 asn1 context-id type\n"); | 232 | dprintk("RPC: BAD SPKM3 asn1 context-id type\n"); |
233 | goto out; | 233 | goto out; |
234 | } | 234 | } |
235 | 235 | ||
236 | ctxelen = ptr[9]; | 236 | ctxelen = ptr[9]; |
237 | if (ctxelen > 17) { /* length includes asn1 zbit octet */ | 237 | if (ctxelen > 17) { /* length includes asn1 zbit octet */ |
238 | dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen); | 238 | dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen); |
239 | goto out; | 239 | goto out; |
240 | } | 240 | } |
241 | 241 | ||
@@ -251,7 +251,9 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck | |||
251 | */ | 251 | */ |
252 | 252 | ||
253 | if (*mic_hdrlen != 6 + ctxelen) { | 253 | if (*mic_hdrlen != 6 + ctxelen) { |
254 | dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only support default int-alg (should be absent) and do not support snd-seq\n", *mic_hdrlen); | 254 | dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only " |
255 | "support default int-alg (should be absent) " | ||
256 | "and do not support snd-seq\n", *mic_hdrlen); | ||
255 | goto out; | 257 | goto out; |
256 | } | 258 | } |
257 | /* checksum */ | 259 | /* checksum */ |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c index 35a1b34c4a1d..cc21ee860bb6 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c | |||
@@ -72,7 +72,7 @@ spkm3_read_token(struct spkm3_ctx *ctx, | |||
72 | /* decode the token */ | 72 | /* decode the token */ |
73 | 73 | ||
74 | if (toktype != SPKM_MIC_TOK) { | 74 | if (toktype != SPKM_MIC_TOK) { |
75 | dprintk("RPC: BAD SPKM3 token type: %d\n", toktype); | 75 | dprintk("RPC: BAD SPKM3 token type: %d\n", toktype); |
76 | goto out; | 76 | goto out; |
77 | } | 77 | } |
78 | 78 | ||
@@ -80,7 +80,7 @@ spkm3_read_token(struct spkm3_ctx *ctx, | |||
80 | goto out; | 80 | goto out; |
81 | 81 | ||
82 | if (*cksum++ != 0x03) { | 82 | if (*cksum++ != 0x03) { |
83 | dprintk("RPC: spkm3_read_token BAD checksum type\n"); | 83 | dprintk("RPC: spkm3_read_token BAD checksum type\n"); |
84 | goto out; | 84 | goto out; |
85 | } | 85 | } |
86 | md5elen = *cksum++; | 86 | md5elen = *cksum++; |
@@ -97,7 +97,8 @@ spkm3_read_token(struct spkm3_ctx *ctx, | |||
97 | */ | 97 | */ |
98 | ret = GSS_S_DEFECTIVE_TOKEN; | 98 | ret = GSS_S_DEFECTIVE_TOKEN; |
99 | if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) { | 99 | if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) { |
100 | dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm\n"); | 100 | dprintk("RPC: gss_spkm3_seal: unsupported I-ALG " |
101 | "algorithm\n"); | ||
101 | goto out; | 102 | goto out; |
102 | } | 103 | } |
103 | 104 | ||
@@ -113,7 +114,7 @@ spkm3_read_token(struct spkm3_ctx *ctx, | |||
113 | ret = GSS_S_BAD_SIG; | 114 | ret = GSS_S_BAD_SIG; |
114 | code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len); | 115 | code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len); |
115 | if (code) { | 116 | if (code) { |
116 | dprintk("RPC: bad MIC checksum\n"); | 117 | dprintk("RPC: bad MIC checksum\n"); |
117 | goto out; | 118 | goto out; |
118 | } | 119 | } |
119 | 120 | ||
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 8fde38ecaf21..db298b501c81 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -669,14 +669,14 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, | |||
669 | } | 669 | } |
670 | 670 | ||
671 | if (gc->gc_seq > MAXSEQ) { | 671 | if (gc->gc_seq > MAXSEQ) { |
672 | dprintk("RPC: svcauth_gss: discarding request with large sequence number %d\n", | 672 | dprintk("RPC: svcauth_gss: discarding request with " |
673 | gc->gc_seq); | 673 | "large sequence number %d\n", gc->gc_seq); |
674 | *authp = rpcsec_gsserr_ctxproblem; | 674 | *authp = rpcsec_gsserr_ctxproblem; |
675 | return SVC_DENIED; | 675 | return SVC_DENIED; |
676 | } | 676 | } |
677 | if (!gss_check_seq_num(rsci, gc->gc_seq)) { | 677 | if (!gss_check_seq_num(rsci, gc->gc_seq)) { |
678 | dprintk("RPC: svcauth_gss: discarding request with old sequence number %d\n", | 678 | dprintk("RPC: svcauth_gss: discarding request with " |
679 | gc->gc_seq); | 679 | "old sequence number %d\n", gc->gc_seq); |
680 | return SVC_DROP; | 680 | return SVC_DROP; |
681 | } | 681 | } |
682 | return SVC_OK; | 682 | return SVC_OK; |
@@ -958,7 +958,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
958 | __be32 *reject_stat = resv->iov_base + resv->iov_len; | 958 | __be32 *reject_stat = resv->iov_base + resv->iov_len; |
959 | int ret; | 959 | int ret; |
960 | 960 | ||
961 | dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",argv->iov_len); | 961 | dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n", |
962 | argv->iov_len); | ||
962 | 963 | ||
963 | *authp = rpc_autherr_badcred; | 964 | *authp = rpc_autherr_badcred; |
964 | if (!svcdata) | 965 | if (!svcdata) |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index f7f990c9afe2..4e7733aee36e 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -39,7 +39,8 @@ static struct rpc_credops unix_credops; | |||
39 | static struct rpc_auth * | 39 | static struct rpc_auth * |
40 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | 40 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) |
41 | { | 41 | { |
42 | dprintk("RPC: creating UNIX authenticator for client %p\n", clnt); | 42 | dprintk("RPC: creating UNIX authenticator for client %p\n", |
43 | clnt); | ||
43 | if (atomic_inc_return(&unix_auth.au_count) == 0) | 44 | if (atomic_inc_return(&unix_auth.au_count) == 0) |
44 | unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); | 45 | unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); |
45 | return &unix_auth; | 46 | return &unix_auth; |
@@ -48,7 +49,7 @@ unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
48 | static void | 49 | static void |
49 | unx_destroy(struct rpc_auth *auth) | 50 | unx_destroy(struct rpc_auth *auth) |
50 | { | 51 | { |
51 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); | 52 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); |
52 | rpcauth_free_credcache(auth); | 53 | rpcauth_free_credcache(auth); |
53 | } | 54 | } |
54 | 55 | ||
@@ -67,8 +68,8 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
67 | struct unx_cred *cred; | 68 | struct unx_cred *cred; |
68 | int i; | 69 | int i; |
69 | 70 | ||
70 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", | 71 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", |
71 | acred->uid, acred->gid); | 72 | acred->uid, acred->gid); |
72 | 73 | ||
73 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) | 74 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) |
74 | return ERR_PTR(-ENOMEM); | 75 | return ERR_PTR(-ENOMEM); |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 8612044b9189..f02f24ae9468 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -215,7 +215,8 @@ int cache_check(struct cache_detail *detail, | |||
215 | if (rv == -EAGAIN) | 215 | if (rv == -EAGAIN) |
216 | rv = -ENOENT; | 216 | rv = -ENOENT; |
217 | } else if (rv == -EAGAIN || age > refresh_age/2) { | 217 | } else if (rv == -EAGAIN || age > refresh_age/2) { |
218 | dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age); | 218 | dprintk("RPC: Want update, refage=%ld, age=%ld\n", |
219 | refresh_age, age); | ||
219 | if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { | 220 | if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { |
220 | switch (cache_make_upcall(detail, h)) { | 221 | switch (cache_make_upcall(detail, h)) { |
221 | case -EINVAL: | 222 | case -EINVAL: |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index c95a61736d1c..6d7221fe990a 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -42,6 +42,10 @@ | |||
42 | # define RPCDBG_FACILITY RPCDBG_CALL | 42 | # define RPCDBG_FACILITY RPCDBG_CALL |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #define dprint_status(t) \ | ||
46 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ | ||
47 | __FUNCTION__, t->tk_status) | ||
48 | |||
45 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); | 49 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); |
46 | 50 | ||
47 | 51 | ||
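
The dprint_status() macro added above standardizes the per-state debug line for the call_*() state machine: task id in the new %5u format, the function name via __FUNCTION__, and the current tk_status. The hunks that follow (call_reserve, call_reserveresult, call_allocate, call_encode, call_bind, call_bind_status, ...) replace their hand-rolled dprintk calls with it; a converted site is equivalent to roughly:

        /* before */
        dprintk("RPC: %4d call_reserveresult (status %d)\n",
                        task->tk_pid, task->tk_status);

        /* after: dprint_status(task), which expands to */
        dprintk("RPC: %5u %s (status %d)\n",
                        task->tk_pid, __FUNCTION__, task->tk_status);
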
@@ -106,8 +110,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
106 | int err; | 110 | int err; |
107 | int len; | 111 | int len; |
108 | 112 | ||
109 | dprintk("RPC: creating %s client for %s (xprt %p)\n", | 113 | dprintk("RPC: creating %s client for %s (xprt %p)\n", |
110 | program->name, servname, xprt); | 114 | program->name, servname, xprt); |
111 | 115 | ||
112 | err = -EINVAL; | 116 | err = -EINVAL; |
113 | if (!xprt) | 117 | if (!xprt) |
@@ -220,7 +224,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
220 | xprt->resvport = 0; | 224 | xprt->resvport = 0; |
221 | 225 | ||
222 | dprintk("RPC: creating %s client for %s (xprt %p)\n", | 226 | dprintk("RPC: creating %s client for %s (xprt %p)\n", |
223 | args->program->name, args->servername, xprt); | 227 | args->program->name, args->servername, xprt); |
224 | 228 | ||
225 | clnt = rpc_new_client(xprt, args->servername, args->program, | 229 | clnt = rpc_new_client(xprt, args->servername, args->program, |
226 | args->version, args->authflavor); | 230 | args->version, args->authflavor); |
@@ -245,6 +249,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
245 | clnt->cl_autobind = 1; | 249 | clnt->cl_autobind = 1; |
246 | if (args->flags & RPC_CLNT_CREATE_ONESHOT) | 250 | if (args->flags & RPC_CLNT_CREATE_ONESHOT) |
247 | clnt->cl_oneshot = 1; | 251 | clnt->cl_oneshot = 1; |
252 | if (args->flags & RPC_CLNT_CREATE_DISCRTRY) | ||
253 | clnt->cl_discrtry = 1; | ||
248 | 254 | ||
249 | return clnt; | 255 | return clnt; |
250 | } | 256 | } |
@@ -288,7 +294,7 @@ out_no_path: | |||
288 | out_no_stats: | 294 | out_no_stats: |
289 | kfree(new); | 295 | kfree(new); |
290 | out_no_clnt: | 296 | out_no_clnt: |
291 | dprintk("RPC: %s returned error %d\n", __FUNCTION__, err); | 297 | dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err); |
292 | return ERR_PTR(err); | 298 | return ERR_PTR(err); |
293 | } | 299 | } |
294 | 300 | ||
@@ -301,7 +307,7 @@ out_no_clnt: | |||
301 | int | 307 | int |
302 | rpc_shutdown_client(struct rpc_clnt *clnt) | 308 | rpc_shutdown_client(struct rpc_clnt *clnt) |
303 | { | 309 | { |
304 | dprintk("RPC: shutting down %s client for %s, tasks=%d\n", | 310 | dprintk("RPC: shutting down %s client for %s, tasks=%d\n", |
305 | clnt->cl_protname, clnt->cl_server, | 311 | clnt->cl_protname, clnt->cl_server, |
306 | atomic_read(&clnt->cl_users)); | 312 | atomic_read(&clnt->cl_users)); |
307 | 313 | ||
@@ -336,7 +342,7 @@ rpc_destroy_client(struct rpc_clnt *clnt) | |||
336 | return 1; | 342 | return 1; |
337 | BUG_ON(atomic_read(&clnt->cl_users) != 0); | 343 | BUG_ON(atomic_read(&clnt->cl_users) != 0); |
338 | 344 | ||
339 | dprintk("RPC: destroying %s client for %s\n", | 345 | dprintk("RPC: destroying %s client for %s\n", |
340 | clnt->cl_protname, clnt->cl_server); | 346 | clnt->cl_protname, clnt->cl_server); |
341 | if (clnt->cl_auth) { | 347 | if (clnt->cl_auth) { |
342 | rpcauth_destroy(clnt->cl_auth); | 348 | rpcauth_destroy(clnt->cl_auth); |
@@ -366,8 +372,8 @@ out_free: | |||
366 | void | 372 | void |
367 | rpc_release_client(struct rpc_clnt *clnt) | 373 | rpc_release_client(struct rpc_clnt *clnt) |
368 | { | 374 | { |
369 | dprintk("RPC: rpc_release_client(%p, %d)\n", | 375 | dprintk("RPC: rpc_release_client(%p, %d)\n", |
370 | clnt, atomic_read(&clnt->cl_users)); | 376 | clnt, atomic_read(&clnt->cl_users)); |
371 | 377 | ||
372 | if (!atomic_dec_and_test(&clnt->cl_users)) | 378 | if (!atomic_dec_and_test(&clnt->cl_users)) |
373 | return; | 379 | return; |
@@ -486,17 +492,13 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | |||
486 | /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ | 492 | /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ |
487 | rpc_task_sigmask(task, &oldset); | 493 | rpc_task_sigmask(task, &oldset); |
488 | 494 | ||
489 | rpc_call_setup(task, msg, 0); | ||
490 | |||
491 | /* Set up the call info struct and execute the task */ | 495 | /* Set up the call info struct and execute the task */ |
496 | rpc_call_setup(task, msg, 0); | ||
497 | if (task->tk_status == 0) { | ||
498 | atomic_inc(&task->tk_count); | ||
499 | rpc_execute(task); | ||
500 | } | ||
492 | status = task->tk_status; | 501 | status = task->tk_status; |
493 | if (status != 0) | ||
494 | goto out; | ||
495 | atomic_inc(&task->tk_count); | ||
496 | status = rpc_execute(task); | ||
497 | if (status == 0) | ||
498 | status = task->tk_status; | ||
499 | out: | ||
500 | rpc_put_task(task); | 502 | rpc_put_task(task); |
501 | rpc_restore_sigmask(&oldset); | 503 | rpc_restore_sigmask(&oldset); |
502 | return status; | 504 | return status; |
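
With rpc_execute() now declared void (see the sunrpc/sched.h hunk earlier), rpc_call_sync() can no longer rely on its return value; the rework above checks task->tk_status after rpc_call_setup(), only takes the extra task reference and runs the task when setup succeeded, and then reads the final status back out of the task. Consolidated from the right-hand column of the hunk:

        /* Set up the call info struct and execute the task */
        rpc_call_setup(task, msg, 0);
        if (task->tk_status == 0) {
                atomic_inc(&task->tk_count);
                rpc_execute(task);
        }
        status = task->tk_status;
        rpc_put_task(task);
        rpc_restore_sigmask(&oldset);
        return status;
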
@@ -658,9 +660,10 @@ call_start(struct rpc_task *task) | |||
658 | { | 660 | { |
659 | struct rpc_clnt *clnt = task->tk_client; | 661 | struct rpc_clnt *clnt = task->tk_client; |
660 | 662 | ||
661 | dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid, | 663 | dprintk("RPC: %5u call_start %s%d proc %d (%s)\n", task->tk_pid, |
662 | clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc, | 664 | clnt->cl_protname, clnt->cl_vers, |
663 | (RPC_IS_ASYNC(task) ? "async" : "sync")); | 665 | task->tk_msg.rpc_proc->p_proc, |
666 | (RPC_IS_ASYNC(task) ? "async" : "sync")); | ||
664 | 667 | ||
665 | /* Increment call count */ | 668 | /* Increment call count */ |
666 | task->tk_msg.rpc_proc->p_count++; | 669 | task->tk_msg.rpc_proc->p_count++; |
@@ -674,7 +677,7 @@ call_start(struct rpc_task *task) | |||
674 | static void | 677 | static void |
675 | call_reserve(struct rpc_task *task) | 678 | call_reserve(struct rpc_task *task) |
676 | { | 679 | { |
677 | dprintk("RPC: %4d call_reserve\n", task->tk_pid); | 680 | dprint_status(task); |
678 | 681 | ||
679 | if (!rpcauth_uptodatecred(task)) { | 682 | if (!rpcauth_uptodatecred(task)) { |
680 | task->tk_action = call_refresh; | 683 | task->tk_action = call_refresh; |
@@ -694,8 +697,7 @@ call_reserveresult(struct rpc_task *task) | |||
694 | { | 697 | { |
695 | int status = task->tk_status; | 698 | int status = task->tk_status; |
696 | 699 | ||
697 | dprintk("RPC: %4d call_reserveresult (status %d)\n", | 700 | dprint_status(task); |
698 | task->tk_pid, task->tk_status); | ||
699 | 701 | ||
700 | /* | 702 | /* |
701 | * After a call to xprt_reserve(), we must have either | 703 | * After a call to xprt_reserve(), we must have either |
@@ -749,8 +751,8 @@ call_allocate(struct rpc_task *task) | |||
749 | struct rpc_xprt *xprt = task->tk_xprt; | 751 | struct rpc_xprt *xprt = task->tk_xprt; |
750 | unsigned int bufsiz; | 752 | unsigned int bufsiz; |
751 | 753 | ||
752 | dprintk("RPC: %4d call_allocate (status %d)\n", | 754 | dprint_status(task); |
753 | task->tk_pid, task->tk_status); | 755 | |
754 | task->tk_action = call_bind; | 756 | task->tk_action = call_bind; |
755 | if (req->rq_buffer) | 757 | if (req->rq_buffer) |
756 | return; | 758 | return; |
@@ -761,7 +763,8 @@ call_allocate(struct rpc_task *task) | |||
761 | 763 | ||
762 | if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) | 764 | if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) |
763 | return; | 765 | return; |
764 | printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); | 766 | |
767 | dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); | ||
765 | 768 | ||
766 | if (RPC_IS_ASYNC(task) || !signalled()) { | 769 | if (RPC_IS_ASYNC(task) || !signalled()) { |
767 | xprt_release(task); | 770 | xprt_release(task); |
@@ -798,8 +801,7 @@ call_encode(struct rpc_task *task) | |||
798 | kxdrproc_t encode; | 801 | kxdrproc_t encode; |
799 | __be32 *p; | 802 | __be32 *p; |
800 | 803 | ||
801 | dprintk("RPC: %4d call_encode (status %d)\n", | 804 | dprint_status(task); |
802 | task->tk_pid, task->tk_status); | ||
803 | 805 | ||
804 | /* Default buffer setup */ | 806 | /* Default buffer setup */ |
805 | bufsiz = req->rq_bufsize >> 1; | 807 | bufsiz = req->rq_bufsize >> 1; |
@@ -845,8 +847,7 @@ call_bind(struct rpc_task *task) | |||
845 | { | 847 | { |
846 | struct rpc_xprt *xprt = task->tk_xprt; | 848 | struct rpc_xprt *xprt = task->tk_xprt; |
847 | 849 | ||
848 | dprintk("RPC: %4d call_bind (status %d)\n", | 850 | dprint_status(task); |
849 | task->tk_pid, task->tk_status); | ||
850 | 851 | ||
851 | task->tk_action = call_connect; | 852 | task->tk_action = call_connect; |
852 | if (!xprt_bound(xprt)) { | 853 | if (!xprt_bound(xprt)) { |
@@ -865,8 +866,7 @@ call_bind_status(struct rpc_task *task) | |||
865 | int status = -EACCES; | 866 | int status = -EACCES; |
866 | 867 | ||
867 | if (task->tk_status >= 0) { | 868 | if (task->tk_status >= 0) { |
868 | dprintk("RPC: %4d call_bind_status (status %d)\n", | 869 | dprint_status(task); |
869 | task->tk_pid, task->tk_status); | ||
870 | task->tk_status = 0; | 870 | task->tk_status = 0; |
871 | task->tk_action = call_connect; | 871 | task->tk_action = call_connect; |
872 | return; | 872 | return; |
@@ -874,24 +874,24 @@ call_bind_status(struct rpc_task *task) | |||
874 | 874 | ||
875 | switch (task->tk_status) { | 875 | switch (task->tk_status) { |
876 | case -EACCES: | 876 | case -EACCES: |
877 | dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", | 877 | dprintk("RPC: %5u remote rpcbind: RPC program/version " |
878 | task->tk_pid); | 878 | "unavailable\n", task->tk_pid); |
879 | rpc_delay(task, 3*HZ); | 879 | rpc_delay(task, 3*HZ); |
880 | goto retry_timeout; | 880 | goto retry_timeout; |
881 | case -ETIMEDOUT: | 881 | case -ETIMEDOUT: |
882 | dprintk("RPC: %4d rpcbind request timed out\n", | 882 | dprintk("RPC: %5u rpcbind request timed out\n", |
883 | task->tk_pid); | 883 | task->tk_pid); |
884 | goto retry_timeout; | 884 | goto retry_timeout; |
885 | case -EPFNOSUPPORT: | 885 | case -EPFNOSUPPORT: |
886 | dprintk("RPC: %4d remote rpcbind service unavailable\n", | 886 | dprintk("RPC: %5u remote rpcbind service unavailable\n", |
887 | task->tk_pid); | 887 | task->tk_pid); |
888 | break; | 888 | break; |
889 | case -EPROTONOSUPPORT: | 889 | case -EPROTONOSUPPORT: |
890 | dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", | 890 | dprintk("RPC: %5u remote rpcbind version 2 unavailable\n", |
891 | task->tk_pid); | 891 | task->tk_pid); |
892 | break; | 892 | break; |
893 | default: | 893 | default: |
894 | dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", | 894 | dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", |
895 | task->tk_pid, -task->tk_status); | 895 | task->tk_pid, -task->tk_status); |
896 | status = -EIO; | 896 | status = -EIO; |
897 | } | 897 | } |
@@ -911,7 +911,7 @@ call_connect(struct rpc_task *task) | |||
911 | { | 911 | { |
912 | struct rpc_xprt *xprt = task->tk_xprt; | 912 | struct rpc_xprt *xprt = task->tk_xprt; |
913 | 913 | ||
914 | dprintk("RPC: %4d call_connect xprt %p %s connected\n", | 914 | dprintk("RPC: %5u call_connect xprt %p %s connected\n", |
915 | task->tk_pid, xprt, | 915 | task->tk_pid, xprt, |
916 | (xprt_connected(xprt) ? "is" : "is not")); | 916 | (xprt_connected(xprt) ? "is" : "is not")); |
917 | 917 | ||
@@ -933,8 +933,7 @@ call_connect_status(struct rpc_task *task) | |||
933 | struct rpc_clnt *clnt = task->tk_client; | 933 | struct rpc_clnt *clnt = task->tk_client; |
934 | int status = task->tk_status; | 934 | int status = task->tk_status; |
935 | 935 | ||
936 | dprintk("RPC: %5u call_connect_status (status %d)\n", | 936 | dprint_status(task); |
937 | task->tk_pid, task->tk_status); | ||
938 | 937 | ||
939 | task->tk_status = 0; | 938 | task->tk_status = 0; |
940 | if (status >= 0) { | 939 | if (status >= 0) { |
@@ -966,8 +965,7 @@ call_connect_status(struct rpc_task *task) | |||
966 | static void | 965 | static void |
967 | call_transmit(struct rpc_task *task) | 966 | call_transmit(struct rpc_task *task) |
968 | { | 967 | { |
969 | dprintk("RPC: %4d call_transmit (status %d)\n", | 968 | dprint_status(task); |
970 | task->tk_pid, task->tk_status); | ||
971 | 969 | ||
972 | task->tk_action = call_status; | 970 | task->tk_action = call_status; |
973 | if (task->tk_status < 0) | 971 | if (task->tk_status < 0) |
@@ -1028,8 +1026,7 @@ call_status(struct rpc_task *task) | |||
1028 | if (req->rq_received > 0 && !req->rq_bytes_sent) | 1026 | if (req->rq_received > 0 && !req->rq_bytes_sent) |
1029 | task->tk_status = req->rq_received; | 1027 | task->tk_status = req->rq_received; |
1030 | 1028 | ||
1031 | dprintk("RPC: %4d call_status (status %d)\n", | 1029 | dprint_status(task); |
1032 | task->tk_pid, task->tk_status); | ||
1033 | 1030 | ||
1034 | status = task->tk_status; | 1031 | status = task->tk_status; |
1035 | if (status >= 0) { | 1032 | if (status >= 0) { |
@@ -1080,11 +1077,11 @@ call_timeout(struct rpc_task *task) | |||
1080 | struct rpc_clnt *clnt = task->tk_client; | 1077 | struct rpc_clnt *clnt = task->tk_client; |
1081 | 1078 | ||
1082 | if (xprt_adjust_timeout(task->tk_rqstp) == 0) { | 1079 | if (xprt_adjust_timeout(task->tk_rqstp) == 0) { |
1083 | dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid); | 1080 | dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid); |
1084 | goto retry; | 1081 | goto retry; |
1085 | } | 1082 | } |
1086 | 1083 | ||
1087 | dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid); | 1084 | dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); |
1088 | task->tk_timeouts++; | 1085 | task->tk_timeouts++; |
1089 | 1086 | ||
1090 | if (RPC_IS_SOFT(task)) { | 1087 | if (RPC_IS_SOFT(task)) { |
@@ -1118,8 +1115,8 @@ call_decode(struct rpc_task *task) | |||
1118 | kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; | 1115 | kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; |
1119 | __be32 *p; | 1116 | __be32 *p; |
1120 | 1117 | ||
1121 | dprintk("RPC: %4d call_decode (status %d)\n", | 1118 | dprintk("RPC: %5u call_decode (status %d)\n", |
1122 | task->tk_pid, task->tk_status); | 1119 | task->tk_pid, task->tk_status); |
1123 | 1120 | ||
1124 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { | 1121 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { |
1125 | printk(KERN_NOTICE "%s: server %s OK\n", | 1122 | printk(KERN_NOTICE "%s: server %s OK\n", |
@@ -1133,8 +1130,8 @@ call_decode(struct rpc_task *task) | |||
1133 | clnt->cl_stats->rpcretrans++; | 1130 | clnt->cl_stats->rpcretrans++; |
1134 | goto out_retry; | 1131 | goto out_retry; |
1135 | } | 1132 | } |
1136 | dprintk("%s: too small RPC reply size (%d bytes)\n", | 1133 | dprintk("RPC: %s: too small RPC reply size (%d bytes)\n", |
1137 | clnt->cl_protname, task->tk_status); | 1134 | clnt->cl_protname, task->tk_status); |
1138 | task->tk_action = call_timeout; | 1135 | task->tk_action = call_timeout; |
1139 | goto out_retry; | 1136 | goto out_retry; |
1140 | } | 1137 | } |
@@ -1166,8 +1163,8 @@ call_decode(struct rpc_task *task) | |||
1166 | task->tk_msg.rpc_resp); | 1163 | task->tk_msg.rpc_resp); |
1167 | unlock_kernel(); | 1164 | unlock_kernel(); |
1168 | } | 1165 | } |
1169 | dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, | 1166 | dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, |
1170 | task->tk_status); | 1167 | task->tk_status); |
1171 | return; | 1168 | return; |
1172 | out_retry: | 1169 | out_retry: |
1173 | req->rq_received = req->rq_private_buf.len = 0; | 1170 | req->rq_received = req->rq_private_buf.len = 0; |
@@ -1180,7 +1177,7 @@ out_retry: | |||
1180 | static void | 1177 | static void |
1181 | call_refresh(struct rpc_task *task) | 1178 | call_refresh(struct rpc_task *task) |
1182 | { | 1179 | { |
1183 | dprintk("RPC: %4d call_refresh\n", task->tk_pid); | 1180 | dprint_status(task); |
1184 | 1181 | ||
1185 | xprt_release(task); /* Must do to obtain new XID */ | 1182 | xprt_release(task); /* Must do to obtain new XID */ |
1186 | task->tk_action = call_refreshresult; | 1183 | task->tk_action = call_refreshresult; |
@@ -1196,8 +1193,8 @@ static void | |||
1196 | call_refreshresult(struct rpc_task *task) | 1193 | call_refreshresult(struct rpc_task *task) |
1197 | { | 1194 | { |
1198 | int status = task->tk_status; | 1195 | int status = task->tk_status; |
1199 | dprintk("RPC: %4d call_refreshresult (status %d)\n", | 1196 | |
1200 | task->tk_pid, task->tk_status); | 1197 | dprint_status(task); |
1201 | 1198 | ||
1202 | task->tk_status = 0; | 1199 | task->tk_status = 0; |
1203 | task->tk_action = call_reserve; | 1200 | task->tk_action = call_reserve; |
@@ -1275,11 +1272,15 @@ call_verify(struct rpc_task *task) | |||
1275 | case RPC_AUTH_ERROR: | 1272 | case RPC_AUTH_ERROR: |
1276 | break; | 1273 | break; |
1277 | case RPC_MISMATCH: | 1274 | case RPC_MISMATCH: |
1278 | dprintk("%s: RPC call version mismatch!\n", __FUNCTION__); | 1275 | dprintk("RPC: %5u %s: RPC call version " |
1276 | "mismatch!\n", | ||
1277 | task->tk_pid, __FUNCTION__); | ||
1279 | error = -EPROTONOSUPPORT; | 1278 | error = -EPROTONOSUPPORT; |
1280 | goto out_err; | 1279 | goto out_err; |
1281 | default: | 1280 | default: |
1282 | dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n); | 1281 | dprintk("RPC: %5u %s: RPC call rejected, " |
1282 | "unknown error: %x\n", | ||
1283 | task->tk_pid, __FUNCTION__, n); | ||
1283 | goto out_eio; | 1284 | goto out_eio; |
1284 | } | 1285 | } |
1285 | if (--len < 0) | 1286 | if (--len < 0) |
@@ -1292,8 +1293,8 @@ call_verify(struct rpc_task *task) | |||
1292 | if (!task->tk_cred_retry) | 1293 | if (!task->tk_cred_retry) |
1293 | break; | 1294 | break; |
1294 | task->tk_cred_retry--; | 1295 | task->tk_cred_retry--; |
1295 | dprintk("RPC: %4d call_verify: retry stale creds\n", | 1296 | dprintk("RPC: %5u %s: retry stale creds\n", |
1296 | task->tk_pid); | 1297 | task->tk_pid, __FUNCTION__); |
1297 | rpcauth_invalcred(task); | 1298 | rpcauth_invalcred(task); |
1298 | task->tk_action = call_refresh; | 1299 | task->tk_action = call_refresh; |
1299 | goto out_retry; | 1300 | goto out_retry; |
@@ -1303,8 +1304,8 @@ call_verify(struct rpc_task *task) | |||
1303 | if (!task->tk_garb_retry) | 1304 | if (!task->tk_garb_retry) |
1304 | break; | 1305 | break; |
1305 | task->tk_garb_retry--; | 1306 | task->tk_garb_retry--; |
1306 | dprintk("RPC: %4d call_verify: retry garbled creds\n", | 1307 | dprintk("RPC: %5u %s: retry garbled creds\n", |
1307 | task->tk_pid); | 1308 | task->tk_pid, __FUNCTION__); |
1308 | task->tk_action = call_bind; | 1309 | task->tk_action = call_bind; |
1309 | goto out_retry; | 1310 | goto out_retry; |
1310 | case RPC_AUTH_TOOWEAK: | 1311 | case RPC_AUTH_TOOWEAK: |
@@ -1315,8 +1316,8 @@ call_verify(struct rpc_task *task) | |||
1315 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); | 1316 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); |
1316 | error = -EIO; | 1317 | error = -EIO; |
1317 | } | 1318 | } |
1318 | dprintk("RPC: %4d call_verify: call rejected %d\n", | 1319 | dprintk("RPC: %5u %s: call rejected %d\n", |
1319 | task->tk_pid, n); | 1320 | task->tk_pid, __FUNCTION__, n); |
1320 | goto out_err; | 1321 | goto out_err; |
1321 | } | 1322 | } |
1322 | if (!(p = rpcauth_checkverf(task, p))) { | 1323 | if (!(p = rpcauth_checkverf(task, p))) { |
@@ -1330,20 +1331,24 @@ call_verify(struct rpc_task *task) | |||
1330 | case RPC_SUCCESS: | 1331 | case RPC_SUCCESS: |
1331 | return p; | 1332 | return p; |
1332 | case RPC_PROG_UNAVAIL: | 1333 | case RPC_PROG_UNAVAIL: |
1333 | dprintk("RPC: call_verify: program %u is unsupported by server %s\n", | 1334 | dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", |
1335 | task->tk_pid, __FUNCTION__, | ||
1334 | (unsigned int)task->tk_client->cl_prog, | 1336 | (unsigned int)task->tk_client->cl_prog, |
1335 | task->tk_client->cl_server); | 1337 | task->tk_client->cl_server); |
1336 | error = -EPFNOSUPPORT; | 1338 | error = -EPFNOSUPPORT; |
1337 | goto out_err; | 1339 | goto out_err; |
1338 | case RPC_PROG_MISMATCH: | 1340 | case RPC_PROG_MISMATCH: |
1339 | dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n", | 1341 | dprintk("RPC: %5u %s: program %u, version %u unsupported by " |
1342 | "server %s\n", task->tk_pid, __FUNCTION__, | ||
1340 | (unsigned int)task->tk_client->cl_prog, | 1343 | (unsigned int)task->tk_client->cl_prog, |
1341 | (unsigned int)task->tk_client->cl_vers, | 1344 | (unsigned int)task->tk_client->cl_vers, |
1342 | task->tk_client->cl_server); | 1345 | task->tk_client->cl_server); |
1343 | error = -EPROTONOSUPPORT; | 1346 | error = -EPROTONOSUPPORT; |
1344 | goto out_err; | 1347 | goto out_err; |
1345 | case RPC_PROC_UNAVAIL: | 1348 | case RPC_PROC_UNAVAIL: |
1346 | dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n", | 1349 | dprintk("RPC: %5u %s: proc %p unsupported by program %u, " |
1350 | "version %u on server %s\n", | ||
1351 | task->tk_pid, __FUNCTION__, | ||
1347 | task->tk_msg.rpc_proc, | 1352 | task->tk_msg.rpc_proc, |
1348 | task->tk_client->cl_prog, | 1353 | task->tk_client->cl_prog, |
1349 | task->tk_client->cl_vers, | 1354 | task->tk_client->cl_vers, |
@@ -1351,7 +1356,8 @@ call_verify(struct rpc_task *task) | |||
1351 | error = -EOPNOTSUPP; | 1356 | error = -EOPNOTSUPP; |
1352 | goto out_err; | 1357 | goto out_err; |
1353 | case RPC_GARBAGE_ARGS: | 1358 | case RPC_GARBAGE_ARGS: |
1354 | dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__); | 1359 | dprintk("RPC: %5u %s: server saw garbage\n", |
1360 | task->tk_pid, __FUNCTION__); | ||
1355 | break; /* retry */ | 1361 | break; /* retry */ |
1356 | default: | 1362 | default: |
1357 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); | 1363 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); |
@@ -1362,7 +1368,8 @@ out_garbage: | |||
1362 | task->tk_client->cl_stats->rpcgarbage++; | 1368 | task->tk_client->cl_stats->rpcgarbage++; |
1363 | if (task->tk_garb_retry) { | 1369 | if (task->tk_garb_retry) { |
1364 | task->tk_garb_retry--; | 1370 | task->tk_garb_retry--; |
1365 | dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); | 1371 | dprintk("RPC: %5u %s: retrying\n", |
1372 | task->tk_pid, __FUNCTION__); | ||
1366 | task->tk_action = call_bind; | 1373 | task->tk_action = call_bind; |
1367 | out_retry: | 1374 | out_retry: |
1368 | return ERR_PTR(-EAGAIN); | 1375 | return ERR_PTR(-EAGAIN); |
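
The net/sunrpc/clnt.c hunks above do two things: they widen the task-id field in the debug output from %4d to %5u, and they collapse the repetitive "function name plus status" dprintk calls into a single dprint_status(task) helper. The helper's definition is not part of this excerpt; assuming it follows the same format as the remaining open-coded messages, it would look roughly like this:

        #define dprint_status(t)                                        \
                dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,         \
                                __FUNCTION__, t->tk_status)

Each call_*() state routine that previously printed its own name and task->tk_status by hand can then share one line, which is why so many of the hunks shrink to a bare dprint_status(task).
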
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index f4e1357bc186..d9f765344589 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c | |||
@@ -62,7 +62,10 @@ static inline void pmap_map_free(struct portmap_args *map) | |||
62 | 62 | ||
63 | static void pmap_map_release(void *data) | 63 | static void pmap_map_release(void *data) |
64 | { | 64 | { |
65 | pmap_map_free(data); | 65 | struct portmap_args *map = data; |
66 | |||
67 | xprt_put(map->pm_xprt); | ||
68 | pmap_map_free(map); | ||
66 | } | 69 | } |
67 | 70 | ||
68 | static const struct rpc_call_ops pmap_getport_ops = { | 71 | static const struct rpc_call_ops pmap_getport_ops = { |
@@ -94,7 +97,7 @@ void rpc_getport(struct rpc_task *task) | |||
94 | struct rpc_task *child; | 97 | struct rpc_task *child; |
95 | int status; | 98 | int status; |
96 | 99 | ||
97 | dprintk("RPC: %4d rpc_getport(%s, %u, %u, %d)\n", | 100 | dprintk("RPC: %5u rpc_getport(%s, %u, %u, %d)\n", |
98 | task->tk_pid, clnt->cl_server, | 101 | task->tk_pid, clnt->cl_server, |
99 | clnt->cl_prog, clnt->cl_vers, xprt->prot); | 102 | clnt->cl_prog, clnt->cl_vers, xprt->prot); |
100 | 103 | ||
@@ -133,7 +136,7 @@ void rpc_getport(struct rpc_task *task) | |||
133 | status = -EIO; | 136 | status = -EIO; |
134 | child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map); | 137 | child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map); |
135 | if (IS_ERR(child)) | 138 | if (IS_ERR(child)) |
136 | goto bailout; | 139 | goto bailout_nofree; |
137 | rpc_put_task(child); | 140 | rpc_put_task(child); |
138 | 141 | ||
139 | task->tk_xprt->stat.bind_count++; | 142 | task->tk_xprt->stat.bind_count++; |
@@ -175,7 +178,7 @@ int rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int pr | |||
175 | char hostname[32]; | 178 | char hostname[32]; |
176 | int status; | 179 | int status; |
177 | 180 | ||
178 | dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", | 181 | dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", |
179 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); | 182 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); |
180 | 183 | ||
181 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); | 184 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); |
@@ -218,11 +221,10 @@ static void pmap_getport_done(struct rpc_task *child, void *data) | |||
218 | status = 0; | 221 | status = 0; |
219 | } | 222 | } |
220 | 223 | ||
221 | dprintk("RPC: %4d pmap_getport_done(status %d, port %u)\n", | 224 | dprintk("RPC: %5u pmap_getport_done(status %d, port %u)\n", |
222 | child->tk_pid, status, map->pm_port); | 225 | child->tk_pid, status, map->pm_port); |
223 | 226 | ||
224 | pmap_wake_portmap_waiters(xprt, status); | 227 | pmap_wake_portmap_waiters(xprt, status); |
225 | xprt_put(xprt); | ||
226 | } | 228 | } |
227 | 229 | ||
228 | /** | 230 | /** |
@@ -255,13 +257,14 @@ int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
255 | struct rpc_clnt *pmap_clnt; | 257 | struct rpc_clnt *pmap_clnt; |
256 | int error = 0; | 258 | int error = 0; |
257 | 259 | ||
258 | dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n", | 260 | dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n", |
259 | prog, vers, prot, port); | 261 | prog, vers, prot, port); |
260 | 262 | ||
261 | pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); | 263 | pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); |
262 | if (IS_ERR(pmap_clnt)) { | 264 | if (IS_ERR(pmap_clnt)) { |
263 | error = PTR_ERR(pmap_clnt); | 265 | error = PTR_ERR(pmap_clnt); |
264 | dprintk("RPC: couldn't create pmap client. Error = %d\n", error); | 266 | dprintk("RPC: couldn't create pmap client. Error = %d\n", |
267 | error); | ||
265 | return error; | 268 | return error; |
266 | } | 269 | } |
267 | 270 | ||
@@ -272,7 +275,7 @@ int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
272 | "RPC: failed to contact portmap (errno %d).\n", | 275 | "RPC: failed to contact portmap (errno %d).\n", |
273 | error); | 276 | error); |
274 | } | 277 | } |
275 | dprintk("RPC: registration status %d/%d\n", error, *okay); | 278 | dprintk("RPC: registration status %d/%d\n", error, *okay); |
276 | 279 | ||
277 | /* Client deleted automatically because cl_oneshot == 1 */ | 280 | /* Client deleted automatically because cl_oneshot == 1 */ |
278 | return error; | 281 | return error; |
@@ -303,8 +306,9 @@ static struct rpc_clnt *pmap_create(char *hostname, struct sockaddr_in *srvaddr, | |||
303 | */ | 306 | */ |
304 | static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map) | 307 | static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map) |
305 | { | 308 | { |
306 | dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n", | 309 | dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n", |
307 | map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port); | 310 | map->pm_prog, map->pm_vers, |
311 | map->pm_prot, map->pm_port); | ||
308 | *p++ = htonl(map->pm_prog); | 312 | *p++ = htonl(map->pm_prog); |
309 | *p++ = htonl(map->pm_vers); | 313 | *p++ = htonl(map->pm_vers); |
310 | *p++ = htonl(map->pm_prot); | 314 | *p++ = htonl(map->pm_prot); |
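
The pmap_clnt.c changes move the xprt_put() out of pmap_getport_done() and into pmap_map_release(), the ->rpc_release callback attached to the calldata. Because that callback runs on every path that frees the portmap_args -- including the new bailout_nofree error path taken when rpc_run_task() fails -- the transport reference is dropped exactly once whether or not the child task ever executed. A minimal sketch of the dispatch helper this relies on (its body is not shown in this excerpt, so treat the details as an assumption):

        void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
        {
                /* invoked whenever a task's calldata is torn down,
                 * whether the task ran to completion or never started */
                if (ops->rpc_release != NULL)
                        ops->rpc_release(calldata);
        }
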
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index e1fad77a2257..9b9ea5045569 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -589,7 +589,7 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry) | |||
589 | { | 589 | { |
590 | struct inode *inode; | 590 | struct inode *inode; |
591 | 591 | ||
592 | inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR); | 592 | inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO); |
593 | if (!inode) | 593 | if (!inode) |
594 | goto out_err; | 594 | goto out_err; |
595 | inode->i_ino = iunique(dir->i_sb, 100); | 595 | inode->i_ino = iunique(dir->i_sb, 100); |
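
The single rpc_pipe.c hunk widens the permissions on directories created in the RPC pseudo-filesystem from owner-only to world readable and searchable; write access is never granted. Spelled out with the standard mode macros (illustrative fragment, not part of the patch):

        umode_t old_mode = S_IFDIR | S_IRUSR | S_IXUSR; /* 040500: dr-x------ */
        umode_t new_mode = S_IFDIR | S_IRUGO | S_IXUGO; /* 040555: dr-xr-xr-x */
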
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 54a6b92525ea..6d87320074b1 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(rpc_sched_lock); | |||
74 | static inline void | 74 | static inline void |
75 | __rpc_disable_timer(struct rpc_task *task) | 75 | __rpc_disable_timer(struct rpc_task *task) |
76 | { | 76 | { |
77 | dprintk("RPC: %4d disabling timer\n", task->tk_pid); | 77 | dprintk("RPC: %5u disabling timer\n", task->tk_pid); |
78 | task->tk_timeout_fn = NULL; | 78 | task->tk_timeout_fn = NULL; |
79 | task->tk_timeout = 0; | 79 | task->tk_timeout = 0; |
80 | } | 80 | } |
@@ -93,7 +93,7 @@ static void rpc_run_timer(struct rpc_task *task) | |||
93 | callback = task->tk_timeout_fn; | 93 | callback = task->tk_timeout_fn; |
94 | task->tk_timeout_fn = NULL; | 94 | task->tk_timeout_fn = NULL; |
95 | if (callback && RPC_IS_QUEUED(task)) { | 95 | if (callback && RPC_IS_QUEUED(task)) { |
96 | dprintk("RPC: %4d running timer\n", task->tk_pid); | 96 | dprintk("RPC: %5u running timer\n", task->tk_pid); |
97 | callback(task); | 97 | callback(task); |
98 | } | 98 | } |
99 | smp_mb__before_clear_bit(); | 99 | smp_mb__before_clear_bit(); |
@@ -110,7 +110,7 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer) | |||
110 | if (!task->tk_timeout) | 110 | if (!task->tk_timeout) |
111 | return; | 111 | return; |
112 | 112 | ||
113 | dprintk("RPC: %4d setting alarm for %lu ms\n", | 113 | dprintk("RPC: %5u setting alarm for %lu ms\n", |
114 | task->tk_pid, task->tk_timeout * 1000 / HZ); | 114 | task->tk_pid, task->tk_timeout * 1000 / HZ); |
115 | 115 | ||
116 | if (timer) | 116 | if (timer) |
@@ -132,7 +132,7 @@ rpc_delete_timer(struct rpc_task *task) | |||
132 | return; | 132 | return; |
133 | if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) { | 133 | if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) { |
134 | del_singleshot_timer_sync(&task->tk_timer); | 134 | del_singleshot_timer_sync(&task->tk_timer); |
135 | dprintk("RPC: %4d deleting timer\n", task->tk_pid); | 135 | dprintk("RPC: %5u deleting timer\n", task->tk_pid); |
136 | } | 136 | } |
137 | } | 137 | } |
138 | 138 | ||
@@ -179,8 +179,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task * | |||
179 | queue->qlen++; | 179 | queue->qlen++; |
180 | rpc_set_queued(task); | 180 | rpc_set_queued(task); |
181 | 181 | ||
182 | dprintk("RPC: %4d added to queue %p \"%s\"\n", | 182 | dprintk("RPC: %5u added to queue %p \"%s\"\n", |
183 | task->tk_pid, queue, rpc_qname(queue)); | 183 | task->tk_pid, queue, rpc_qname(queue)); |
184 | } | 184 | } |
185 | 185 | ||
186 | /* | 186 | /* |
@@ -212,8 +212,8 @@ static void __rpc_remove_wait_queue(struct rpc_task *task) | |||
212 | else | 212 | else |
213 | list_del(&task->u.tk_wait.list); | 213 | list_del(&task->u.tk_wait.list); |
214 | queue->qlen--; | 214 | queue->qlen--; |
215 | dprintk("RPC: %4d removed from queue %p \"%s\"\n", | 215 | dprintk("RPC: %5u removed from queue %p \"%s\"\n", |
216 | task->tk_pid, queue, rpc_qname(queue)); | 216 | task->tk_pid, queue, rpc_qname(queue)); |
217 | } | 217 | } |
218 | 218 | ||
219 | static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) | 219 | static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) |
@@ -344,8 +344,8 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
344 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 344 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, |
345 | rpc_action action, rpc_action timer) | 345 | rpc_action action, rpc_action timer) |
346 | { | 346 | { |
347 | dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid, | 347 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", |
348 | rpc_qname(q), jiffies); | 348 | task->tk_pid, rpc_qname(q), jiffies); |
349 | 349 | ||
350 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { | 350 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { |
351 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); | 351 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); |
@@ -381,7 +381,8 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
381 | */ | 381 | */ |
382 | static void __rpc_do_wake_up_task(struct rpc_task *task) | 382 | static void __rpc_do_wake_up_task(struct rpc_task *task) |
383 | { | 383 | { |
384 | dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies); | 384 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", |
385 | task->tk_pid, jiffies); | ||
385 | 386 | ||
386 | #ifdef RPC_DEBUG | 387 | #ifdef RPC_DEBUG |
387 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 388 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
@@ -397,7 +398,7 @@ static void __rpc_do_wake_up_task(struct rpc_task *task) | |||
397 | 398 | ||
398 | rpc_make_runnable(task); | 399 | rpc_make_runnable(task); |
399 | 400 | ||
400 | dprintk("RPC: __rpc_wake_up_task done\n"); | 401 | dprintk("RPC: __rpc_wake_up_task done\n"); |
401 | } | 402 | } |
402 | 403 | ||
403 | /* | 404 | /* |
@@ -418,7 +419,7 @@ static void __rpc_wake_up_task(struct rpc_task *task) | |||
418 | static void | 419 | static void |
419 | __rpc_default_timer(struct rpc_task *task) | 420 | __rpc_default_timer(struct rpc_task *task) |
420 | { | 421 | { |
421 | dprintk("RPC: %d timeout (default timer)\n", task->tk_pid); | 422 | dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid); |
422 | task->tk_status = -ETIMEDOUT; | 423 | task->tk_status = -ETIMEDOUT; |
423 | rpc_wake_up_task(task); | 424 | rpc_wake_up_task(task); |
424 | } | 425 | } |
@@ -502,7 +503,8 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) | |||
502 | { | 503 | { |
503 | struct rpc_task *task = NULL; | 504 | struct rpc_task *task = NULL; |
504 | 505 | ||
505 | dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); | 506 | dprintk("RPC: wake_up_next(%p \"%s\")\n", |
507 | queue, rpc_qname(queue)); | ||
506 | rcu_read_lock_bh(); | 508 | rcu_read_lock_bh(); |
507 | spin_lock(&queue->lock); | 509 | spin_lock(&queue->lock); |
508 | if (RPC_IS_PRIORITY(queue)) | 510 | if (RPC_IS_PRIORITY(queue)) |
@@ -625,12 +627,12 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) | |||
625 | /* | 627 | /* |
626 | * This is the RPC `scheduler' (or rather, the finite state machine). | 628 | * This is the RPC `scheduler' (or rather, the finite state machine). |
627 | */ | 629 | */ |
628 | static int __rpc_execute(struct rpc_task *task) | 630 | static void __rpc_execute(struct rpc_task *task) |
629 | { | 631 | { |
630 | int status = 0; | 632 | int status = 0; |
631 | 633 | ||
632 | dprintk("RPC: %4d rpc_execute flgs %x\n", | 634 | dprintk("RPC: %5u __rpc_execute flags=0x%x\n", |
633 | task->tk_pid, task->tk_flags); | 635 | task->tk_pid, task->tk_flags); |
634 | 636 | ||
635 | BUG_ON(RPC_IS_QUEUED(task)); | 637 | BUG_ON(RPC_IS_QUEUED(task)); |
636 | 638 | ||
@@ -679,14 +681,14 @@ static int __rpc_execute(struct rpc_task *task) | |||
679 | if (RPC_IS_ASYNC(task)) { | 681 | if (RPC_IS_ASYNC(task)) { |
680 | /* Careful! we may have raced... */ | 682 | /* Careful! we may have raced... */ |
681 | if (RPC_IS_QUEUED(task)) | 683 | if (RPC_IS_QUEUED(task)) |
682 | return 0; | 684 | return; |
683 | if (rpc_test_and_set_running(task)) | 685 | if (rpc_test_and_set_running(task)) |
684 | return 0; | 686 | return; |
685 | continue; | 687 | continue; |
686 | } | 688 | } |
687 | 689 | ||
688 | /* sync task: sleep here */ | 690 | /* sync task: sleep here */ |
689 | dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); | 691 | dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); |
690 | /* Note: Caller should be using rpc_clnt_sigmask() */ | 692 | /* Note: Caller should be using rpc_clnt_sigmask() */ |
691 | status = out_of_line_wait_on_bit(&task->tk_runstate, | 693 | status = out_of_line_wait_on_bit(&task->tk_runstate, |
692 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, | 694 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, |
@@ -698,19 +700,19 @@ static int __rpc_execute(struct rpc_task *task) | |||
698 | * clean up after sleeping on some queue, we don't | 700 | * clean up after sleeping on some queue, we don't |
699 | * break the loop here, but go around once more. | 701 | * break the loop here, but go around once more. |
700 | */ | 702 | */ |
701 | dprintk("RPC: %4d got signal\n", task->tk_pid); | 703 | dprintk("RPC: %5u got signal\n", task->tk_pid); |
702 | task->tk_flags |= RPC_TASK_KILLED; | 704 | task->tk_flags |= RPC_TASK_KILLED; |
703 | rpc_exit(task, -ERESTARTSYS); | 705 | rpc_exit(task, -ERESTARTSYS); |
704 | rpc_wake_up_task(task); | 706 | rpc_wake_up_task(task); |
705 | } | 707 | } |
706 | rpc_set_running(task); | 708 | rpc_set_running(task); |
707 | dprintk("RPC: %4d sync task resuming\n", task->tk_pid); | 709 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); |
708 | } | 710 | } |
709 | 711 | ||
710 | dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status); | 712 | dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, |
713 | task->tk_status); | ||
711 | /* Release all resources associated with the task */ | 714 | /* Release all resources associated with the task */ |
712 | rpc_release_task(task); | 715 | rpc_release_task(task); |
713 | return status; | ||
714 | } | 716 | } |
715 | 717 | ||
716 | /* | 718 | /* |
@@ -722,12 +724,11 @@ static int __rpc_execute(struct rpc_task *task) | |||
722 | * released. In particular note that tk_release() will have | 724 | * released. In particular note that tk_release() will have |
723 | * been called, so your task memory may have been freed. | 725 | * been called, so your task memory may have been freed. |
724 | */ | 726 | */ |
725 | int | 727 | void rpc_execute(struct rpc_task *task) |
726 | rpc_execute(struct rpc_task *task) | ||
727 | { | 728 | { |
728 | rpc_set_active(task); | 729 | rpc_set_active(task); |
729 | rpc_set_running(task); | 730 | rpc_set_running(task); |
730 | return __rpc_execute(task); | 731 | __rpc_execute(task); |
731 | } | 732 | } |
732 | 733 | ||
733 | static void rpc_async_schedule(struct work_struct *work) | 734 | static void rpc_async_schedule(struct work_struct *work) |
@@ -826,7 +827,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons | |||
826 | /* starting timestamp */ | 827 | /* starting timestamp */ |
827 | task->tk_start = jiffies; | 828 | task->tk_start = jiffies; |
828 | 829 | ||
829 | dprintk("RPC: %4d new task procpid %d\n", task->tk_pid, | 830 | dprintk("RPC: new task initialized, procpid %u\n", |
830 | current->pid); | 831 | current->pid); |
831 | } | 832 | } |
832 | 833 | ||
@@ -839,7 +840,7 @@ rpc_alloc_task(void) | |||
839 | static void rpc_free_task(struct rcu_head *rcu) | 840 | static void rpc_free_task(struct rcu_head *rcu) |
840 | { | 841 | { |
841 | struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu); | 842 | struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu); |
842 | dprintk("RPC: %4d freeing task\n", task->tk_pid); | 843 | dprintk("RPC: %5u freeing task\n", task->tk_pid); |
843 | mempool_free(task, rpc_task_mempool); | 844 | mempool_free(task, rpc_task_mempool); |
844 | } | 845 | } |
845 | 846 | ||
@@ -858,7 +859,7 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc | |||
858 | 859 | ||
859 | rpc_init_task(task, clnt, flags, tk_ops, calldata); | 860 | rpc_init_task(task, clnt, flags, tk_ops, calldata); |
860 | 861 | ||
861 | dprintk("RPC: %4d allocated task\n", task->tk_pid); | 862 | dprintk("RPC: allocated task %p\n", task); |
862 | task->tk_flags |= RPC_TASK_DYNAMIC; | 863 | task->tk_flags |= RPC_TASK_DYNAMIC; |
863 | out: | 864 | out: |
864 | return task; | 865 | return task; |
@@ -902,7 +903,7 @@ static void rpc_release_task(struct rpc_task *task) | |||
902 | #ifdef RPC_DEBUG | 903 | #ifdef RPC_DEBUG |
903 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 904 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
904 | #endif | 905 | #endif |
905 | dprintk("RPC: %4d release task\n", task->tk_pid); | 906 | dprintk("RPC: %5u release task\n", task->tk_pid); |
906 | 907 | ||
907 | /* Remove from global task list */ | 908 | /* Remove from global task list */ |
908 | spin_lock(&rpc_sched_lock); | 909 | spin_lock(&rpc_sched_lock); |
@@ -955,7 +956,7 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) | |||
955 | struct rpc_task *rovr; | 956 | struct rpc_task *rovr; |
956 | struct list_head *le; | 957 | struct list_head *le; |
957 | 958 | ||
958 | dprintk("RPC: killing all tasks for client %p\n", clnt); | 959 | dprintk("RPC: killing all tasks for client %p\n", clnt); |
959 | 960 | ||
960 | /* | 961 | /* |
961 | * Spin lock all_tasks to prevent changes... | 962 | * Spin lock all_tasks to prevent changes... |
@@ -984,7 +985,8 @@ static void rpciod_killall(void) | |||
984 | rpc_killall_tasks(NULL); | 985 | rpc_killall_tasks(NULL); |
985 | flush_workqueue(rpciod_workqueue); | 986 | flush_workqueue(rpciod_workqueue); |
986 | if (!list_empty(&all_tasks)) { | 987 | if (!list_empty(&all_tasks)) { |
987 | dprintk("rpciod_killall: waiting for tasks to exit\n"); | 988 | dprintk("RPC: rpciod_killall: waiting for tasks " |
989 | "to exit\n"); | ||
988 | yield(); | 990 | yield(); |
989 | } | 991 | } |
990 | } | 992 | } |
@@ -1004,7 +1006,7 @@ rpciod_up(void) | |||
1004 | int error = 0; | 1006 | int error = 0; |
1005 | 1007 | ||
1006 | mutex_lock(&rpciod_mutex); | 1008 | mutex_lock(&rpciod_mutex); |
1007 | dprintk("rpciod_up: users %d\n", rpciod_users); | 1009 | dprintk("RPC: rpciod_up: users %u\n", rpciod_users); |
1008 | rpciod_users++; | 1010 | rpciod_users++; |
1009 | if (rpciod_workqueue) | 1011 | if (rpciod_workqueue) |
1010 | goto out; | 1012 | goto out; |
@@ -1012,7 +1014,7 @@ rpciod_up(void) | |||
1012 | * If there's no pid, we should be the first user. | 1014 | * If there's no pid, we should be the first user. |
1013 | */ | 1015 | */ |
1014 | if (rpciod_users > 1) | 1016 | if (rpciod_users > 1) |
1015 | printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users); | 1017 | printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); |
1016 | /* | 1018 | /* |
1017 | * Create the rpciod thread and wait for it to start. | 1019 | * Create the rpciod thread and wait for it to start. |
1018 | */ | 1020 | */ |
@@ -1034,7 +1036,7 @@ void | |||
1034 | rpciod_down(void) | 1036 | rpciod_down(void) |
1035 | { | 1037 | { |
1036 | mutex_lock(&rpciod_mutex); | 1038 | mutex_lock(&rpciod_mutex); |
1037 | dprintk("rpciod_down sema %d\n", rpciod_users); | 1039 | dprintk("RPC: rpciod_down sema %u\n", rpciod_users); |
1038 | if (rpciod_users) { | 1040 | if (rpciod_users) { |
1039 | if (--rpciod_users) | 1041 | if (--rpciod_users) |
1040 | goto out; | 1042 | goto out; |
@@ -1042,7 +1044,7 @@ rpciod_down(void) | |||
1042 | printk(KERN_WARNING "rpciod_down: no users??\n"); | 1044 | printk(KERN_WARNING "rpciod_down: no users??\n"); |
1043 | 1045 | ||
1044 | if (!rpciod_workqueue) { | 1046 | if (!rpciod_workqueue) { |
1045 | dprintk("rpciod_down: Nothing to do!\n"); | 1047 | dprintk("RPC: rpciod_down: Nothing to do!\n"); |
1046 | goto out; | 1048 | goto out; |
1047 | } | 1049 | } |
1048 | rpciod_killall(); | 1050 | rpciod_killall(); |
@@ -1072,7 +1074,7 @@ void rpc_show_tasks(void) | |||
1072 | if (RPC_IS_QUEUED(t)) | 1074 | if (RPC_IS_QUEUED(t)) |
1073 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | 1075 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); |
1074 | 1076 | ||
1075 | printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n", | 1077 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", |
1076 | t->tk_pid, | 1078 | t->tk_pid, |
1077 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | 1079 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), |
1078 | t->tk_flags, t->tk_status, | 1080 | t->tk_flags, t->tk_status, |
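
With __rpc_execute() and rpc_execute() now returning void, callers can no longer take a result from the return value; the authoritative outcome lives in task->tk_status, and a caller that wants to read it after execution must hold its own reference via tk_count. The synchronous path in the clnt.c hunk earlier in this diff shows the resulting pattern; restated here in simplified form:

        /* simplified restatement of the rpc_call_sync() flow shown above */
        rpc_call_setup(task, msg, 0);
        if (task->tk_status == 0) {
                atomic_inc(&task->tk_count);    /* keep the task alive past release */
                rpc_execute(task);              /* returns void now */
        }
        status = task->tk_status;               /* result read from the task itself */
        rpc_put_task(task);
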
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 044d9484bb8c..2878e20ebd04 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
@@ -226,7 +226,7 @@ do_register(const char *name, void *data, const struct file_operations *fops) | |||
226 | struct proc_dir_entry *ent; | 226 | struct proc_dir_entry *ent; |
227 | 227 | ||
228 | rpc_proc_init(); | 228 | rpc_proc_init(); |
229 | dprintk("RPC: registering /proc/net/rpc/%s\n", name); | 229 | dprintk("RPC: registering /proc/net/rpc/%s\n", name); |
230 | 230 | ||
231 | ent = create_proc_entry(name, 0, proc_net_rpc); | 231 | ent = create_proc_entry(name, 0, proc_net_rpc); |
232 | if (ent) { | 232 | if (ent) { |
@@ -263,7 +263,7 @@ svc_proc_unregister(const char *name) | |||
263 | void | 263 | void |
264 | rpc_proc_init(void) | 264 | rpc_proc_init(void) |
265 | { | 265 | { |
266 | dprintk("RPC: registering /proc/net/rpc\n"); | 266 | dprintk("RPC: registering /proc/net/rpc\n"); |
267 | if (!proc_net_rpc) { | 267 | if (!proc_net_rpc) { |
268 | struct proc_dir_entry *ent; | 268 | struct proc_dir_entry *ent; |
269 | ent = proc_mkdir("rpc", proc_net); | 269 | ent = proc_mkdir("rpc", proc_net); |
@@ -277,7 +277,7 @@ rpc_proc_init(void) | |||
277 | void | 277 | void |
278 | rpc_proc_exit(void) | 278 | rpc_proc_exit(void) |
279 | { | 279 | { |
280 | dprintk("RPC: unregistering /proc/net/rpc\n"); | 280 | dprintk("RPC: unregistering /proc/net/rpc\n"); |
281 | if (proc_net_rpc) { | 281 | if (proc_net_rpc) { |
282 | proc_net_rpc = NULL; | 282 | proc_net_rpc = NULL; |
283 | remove_proc_entry("net/rpc", NULL); | 283 | remove_proc_entry("net/rpc", NULL); |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index b00511d39b65..4ab137403e1a 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -317,7 +317,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, | |||
317 | for (i = 0; i < serv->sv_nrpools; i++) { | 317 | for (i = 0; i < serv->sv_nrpools; i++) { |
318 | struct svc_pool *pool = &serv->sv_pools[i]; | 318 | struct svc_pool *pool = &serv->sv_pools[i]; |
319 | 319 | ||
320 | dprintk("initialising pool %u for %s\n", | 320 | dprintk("svc: initialising pool %u for %s\n", |
321 | i, serv->sv_name); | 321 | i, serv->sv_name); |
322 | 322 | ||
323 | pool->sp_id = i; | 323 | pool->sp_id = i; |
@@ -368,7 +368,7 @@ svc_destroy(struct svc_serv *serv) | |||
368 | { | 368 | { |
369 | struct svc_sock *svsk; | 369 | struct svc_sock *svsk; |
370 | 370 | ||
371 | dprintk("RPC: svc_destroy(%s, %d)\n", | 371 | dprintk("svc: svc_destroy(%s, %d)\n", |
372 | serv->sv_program->pg_name, | 372 | serv->sv_program->pg_name, |
373 | serv->sv_nrthreads); | 373 | serv->sv_nrthreads); |
374 | 374 | ||
@@ -654,7 +654,7 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port) | |||
654 | if (progp->pg_vers[i] == NULL) | 654 | if (progp->pg_vers[i] == NULL) |
655 | continue; | 655 | continue; |
656 | 656 | ||
657 | dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n", | 657 | dprintk("svc: svc_register(%s, %s, %d, %d)%s\n", |
658 | progp->pg_name, | 658 | progp->pg_name, |
659 | proto == IPPROTO_UDP? "udp" : "tcp", | 659 | proto == IPPROTO_UDP? "udp" : "tcp", |
660 | port, | 660 | port, |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e7c71a1ea3d4..ee6ffa01dfb1 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -108,7 +108,7 @@ int xprt_reserve_xprt(struct rpc_task *task) | |||
108 | return 1; | 108 | return 1; |
109 | 109 | ||
110 | out_sleep: | 110 | out_sleep: |
111 | dprintk("RPC: %4d failed to lock transport %p\n", | 111 | dprintk("RPC: %5u failed to lock transport %p\n", |
112 | task->tk_pid, xprt); | 112 | task->tk_pid, xprt); |
113 | task->tk_timeout = 0; | 113 | task->tk_timeout = 0; |
114 | task->tk_status = -EAGAIN; | 114 | task->tk_status = -EAGAIN; |
@@ -158,7 +158,7 @@ int xprt_reserve_xprt_cong(struct rpc_task *task) | |||
158 | } | 158 | } |
159 | xprt_clear_locked(xprt); | 159 | xprt_clear_locked(xprt); |
160 | out_sleep: | 160 | out_sleep: |
161 | dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); | 161 | dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); |
162 | task->tk_timeout = 0; | 162 | task->tk_timeout = 0; |
163 | task->tk_status = -EAGAIN; | 163 | task->tk_status = -EAGAIN; |
164 | if (req && req->rq_ntrans) | 164 | if (req && req->rq_ntrans) |
@@ -281,7 +281,7 @@ __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task) | |||
281 | 281 | ||
282 | if (req->rq_cong) | 282 | if (req->rq_cong) |
283 | return 1; | 283 | return 1; |
284 | dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n", | 284 | dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n", |
285 | task->tk_pid, xprt->cong, xprt->cwnd); | 285 | task->tk_pid, xprt->cong, xprt->cwnd); |
286 | if (RPCXPRT_CONGESTED(xprt)) | 286 | if (RPCXPRT_CONGESTED(xprt)) |
287 | return 0; | 287 | return 0; |
@@ -340,7 +340,7 @@ void xprt_adjust_cwnd(struct rpc_task *task, int result) | |||
340 | if (cwnd < RPC_CWNDSCALE) | 340 | if (cwnd < RPC_CWNDSCALE) |
341 | cwnd = RPC_CWNDSCALE; | 341 | cwnd = RPC_CWNDSCALE; |
342 | } | 342 | } |
343 | dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", | 343 | dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", |
344 | xprt->cong, xprt->cwnd, cwnd); | 344 | xprt->cong, xprt->cwnd, cwnd); |
345 | xprt->cwnd = cwnd; | 345 | xprt->cwnd = cwnd; |
346 | __xprt_put_cong(xprt, req); | 346 | __xprt_put_cong(xprt, req); |
@@ -387,8 +387,8 @@ void xprt_write_space(struct rpc_xprt *xprt) | |||
387 | 387 | ||
388 | spin_lock_bh(&xprt->transport_lock); | 388 | spin_lock_bh(&xprt->transport_lock); |
389 | if (xprt->snd_task) { | 389 | if (xprt->snd_task) { |
390 | dprintk("RPC: write space: waking waiting task on xprt %p\n", | 390 | dprintk("RPC: write space: waking waiting task on " |
391 | xprt); | 391 | "xprt %p\n", xprt); |
392 | rpc_wake_up_task(xprt->snd_task); | 392 | rpc_wake_up_task(xprt->snd_task); |
393 | } | 393 | } |
394 | spin_unlock_bh(&xprt->transport_lock); | 394 | spin_unlock_bh(&xprt->transport_lock); |
@@ -494,7 +494,7 @@ static void xprt_autoclose(struct work_struct *work) | |||
494 | */ | 494 | */ |
495 | void xprt_disconnect(struct rpc_xprt *xprt) | 495 | void xprt_disconnect(struct rpc_xprt *xprt) |
496 | { | 496 | { |
497 | dprintk("RPC: disconnected transport %p\n", xprt); | 497 | dprintk("RPC: disconnected transport %p\n", xprt); |
498 | spin_lock_bh(&xprt->transport_lock); | 498 | spin_lock_bh(&xprt->transport_lock); |
499 | xprt_clear_connected(xprt); | 499 | xprt_clear_connected(xprt); |
500 | xprt_wake_pending_tasks(xprt, -ENOTCONN); | 500 | xprt_wake_pending_tasks(xprt, -ENOTCONN); |
@@ -530,7 +530,7 @@ void xprt_connect(struct rpc_task *task) | |||
530 | { | 530 | { |
531 | struct rpc_xprt *xprt = task->tk_xprt; | 531 | struct rpc_xprt *xprt = task->tk_xprt; |
532 | 532 | ||
533 | dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid, | 533 | dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, |
534 | xprt, (xprt_connected(xprt) ? "is" : "is not")); | 534 | xprt, (xprt_connected(xprt) ? "is" : "is not")); |
535 | 535 | ||
536 | if (!xprt_bound(xprt)) { | 536 | if (!xprt_bound(xprt)) { |
@@ -560,7 +560,7 @@ static void xprt_connect_status(struct rpc_task *task) | |||
560 | if (task->tk_status >= 0) { | 560 | if (task->tk_status >= 0) { |
561 | xprt->stat.connect_count++; | 561 | xprt->stat.connect_count++; |
562 | xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; | 562 | xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; |
563 | dprintk("RPC: %4d xprt_connect_status: connection established\n", | 563 | dprintk("RPC: %5u xprt_connect_status: connection established\n", |
564 | task->tk_pid); | 564 | task->tk_pid); |
565 | return; | 565 | return; |
566 | } | 566 | } |
@@ -568,20 +568,22 @@ static void xprt_connect_status(struct rpc_task *task) | |||
568 | switch (task->tk_status) { | 568 | switch (task->tk_status) { |
569 | case -ECONNREFUSED: | 569 | case -ECONNREFUSED: |
570 | case -ECONNRESET: | 570 | case -ECONNRESET: |
571 | dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n", | 571 | dprintk("RPC: %5u xprt_connect_status: server %s refused " |
572 | task->tk_pid, task->tk_client->cl_server); | 572 | "connection\n", task->tk_pid, |
573 | task->tk_client->cl_server); | ||
573 | break; | 574 | break; |
574 | case -ENOTCONN: | 575 | case -ENOTCONN: |
575 | dprintk("RPC: %4d xprt_connect_status: connection broken\n", | 576 | dprintk("RPC: %5u xprt_connect_status: connection broken\n", |
576 | task->tk_pid); | 577 | task->tk_pid); |
577 | break; | 578 | break; |
578 | case -ETIMEDOUT: | 579 | case -ETIMEDOUT: |
579 | dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n", | 580 | dprintk("RPC: %5u xprt_connect_status: connect attempt timed " |
580 | task->tk_pid); | 581 | "out\n", task->tk_pid); |
581 | break; | 582 | break; |
582 | default: | 583 | default: |
583 | dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n", | 584 | dprintk("RPC: %5u xprt_connect_status: error %d connecting to " |
584 | task->tk_pid, -task->tk_status, task->tk_client->cl_server); | 585 | "server %s\n", task->tk_pid, -task->tk_status, |
586 | task->tk_client->cl_server); | ||
585 | xprt_release_write(xprt, task); | 587 | xprt_release_write(xprt, task); |
586 | task->tk_status = -EIO; | 588 | task->tk_status = -EIO; |
587 | } | 589 | } |
@@ -602,6 +604,9 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) | |||
602 | if (entry->rq_xid == xid) | 604 | if (entry->rq_xid == xid) |
603 | return entry; | 605 | return entry; |
604 | } | 606 | } |
607 | |||
608 | dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", | ||
609 | ntohl(xid)); | ||
605 | xprt->stat.bad_xids++; | 610 | xprt->stat.bad_xids++; |
606 | return NULL; | 611 | return NULL; |
607 | } | 612 | } |
@@ -654,7 +659,7 @@ static void xprt_timer(struct rpc_task *task) | |||
654 | struct rpc_rqst *req = task->tk_rqstp; | 659 | struct rpc_rqst *req = task->tk_rqstp; |
655 | struct rpc_xprt *xprt = req->rq_xprt; | 660 | struct rpc_xprt *xprt = req->rq_xprt; |
656 | 661 | ||
657 | dprintk("RPC: %4d xprt_timer\n", task->tk_pid); | 662 | dprintk("RPC: %5u xprt_timer\n", task->tk_pid); |
658 | 663 | ||
659 | spin_lock(&xprt->transport_lock); | 664 | spin_lock(&xprt->transport_lock); |
660 | if (!req->rq_received) { | 665 | if (!req->rq_received) { |
@@ -678,7 +683,7 @@ int xprt_prepare_transmit(struct rpc_task *task) | |||
678 | struct rpc_xprt *xprt = req->rq_xprt; | 683 | struct rpc_xprt *xprt = req->rq_xprt; |
679 | int err = 0; | 684 | int err = 0; |
680 | 685 | ||
681 | dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid); | 686 | dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid); |
682 | 687 | ||
683 | spin_lock_bh(&xprt->transport_lock); | 688 | spin_lock_bh(&xprt->transport_lock); |
684 | if (req->rq_received && !req->rq_bytes_sent) { | 689 | if (req->rq_received && !req->rq_bytes_sent) { |
@@ -716,7 +721,7 @@ void xprt_transmit(struct rpc_task *task) | |||
716 | struct rpc_xprt *xprt = req->rq_xprt; | 721 | struct rpc_xprt *xprt = req->rq_xprt; |
717 | int status; | 722 | int status; |
718 | 723 | ||
719 | dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); | 724 | dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); |
720 | 725 | ||
721 | if (!req->rq_received) { | 726 | if (!req->rq_received) { |
722 | if (list_empty(&req->rq_list)) { | 727 | if (list_empty(&req->rq_list)) { |
@@ -730,13 +735,23 @@ void xprt_transmit(struct rpc_task *task) | |||
730 | xprt_reset_majortimeo(req); | 735 | xprt_reset_majortimeo(req); |
731 | /* Turn off autodisconnect */ | 736 | /* Turn off autodisconnect */ |
732 | del_singleshot_timer_sync(&xprt->timer); | 737 | del_singleshot_timer_sync(&xprt->timer); |
738 | } else { | ||
739 | /* If all request bytes have been sent, | ||
740 | * then we must be retransmitting this one */ | ||
741 | if (!req->rq_bytes_sent) { | ||
742 | if (task->tk_client->cl_discrtry) { | ||
743 | xprt_disconnect(xprt); | ||
744 | task->tk_status = -ENOTCONN; | ||
745 | return; | ||
746 | } | ||
747 | } | ||
733 | } | 748 | } |
734 | } else if (!req->rq_bytes_sent) | 749 | } else if (!req->rq_bytes_sent) |
735 | return; | 750 | return; |
736 | 751 | ||
737 | status = xprt->ops->send_request(task); | 752 | status = xprt->ops->send_request(task); |
738 | if (status == 0) { | 753 | if (status == 0) { |
739 | dprintk("RPC: %4d xmit complete\n", task->tk_pid); | 754 | dprintk("RPC: %5u xmit complete\n", task->tk_pid); |
740 | spin_lock_bh(&xprt->transport_lock); | 755 | spin_lock_bh(&xprt->transport_lock); |
741 | 756 | ||
742 | xprt->ops->set_retrans_timeout(task); | 757 | xprt->ops->set_retrans_timeout(task); |
@@ -777,7 +792,7 @@ static inline void do_xprt_reserve(struct rpc_task *task) | |||
777 | xprt_request_init(task, xprt); | 792 | xprt_request_init(task, xprt); |
778 | return; | 793 | return; |
779 | } | 794 | } |
780 | dprintk("RPC: waiting for request slot\n"); | 795 | dprintk("RPC: waiting for request slot\n"); |
781 | task->tk_status = -EAGAIN; | 796 | task->tk_status = -EAGAIN; |
782 | task->tk_timeout = 0; | 797 | task->tk_timeout = 0; |
783 | rpc_sleep_on(&xprt->backlog, task, NULL, NULL); | 798 | rpc_sleep_on(&xprt->backlog, task, NULL, NULL); |
@@ -822,7 +837,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | |||
822 | req->rq_xid = xprt_alloc_xid(xprt); | 837 | req->rq_xid = xprt_alloc_xid(xprt); |
823 | req->rq_release_snd_buf = NULL; | 838 | req->rq_release_snd_buf = NULL; |
824 | xprt_reset_majortimeo(req); | 839 | xprt_reset_majortimeo(req); |
825 | dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, | 840 | dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, |
826 | req, ntohl(req->rq_xid)); | 841 | req, ntohl(req->rq_xid)); |
827 | } | 842 | } |
828 | 843 | ||
@@ -856,7 +871,7 @@ void xprt_release(struct rpc_task *task) | |||
856 | req->rq_release_snd_buf(req); | 871 | req->rq_release_snd_buf(req); |
857 | memset(req, 0, sizeof(*req)); /* mark unused */ | 872 | memset(req, 0, sizeof(*req)); /* mark unused */ |
858 | 873 | ||
859 | dprintk("RPC: %4d release request %p\n", task->tk_pid, req); | 874 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); |
860 | 875 | ||
861 | spin_lock(&xprt->reserve_lock); | 876 | spin_lock(&xprt->reserve_lock); |
862 | list_add(&req->rq_list, &xprt->free); | 877 | list_add(&req->rq_list, &xprt->free); |
@@ -906,7 +921,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si | |||
906 | return ERR_PTR(-EIO); | 921 | return ERR_PTR(-EIO); |
907 | } | 922 | } |
908 | if (IS_ERR(xprt)) { | 923 | if (IS_ERR(xprt)) { |
909 | dprintk("RPC: xprt_create_transport: failed, %ld\n", | 924 | dprintk("RPC: xprt_create_transport: failed, %ld\n", |
910 | -PTR_ERR(xprt)); | 925 | -PTR_ERR(xprt)); |
911 | return xprt; | 926 | return xprt; |
912 | } | 927 | } |
@@ -936,7 +951,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si | |||
936 | 951 | ||
937 | xprt_init_xid(xprt); | 952 | xprt_init_xid(xprt); |
938 | 953 | ||
939 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 954 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
940 | xprt->max_reqs); | 955 | xprt->max_reqs); |
941 | 956 | ||
942 | return xprt; | 957 | return xprt; |
@@ -951,7 +966,7 @@ static void xprt_destroy(struct kref *kref) | |||
951 | { | 966 | { |
952 | struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref); | 967 | struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref); |
953 | 968 | ||
954 | dprintk("RPC: destroying transport %p\n", xprt); | 969 | dprintk("RPC: destroying transport %p\n", xprt); |
955 | xprt->shutdown = 1; | 970 | xprt->shutdown = 1; |
956 | del_timer_sync(&xprt->timer); | 971 | del_timer_sync(&xprt->timer); |
957 | 972 | ||
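
Beyond the dprintk alignment, xprt.c gains two behavioural tweaks: xprt_lookup_rqst() now logs the XID when no matching request is found, and xprt_transmit() learns to force a reconnect before retransmitting when the client asked for disconnect-on-retry. The new branch, flattened into one condition for readability (cl_discrtry itself is set elsewhere in this series):

        /* a request already sitting on the receive list with rq_bytes_sent
         * reset to zero has been transmitted in full once, so this call is
         * a retransmission */
        if (!req->rq_received && !list_empty(&req->rq_list) &&
            req->rq_bytes_sent == 0 && task->tk_client->cl_discrtry) {
                xprt_disconnect(xprt);          /* drop the stale connection */
                task->tk_status = -ENOTCONN;    /* caller reconnects and retries */
                return;
        }
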
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 49cabffd7fdb..64736b3a59a7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -192,7 +192,7 @@ static void xs_pktdump(char *msg, u32 *packet, unsigned int count) | |||
192 | u8 *buf = (u8 *) packet; | 192 | u8 *buf = (u8 *) packet; |
193 | int j; | 193 | int j; |
194 | 194 | ||
195 | dprintk("RPC: %s\n", msg); | 195 | dprintk("RPC: %s\n", msg); |
196 | for (j = 0; j < count && j < 128; j += 4) { | 196 | for (j = 0; j < count && j < 128; j += 4) { |
197 | if (!(j & 31)) { | 197 | if (!(j & 31)) { |
198 | if (j) | 198 | if (j) |
@@ -418,7 +418,7 @@ static void xs_nospace(struct rpc_task *task) | |||
418 | struct rpc_xprt *xprt = req->rq_xprt; | 418 | struct rpc_xprt *xprt = req->rq_xprt; |
419 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 419 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
420 | 420 | ||
421 | dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", | 421 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", |
422 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | 422 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, |
423 | req->rq_slen); | 423 | req->rq_slen); |
424 | 424 | ||
@@ -467,7 +467,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
467 | xprt->addrlen, xdr, | 467 | xprt->addrlen, xdr, |
468 | req->rq_bytes_sent); | 468 | req->rq_bytes_sent); |
469 | 469 | ||
470 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", | 470 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", |
471 | xdr->len - req->rq_bytes_sent, status); | 471 | xdr->len - req->rq_bytes_sent, status); |
472 | 472 | ||
473 | if (likely(status >= (int) req->rq_slen)) | 473 | if (likely(status >= (int) req->rq_slen)) |
@@ -488,7 +488,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
488 | xs_nospace(task); | 488 | xs_nospace(task); |
489 | break; | 489 | break; |
490 | default: | 490 | default: |
491 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | 491 | dprintk("RPC: sendmsg returned unrecognized error %d\n", |
492 | -status); | 492 | -status); |
493 | break; | 493 | break; |
494 | } | 494 | } |
@@ -539,7 +539,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
539 | status = xs_sendpages(transport->sock, | 539 | status = xs_sendpages(transport->sock, |
540 | NULL, 0, xdr, req->rq_bytes_sent); | 540 | NULL, 0, xdr, req->rq_bytes_sent); |
541 | 541 | ||
542 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", | 542 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", |
543 | xdr->len - req->rq_bytes_sent, status); | 543 | xdr->len - req->rq_bytes_sent, status); |
544 | 544 | ||
545 | if (unlikely(status < 0)) | 545 | if (unlikely(status < 0)) |
@@ -570,7 +570,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
570 | status = -ENOTCONN; | 570 | status = -ENOTCONN; |
571 | break; | 571 | break; |
572 | default: | 572 | default: |
573 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | 573 | dprintk("RPC: sendmsg returned unrecognized error %d\n", |
574 | -status); | 574 | -status); |
575 | xprt_disconnect(xprt); | 575 | xprt_disconnect(xprt); |
576 | break; | 576 | break; |
@@ -622,7 +622,7 @@ static void xs_close(struct rpc_xprt *xprt) | |||
622 | if (!sk) | 622 | if (!sk) |
623 | goto clear_close_wait; | 623 | goto clear_close_wait; |
624 | 624 | ||
625 | dprintk("RPC: xs_close xprt %p\n", xprt); | 625 | dprintk("RPC: xs_close xprt %p\n", xprt); |
626 | 626 | ||
627 | write_lock_bh(&sk->sk_callback_lock); | 627 | write_lock_bh(&sk->sk_callback_lock); |
628 | transport->inet = NULL; | 628 | transport->inet = NULL; |
@@ -652,7 +652,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
652 | { | 652 | { |
653 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 653 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
654 | 654 | ||
655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
656 | 656 | ||
657 | cancel_delayed_work(&transport->connect_worker); | 657 | cancel_delayed_work(&transport->connect_worker); |
658 | flush_scheduled_work(); | 658 | flush_scheduled_work(); |
@@ -686,7 +686,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
686 | __be32 *xp; | 686 | __be32 *xp; |
687 | 687 | ||
688 | read_lock(&sk->sk_callback_lock); | 688 | read_lock(&sk->sk_callback_lock); |
689 | dprintk("RPC: xs_udp_data_ready...\n"); | 689 | dprintk("RPC: xs_udp_data_ready...\n"); |
690 | if (!(xprt = xprt_from_sock(sk))) | 690 | if (!(xprt = xprt_from_sock(sk))) |
691 | goto out; | 691 | goto out; |
692 | 692 | ||
@@ -698,7 +698,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
698 | 698 | ||
699 | repsize = skb->len - sizeof(struct udphdr); | 699 | repsize = skb->len - sizeof(struct udphdr); |
700 | if (repsize < 4) { | 700 | if (repsize < 4) { |
701 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); | 701 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); |
702 | goto dropit; | 702 | goto dropit; |
703 | } | 703 | } |
704 | 704 | ||
@@ -762,11 +762,11 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
762 | 762 | ||
763 | /* Sanity check of the record length */ | 763 | /* Sanity check of the record length */ |
764 | if (unlikely(transport->tcp_reclen < 4)) { | 764 | if (unlikely(transport->tcp_reclen < 4)) { |
765 | dprintk("RPC: invalid TCP record fragment length\n"); | 765 | dprintk("RPC: invalid TCP record fragment length\n"); |
766 | xprt_disconnect(xprt); | 766 | xprt_disconnect(xprt); |
767 | return; | 767 | return; |
768 | } | 768 | } |
769 | dprintk("RPC: reading TCP record fragment of length %d\n", | 769 | dprintk("RPC: reading TCP record fragment of length %d\n", |
770 | transport->tcp_reclen); | 770 | transport->tcp_reclen); |
771 | } | 771 | } |
772 | 772 | ||
@@ -789,7 +789,7 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r | |||
789 | char *p; | 789 | char *p; |
790 | 790 | ||
791 | len = sizeof(transport->tcp_xid) - transport->tcp_offset; | 791 | len = sizeof(transport->tcp_xid) - transport->tcp_offset; |
792 | dprintk("RPC: reading XID (%Zu bytes)\n", len); | 792 | dprintk("RPC: reading XID (%Zu bytes)\n", len); |
793 | p = ((char *) &transport->tcp_xid) + transport->tcp_offset; | 793 | p = ((char *) &transport->tcp_xid) + transport->tcp_offset; |
794 | used = xdr_skb_read_bits(desc, p, len); | 794 | used = xdr_skb_read_bits(desc, p, len); |
795 | transport->tcp_offset += used; | 795 | transport->tcp_offset += used; |
@@ -798,7 +798,7 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r | |||
798 | transport->tcp_flags &= ~TCP_RCV_COPY_XID; | 798 | transport->tcp_flags &= ~TCP_RCV_COPY_XID; |
799 | transport->tcp_flags |= TCP_RCV_COPY_DATA; | 799 | transport->tcp_flags |= TCP_RCV_COPY_DATA; |
800 | transport->tcp_copied = 4; | 800 | transport->tcp_copied = 4; |
801 | dprintk("RPC: reading reply for XID %08x\n", | 801 | dprintk("RPC: reading reply for XID %08x\n", |
802 | ntohl(transport->tcp_xid)); | 802 | ntohl(transport->tcp_xid)); |
803 | xs_tcp_check_fraghdr(transport); | 803 | xs_tcp_check_fraghdr(transport); |
804 | } | 804 | } |
@@ -816,7 +816,7 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
816 | req = xprt_lookup_rqst(xprt, transport->tcp_xid); | 816 | req = xprt_lookup_rqst(xprt, transport->tcp_xid); |
817 | if (!req) { | 817 | if (!req) { |
818 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | 818 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
819 | dprintk("RPC: XID %08x request not found!\n", | 819 | dprintk("RPC: XID %08x request not found!\n", |
820 | ntohl(transport->tcp_xid)); | 820 | ntohl(transport->tcp_xid)); |
821 | spin_unlock(&xprt->transport_lock); | 821 | spin_unlock(&xprt->transport_lock); |
822 | return; | 822 | return; |
@@ -853,19 +853,20 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
853 | * be discarded. | 853 | * be discarded. |
854 | */ | 854 | */ |
855 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | 855 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
856 | dprintk("RPC: XID %08x truncated request\n", | 856 | dprintk("RPC: XID %08x truncated request\n", |
857 | ntohl(transport->tcp_xid)); | 857 | ntohl(transport->tcp_xid)); |
858 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 858 | dprintk("RPC: xprt = %p, tcp_copied = %lu, " |
859 | xprt, transport->tcp_copied, transport->tcp_offset, | 859 | "tcp_offset = %u, tcp_reclen = %u\n", |
860 | transport->tcp_reclen); | 860 | xprt, transport->tcp_copied, |
861 | transport->tcp_offset, transport->tcp_reclen); | ||
861 | goto out; | 862 | goto out; |
862 | } | 863 | } |
863 | 864 | ||
864 | dprintk("RPC: XID %08x read %Zd bytes\n", | 865 | dprintk("RPC: XID %08x read %Zd bytes\n", |
865 | ntohl(transport->tcp_xid), r); | 866 | ntohl(transport->tcp_xid), r); |
866 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 867 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, " |
867 | xprt, transport->tcp_copied, transport->tcp_offset, | 868 | "tcp_reclen = %u\n", xprt, transport->tcp_copied, |
868 | transport->tcp_reclen); | 869 | transport->tcp_offset, transport->tcp_reclen); |
869 | 870 | ||
870 | if (transport->tcp_copied == req->rq_private_buf.buflen) | 871 | if (transport->tcp_copied == req->rq_private_buf.buflen) |
871 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | 872 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
@@ -891,7 +892,7 @@ static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_s | |||
891 | desc->count -= len; | 892 | desc->count -= len; |
892 | desc->offset += len; | 893 | desc->offset += len; |
893 | transport->tcp_offset += len; | 894 | transport->tcp_offset += len; |
894 | dprintk("RPC: discarded %Zu bytes\n", len); | 895 | dprintk("RPC: discarded %Zu bytes\n", len); |
895 | xs_tcp_check_fraghdr(transport); | 896 | xs_tcp_check_fraghdr(transport); |
896 | } | 897 | } |
897 | 898 | ||
@@ -905,7 +906,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns | |||
905 | .count = len, | 906 | .count = len, |
906 | }; | 907 | }; |
907 | 908 | ||
908 | dprintk("RPC: xs_tcp_data_recv started\n"); | 909 | dprintk("RPC: xs_tcp_data_recv started\n"); |
909 | do { | 910 | do { |
910 | /* Read in a new fragment marker if necessary */ | 911 | /* Read in a new fragment marker if necessary */ |
911 | /* Can we ever really expect to get completely empty fragments? */ | 912 | /* Can we ever really expect to get completely empty fragments? */ |
@@ -926,7 +927,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns | |||
926 | /* Skip over any trailing bytes on short reads */ | 927 | /* Skip over any trailing bytes on short reads */ |
927 | xs_tcp_read_discard(transport, &desc); | 928 | xs_tcp_read_discard(transport, &desc); |
928 | } while (desc.count); | 929 | } while (desc.count); |
929 | dprintk("RPC: xs_tcp_data_recv done\n"); | 930 | dprintk("RPC: xs_tcp_data_recv done\n"); |
930 | return len - desc.count; | 931 | return len - desc.count; |
931 | } | 932 | } |
932 | 933 | ||
@@ -941,8 +942,9 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes) | |||
941 | struct rpc_xprt *xprt; | 942 | struct rpc_xprt *xprt; |
942 | read_descriptor_t rd_desc; | 943 | read_descriptor_t rd_desc; |
943 | 944 | ||
945 | dprintk("RPC: xs_tcp_data_ready...\n"); | ||
946 | |||
944 | read_lock(&sk->sk_callback_lock); | 947 | read_lock(&sk->sk_callback_lock); |
945 | dprintk("RPC: xs_tcp_data_ready...\n"); | ||
946 | if (!(xprt = xprt_from_sock(sk))) | 948 | if (!(xprt = xprt_from_sock(sk))) |
947 | goto out; | 949 | goto out; |
948 | if (xprt->shutdown) | 950 | if (xprt->shutdown) |
@@ -968,11 +970,11 @@ static void xs_tcp_state_change(struct sock *sk) | |||
968 | read_lock(&sk->sk_callback_lock); | 970 | read_lock(&sk->sk_callback_lock); |
969 | if (!(xprt = xprt_from_sock(sk))) | 971 | if (!(xprt = xprt_from_sock(sk))) |
970 | goto out; | 972 | goto out; |
971 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); | 973 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); |
972 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", | 974 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", |
973 | sk->sk_state, xprt_connected(xprt), | 975 | sk->sk_state, xprt_connected(xprt), |
974 | sock_flag(sk, SOCK_DEAD), | 976 | sock_flag(sk, SOCK_DEAD), |
975 | sock_flag(sk, SOCK_ZAPPED)); | 977 | sock_flag(sk, SOCK_ZAPPED)); |
976 | 978 | ||
977 | switch (sk->sk_state) { | 979 | switch (sk->sk_state) { |
978 | case TCP_ESTABLISHED: | 980 | case TCP_ESTABLISHED: |
@@ -1140,7 +1142,7 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
1140 | { | 1142 | { |
1141 | struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr; | 1143 | struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr; |
1142 | 1144 | ||
1143 | dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); | 1145 | dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); |
1144 | 1146 | ||
1145 | sap->sin_port = htons(port); | 1147 | sap->sin_port = htons(port); |
1146 | } | 1148 | } |
@@ -1159,7 +1161,7 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | |||
1159 | sizeof(myaddr)); | 1161 | sizeof(myaddr)); |
1160 | if (err == 0) { | 1162 | if (err == 0) { |
1161 | transport->port = port; | 1163 | transport->port = port; |
1162 | dprintk("RPC: xs_bindresvport bound to port %u\n", | 1164 | dprintk("RPC: xs_bindresvport bound to port %u\n", |
1163 | port); | 1165 | port); |
1164 | return 0; | 1166 | return 0; |
1165 | } | 1167 | } |
@@ -1169,7 +1171,7 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | |||
1169 | port--; | 1171 | port--; |
1170 | } while (err == -EADDRINUSE && port != transport->port); | 1172 | } while (err == -EADDRINUSE && port != transport->port); |
1171 | 1173 | ||
1172 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); | 1174 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); |
1173 | return err; | 1175 | return err; |
1174 | } | 1176 | } |
1175 | 1177 | ||
@@ -1223,7 +1225,7 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1223 | xs_close(xprt); | 1225 | xs_close(xprt); |
1224 | 1226 | ||
1225 | if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { | 1227 | if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { |
1226 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | 1228 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); |
1227 | goto out; | 1229 | goto out; |
1228 | } | 1230 | } |
1229 | xs_reclassify_socket(sock); | 1231 | xs_reclassify_socket(sock); |
@@ -1233,7 +1235,7 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1233 | goto out; | 1235 | goto out; |
1234 | } | 1236 | } |
1235 | 1237 | ||
1236 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1238 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1237 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); | 1239 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1238 | 1240 | ||
1239 | if (!transport->inet) { | 1241 | if (!transport->inet) { |
@@ -1275,7 +1277,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1275 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 1277 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1276 | struct sockaddr any; | 1278 | struct sockaddr any; |
1277 | 1279 | ||
1278 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); | 1280 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); |
1279 | 1281 | ||
1280 | /* | 1282 | /* |
1281 | * Disconnect the transport socket by doing a connect operation | 1283 | * Disconnect the transport socket by doing a connect operation |
@@ -1285,7 +1287,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1285 | any.sa_family = AF_UNSPEC; | 1287 | any.sa_family = AF_UNSPEC; |
1286 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); | 1288 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); |
1287 | if (result) | 1289 | if (result) |
1288 | dprintk("RPC: AF_UNSPEC connect return code %d\n", | 1290 | dprintk("RPC: AF_UNSPEC connect return code %d\n", |
1289 | result); | 1291 | result); |
1290 | } | 1292 | } |
1291 | 1293 | ||
@@ -1309,7 +1311,8 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1309 | if (!sock) { | 1311 | if (!sock) { |
1310 | /* start from scratch */ | 1312 | /* start from scratch */ |
1311 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { | 1313 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { |
1312 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); | 1314 | dprintk("RPC: can't create TCP transport " |
1315 | "socket (%d).\n", -err); | ||
1313 | goto out; | 1316 | goto out; |
1314 | } | 1317 | } |
1315 | xs_reclassify_socket(sock); | 1318 | xs_reclassify_socket(sock); |
@@ -1322,7 +1325,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1322 | /* "close" the socket, preserving the local port */ | 1325 | /* "close" the socket, preserving the local port */ |
1323 | xs_tcp_reuse_connection(xprt); | 1326 | xs_tcp_reuse_connection(xprt); |
1324 | 1327 | ||
1325 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1328 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1326 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); | 1329 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1327 | 1330 | ||
1328 | if (!transport->inet) { | 1331 | if (!transport->inet) { |
@@ -1359,8 +1362,9 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1359 | xprt->stat.connect_start = jiffies; | 1362 | xprt->stat.connect_start = jiffies; |
1360 | status = kernel_connect(sock, (struct sockaddr *) &xprt->addr, | 1363 | status = kernel_connect(sock, (struct sockaddr *) &xprt->addr, |
1361 | xprt->addrlen, O_NONBLOCK); | 1364 | xprt->addrlen, O_NONBLOCK); |
1362 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", | 1365 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", |
1363 | xprt, -status, xprt_connected(xprt), sock->sk->sk_state); | 1366 | xprt, -status, xprt_connected(xprt), |
1367 | sock->sk->sk_state); | ||
1364 | if (status < 0) { | 1368 | if (status < 0) { |
1365 | switch (status) { | 1369 | switch (status) { |
1366 | case -EINPROGRESS: | 1370 | case -EINPROGRESS: |
@@ -1404,7 +1408,8 @@ static void xs_connect(struct rpc_task *task) | |||
1404 | return; | 1408 | return; |
1405 | 1409 | ||
1406 | if (transport->sock != NULL) { | 1410 | if (transport->sock != NULL) { |
1407 | dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", | 1411 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
1412 | "seconds\n", | ||
1408 | xprt, xprt->reestablish_timeout / HZ); | 1413 | xprt, xprt->reestablish_timeout / HZ); |
1409 | schedule_delayed_work(&transport->connect_worker, | 1414 | schedule_delayed_work(&transport->connect_worker, |
1410 | xprt->reestablish_timeout); | 1415 | xprt->reestablish_timeout); |
@@ -1412,7 +1417,7 @@ static void xs_connect(struct rpc_task *task) | |||
1412 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | 1417 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) |
1413 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1418 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1414 | } else { | 1419 | } else { |
1415 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1420 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1416 | schedule_delayed_work(&transport->connect_worker, 0); | 1421 | schedule_delayed_work(&transport->connect_worker, 0); |
1417 | 1422 | ||
1418 | /* flush_scheduled_work can sleep... */ | 1423 | /* flush_scheduled_work can sleep... */ |
@@ -1507,13 +1512,14 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
1507 | struct sock_xprt *new; | 1512 | struct sock_xprt *new; |
1508 | 1513 | ||
1509 | if (addrlen > sizeof(xprt->addr)) { | 1514 | if (addrlen > sizeof(xprt->addr)) { |
1510 | dprintk("RPC: xs_setup_xprt: address too large\n"); | 1515 | dprintk("RPC: xs_setup_xprt: address too large\n"); |
1511 | return ERR_PTR(-EBADF); | 1516 | return ERR_PTR(-EBADF); |
1512 | } | 1517 | } |
1513 | 1518 | ||
1514 | new = kzalloc(sizeof(*new), GFP_KERNEL); | 1519 | new = kzalloc(sizeof(*new), GFP_KERNEL); |
1515 | if (new == NULL) { | 1520 | if (new == NULL) { |
1516 | dprintk("RPC: xs_setup_xprt: couldn't allocate rpc_xprt\n"); | 1521 | dprintk("RPC: xs_setup_xprt: couldn't allocate " |
1522 | "rpc_xprt\n"); | ||
1517 | return ERR_PTR(-ENOMEM); | 1523 | return ERR_PTR(-ENOMEM); |
1518 | } | 1524 | } |
1519 | xprt = &new->xprt; | 1525 | xprt = &new->xprt; |
@@ -1522,7 +1528,8 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
1522 | xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); | 1528 | xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); |
1523 | if (xprt->slot == NULL) { | 1529 | if (xprt->slot == NULL) { |
1524 | kfree(xprt); | 1530 | kfree(xprt); |
1525 | dprintk("RPC: xs_setup_xprt: couldn't allocate slot table\n"); | 1531 | dprintk("RPC: xs_setup_xprt: couldn't allocate slot " |
1532 | "table\n"); | ||
1526 | return ERR_PTR(-ENOMEM); | 1533 | return ERR_PTR(-ENOMEM); |
1527 | } | 1534 | } |
1528 | 1535 | ||
@@ -1572,7 +1579,7 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1572 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); | 1579 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); |
1573 | 1580 | ||
1574 | xs_format_peer_addresses(xprt); | 1581 | xs_format_peer_addresses(xprt); |
1575 | dprintk("RPC: set up transport to address %s\n", | 1582 | dprintk("RPC: set up transport to address %s\n", |
1576 | xprt->address_strings[RPC_DISPLAY_ALL]); | 1583 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1577 | 1584 | ||
1578 | return xprt; | 1585 | return xprt; |
@@ -1616,7 +1623,7 @@ struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1616 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); | 1623 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); |
1617 | 1624 | ||
1618 | xs_format_peer_addresses(xprt); | 1625 | xs_format_peer_addresses(xprt); |
1619 | dprintk("RPC: set up transport to address %s\n", | 1626 | dprintk("RPC: set up transport to address %s\n", |
1620 | xprt->address_strings[RPC_DISPLAY_ALL]); | 1627 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1621 | 1628 | ||
1622 | return xprt; | 1629 | return xprt; |