author     Linus Torvalds <torvalds@g5.osdl.org>   2006-09-23 19:58:40 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-09-23 19:58:40 -0400
commit     9f261e011340bcd22c1dd48b465153bd78caa8c8
tree       b1c266ea746a0e8591e6af781aef22854e652ff9   /net
parent     a4c12d6c5dde48c69464baf7c703e425ee511433
parent     026ed5c9185dcc4b2df92e98c3d61a01cea19cbf
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (74 commits)
NFS: unmark NFS direct I/O as experimental
NFS: add comments clarifying the use of nfs_post_op_update()
NFSv4: rpc_mkpipe creating socket inodes w/out sk buffers
NFS: Use SEEK_END instead of hardcoded value
NFSv4: When mounting with a port=0 argument, substitute port=2049
NFSv4: Poll more aggressively when handling NFS4ERR_DELAY
NFSv4: Handle the condition NFS4ERR_FILE_OPEN
NFSv4: Retry lease recovery if it failed during a synchronous operation.
NFS: Don't invalidate the symlink we just stuffed into the cache
NFS: Make read() return an ESTALE if the file has been deleted
NFSv4: It's perfectly legal for clp to be NULL here....
NFS: nfs_lookup - don't hash dentry when optimising away the lookup
SUNRPC: Fix Oops in pmap_getport_done
SUNRPC: Add refcounting to the struct rpc_xprt
SUNRPC: Clean up soft task error handling
SUNRPC: Handle ENETUNREACH, EHOSTUNREACH and EHOSTDOWN socket errors
SUNRPC: rpc_delay() should not clobber the rpc_task->tk_status
Fix a referral error Oops
NFS: NFS_ROOT should use the new rpc_create API
NFS: Fix up compiler warnings on 64-bit platforms in client.c
...
Manually resolved conflict in net/sunrpc/xprtsock.c
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c    7
-rw-r--r--  net/sunrpc/clnt.c               194
-rw-r--r--  net/sunrpc/pmap_clnt.c          266
-rw-r--r--  net/sunrpc/rpc_pipe.c            46
-rw-r--r--  net/sunrpc/sched.c               99
-rw-r--r--  net/sunrpc/sunrpc_syms.c          3
-rw-r--r--  net/sunrpc/timer.c                2
-rw-r--r--  net/sunrpc/xprt.c                86
-rw-r--r--  net/sunrpc/xprtsock.c           108
9 files changed, 460 insertions, 351 deletions
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index ef1cf5b476c8..6eed3e166ba3 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -88,7 +88,6 @@ struct gss_auth {
 	struct list_head upcalls;
 	struct rpc_clnt *client;
 	struct dentry *dentry;
-	char path[48];
 	spinlock_t lock;
 };
 
@@ -690,10 +689,8 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
 	if (err)
 		goto err_put_mech;
 
-	snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s",
-			clnt->cl_pathname,
-			gss_auth->mech->gm_name);
-	gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
+	gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
+			clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
 	if (IS_ERR(gss_auth->dentry)) {
 		err = PTR_ERR(gss_auth->dentry);
 		goto err_put_mech;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 3e19d321067a..084a0ad5c64e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -97,17 +97,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
 	}
 }
 
-/*
- * Create an RPC client
- * FIXME: This should also take a flags argument (as in task->tk_flags).
- * It's called (among others) from pmap_create_client, which may in
- * turn be called by an async task. In this case, rpciod should not be
- * made to sleep too long.
- */
-struct rpc_clnt *
-rpc_new_client(struct rpc_xprt *xprt, char *servname,
-		struct rpc_program *program, u32 vers,
-		rpc_authflavor_t flavor)
+static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *program, u32 vers, rpc_authflavor_t flavor)
 {
 	struct rpc_version *version;
 	struct rpc_clnt *clnt = NULL;
@@ -147,16 +137,12 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
 	clnt->cl_procinfo = version->procs;
 	clnt->cl_maxproc = version->nrprocs;
 	clnt->cl_protname = program->name;
-	clnt->cl_pmap = &clnt->cl_pmap_default;
-	clnt->cl_port = xprt->addr.sin_port;
 	clnt->cl_prog = program->number;
 	clnt->cl_vers = version->number;
-	clnt->cl_prot = xprt->prot;
 	clnt->cl_stats = program->stats;
 	clnt->cl_metrics = rpc_alloc_iostats(clnt);
-	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");
 
-	if (!clnt->cl_port)
+	if (!xprt_bound(clnt->cl_xprt))
 		clnt->cl_autobind = 1;
 
 	clnt->cl_rtt = &clnt->cl_rtt_default;
@@ -191,40 +177,71 @@ out_no_path:
 	kfree(clnt->cl_server);
 	kfree(clnt);
 out_err:
-	xprt_destroy(xprt);
+	xprt_put(xprt);
 out_no_xprt:
 	return ERR_PTR(err);
 }
 
-/**
- * Create an RPC client
- * @xprt - pointer to xprt struct
- * @servname - name of server
- * @info - rpc_program
- * @version - rpc_program version
- * @authflavor - rpc_auth flavour to use
+/*
+ * rpc_create - create an RPC client and transport with one call
+ * @args: rpc_clnt create argument structure
  *
- * Creates an RPC client structure, then pings the server in order to
- * determine if it is up, and if it supports this program and version.
+ * Creates and initializes an RPC transport and an RPC client.
  *
- * This function should never be called by asynchronous tasks such as
- * the portmapper.
+ * It can ping the server in order to determine if it is up, and to see if
+ * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
+ * this behavior so asynchronous tasks can also use rpc_create.
  */
-struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
-	struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
+struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 {
+	struct rpc_xprt *xprt;
 	struct rpc_clnt *clnt;
-	int err;
-
-	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
+
+	xprt = xprt_create_transport(args->protocol, args->address,
+					args->addrsize, args->timeout);
+	if (IS_ERR(xprt))
+		return (struct rpc_clnt *)xprt;
+
+	/*
+	 * By default, kernel RPC client connects from a reserved port.
+	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
+	 * but it is always enabled for rpciod, which handles the connect
+	 * operation.
+	 */
+	xprt->resvport = 1;
+	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
+		xprt->resvport = 0;
+
+	dprintk("RPC: creating %s client for %s (xprt %p)\n",
+			args->program->name, args->servername, xprt);
+
+	clnt = rpc_new_client(xprt, args->servername, args->program,
+			args->version, args->authflavor);
 	if (IS_ERR(clnt))
 		return clnt;
-	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
-	if (err == 0)
-		return clnt;
-	rpc_shutdown_client(clnt);
-	return ERR_PTR(err);
+
+	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
+		int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+		if (err != 0) {
+			rpc_shutdown_client(clnt);
+			return ERR_PTR(err);
+		}
+	}
+
+	clnt->cl_softrtry = 1;
+	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
+		clnt->cl_softrtry = 0;
+
+	if (args->flags & RPC_CLNT_CREATE_INTR)
+		clnt->cl_intr = 1;
+	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
+		clnt->cl_autobind = 1;
+	if (args->flags & RPC_CLNT_CREATE_ONESHOT)
+		clnt->cl_oneshot = 1;
+
+	return clnt;
 }
+EXPORT_SYMBOL_GPL(rpc_create);
 
 /*
  * This function clones the RPC client structure. It allows us to share the
@@ -244,8 +261,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
 	atomic_set(&new->cl_users, 0);
 	new->cl_parent = clnt;
 	atomic_inc(&clnt->cl_count);
-	/* Duplicate portmapper */
-	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
+	new->cl_xprt = xprt_get(clnt->cl_xprt);
 	/* Turn off autobind on clones */
 	new->cl_autobind = 0;
 	new->cl_oneshot = 0;
@@ -255,8 +271,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
 	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
 	if (new->cl_auth)
 		atomic_inc(&new->cl_auth->au_count);
-	new->cl_pmap = &new->cl_pmap_default;
-	new->cl_metrics = rpc_alloc_iostats(clnt);
+	new->cl_metrics = rpc_alloc_iostats(clnt);
 	return new;
 out_no_clnt:
 	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
@@ -323,15 +338,12 @@ rpc_destroy_client(struct rpc_clnt *clnt)
 		rpc_rmdir(clnt->cl_dentry);
 		rpc_put_mount();
 	}
-	if (clnt->cl_xprt) {
-		xprt_destroy(clnt->cl_xprt);
-		clnt->cl_xprt = NULL;
-	}
 	if (clnt->cl_server != clnt->cl_inline_name)
 		kfree(clnt->cl_server);
 out_free:
 	rpc_free_iostats(clnt->cl_metrics);
 	clnt->cl_metrics = NULL;
+	xprt_put(clnt->cl_xprt);
 	kfree(clnt);
 	return 0;
 }
@@ -540,6 +552,40 @@ rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
 		task->tk_action = rpc_exit_task;
 }
 
+/**
+ * rpc_peeraddr - extract remote peer address from clnt's xprt
+ * @clnt: RPC client structure
+ * @buf: target buffer
+ * @size: length of target buffer
+ *
+ * Returns the number of bytes that are actually in the stored address.
+ */
+size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
+{
+	size_t bytes;
+	struct rpc_xprt *xprt = clnt->cl_xprt;
+
+	bytes = sizeof(xprt->addr);
+	if (bytes > bufsize)
+		bytes = bufsize;
+	memcpy(buf, &clnt->cl_xprt->addr, bytes);
+	return xprt->addrlen;
+}
+EXPORT_SYMBOL_GPL(rpc_peeraddr);
+
+/**
+ * rpc_peeraddr2str - return remote peer address in printable format
+ * @clnt: RPC client structure
+ * @format: address format
+ *
+ */
+char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
+{
+	struct rpc_xprt *xprt = clnt->cl_xprt;
+	return xprt->ops->print_addr(xprt, format);
+}
+EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
+
 void
 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
 {
@@ -560,7 +606,7 @@ size_t rpc_max_payload(struct rpc_clnt *clnt)
 {
 	return clnt->cl_xprt->max_payload;
 }
-EXPORT_SYMBOL(rpc_max_payload);
+EXPORT_SYMBOL_GPL(rpc_max_payload);
 
 /**
  * rpc_force_rebind - force transport to check that remote port is unchanged
@@ -570,9 +616,9 @@ EXPORT_SYMBOL(rpc_max_payload);
 void rpc_force_rebind(struct rpc_clnt *clnt)
 {
 	if (clnt->cl_autobind)
-		clnt->cl_port = 0;
+		xprt_clear_bound(clnt->cl_xprt);
 }
-EXPORT_SYMBOL(rpc_force_rebind);
+EXPORT_SYMBOL_GPL(rpc_force_rebind);
 
 /*
  * Restart an (async) RPC call. Usually called from within the
@@ -781,16 +827,16 @@ call_encode(struct rpc_task *task)
 static void
 call_bind(struct rpc_task *task)
 {
-	struct rpc_clnt *clnt = task->tk_client;
+	struct rpc_xprt *xprt = task->tk_xprt;
 
 	dprintk("RPC: %4d call_bind (status %d)\n",
 			task->tk_pid, task->tk_status);
 
 	task->tk_action = call_connect;
-	if (!clnt->cl_port) {
+	if (!xprt_bound(xprt)) {
 		task->tk_action = call_bind_status;
-		task->tk_timeout = task->tk_xprt->bind_timeout;
-		rpc_getport(task, clnt);
+		task->tk_timeout = xprt->bind_timeout;
+		xprt->ops->rpcbind(task);
 	}
 }
 
@@ -815,15 +861,11 @@ call_bind_status(struct rpc_task *task)
 		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
 				task->tk_pid);
 		rpc_delay(task, 3*HZ);
-		goto retry_bind;
+		goto retry_timeout;
 	case -ETIMEDOUT:
 		dprintk("RPC: %4d rpcbind request timed out\n",
 				task->tk_pid);
-		if (RPC_IS_SOFT(task)) {
-			status = -EIO;
-			break;
-		}
-		goto retry_bind;
+		goto retry_timeout;
 	case -EPFNOSUPPORT:
 		dprintk("RPC: %4d remote rpcbind service unavailable\n",
 				task->tk_pid);
@@ -836,16 +878,13 @@ call_bind_status(struct rpc_task *task)
 		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
 				task->tk_pid, -task->tk_status);
 		status = -EIO;
-		break;
 	}
 
 	rpc_exit(task, status);
 	return;
 
-retry_bind:
-	task->tk_status = 0;
-	task->tk_action = call_bind;
-	return;
+retry_timeout:
+	task->tk_action = call_timeout;
 }
 
 /*
@@ -893,14 +932,16 @@ call_connect_status(struct rpc_task *task)
 
 	switch (status) {
 	case -ENOTCONN:
-	case -ETIMEDOUT:
 	case -EAGAIN:
 		task->tk_action = call_bind;
-		break;
-	default:
-		rpc_exit(task, -EIO);
-		break;
+		if (!RPC_IS_SOFT(task))
+			return;
+		/* if soft mounted, test if we've timed out */
+	case -ETIMEDOUT:
+		task->tk_action = call_timeout;
+		return;
 	}
+	rpc_exit(task, -EIO);
 }
 
 /*
@@ -982,6 +1023,14 @@ call_status(struct rpc_task *task)
 
 	task->tk_status = 0;
 	switch(status) {
+	case -EHOSTDOWN:
+	case -EHOSTUNREACH:
+	case -ENETUNREACH:
+		/*
+		 * Delay any retries for 3 seconds, then handle as if it
+		 * were a timeout.
+		 */
+		rpc_delay(task, 3*HZ);
 	case -ETIMEDOUT:
 		task->tk_action = call_timeout;
 		break;
@@ -1001,7 +1050,6 @@ call_status(struct rpc_task *task)
 		printk("%s: RPC call returned error %d\n",
 				clnt->cl_protname, -status);
 		rpc_exit(task, status);
-		break;
 	}
 }
 
@@ -1069,10 +1117,10 @@ call_decode(struct rpc_task *task)
 			clnt->cl_stats->rpcretrans++;
 			goto out_retry;
 		}
-		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
+		dprintk("%s: too small RPC reply size (%d bytes)\n",
 			clnt->cl_protname, task->tk_status);
-		rpc_exit(task, -EIO);
-		return;
+		task->tk_action = call_timeout;
+		goto out_retry;
 	}
 
 	/*
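[Editor's note, not part of the commit: the clnt.c hunks above replace rpc_create_client() with the consolidated rpc_create() entry point. A minimal caller sketch follows, using only the rpc_create_args fields and flags visible in this diff; the server address, program structure, and version number are hypothetical placeholders.]

	struct rpc_create_args args = {
		.protocol	= IPPROTO_TCP,
		.address	= (struct sockaddr *) &server_addr,	/* hypothetical sockaddr_in */
		.addrsize	= sizeof(server_addr),
		.servername	= "server.example.org",		/* hypothetical server name */
		.program	= &my_rpc_program,		/* hypothetical rpc_program */
		.version	= 3,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,	/* skip the ping; safe from async context */
	};
	struct rpc_clnt *clnt;

	/* One call now creates both the transport and the client. */
	clnt = rpc_create(&args);
	if (IS_ERR(clnt))
		return PTR_ERR(clnt);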
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 623180f224c9..c04609d3476a 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -1,7 +1,9 @@
 /*
- * linux/net/sunrpc/pmap.c
+ * linux/net/sunrpc/pmap_clnt.c
  *
- * Portmapper client.
+ * In-kernel RPC portmapper client.
+ *
+ * Portmapper supports version 2 of the rpcbind protocol (RFC 1833).
  *
  * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  */
@@ -13,7 +15,6 @@
 #include <linux/uio.h>
 #include <linux/in.h>
 #include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/xprt.h>
 #include <linux/sunrpc/sched.h>
 
 #ifdef RPC_DEBUG
@@ -24,80 +25,141 @@
 #define PMAP_UNSET		2
 #define PMAP_GETPORT		3
 
+struct portmap_args {
+	u32			pm_prog;
+	u32			pm_vers;
+	u32			pm_prot;
+	unsigned short		pm_port;
+	struct rpc_xprt *	pm_xprt;
+};
+
 static struct rpc_procinfo	pmap_procedures[];
 static struct rpc_clnt *	pmap_create(char *, struct sockaddr_in *, int, int);
-static void			pmap_getport_done(struct rpc_task *);
+static void			pmap_getport_done(struct rpc_task *, void *);
 static struct rpc_program	pmap_program;
-static DEFINE_SPINLOCK(pmap_lock);
 
-/*
- * Obtain the port for a given RPC service on a given host. This one can
- * be called for an ongoing RPC request.
- */
-void
-rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
+static void pmap_getport_prepare(struct rpc_task *task, void *calldata)
 {
-	struct rpc_portmap *map = clnt->cl_pmap;
-	struct sockaddr_in *sap = &clnt->cl_xprt->addr;
+	struct portmap_args *map = calldata;
 	struct rpc_message msg = {
 		.rpc_proc	= &pmap_procedures[PMAP_GETPORT],
 		.rpc_argp	= map,
-		.rpc_resp	= &clnt->cl_port,
-		.rpc_cred	= NULL
+		.rpc_resp	= &map->pm_port,
 	};
+
+	rpc_call_setup(task, &msg, 0);
+}
+
+static inline struct portmap_args *pmap_map_alloc(void)
+{
+	return kmalloc(sizeof(struct portmap_args), GFP_NOFS);
+}
+
+static inline void pmap_map_free(struct portmap_args *map)
+{
+	kfree(map);
+}
+
+static void pmap_map_release(void *data)
+{
+	pmap_map_free(data);
+}
+
+static const struct rpc_call_ops pmap_getport_ops = {
+	.rpc_call_prepare	= pmap_getport_prepare,
+	.rpc_call_done		= pmap_getport_done,
+	.rpc_release		= pmap_map_release,
+};
+
+static inline void pmap_wake_portmap_waiters(struct rpc_xprt *xprt, int status)
+{
+	xprt_clear_binding(xprt);
+	rpc_wake_up_status(&xprt->binding, status);
+}
+
+/**
+ * rpc_getport - obtain the port for a given RPC service on a given host
+ * @task: task that is waiting for portmapper request
+ *
+ * This one can be called for an ongoing RPC request, and can be used in
+ * an async (rpciod) context.
+ */
+void rpc_getport(struct rpc_task *task)
+{
+	struct rpc_clnt *clnt = task->tk_client;
+	struct rpc_xprt *xprt = task->tk_xprt;
+	struct sockaddr_in addr;
+	struct portmap_args *map;
 	struct rpc_clnt	*pmap_clnt;
 	struct rpc_task	*child;
+	int status;
 
-	dprintk("RPC: %4d rpc_getport(%s, %d, %d, %d)\n",
+	dprintk("RPC: %4d rpc_getport(%s, %u, %u, %d)\n",
 			task->tk_pid, clnt->cl_server,
-			map->pm_prog, map->pm_vers, map->pm_prot);
+			clnt->cl_prog, clnt->cl_vers, xprt->prot);
 
 	/* Autobind on cloned rpc clients is discouraged */
 	BUG_ON(clnt->cl_parent != clnt);
 
-	spin_lock(&pmap_lock);
-	if (map->pm_binding) {
-		rpc_sleep_on(&map->pm_bindwait, task, NULL, NULL);
-		spin_unlock(&pmap_lock);
+	if (xprt_test_and_set_binding(xprt)) {
+		task->tk_status = -EACCES;	/* tell caller to check again */
+		rpc_sleep_on(&xprt->binding, task, NULL, NULL);
 		return;
 	}
-	map->pm_binding = 1;
-	spin_unlock(&pmap_lock);
 
-	pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0);
-	if (IS_ERR(pmap_clnt)) {
-		task->tk_status = PTR_ERR(pmap_clnt);
+	/* Someone else may have bound if we slept */
+	status = 0;
+	if (xprt_bound(xprt))
+		goto bailout_nofree;
+
+	status = -ENOMEM;
+	map = pmap_map_alloc();
+	if (!map)
+		goto bailout_nofree;
+	map->pm_prog = clnt->cl_prog;
+	map->pm_vers = clnt->cl_vers;
+	map->pm_prot = xprt->prot;
+	map->pm_port = 0;
+	map->pm_xprt = xprt_get(xprt);
+
+	rpc_peeraddr(clnt, (struct sockaddr *) &addr, sizeof(addr));
+	pmap_clnt = pmap_create(clnt->cl_server, &addr, map->pm_prot, 0);
+	status = PTR_ERR(pmap_clnt);
+	if (IS_ERR(pmap_clnt))
 		goto bailout;
-	}
-	task->tk_status = 0;
 
-	/*
-	 * Note: rpc_new_child will release client after a failure.
-	 */
-	if (!(child = rpc_new_child(pmap_clnt, task)))
+	status = -EIO;
+	child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map);
+	if (IS_ERR(child))
 		goto bailout;
+	rpc_release_task(child);
 
-	/* Setup the call info struct */
-	rpc_call_setup(child, &msg, 0);
+	rpc_sleep_on(&xprt->binding, task, NULL, NULL);
 
-	/* ... and run the child task */
 	task->tk_xprt->stat.bind_count++;
-	rpc_run_child(task, child, pmap_getport_done);
 	return;
 
 bailout:
-	spin_lock(&pmap_lock);
-	map->pm_binding = 0;
-	rpc_wake_up(&map->pm_bindwait);
-	spin_unlock(&pmap_lock);
-	rpc_exit(task, -EIO);
+	pmap_map_free(map);
+	xprt_put(xprt);
+bailout_nofree:
+	task->tk_status = status;
+	pmap_wake_portmap_waiters(xprt, status);
 }
 
 #ifdef CONFIG_ROOT_NFS
-int
-rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
+/**
+ * rpc_getport_external - obtain the port for a given RPC service on a given host
+ * @sin: address of remote peer
+ * @prog: RPC program number to bind
+ * @vers: RPC version number to bind
+ * @prot: transport protocol to use to make this request
+ *
+ * This one is called from outside the RPC client in a synchronous task context.
+ */
+int rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
 {
-	struct rpc_portmap map = {
+	struct portmap_args map = {
 		.pm_prog	= prog,
 		.pm_vers	= vers,
 		.pm_prot	= prot,
@@ -112,7 +174,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
 	char		hostname[32];
 	int		status;
 
-	dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %d, %d, %d)\n",
+	dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n",
 			NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
 
 	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
@@ -132,45 +194,53 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
 }
 #endif
 
-static void
-pmap_getport_done(struct rpc_task *task)
+/*
+ * Portmapper child task invokes this callback via tk_exit.
+ */
+static void pmap_getport_done(struct rpc_task *child, void *data)
 {
-	struct rpc_clnt *clnt = task->tk_client;
-	struct rpc_xprt *xprt = task->tk_xprt;
-	struct rpc_portmap *map = clnt->cl_pmap;
+	struct portmap_args *map = data;
+	struct rpc_xprt *xprt = map->pm_xprt;
+	int status = child->tk_status;
 
-	dprintk("RPC: %4d pmap_getport_done(status %d, port %d)\n",
-			task->tk_pid, task->tk_status, clnt->cl_port);
-
-	xprt->ops->set_port(xprt, 0);
-	if (task->tk_status < 0) {
-		/* Make the calling task exit with an error */
-		task->tk_action = rpc_exit_task;
-	} else if (clnt->cl_port == 0) {
-		/* Program not registered */
-		rpc_exit(task, -EACCES);
+	if (status < 0) {
+		/* Portmapper not available */
+		xprt->ops->set_port(xprt, 0);
+	} else if (map->pm_port == 0) {
+		/* Requested RPC service wasn't registered */
+		xprt->ops->set_port(xprt, 0);
+		status = -EACCES;
 	} else {
-		xprt->ops->set_port(xprt, clnt->cl_port);
-		clnt->cl_port = htons(clnt->cl_port);
+		/* Succeeded */
+		xprt->ops->set_port(xprt, map->pm_port);
+		xprt_set_bound(xprt);
+		status = 0;
 	}
-	spin_lock(&pmap_lock);
-	map->pm_binding = 0;
-	rpc_wake_up(&map->pm_bindwait);
-	spin_unlock(&pmap_lock);
+
+	dprintk("RPC: %4d pmap_getport_done(status %d, port %u)\n",
+			child->tk_pid, status, map->pm_port);
+
+	pmap_wake_portmap_waiters(xprt, status);
+	xprt_put(xprt);
 }
 
-/*
- * Set or unset a port registration with the local portmapper.
+/**
+ * rpc_register - set or unset a port registration with the local portmapper
+ * @prog: RPC program number to bind
+ * @vers: RPC version number to bind
+ * @prot: transport protocol to use to make this request
+ * @port: port value to register
+ * @okay: result code
+ *
  * port == 0 means unregister, port != 0 means register.
  */
-int
-rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
+int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
 {
 	struct sockaddr_in	sin = {
 		.sin_family	= AF_INET,
 		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 	};
-	struct rpc_portmap	map = {
+	struct portmap_args	map = {
 		.pm_prog	= prog,
 		.pm_vers	= vers,
 		.pm_prot	= prot,
@@ -184,7 +254,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
 	struct rpc_clnt		*pmap_clnt;
 	int			error = 0;
 
-	dprintk("RPC: registering (%d, %d, %d, %d) with portmapper.\n",
+	dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n",
 			prog, vers, prot, port);
 
 	pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
@@ -207,38 +277,32 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
 	return error;
 }
 
-static struct rpc_clnt *
-pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
+static struct rpc_clnt *pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
 {
-	struct rpc_xprt	*xprt;
-	struct rpc_clnt	*clnt;
-
-	/* printk("pmap: create xprt\n"); */
-	xprt = xprt_create_proto(proto, srvaddr, NULL);
-	if (IS_ERR(xprt))
-		return (struct rpc_clnt *)xprt;
-	xprt->ops->set_port(xprt, RPC_PMAP_PORT);
+	struct rpc_create_args args = {
+		.protocol	= proto,
+		.address	= (struct sockaddr *)srvaddr,
+		.addrsize	= sizeof(*srvaddr),
+		.servername	= hostname,
+		.program	= &pmap_program,
+		.version	= RPC_PMAP_VERSION,
+		.authflavor	= RPC_AUTH_UNIX,
+		.flags		= (RPC_CLNT_CREATE_ONESHOT |
+				   RPC_CLNT_CREATE_NOPING),
+	};
+
+	srvaddr->sin_port = htons(RPC_PMAP_PORT);
 	if (!privileged)
-		xprt->resvport = 0;
-
-	/* printk("pmap: create clnt\n"); */
-	clnt = rpc_new_client(xprt, hostname,
-				&pmap_program, RPC_PMAP_VERSION,
-				RPC_AUTH_UNIX);
-	if (!IS_ERR(clnt)) {
-		clnt->cl_softrtry = 1;
-		clnt->cl_oneshot  = 1;
-	}
-	return clnt;
+		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+	return rpc_create(&args);
 }
 
 /*
  * XDR encode/decode functions for PMAP
  */
-static int
-xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct rpc_portmap *map)
+static int xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct portmap_args *map)
 {
-	dprintk("RPC: xdr_encode_mapping(%d, %d, %d, %d)\n",
+	dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n",
 			map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port);
 	*p++ = htonl(map->pm_prog);
 	*p++ = htonl(map->pm_vers);
@@ -249,15 +313,13 @@ xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct rpc_portmap *map)
 	return 0;
 }
 
-static int
-xdr_decode_port(struct rpc_rqst *req, u32 *p, unsigned short *portp)
+static int xdr_decode_port(struct rpc_rqst *req, u32 *p, unsigned short *portp)
 {
 	*portp = (unsigned short) ntohl(*p++);
 	return 0;
 }
 
-static int
-xdr_decode_bool(struct rpc_rqst *req, u32 *p, unsigned int *boolp)
+static int xdr_decode_bool(struct rpc_rqst *req, u32 *p, unsigned int *boolp)
 {
 	*boolp = (unsigned int) ntohl(*p++);
 	return 0;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0b1a1ac8a4bc..dfa504fe383f 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -327,10 +327,8 @@ rpc_show_info(struct seq_file *m, void *v)
 	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
 	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
 			clnt->cl_prog, clnt->cl_vers);
-	seq_printf(m, "address: %u.%u.%u.%u\n",
-			NIPQUAD(clnt->cl_xprt->addr.sin_addr.s_addr));
-	seq_printf(m, "protocol: %s\n",
-			clnt->cl_xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
+	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
+	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
 	return 0;
 }
 
@@ -623,17 +621,13 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
 }
 
 static struct dentry *
-rpc_lookup_negative(char *path, struct nameidata *nd)
+rpc_lookup_create(struct dentry *parent, const char *name, int len)
 {
+	struct inode *dir = parent->d_inode;
 	struct dentry *dentry;
-	struct inode *dir;
-	int error;
 
-	if ((error = rpc_lookup_parent(path, nd)) != 0)
-		return ERR_PTR(error);
-	dir = nd->dentry->d_inode;
 	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
-	dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
+	dentry = lookup_one_len(name, parent, len);
 	if (IS_ERR(dentry))
 		goto out_err;
 	if (dentry->d_inode) {
@@ -644,7 +638,20 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
 	return dentry;
 out_err:
 	mutex_unlock(&dir->i_mutex);
-	rpc_release_path(nd);
+	return dentry;
+}
+
+static struct dentry *
+rpc_lookup_negative(char *path, struct nameidata *nd)
+{
+	struct dentry *dentry;
+	int error;
+
+	if ((error = rpc_lookup_parent(path, nd)) != 0)
+		return ERR_PTR(error);
+	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len);
+	if (IS_ERR(dentry))
+		rpc_release_path(nd);
 	return dentry;
 }
 
@@ -703,18 +710,17 @@ rpc_rmdir(struct dentry *dentry)
 }
 
 struct dentry *
-rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
+rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
 {
-	struct nameidata nd;
 	struct dentry *dentry;
 	struct inode *dir, *inode;
 	struct rpc_inode *rpci;
 
-	dentry = rpc_lookup_negative(path, &nd);
+	dentry = rpc_lookup_create(parent, name, strlen(name));
 	if (IS_ERR(dentry))
 		return dentry;
-	dir = nd.dentry->d_inode;
-	inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR);
+	dir = parent->d_inode;
+	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
 	if (!inode)
 		goto err_dput;
 	inode->i_ino = iunique(dir->i_sb, 100);
@@ -728,13 +734,13 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
 	dget(dentry);
 out:
 	mutex_unlock(&dir->i_mutex);
-	rpc_release_path(&nd);
 	return dentry;
 err_dput:
 	dput(dentry);
 	dentry = ERR_PTR(-ENOMEM);
-	printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n",
-			__FILE__, __FUNCTION__, path, -ENOMEM);
+	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
+			__FILE__, __FUNCTION__, parent->d_name.name, name,
+			-ENOMEM);
 	goto out;
 }
 
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5c3eee768504..6390461a9756 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -21,7 +21,6 @@
 #include <linux/mutex.h>
 
 #include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/xprt.h>
 
 #ifdef RPC_DEBUG
 #define RPCDBG_FACILITY		RPCDBG_SCHED
@@ -45,12 +44,6 @@ static void rpciod_killall(void);
 static void rpc_async_schedule(void *);
 
 /*
- * RPC tasks that create another task (e.g. for contacting the portmapper)
- * will wait on this queue for their child's completion
- */
-static RPC_WAITQ(childq, "childq");
-
-/*
  * RPC tasks sit here while waiting for conditions to improve.
  */
 static RPC_WAITQ(delay_queue, "delayq");
@@ -324,16 +317,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 }
 
 /*
- * Place a newly initialized task on the workqueue.
- */
-static inline void
-rpc_schedule_run(struct rpc_task *task)
-{
-	rpc_set_active(task);
-	rpc_make_runnable(task);
-}
-
-/*
  * Prepare for sleeping on a wait queue.
  * By always appending tasks to the list we ensure FIFO behavior.
  * NB: An RPC task will only receive interrupt-driven events as long
@@ -559,24 +542,20 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 	spin_unlock_bh(&queue->lock);
 }
 
+static void __rpc_atrun(struct rpc_task *task)
+{
+	rpc_wake_up_task(task);
+}
+
 /*
  * Run a task at a later time
  */
-static void __rpc_atrun(struct rpc_task *);
-void
-rpc_delay(struct rpc_task *task, unsigned long delay)
+void rpc_delay(struct rpc_task *task, unsigned long delay)
 {
 	task->tk_timeout = delay;
 	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
 }
 
-static void
-__rpc_atrun(struct rpc_task *task)
-{
-	task->tk_status = 0;
-	rpc_wake_up_task(task);
-}
-
 /*
  * Helper to call task->tk_ops->rpc_call_prepare
  */
@@ -933,72 +912,6 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 }
 EXPORT_SYMBOL(rpc_run_task);
 
-/**
- * rpc_find_parent - find the parent of a child task.
- * @child: child task
- * @parent: parent task
- *
- * Checks that the parent task is still sleeping on the
- * queue 'childq'. If so returns a pointer to the parent.
- * Upon failure returns NULL.
- *
- * Caller must hold childq.lock
- */
-static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
-{
-	struct rpc_task	*task;
-	struct list_head *le;
-
-	task_for_each(task, le, &childq.tasks[0])
-		if (task == parent)
-			return parent;
-
-	return NULL;
-}
-
-static void rpc_child_exit(struct rpc_task *child, void *calldata)
-{
-	struct rpc_task	*parent;
-
-	spin_lock_bh(&childq.lock);
-	if ((parent = rpc_find_parent(child, calldata)) != NULL) {
-		parent->tk_status = child->tk_status;
-		__rpc_wake_up_task(parent);
-	}
-	spin_unlock_bh(&childq.lock);
-}
-
-static const struct rpc_call_ops rpc_child_ops = {
-	.rpc_call_done = rpc_child_exit,
-};
-
-/*
- * Note: rpc_new_task releases the client after a failure.
- */
-struct rpc_task *
-rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
-{
-	struct rpc_task	*task;
-
-	task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
-	if (!task)
-		goto fail;
-	return task;
-
-fail:
-	parent->tk_status = -ENOMEM;
-	return NULL;
-}
-
-void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
-{
-	spin_lock_bh(&childq.lock);
-	/* N.B. Is it possible for the child to have already finished? */
-	__rpc_sleep_on(&childq, task, func, NULL);
-	rpc_schedule_run(child);
-	spin_unlock_bh(&childq.lock);
-}
-
 /*
  * Kill all tasks for the given client.
  * XXX: kill their descendants as well?
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index f38f939ce95f..26c0531d7e25 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -36,8 +36,6 @@ EXPORT_SYMBOL(rpc_wake_up_status);
 EXPORT_SYMBOL(rpc_release_task);
 
 /* RPC client functions */
-EXPORT_SYMBOL(rpc_create_client);
-EXPORT_SYMBOL(rpc_new_client);
 EXPORT_SYMBOL(rpc_clone_client);
 EXPORT_SYMBOL(rpc_bind_new_program);
 EXPORT_SYMBOL(rpc_destroy_client);
@@ -57,7 +55,6 @@ EXPORT_SYMBOL(rpc_queue_upcall);
 EXPORT_SYMBOL(rpc_mkpipe);
 
 /* Client transport */
-EXPORT_SYMBOL(xprt_create_proto);
 EXPORT_SYMBOL(xprt_set_timeout);
 
 /* Client credential cache */
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index bcbdf6430d5c..8142fdb8a930 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -19,8 +19,6 @@
 #include <linux/unistd.h>
 
 #include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/xprt.h>
-#include <linux/sunrpc/timer.h>
 
 #define RPC_RTO_MAX (60*HZ)
 #define RPC_RTO_INIT (HZ/5)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e8c2bc4977f3..1f786f68729d 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -534,7 +534,7 @@ void xprt_connect(struct rpc_task *task)
 	dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
 			xprt, (xprt_connected(xprt) ? "is" : "is not"));
 
-	if (!xprt->addr.sin_port) {
+	if (!xprt_bound(xprt)) {
 		task->tk_status = -EIO;
 		return;
 	}
@@ -585,13 +585,6 @@ static void xprt_connect_status(struct rpc_task *task)
 				task->tk_pid, -task->tk_status, task->tk_client->cl_server);
 		xprt_release_write(xprt, task);
 		task->tk_status = -EIO;
-		return;
-	}
-
-	/* if soft mounted, just cause this RPC to fail */
-	if (RPC_IS_SOFT(task)) {
-		xprt_release_write(xprt, task);
-		task->tk_status = -EIO;
 	}
 }
 
@@ -829,6 +822,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
 	req->rq_bufsize = 0;
 	req->rq_xid = xprt_alloc_xid(xprt);
 	req->rq_release_snd_buf = NULL;
+	xprt_reset_majortimeo(req);
 	dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
 			req, ntohl(req->rq_xid));
 }
@@ -887,16 +881,32 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i
 	to->to_exponential = 0;
 }
 
-static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
+/**
+ * xprt_create_transport - create an RPC transport
+ * @proto: requested transport protocol
+ * @ap: remote peer address
+ * @size: length of address
+ * @to: timeout parameters
+ *
+ */
+struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to)
 {
 	int result;
 	struct rpc_xprt	*xprt;
 	struct rpc_rqst	*req;
 
-	if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
+	if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) {
+		dprintk("RPC: xprt_create_transport: no memory\n");
 		return ERR_PTR(-ENOMEM);
-
-	xprt->addr = *ap;
+	}
+	if (size <= sizeof(xprt->addr)) {
+		memcpy(&xprt->addr, ap, size);
+		xprt->addrlen = size;
+	} else {
+		kfree(xprt);
+		dprintk("RPC: xprt_create_transport: address too large\n");
+		return ERR_PTR(-EBADF);
+	}
 
 	switch (proto) {
 	case IPPROTO_UDP:
@@ -908,14 +918,15 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
 	default:
 		printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
 				proto);
-		result = -EIO;
-		break;
+		return ERR_PTR(-EIO);
 	}
 	if (result) {
 		kfree(xprt);
+		dprintk("RPC: xprt_create_transport: failed, %d\n", result);
 		return ERR_PTR(result);
 	}
 
+	kref_init(&xprt->kref);
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
 
@@ -928,6 +939,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
 	xprt->last_used = jiffies;
 	xprt->cwnd = RPC_INITCWND;
 
+	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
 	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
 	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
@@ -941,41 +953,43 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
 
 	dprintk("RPC: created transport %p with %u slots\n", xprt,
 			xprt->max_reqs);
-
-	return xprt;
-}
 
-/**
- * xprt_create_proto - create an RPC client transport
- * @proto: requested transport protocol
- * @sap: remote peer's address
- * @to: timeout parameters for new transport
- *
- */
-struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
-{
-	struct rpc_xprt	*xprt;
-
-	xprt = xprt_setup(proto, sap, to);
-	if (IS_ERR(xprt))
-		dprintk("RPC: xprt_create_proto failed\n");
-	else
-		dprintk("RPC: xprt_create_proto created xprt %p\n", xprt);
 	return xprt;
 }
 
 /**
  * xprt_destroy - destroy an RPC transport, killing off all requests.
- * @xprt: transport to destroy
+ * @kref: kref for the transport to destroy
  *
  */
-int xprt_destroy(struct rpc_xprt *xprt)
+static void xprt_destroy(struct kref *kref)
 {
+	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);
+
 	dprintk("RPC: destroying transport %p\n", xprt);
 	xprt->shutdown = 1;
 	del_timer_sync(&xprt->timer);
 	xprt->ops->destroy(xprt);
 	kfree(xprt);
+}
 
-	return 0;
+/**
+ * xprt_put - release a reference to an RPC transport.
+ * @xprt: pointer to the transport
+ *
+ */
+void xprt_put(struct rpc_xprt *xprt)
+{
+	kref_put(&xprt->kref, xprt_destroy);
+}
+
+/**
+ * xprt_get - return a reference to an RPC transport.
+ * @xprt: pointer to the transport
+ *
+ */
+struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
+{
+	kref_get(&xprt->kref);
+	return xprt;
 }
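[Editor's note, not part of the commit: xprt.c now reference-counts transports with a kref, so callers pin and release a transport instead of destroying it directly. A minimal sketch of the pattern, based only on the xprt_get()/xprt_put() helpers added above; the surrounding usage is hypothetical.]

	/* Pin the transport while asynchronous work is in flight. */
	struct rpc_xprt *xprt = xprt_get(task->tk_xprt);

	/* ... use xprt; other holders may come and go ... */

	/* Drop the reference; the final xprt_put() calls xprt_destroy(). */
	xprt_put(xprt);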
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 897bdd982315..9b62923a9c06 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -125,6 +125,47 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) | |||
125 | } | 125 | } |
126 | #endif | 126 | #endif |
127 | 127 | ||
128 | static void xs_format_peer_addresses(struct rpc_xprt *xprt) | ||
129 | { | ||
130 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | ||
131 | char *buf; | ||
132 | |||
133 | buf = kzalloc(20, GFP_KERNEL); | ||
134 | if (buf) { | ||
135 | snprintf(buf, 20, "%u.%u.%u.%u", | ||
136 | NIPQUAD(addr->sin_addr.s_addr)); | ||
137 | } | ||
138 | xprt->address_strings[RPC_DISPLAY_ADDR] = buf; | ||
139 | |||
140 | buf = kzalloc(8, GFP_KERNEL); | ||
141 | if (buf) { | ||
142 | snprintf(buf, 8, "%u", | ||
143 | ntohs(addr->sin_port)); | ||
144 | } | ||
145 | xprt->address_strings[RPC_DISPLAY_PORT] = buf; | ||
146 | |||
147 | if (xprt->prot == IPPROTO_UDP) | ||
148 | xprt->address_strings[RPC_DISPLAY_PROTO] = "udp"; | ||
149 | else | ||
150 | xprt->address_strings[RPC_DISPLAY_PROTO] = "tcp"; | ||
151 | |||
152 | buf = kzalloc(48, GFP_KERNEL); | ||
153 | if (buf) { | ||
154 | snprintf(buf, 48, "addr=%u.%u.%u.%u port=%u proto=%s", | ||
155 | NIPQUAD(addr->sin_addr.s_addr), | ||
156 | ntohs(addr->sin_port), | ||
157 | xprt->prot == IPPROTO_UDP ? "udp" : "tcp"); | ||
158 | } | ||
159 | xprt->address_strings[RPC_DISPLAY_ALL] = buf; | ||
160 | } | ||
161 | |||
162 | static void xs_free_peer_addresses(struct rpc_xprt *xprt) | ||
163 | { | ||
164 | kfree(xprt->address_strings[RPC_DISPLAY_ADDR]); | ||
165 | kfree(xprt->address_strings[RPC_DISPLAY_PORT]); | ||
166 | kfree(xprt->address_strings[RPC_DISPLAY_ALL]); | ||
167 | } | ||
168 | |||
128 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) | 169 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) |
129 | 170 | ||
130 | static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) | 171 | static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) |
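The new xs_format_peer_addresses() helper caches printable forms of the IPv4 peer address at transport setup time (20 bytes covers a dotted quad, 8 bytes a 16-bit port, 48 bytes the combined string), and xs_free_peer_addresses() releases them from xs_destroy() later in this diff. A hedged sketch of how a consumer might read the cached strings; the debug message is illustrative, and note that the allocated entries can be NULL if kzalloc() failed.

	static void example_dump_peer(struct rpc_xprt *xprt)
	{
		/* The allocated slots may be NULL on allocation failure, so guard the reads. */
		const char *addr  = xprt->address_strings[RPC_DISPLAY_ADDR];
		const char *port  = xprt->address_strings[RPC_DISPLAY_PORT];
		const char *proto = xprt->address_strings[RPC_DISPLAY_PROTO];

		dprintk("RPC:      peer %s:%s over %s\n",
				addr ? addr : "unknown",
				port ? port : "unknown",
				proto);	/* proto points at a static string, never NULL */
	}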
@@ -295,7 +336,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
295 | 336 | ||
296 | req->rq_xtime = jiffies; | 337 | req->rq_xtime = jiffies; |
297 | status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, | 338 | status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, |
298 | sizeof(xprt->addr), xdr, req->rq_bytes_sent); | 339 | xprt->addrlen, xdr, req->rq_bytes_sent); |
299 | 340 | ||
300 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", | 341 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", |
301 | xdr->len - req->rq_bytes_sent, status); | 342 | xdr->len - req->rq_bytes_sent, status); |
@@ -485,6 +526,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
485 | 526 | ||
486 | xprt_disconnect(xprt); | 527 | xprt_disconnect(xprt); |
487 | xs_close(xprt); | 528 | xs_close(xprt); |
529 | xs_free_peer_addresses(xprt); | ||
488 | kfree(xprt->slot); | 530 | kfree(xprt->slot); |
489 | } | 531 | } |
490 | 532 | ||
@@ -960,6 +1002,19 @@ static unsigned short xs_get_random_port(void) | |||
960 | } | 1002 | } |
961 | 1003 | ||
962 | /** | 1004 | /** |
1005 | * xs_print_peer_address - format an IPv4 address for printing | ||
1006 | * @xprt: generic transport | ||
1007 | * @format: flags field indicating which parts of the address to render | ||
1008 | */ | ||
1009 | static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format) | ||
1010 | { | ||
1011 | if (xprt->address_strings[format] != NULL) | ||
1012 | return xprt->address_strings[format]; | ||
1013 | else | ||
1014 | return "unprintable"; | ||
1015 | } | ||
1016 | |||
1017 | /** | ||
963 | * xs_set_port - reset the port number in the remote endpoint address | 1018 | * xs_set_port - reset the port number in the remote endpoint address |
964 | * @xprt: generic transport | 1019 | * @xprt: generic transport |
965 | * @port: new port number | 1020 | * @port: new port number |
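xs_print_peer_address() is the NULL-safe accessor for those cached strings and, as the ops-table hunks below show, is exposed to generic code through a new print_addr transport op. A hedged sketch of a generic-layer caller; the wrapper name is made up for illustration.

	/* Illustrative wrapper: generic RPC code does not touch the socket
	 * transport's private strings, it goes through the ops vector. */
	static char *example_peeraddr2str(struct rpc_clnt *clnt,
					  enum rpc_display_format_t format)
	{
		return clnt->cl_xprt->ops->print_addr(clnt->cl_xprt, format);
	}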
@@ -967,8 +1022,11 @@ static unsigned short xs_get_random_port(void) | |||
967 | */ | 1022 | */ |
968 | static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | 1023 | static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) |
969 | { | 1024 | { |
1025 | struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr; | ||
1026 | |||
970 | dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); | 1027 | dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); |
971 | xprt->addr.sin_port = htons(port); | 1028 | |
1029 | sap->sin_port = htons(port); | ||
972 | } | 1030 | } |
973 | 1031 | ||
974 | static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | 1032 | static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) |
@@ -1011,11 +1069,9 @@ static void xs_udp_connect_worker(void *args) | |||
1011 | struct socket *sock = xprt->sock; | 1069 | struct socket *sock = xprt->sock; |
1012 | int err, status = -EIO; | 1070 | int err, status = -EIO; |
1013 | 1071 | ||
1014 | if (xprt->shutdown || xprt->addr.sin_port == 0) | 1072 | if (xprt->shutdown || !xprt_bound(xprt)) |
1015 | goto out; | 1073 | goto out; |
1016 | 1074 | ||
1017 | dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt); | ||
1018 | |||
1019 | /* Start by resetting any existing state */ | 1075 | /* Start by resetting any existing state */ |
1020 | xs_close(xprt); | 1076 | xs_close(xprt); |
1021 | 1077 | ||
@@ -1029,6 +1085,9 @@ static void xs_udp_connect_worker(void *args) | |||
1029 | goto out; | 1085 | goto out; |
1030 | } | 1086 | } |
1031 | 1087 | ||
1088 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | ||
1089 | xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | ||
1090 | |||
1032 | if (!xprt->inet) { | 1091 | if (!xprt->inet) { |
1033 | struct sock *sk = sock->sk; | 1092 | struct sock *sk = sock->sk; |
1034 | 1093 | ||
@@ -1094,11 +1153,9 @@ static void xs_tcp_connect_worker(void *args) | |||
1094 | struct socket *sock = xprt->sock; | 1153 | struct socket *sock = xprt->sock; |
1095 | int err, status = -EIO; | 1154 | int err, status = -EIO; |
1096 | 1155 | ||
1097 | if (xprt->shutdown || xprt->addr.sin_port == 0) | 1156 | if (xprt->shutdown || !xprt_bound(xprt)) |
1098 | goto out; | 1157 | goto out; |
1099 | 1158 | ||
1100 | dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt); | ||
1101 | |||
1102 | if (!xprt->sock) { | 1159 | if (!xprt->sock) { |
1103 | /* start from scratch */ | 1160 | /* start from scratch */ |
1104 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { | 1161 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { |
@@ -1114,6 +1171,9 @@ static void xs_tcp_connect_worker(void *args) | |||
1114 | /* "close" the socket, preserving the local port */ | 1171 | /* "close" the socket, preserving the local port */ |
1115 | xs_tcp_reuse_connection(xprt); | 1172 | xs_tcp_reuse_connection(xprt); |
1116 | 1173 | ||
1174 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | ||
1175 | xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | ||
1176 | |||
1117 | if (!xprt->inet) { | 1177 | if (!xprt->inet) { |
1118 | struct sock *sk = sock->sk; | 1178 | struct sock *sk = sock->sk; |
1119 | 1179 | ||
@@ -1147,7 +1207,7 @@ static void xs_tcp_connect_worker(void *args) | |||
1147 | xprt->stat.connect_count++; | 1207 | xprt->stat.connect_count++; |
1148 | xprt->stat.connect_start = jiffies; | 1208 | xprt->stat.connect_start = jiffies; |
1149 | status = kernel_connect(sock, (struct sockaddr *) &xprt->addr, | 1209 | status = kernel_connect(sock, (struct sockaddr *) &xprt->addr, |
1150 | sizeof(xprt->addr), O_NONBLOCK); | 1210 | xprt->addrlen, O_NONBLOCK); |
1151 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", | 1211 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", |
1152 | xprt, -status, xprt_connected(xprt), sock->sk->sk_state); | 1212 | xprt, -status, xprt_connected(xprt), sock->sk->sk_state); |
1153 | if (status < 0) { | 1213 | if (status < 0) { |
@@ -1255,8 +1315,10 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
1255 | 1315 | ||
1256 | static struct rpc_xprt_ops xs_udp_ops = { | 1316 | static struct rpc_xprt_ops xs_udp_ops = { |
1257 | .set_buffer_size = xs_udp_set_buffer_size, | 1317 | .set_buffer_size = xs_udp_set_buffer_size, |
1318 | .print_addr = xs_print_peer_address, | ||
1258 | .reserve_xprt = xprt_reserve_xprt_cong, | 1319 | .reserve_xprt = xprt_reserve_xprt_cong, |
1259 | .release_xprt = xprt_release_xprt_cong, | 1320 | .release_xprt = xprt_release_xprt_cong, |
1321 | .rpcbind = rpc_getport, | ||
1260 | .set_port = xs_set_port, | 1322 | .set_port = xs_set_port, |
1261 | .connect = xs_connect, | 1323 | .connect = xs_connect, |
1262 | .buf_alloc = rpc_malloc, | 1324 | .buf_alloc = rpc_malloc, |
@@ -1271,8 +1333,10 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
1271 | }; | 1333 | }; |
1272 | 1334 | ||
1273 | static struct rpc_xprt_ops xs_tcp_ops = { | 1335 | static struct rpc_xprt_ops xs_tcp_ops = { |
1336 | .print_addr = xs_print_peer_address, | ||
1274 | .reserve_xprt = xprt_reserve_xprt, | 1337 | .reserve_xprt = xprt_reserve_xprt, |
1275 | .release_xprt = xs_tcp_release_xprt, | 1338 | .release_xprt = xs_tcp_release_xprt, |
1339 | .rpcbind = rpc_getport, | ||
1276 | .set_port = xs_set_port, | 1340 | .set_port = xs_set_port, |
1277 | .connect = xs_connect, | 1341 | .connect = xs_connect, |
1278 | .buf_alloc = rpc_malloc, | 1342 | .buf_alloc = rpc_malloc, |
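Both ops tables gain a rpcbind entry pointing at rpc_getport(), and the connect workers above now test xprt_bound() instead of peeking at sin_port. A hedged sketch of how the generic layer is presumably expected to drive the new op when a task finds the transport still unbound; the call-site details are assumptions.

	static void example_bind(struct rpc_task *task)
	{
		struct rpc_xprt *xprt = task->tk_xprt;

		if (xprt_bound(xprt))
			return;			/* port already known, nothing to do */

		/* For socket transports this resolves the port via the portmapper. */
		xprt->ops->rpcbind(task);
	}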
@@ -1293,8 +1357,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
1293 | int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | 1357 | int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) |
1294 | { | 1358 | { |
1295 | size_t slot_table_size; | 1359 | size_t slot_table_size; |
1296 | 1360 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | |
1297 | dprintk("RPC: setting up udp-ipv4 transport...\n"); | ||
1298 | 1361 | ||
1299 | xprt->max_reqs = xprt_udp_slot_table_entries; | 1362 | xprt->max_reqs = xprt_udp_slot_table_entries; |
1300 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | 1363 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); |
@@ -1302,10 +1365,12 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1302 | if (xprt->slot == NULL) | 1365 | if (xprt->slot == NULL) |
1303 | return -ENOMEM; | 1366 | return -ENOMEM; |
1304 | 1367 | ||
1305 |	xprt->prot = IPPROTO_UDP; | 1368 |	if (ntohs(addr->sin_port) != 0) |
1369 | xprt_set_bound(xprt); | ||
1306 | xprt->port = xs_get_random_port(); | 1370 | xprt->port = xs_get_random_port(); |
1371 | |||
1372 | xprt->prot = IPPROTO_UDP; | ||
1307 | xprt->tsh_size = 0; | 1373 | xprt->tsh_size = 0; |
1308 | xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; | ||
1309 | /* XXX: header size can vary due to auth type, IPv6, etc. */ | 1374 | /* XXX: header size can vary due to auth type, IPv6, etc. */ |
1310 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 1375 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); |
1311 | 1376 | ||
@@ -1322,6 +1387,10 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1322 | else | 1387 | else |
1323 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); | 1388 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); |
1324 | 1389 | ||
1390 | xs_format_peer_addresses(xprt); | ||
1391 | dprintk("RPC: set up transport to address %s\n", | ||
1392 | xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | ||
1393 | |||
1325 | return 0; | 1394 | return 0; |
1326 | } | 1395 | } |
1327 | 1396 | ||
@@ -1334,8 +1403,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1334 | int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | 1403 | int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) |
1335 | { | 1404 | { |
1336 | size_t slot_table_size; | 1405 | size_t slot_table_size; |
1337 | 1406 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | |
1338 | dprintk("RPC: setting up tcp-ipv4 transport...\n"); | ||
1339 | 1407 | ||
1340 | xprt->max_reqs = xprt_tcp_slot_table_entries; | 1408 | xprt->max_reqs = xprt_tcp_slot_table_entries; |
1341 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | 1409 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); |
@@ -1343,10 +1411,12 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1343 | if (xprt->slot == NULL) | 1411 | if (xprt->slot == NULL) |
1344 | return -ENOMEM; | 1412 | return -ENOMEM; |
1345 | 1413 | ||
1346 | xprt->prot = IPPROTO_TCP; | 1414 | if (ntohs(addr->sin_port) != 0) |
1415 | xprt_set_bound(xprt); | ||
1347 | xprt->port = xs_get_random_port(); | 1416 | xprt->port = xs_get_random_port(); |
1417 | |||
1418 | xprt->prot = IPPROTO_TCP; | ||
1348 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | 1419 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); |
1349 | xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; | ||
1350 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | 1420 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; |
1351 | 1421 | ||
1352 | INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); | 1422 | INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); |
@@ -1362,5 +1432,9 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1362 | else | 1432 | else |
1363 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); | 1433 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); |
1364 | 1434 | ||
1435 | xs_format_peer_addresses(xprt); | ||
1436 | dprintk("RPC: set up transport to address %s\n", | ||
1437 | xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | ||
1438 | |||
1365 | return 0; | 1439 | return 0; |
1366 | } | 1440 | } |